Diff of Rev 1246 → Rev 1403
/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate with that id a
 * pointer or whatever; we treat it as a (void *).  You can pass this
 * id to a user for him to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we
 * don't need to go to the memory "store" during an id allocate, just
 * so you don't need to be too concerned about locking and conflicts
 * with the slab allocator.
 */
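
/*
 * Typical call sequence, shown as a sketch (the handle `my_idr`, the
 * object pointer `obj` and the GFP_KERNEL mask are illustrative
 * assumptions, not names from this file):
 *
 *	struct idr my_idr;
 *	int id, ret;
 *
 *	idr_init(&my_idr);
 *	do {
 *		if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *			return -ENOMEM;
 *		ret = idr_get_new(&my_idr, obj, &id);
 *	} while (ret == -EAGAIN);
 *
 *	obj = idr_find(&my_idr, id);
 *	idr_remove(&my_idr, id);
 */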

#include

#include
//#include
#include "drm.h"
#include "drmP.h"
#include "drm_crtc.h"

/* ---- Removed in Rev 1403: x86 bit-op and find-bit helpers ---- */

#define ADDR "=m" (*(volatile long *) addr)

/* Non-atomic bit set/clear using the x86 bts/btr instructions. */
static inline void __set_bit(int nr, volatile void *addr)
{
    asm volatile("bts %1,%0"
             : ADDR
             : "Ir" (nr) : "memory");
}

static inline void __clear_bit(int nr, volatile void *addr)
{
    asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}

static inline int constant_test_bit(int nr, const volatile void *addr)
{
    return ((1UL << (nr % 32)) &
        (((unsigned long *)addr)[nr / 32])) != 0;
}

static inline int variable_test_bit(int nr, volatile const void *addr)
{
    int oldbit;

    asm volatile("bt %2,%1\n\t"
             "sbb %0,%0"
             : "=r" (oldbit)
             : "m" (*(unsigned long *)addr), "Ir" (nr));

    return oldbit;
}

#define test_bit(nr,addr) \
    (__builtin_constant_p(nr) ? \
     constant_test_bit((nr),(addr)) : \
     variable_test_bit((nr),(addr)))

/* fls: one past the index of the most significant set bit; 0 if x == 0. */
static inline int fls(int x)
{
    int r;

    __asm__("bsrl %1,%0\n\t"
            "jnz 1f\n\t"
            "movl $-1,%0\n"
            "1:" : "=r" (r) : "rm" (x));
    return r+1;
}

/* __ffs: index of the least significant set bit (undefined for word == 0). */
static inline unsigned long __ffs(unsigned long word)
{
    __asm__("bsfl %1,%0"
        :"=r" (word)
        :"rm" (word));
    return word;
}

static inline unsigned find_first_bit(const unsigned long *addr, unsigned size)
{
    unsigned x = 0;

    while (x < size) {
        unsigned long val = *addr++;
        if (val)
            return __ffs(val) + x;
        x += (sizeof(*addr)<<3);
    }
    return x;
}

int find_next_bit(const unsigned long *addr, int size, int offset)
{
    const unsigned long *p = addr + (offset >> 5);
    int set = 0, bit = offset & 31, res;

    if (bit) {
        /*
         * Look for nonzero in the first 32 bits:
         */
        __asm__("bsfl %1,%0\n\t"
                "jne 1f\n\t"
                "movl $32, %0\n"
                "1:"
                : "=r" (set)
                : "r" (*p >> bit));
        if (set < (32 - bit))
            return set + offset;
        set = 32 - bit;
        p++;
    }
    /*
     * No set bit yet, search remaining full words for a bit
     */
    res = find_first_bit(p, size - 32 * (p - addr));
    return (offset + set + res);
}

/* ---- End of block removed in Rev 1403 ---- */
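
/*
 * Worked values for the removed helpers above (illustrative only):
 * for unsigned long w = 18 (binary 10010), __ffs(w) == 1, fls(w) == 5,
 * and find_next_bit(&w, 32, 2) == 4.
 */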

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

/*
 * Minimal stand-ins for the kernel's RCU primitives: rcu_dereference()
 * here is just a volatile read and rcu_assign_pointer() a plain store,
 * with no memory barriers.
 */
#define rcu_dereference(p)  ({ \
                typeof(p) _________p1 = ACCESS_ONCE(p); \
                (_________p1); \
                })

#define rcu_assign_pointer(p, v) \
        ({ \
                if (!__builtin_constant_p(v) || \
                    ((v) != NULL)) \
                        (p) = (v); \
        })

//static struct kmem_cache *idr_layer_cache;

/* Pop one preallocated layer off the idr's private free list. */
static struct idr_layer *get_from_free_list(struct idr *idp)
{
    struct idr_layer *p;
    unsigned long flags;

//    spin_lock_irqsave(&idp->lock, flags);
    if ((p = idp->id_free)) {
        idp->id_free = p->ary[0];
        idp->id_free_cnt--;
        p->ary[0] = NULL;
    }
//    spin_unlock_irqrestore(&idp->lock, flags);
    return p;
}

static void idr_layer_rcu_free(struct rcu_head *head)
{
    struct idr_layer *layer;

    layer = container_of(head, struct idr_layer, rcu_head);
    kfree(layer);
}

static inline void free_layer(struct idr_layer *p)
{
    kfree(p);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
    p->ary[0] = idp->id_free;
    idp->id_free = p;
    idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
    unsigned long flags;

    /*
     * Depends on the return element being zeroed.
     */
//    spin_lock_irqsave(&idp->lock, flags);
    __move_to_free_list(idp, p);
//    spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
    struct idr_layer *p = pa[0];
    int l = 0;

    __set_bit(id & IDR_MASK, &p->bitmap);
    /*
     * If this layer is full mark the bit in the layer above to
     * show that this part of the radix tree is full.  This may
     * complete the layer above and require walking up the radix
     * tree.
     */
    while (p->bitmap == IDR_FULL) {
        if (!(p = pa[++l]))
            break;
        id = id >> IDR_BITS;
        __set_bit((id & IDR_MASK), &p->bitmap);
    }
}
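
/*
 * How an id maps onto the tree (a sketch assuming IDR_BITS == 5, i.e.
 * 32-way nodes; the constant is defined in the header): id 1234 is
 * decomposed 5 bits at a time, so in a two-layer tree it lands at slot
 * (1234 >> 5) & IDR_MASK == 6 of the layer-1 node and slot
 * 1234 & IDR_MASK == 18 of the leaf.
 */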

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp: idr handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * idr_get_new* functions.  It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, u32_t gfp_mask)
{
    while (idp->id_free_cnt < IDR_FREE_MAX) {
        struct idr_layer *new;
        new = kzalloc(sizeof(struct idr_layer), gfp_mask);
        if (new == NULL)
            return (0);
        move_to_free_list(idp, new);
    }
    return 1;
}
EXPORT_SYMBOL(idr_pre_get);

static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
    int n, m, sh;
    struct idr_layer *p, *new;
    int l, id, oid;
    unsigned long bm;

    id = *starting_id;
restart:
    p = idp->top;
    l = idp->layers;
    pa[l--] = NULL;
    while (1) {
        /*
         * We run around this while until we reach the leaf node...
         */
        n = (id >> (IDR_BITS*l)) & IDR_MASK;
        bm = ~p->bitmap;
        m = find_next_bit(&bm, IDR_SIZE, n);
        if (m == IDR_SIZE) {
            /* no space available go back to previous layer. */
            l++;
            oid = id;
            id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

            /* if already at the top layer, we need to grow */
            if (!(p = pa[l])) {
                *starting_id = id;
                return IDR_NEED_TO_GROW;
            }

            /* If we need to go up one layer, continue the
             * loop; otherwise, restart from the top.
             */
            sh = IDR_BITS * (l + 1);
            if (oid >> sh == id >> sh)
                continue;
            else
                goto restart;
        }
        if (m != n) {
            sh = IDR_BITS*l;
            id = ((id >> sh) ^ n ^ m) << sh;
        }
        if ((id >= MAX_ID_BIT) || (id < 0))
            return IDR_NOMORE_SPACE;
        if (l == 0)
            break;
        /*
         * Create the layer below if it is missing.
         */
        if (!p->ary[m]) {
            new = get_from_free_list(idp);
            if (!new)
                return -1;
            new->layer = l-1;
            rcu_assign_pointer(p->ary[m], new);
            p->count++;
        }
        pa[l--] = p;
        p = p->ary[m];
    }

    pa[l] = p;
    return id;
}
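
/*
 * The "no space" arithmetic above rounds the id up to the start of the
 * next subtree.  Worked example (again assuming IDR_BITS == 5): with
 * l == 1, id == 70 becomes (70 | 31) + 1 == 96, the first id of the
 * next 32-entry leaf.
 */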

static int idr_get_empty_slot(struct idr *idp, int starting_id,
                  struct idr_layer **pa)
{
    struct idr_layer *p, *new;
    int layers, v, id;
    unsigned long flags;

    id = starting_id;
build_up:
    p = idp->top;
    layers = idp->layers;
    if (unlikely(!p)) {
        if (!(p = get_from_free_list(idp)))
            return -1;
        p->layer = 0;
        layers = 1;
    }
    /*
     * Add a new layer to the top of the tree if the requested
     * id is larger than the currently allocated space.
     */
    while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
        layers++;
        if (!p->count) {
            /* special case: if the tree is currently empty,
             * then we grow the tree by moving the top node
             * upwards.
             */
            p->layer++;
            continue;
        }
        if (!(new = get_from_free_list(idp))) {
            /*
             * The allocation failed.  If we built part of
             * the structure tear it down.
             */
//            spin_lock_irqsave(&idp->lock, flags);
            for (new = p; p && p != idp->top; new = p) {
                p = p->ary[0];
                new->ary[0] = NULL;
                new->bitmap = new->count = 0;
                __move_to_free_list(idp, new);
            }
//            spin_unlock_irqrestore(&idp->lock, flags);
            return -1;
        }
        new->ary[0] = p;
        new->count = 1;
        new->layer = layers-1;
        if (p->bitmap == IDR_FULL)
            __set_bit(0, &new->bitmap);
        p = new;
    }
    rcu_assign_pointer(idp->top, p);
    idp->layers = layers;
    v = sub_alloc(idp, &id, pa);
    if (v == IDR_NEED_TO_GROW)
        goto build_up;
    return v;
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
    struct idr_layer *pa[MAX_LEVEL];
    int id;

    id = idr_get_empty_slot(idp, starting_id, pa);
    if (id >= 0) {
        /*
         * Successfully found an empty slot.  Install the user
         * pointer and mark the slot full.
         */
        rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
                (struct idr_layer *)ptr);
        pa[0]->count++;
        idr_mark_full(pa, id);
    }

    return id;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
    int rv;

    rv = idr_get_new_above_int(idp, ptr, starting_id);
    /*
     * This is a cheap hack until the IDR code can be fixed to
     * return proper error values.
     */
    if (rv < 0) {
        dbgprintf("fail\n");
        return _idr_rc_to_errno(rv);
    }
    *id = rv;
    return 0;
}
EXPORT_SYMBOL(idr_get_new_above);
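
/*
 * Sketch of use (handle and pointer names are illustrative): after a
 * successful idr_pre_get(), idr_get_new_above(&my_idr, obj, 100, &id)
 * stores an id >= 100 in @id and binds it to obj.
 */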

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
    int rv;

    rv = idr_get_new_above_int(idp, ptr, 0);
    /*
     * This is a cheap hack until the IDR code can be fixed to
     * return proper error values.
     */
    if (rv < 0)
        return _idr_rc_to_errno(rv);
    *id = rv;
    return 0;
}
EXPORT_SYMBOL(idr_get_new);

static void idr_remove_warning(int id)
{
    printk(KERN_WARNING
        "idr_remove called for id=%d which is not allocated.\n", id);
//    dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
    struct idr_layer *p = idp->top;
    struct idr_layer **pa[MAX_LEVEL];
    struct idr_layer ***paa = &pa[0];
    struct idr_layer *to_free;
    int n;

    *paa = NULL;
    *++paa = &idp->top;

    while ((shift > 0) && p) {
        n = (id >> shift) & IDR_MASK;
        __clear_bit(n, &p->bitmap);
        *++paa = &p->ary[n];
        p = p->ary[n];
        shift -= IDR_BITS;
    }
    n = id & IDR_MASK;
    if (likely(p != NULL && test_bit(n, &p->bitmap))) {
        __clear_bit(n, &p->bitmap);
        rcu_assign_pointer(p->ary[n], NULL);
        to_free = NULL;
        while (*paa && ! --((**paa)->count)) {
            if (to_free)
                free_layer(to_free);
            to_free = **paa;
            **paa-- = NULL;
        }
        if (!*paa)
            idp->layers = 0;
        if (to_free)
            free_layer(to_free);
    } else
        idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
    struct idr_layer *p;
    struct idr_layer *to_free;

    /* Mask off upper bits we don't use for the search. */
    id &= MAX_ID_MASK;

    sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
    if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
        idp->top->ary[0]) {
        /*
         * Single child at leftmost slot: we can shrink the tree.
         * This level is not needed anymore since when layers are
         * inserted, they are inserted at the top of the existing
         * tree.
         */
        to_free = idp->top;
        p = idp->top->ary[0];
        rcu_assign_pointer(idp->top, p);
        --idp->layers;
        to_free->bitmap = to_free->count = 0;
        free_layer(to_free);
    }
    while (idp->id_free_cnt >= IDR_FREE_MAX) {
        p = get_from_free_list(idp);
        /*
         * Note: we don't call the rcu callback here, since the only
         * layers that fall into the freelist are those that have been
         * preallocated.
         */
        kfree(p);
    }
    return;
}
EXPORT_SYMBOL(idr_remove);
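
/*
 * Removal sketch (my_idr is illustrative).  Because this version
 * reuses ids immediately (see the header comment), the freed id may be
 * handed out again by the very next allocation:
 *
 *	idr_remove(&my_idr, id);
 */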

/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idp_layers, but this
 * function will remove all id mappings and leave all idp_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
    int n, id, max;
    struct idr_layer *p;
    struct idr_layer *pa[MAX_LEVEL];
    struct idr_layer **paa = &pa[0];

    n = idp->layers * IDR_BITS;
    p = idp->top;
    rcu_assign_pointer(idp->top, NULL);
    max = 1 << n;

    id = 0;
    while (id < max) {
        while (n > IDR_BITS && p) {
            n -= IDR_BITS;
            *paa++ = p;
            p = p->ary[(id >> n) & IDR_MASK];
        }

        id += 1 << n;
        while (n < fls(id)) {
            if (p)
                free_layer(p);
            n += IDR_BITS;
            p = *--paa;
        }
    }
    idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
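
/*
 * The clean-up sequence described above, as a sketch (free_fn and
 * my_idr are illustrative; note idr_for_each() is compiled out below
 * under #if 0 in this revision):
 *
 *	idr_for_each(&my_idr, free_fn, NULL);
 *	idr_remove_all(&my_idr);
 *	idr_destroy(&my_idr);
 */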

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
    while (idp->id_free_cnt) {
        struct idr_layer *p = get_from_free_list(idp);
        kfree(p);
    }
}
EXPORT_SYMBOL(idr_destroy);

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
void *idr_find(struct idr *idp, int id)
{
    int n;
    struct idr_layer *p;

    p = rcu_dereference(idp->top);
    if (!p)
        return NULL;
    n = (p->layer+1) * IDR_BITS;

    /* Mask off upper bits we don't use for the search. */
    id &= MAX_ID_MASK;

    if (id >= (1 << n))
        return NULL;
    BUG_ON(n == 0);

    while (n > 0 && p) {
        n -= IDR_BITS;
        BUG_ON(n != p->layer*IDR_BITS);
        p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
    }
    return (void *)p;
}
EXPORT_SYMBOL(idr_find);
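
/*
 * Lookup sketch (my_idr, obj and use() are illustrative; in the kernel
 * proper this runs under rcu_read_lock()):
 *
 *	rcu_read_lock();
 *	obj = idr_find(&my_idr, id);
 *	if (obj)
 *		use(obj);
 *	rcu_read_unlock();
 */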

#if 0
/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than 0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
         int (*fn)(int id, void *p, void *data), void *data)
{
    int n, id, max, error = 0;
    struct idr_layer *p;
    struct idr_layer *pa[MAX_LEVEL];
    struct idr_layer **paa = &pa[0];

    n = idp->layers * IDR_BITS;
    p = rcu_dereference(idp->top);
    max = 1 << n;

    id = 0;
    while (id < max) {
        while (n > 0 && p) {
            n -= IDR_BITS;
            *paa++ = p;
            p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
        }

        if (p) {
            error = fn(id, (void *)p, data);
            if (error)
                break;
        }

        id += 1 << n;
        while (n < fls(id)) {
            n += IDR_BITS;
            p = *--paa;
        }
    }

    return error;
}
EXPORT_SYMBOL(idr_for_each);
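
/*
 * Callback sketch for idr_for_each() (print_entry is illustrative;
 * returning 0 continues the walk, non-zero stops it):
 *
 *	static int print_entry(int id, void *p, void *data)
 *	{
 *		printk("id %d -> %p\n", id, p);
 *		return 0;
 *	}
 */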

/**
 * idr_get_next - look up the next object starting from the given id
 * @idp: idr handle
 * @nextidp: pointer to lookup key
 *
 * Returns a pointer to the registered object with the smallest id
 * greater than or equal to *@nextidp, and updates *@nextidp to that id.
 */
void *idr_get_next(struct idr *idp, int *nextidp)
{
    struct idr_layer *p, *pa[MAX_LEVEL];
    struct idr_layer **paa = &pa[0];
    int id = *nextidp;
    int n, max;

    /* find first ent */
    n = idp->layers * IDR_BITS;
    max = 1 << n;
    p = rcu_dereference(idp->top);
    if (!p)
        return NULL;

    while (id < max) {
        while (n > 0 && p) {
            n -= IDR_BITS;
            *paa++ = p;
            p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
        }

        if (p) {
            *nextidp = id;
            return p;
        }

        id += 1 << n;
        while (n < fls(id)) {
            n += IDR_BITS;
            p = *--paa;
        }
    }
    return NULL;
}

/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A -ENOENT return indicates that @id was not found.
 * A -EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
    int n;
    struct idr_layer *p, *old_p;

    p = idp->top;
    if (!p)
        return ERR_PTR(-EINVAL);

    n = (p->layer+1) * IDR_BITS;

    id &= MAX_ID_MASK;

    if (id >= (1 << n))
        return ERR_PTR(-EINVAL);

    n -= IDR_BITS;
    while ((n > 0) && p) {
        p = p->ary[(id >> n) & IDR_MASK];
        n -= IDR_BITS;
    }

    n = id & IDR_MASK;
    if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
        return ERR_PTR(-ENOENT);

    old_p = p->ary[n];
    rcu_assign_pointer(p->ary[n], ptr);

    return old_p;
}
EXPORT_SYMBOL(idr_replace);
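
/*
 * Replace sketch (names illustrative).  The error is carried in the
 * returned pointer itself, so it must be checked with IS_ERR():
 *
 *	old = idr_replace(&my_idr, new_obj, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);
 */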

#endif


void idr_init_cache(void)
{
    //idr_layer_cache = kmem_cache_create("idr_layer_cache",
    //            sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
    memset(idp, 0, sizeof(struct idr));
//    spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);

#if 0

/*
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
    unsigned long flags;

    if (!ida->free_bitmap) {
        spin_lock_irqsave(&ida->idr.lock, flags);
        if (!ida->free_bitmap) {
            ida->free_bitmap = bitmap;
            bitmap = NULL;
        }
        spin_unlock_irqrestore(&ida->idr.lock, flags);
    }

    kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida: ida handle
 * @gfp_mask: memory allocation flag
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
    /* allocate idr_layers */
    if (!idr_pre_get(&ida->idr, gfp_mask))
        return 0;

    /* allocate free_bitmap */
    if (!ida->free_bitmap) {
        struct ida_bitmap *bitmap;

        bitmap = kzalloc(sizeof(struct ida_bitmap), gfp_mask);
        if (!bitmap)
            return 0;

        free_bitmap(ida, bitmap);
    }

    return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @starting_id: id to start search at
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be
 * called with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... 0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
    struct idr_layer *pa[MAX_LEVEL];
    struct ida_bitmap *bitmap;
    unsigned long flags;
    int idr_id = starting_id / IDA_BITMAP_BITS;
    int offset = starting_id % IDA_BITMAP_BITS;
    int t, id;

restart:
    /* get vacant slot */
    t = idr_get_empty_slot(&ida->idr, idr_id, pa);
    if (t < 0)
        return _idr_rc_to_errno(t);

    if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
        return -ENOSPC;

    if (t != idr_id)
        offset = 0;
    idr_id = t;

    /* if bitmap isn't there, create a new one */
    bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
    if (!bitmap) {
        spin_lock_irqsave(&ida->idr.lock, flags);
        bitmap = ida->free_bitmap;
        ida->free_bitmap = NULL;
        spin_unlock_irqrestore(&ida->idr.lock, flags);

        if (!bitmap)
            return -EAGAIN;

        memset(bitmap, 0, sizeof(struct ida_bitmap));
        rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
                (void *)bitmap);
        pa[0]->count++;
    }

    /* lookup for empty slot */
    t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
    if (t == IDA_BITMAP_BITS) {
        /* no empty slot after offset, continue to the next chunk */
        idr_id++;
        offset = 0;
        goto restart;
    }

    id = idr_id * IDA_BITMAP_BITS + t;
    if (id >= MAX_ID_BIT)
        return -ENOSPC;

    __set_bit(t, bitmap->bitmap);
    if (++bitmap->nr_busy == IDA_BITMAP_BITS)
        idr_mark_full(pa, idr_id);

    *p_id = id;

    /* Each leaf node can handle nearly a thousand slots and the
     * whole idea of ida is to have a small memory footprint.
     * Throw away extra resources one by one after each successful
     * allocation.
     */
    if (ida->idr.id_free_cnt || ida->free_bitmap) {
        struct idr_layer *p = get_from_free_list(&ida->idr);
        if (p)
            kmem_cache_free(idr_layer_cache, p);
    }

    return 0;
}
EXPORT_SYMBOL(ida_get_new_above);

/**
 * ida_get_new - allocate new ID
 * @ida: idr handle
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range 0 ... 0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
    return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);
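
/*
 * IDA usage sketch, mirroring the idr retry loop (my_ida and the
 * GFP_KERNEL mask are illustrative; this whole block is compiled out
 * under #if 0 in this revision):
 *
 *	do {
 *		if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		ret = ida_get_new(&my_ida, &id);
 *	} while (ret == -EAGAIN);
 *
 *	ida_remove(&my_ida, id);
 */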

/**
 * ida_remove - remove the given ID
 * @ida: ida handle
 * @id: ID to free
 */
void ida_remove(struct ida *ida, int id)
{
    struct idr_layer *p = ida->idr.top;
    int shift = (ida->idr.layers - 1) * IDR_BITS;
    int idr_id = id / IDA_BITMAP_BITS;
    int offset = id % IDA_BITMAP_BITS;
    int n;
    struct ida_bitmap *bitmap;

    /* clear full bits while looking up the leaf idr_layer */
    while ((shift > 0) && p) {
        n = (idr_id >> shift) & IDR_MASK;
        __clear_bit(n, &p->bitmap);
        p = p->ary[n];
        shift -= IDR_BITS;
    }

    if (p == NULL)
        goto err;

    n = idr_id & IDR_MASK;
    __clear_bit(n, &p->bitmap);

    bitmap = (void *)p->ary[n];
    if (!test_bit(offset, bitmap->bitmap))
        goto err;

    /* update bitmap and remove it if empty */
    __clear_bit(offset, bitmap->bitmap);
    if (--bitmap->nr_busy == 0) {
        __set_bit(n, &p->bitmap);    /* to please idr_remove() */
        idr_remove(&ida->idr, idr_id);
        free_bitmap(ida, bitmap);
    }

    return;

err:
    printk(KERN_WARNING
           "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida: ida handle
 */
void ida_destroy(struct ida *ida)
{
    idr_destroy(&ida->idr);
    kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_init - initialize ida handle
 * @ida: ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
    memset(ida, 0, sizeof(struct ida));
    idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);

#endif