/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate with that id a
 * pointer or whatever, we treat it as a (void *).  You can pass this
 * id to a user for him to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep IDR_FREE_MAX id structures in a
 * local pool) so we don't need to go to the memory "store" during an
 * id allocate, just so you don't need to be too concerned about
 * locking and conflicts with the slab allocator.
 */

#include <linux/idr.h>	/* the header name was eaten by the HTML viewer;
			 * <linux/idr.h> is an assumption */

#define ADDR "=m" (*(volatile long *) addr)

static inline void __set_bit(int nr, volatile void *addr)
{
	asm volatile("bts %1,%0"
		     : ADDR
		     : "Ir" (nr) : "memory");
}

static inline void __clear_bit(int nr, volatile void *addr)
{
	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}

static inline int constant_test_bit(int nr, const volatile void *addr)
{
	return ((1UL << (nr % 32)) &
		(((unsigned long *)addr)[nr / 32])) != 0;
}

static inline int variable_test_bit(int nr, volatile const void *addr)
{
	int oldbit;

	asm volatile("bt %2,%1\n\t"
		     "sbb %0,%0"		/* oldbit = CF ? -1 : 0 */
		     : "=r" (oldbit)
		     : "m" (*(unsigned long *)addr), "Ir" (nr));

	return oldbit;
}

#define test_bit(nr,addr)			\
	(__builtin_constant_p(nr) ?		\
	 constant_test_bit((nr),(addr)) :	\
	 variable_test_bit((nr),(addr)))


static inline int fls(int x)
{
	int r;

	/* bsrl finds the highest set bit; if x == 0 the ZF path loads
	 * -1, so fls(0) == 0 and fls(1) == 1 (1-based index). */
	__asm__("bsrl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $-1,%0\n"
		"1:" : "=r" (r) : "rm" (x));
	return r+1;
}

static inline unsigned long __ffs(unsigned long word)
{
	/* index of the lowest set bit; undefined for word == 0 */
	__asm__("bsfl %1,%0"
		:"=r" (word)
		:"rm" (word));
	return word;
}
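
/*
 * Illustrative sketch (not part of the original file): expected values
 * of the bit helpers above, assuming 32-bit longs.  Note fls() is
 * 1-based while __ffs() is 0-based.
 */
#if 0
static void bit_helpers_example(void)
{
	unsigned long w = 0x90;		/* bits 4 and 7 set */

	BUG_ON(__ffs(w) != 4);		/* lowest set bit */
	BUG_ON(fls(w) != 8);		/* highest set bit, 1-based */
	BUG_ON(!test_bit(7, &w));	/* constant nr -> constant_test_bit */
	BUG_ON(test_bit(5, &w));
}
#endif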


static inline unsigned find_first_bit(const unsigned long *addr, unsigned size)
{
	unsigned x = 0;

	while (x < size) {
		unsigned long val = *addr++;
		if (val)
			return __ffs(val) + x;
		x += (sizeof(*addr)<<3);
	}
	return x;
}


int find_next_bit(const unsigned long *addr, int size, int offset)
{
	const unsigned long *p = addr + (offset >> 5);
	int set = 0, bit = offset & 31, res;

	if (bit) {
		/*
		 * Look for nonzero in the first 32 bits:
		 */
		__asm__("bsfl %1,%0\n\t"
			"jne 1f\n\t"
			"movl $32, %0\n"
			"1:"
			: "=r" (set)
			: "r" (*p >> bit));
		if (set < (32 - bit))
			return set + offset;
		set = 32 - bit;
		p++;
	}
	/*
	 * No set bit yet, search remaining full words for a bit
	 */
	res = find_first_bit (p, size - 32 * (p - addr));
	return (offset + set + res);
}
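
/*
 * Usage sketch (illustrative only): walking every set bit of a small
 * bitmap with find_first_bit()/find_next_bit(), assuming 32-bit longs.
 */
#if 0
static void bitmap_walk_example(void)
{
	unsigned long map[2] = { 0x11, 0x4 };	/* bits 0, 4 and 34 set */
	int bit;

	for (bit = find_first_bit(map, 64);
	     bit < 64;
	     bit = find_next_bit(map, 64, bit + 1))
		printk("bit %d is set\n", bit);
}
#endif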


#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

/*
 * Stripped-down stand-ins for the kernel's RCU primitives: they reduce
 * to a volatile load and a plain store.  Unlike the real primitives
 * they contain no memory barriers, which is only safe where readers
 * cannot run concurrently with writers (e.g. a uniprocessor build).
 */
#define rcu_dereference(p) ({ \
		typeof(p) _________p1 = ACCESS_ONCE(p); \
		(_________p1); \
	})

#define rcu_assign_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			(p) = (v); \
	})
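
/*
 * Sketch of the intended pairing (illustrative, "example_slot" is a
 * placeholder): a writer publishes with rcu_assign_pointer(), a reader
 * picks the pointer up exactly once with rcu_dereference().
 */
#if 0
static struct idr_layer *example_slot;

static void example_writer(struct idr_layer *new)
{
	rcu_assign_pointer(example_slot, new);	/* publish */
}

static struct idr_layer *example_reader(void)
{
	return rcu_dereference(example_slot);	/* single load via ACCESS_ONCE */
}
#endif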

//static struct kmem_cache *idr_layer_cache;

static struct idr_layer *get_from_free_list(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

	/* Locking is commented out throughout this port; the original
	 * kernel code takes idp->lock here. */
//	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
//	spin_unlock_irqrestore(&idp->lock, flags);
	return(p);
}

static void idr_layer_rcu_free(struct rcu_head *head)
{
	struct idr_layer *layer;

	layer = container_of(head, struct idr_layer, rcu_head);
	kfree(layer);
}


/*
 * Note: this port frees layers immediately instead of deferring to
 * call_rcu(&p->rcu_head, idr_layer_rcu_free), so idr_layer_rcu_free()
 * above is currently unused.
 */
static inline void free_layer(struct idr_layer *p)
{
	kfree(p);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
//	spin_lock_irqsave(&idp->lock, flags);
	__move_to_free_list(idp, p);
//	spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
	struct idr_layer *p = pa[0];
	int l = 0;

	__set_bit(id & IDR_MASK, &p->bitmap);
	/*
	 * If this layer is full mark the bit in the layer above to
	 * show that this part of the radix tree is full.  This may
	 * complete the layer above and require walking up the radix
	 * tree.
	 */
	while (p->bitmap == IDR_FULL) {
		if (!(p = pa[++l]))
			break;
		id = id >> IDR_BITS;
		__set_bit((id & IDR_MASK), &p->bitmap);
	}
}


/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp:	idr handle
 * @gfp_mask:	memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * idr_get_new* functions.  It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, u32_t gfp_mask)
{
	while (idp->id_free_cnt < IDR_FREE_MAX) {
		struct idr_layer *new;
		new = kzalloc(sizeof(*new), gfp_mask);	/* was sizeof(new),
							 * which allocated only
							 * pointer size */
		if (new == NULL)
			return (0);
		move_to_free_list(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(idr_pre_get);


static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;
	unsigned long bm;

	id = *starting_id;
 restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		bm = ~p->bitmap;
		m = find_next_bit(&bm, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available go back to previous layer. */
			l++;
			oid = id;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (!(p = pa[l])) {
				*starting_id = id;
				return IDR_NEED_TO_GROW;
			}

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			/* splice the free slot index m into id at level l,
			 * replacing the old index n */
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_ID_BIT) || (id < 0))
			return IDR_NOMORE_SPACE;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			new = get_from_free_list(idp);
			if (!new)
				return -1;
			new->layer = l-1;
			rcu_assign_pointer(p->ary[m], new);
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}


static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = get_from_free_list(idp)))
			return -1;
		p->layer = 0;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
		layers++;
		if (!p->count) {
			/* special case: if the tree is currently empty,
			 * then we grow the tree by moving the top node
			 * upwards.
			 */
			p->layer++;
			continue;
		}
		if (!(new = get_from_free_list(idp))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
//			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->bitmap = new->count = 0;
				__move_to_free_list(idp, new);
			}
//			spin_unlock_irqrestore(&idp->lock, flags);
			return -1;
		}
		new->ary[0] = p;
		new->count = 1;
		new->layer = layers-1;
		if (p->bitmap == IDR_FULL)
			__set_bit(0, &new->bitmap);
		p = new;
	}
	rcu_assign_pointer(idp->top, p);
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa);
	if (v == IDR_NEED_TO_GROW)
		goto build_up;
	return(v);
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	int id;

	id = idr_get_empty_slot(idp, starting_id, pa);
	if (id >= 0) {
		/*
		 * Successfully found an empty slot.  Install the user
		 * pointer and mark the slot full.
		 */
		rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
				(struct idr_layer *)ptr);
		pa[0]->count++;
		idr_mark_full(pa, id);
	}

	return id;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, starting_id);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new_above);

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, 0);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new);
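
/*
 * Usage sketch (illustrative, mirroring the kernel-doc above): the
 * canonical allocate loop, preallocating with idr_pre_get() and
 * retrying on -EAGAIN.  GFP_KERNEL is a placeholder for whatever
 * allocation flags this port supports.
 */
#if 0
static int example_alloc(struct idr *idp, void *ptr)
{
	int id, err;

again:
	if (!idr_pre_get(idp, GFP_KERNEL))
		return -ENOMEM;
	/* take the lock protecting the idr here, if any */
	err = idr_get_new(idp, ptr, &id);
	/* drop the lock here */
	if (err == -EAGAIN)
		goto again;
	if (err)
		return err;
	return id;
}
#endif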

static void idr_remove_warning(int id)
{
	printk(KERN_WARNING
		"idr_remove called for id=%d which is not allocated.\n", id);
//	dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_LEVEL];
	struct idr_layer ***paa = &pa[0];
	struct idr_layer *to_free;
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, &p->bitmap))) {
		__clear_bit(n, &p->bitmap);
		rcu_assign_pointer(p->ary[n], NULL);
		to_free = NULL;
		while (*paa && !--((**paa)->count)) {
			if (to_free)
				free_layer(to_free);
			to_free = **paa;
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
		if (to_free)
			free_layer(to_free);
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;
	struct idr_layer *to_free;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/*
		 * Single child at leftmost slot: we can shrink the tree.
		 * This level is not needed anymore since when layers are
		 * inserted, they are inserted at the top of the existing
		 * tree.
		 */
		to_free = idp->top;
		p = idp->top->ary[0];
		rcu_assign_pointer(idp->top, p);
		--idp->layers;
		to_free->bitmap = to_free->count = 0;
		free_layer(to_free);
	}
	while (idp->id_free_cnt >= IDR_FREE_MAX) {
		p = get_from_free_list(idp);
		/*
		 * Note: we don't call the rcu callback here, since the only
		 * layers that fall into the freelist are those that have been
		 * preallocated.
		 */
		kfree(p);
	}
	return;
}
EXPORT_SYMBOL(idr_remove);


/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idp_layers, but this
 * function will remove all id mappings and leave all idp_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
	int n, id, max;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	rcu_assign_pointer(idp->top, NULL);
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		id += 1 << n;
		while (n < fls(id)) {
			if (p)
				free_layer(p);
			n += IDR_BITS;
			p = *--paa;
		}
	}
	idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
	while (idp->id_free_cnt) {
		struct idr_layer *p = get_from_free_list(idp);
		kfree(p);
	}
}
EXPORT_SYMBOL(idr_destroy);


/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
void *idr_find(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	p = rcu_dereference(idp->top);
	if (!p)
		return NULL;
	n = (p->layer+1) * IDR_BITS;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return NULL;
	BUG_ON(n == 0);

	while (n > 0 && p) {
		n -= IDR_BITS;
		BUG_ON(n != p->layer*IDR_BITS);
		p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
	}
	return((void *)p);
}
EXPORT_SYMBOL(idr_find);
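
/*
 * Lifecycle sketch (illustrative): an idr maps small ints to object
 * pointers.  All names below are placeholders; the gfp mask 0 is only
 * for illustration.
 */
#if 0
static void example_lifecycle(void *obj)
{
	struct idr example_idr;
	int id;

	idr_init(&example_idr);
	if (idr_pre_get(&example_idr, 0) &&
	    idr_get_new(&example_idr, obj, &id) == 0) {
		BUG_ON(idr_find(&example_idr, id) != obj);	/* id -> pointer */
		idr_remove(&example_idr, id);			/* release the id */
	}
	idr_destroy(&example_idr);	/* free cached layers */
}
#endif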

#if 0
/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than 0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = rcu_dereference(idp->top);
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);
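
/*
 * Callback sketch (illustrative): freeing every stored object before
 * tearing the tree down, following the clean-up sequence described in
 * the idr_remove_all() kernel-doc above.
 */
#if 0
static int example_free_one(int id, void *p, void *data)
{
	kfree(p);
	return 0;	/* a non-zero return would stop the walk */
}

static void example_cleanup(struct idr *idp)
{
	idr_for_each(idp, example_free_one, NULL);
	idr_remove_all(idp);
	idr_destroy(idp);
}
#endif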

/**
 * idr_get_next - lookup next object with an id >= the given id
 * @idp: idr handle
 * @nextidp: pointer to lookup key
 *
 * Returns a pointer to the registered object with the smallest id
 * greater than or equal to *@nextidp; *@nextidp is updated to that id.
 */
void *idr_get_next(struct idr *idp, int *nextidp)
{
	struct idr_layer *p, *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];
	int id = *nextidp;
	int n, max;

	/* find first ent */
	n = idp->layers * IDR_BITS;
	max = 1 << n;
	p = rcu_dereference(idp->top);
	if (!p)
		return NULL;

	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			*nextidp = id;
			return p;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}
	return NULL;
}



/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A -ENOENT return indicates that @id was not found.
 * A -EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	p = idp->top;
	if (!p)
		return ERR_PTR(-EINVAL);

	n = (p->layer+1) * IDR_BITS;

	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	rcu_assign_pointer(p->ary[n], ptr);

	return old_p;
}
EXPORT_SYMBOL(idr_replace);
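
/*
 * Usage sketch (illustrative): idr_replace() returns either the old
 * pointer or an ERR_PTR() value, so the result must be checked with
 * IS_ERR() before use.
 */
#if 0
static void *example_replace(struct idr *idp, int id, void *new_ptr)
{
	void *old = idr_replace(idp, new_ptr, id);

	if (IS_ERR(old))
		return NULL;	/* -EINVAL or -ENOENT */
	return old;		/* caller may now free the old object */
}
#endif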


#endif


void idr_init_cache(void)
{
	/* the slab cache is unused in this port; layers come from kzalloc() */
//	idr_layer_cache = kmem_cache_create("idr_layer_cache",
//				sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	memset(idp, 0, sizeof(struct idr));
//	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);

#if 0

/*
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida:	ida handle
 * @gfp_mask:	memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida:	ida handle
 * @starting_id: id to start search at
 * @p_id:	pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... 0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

 restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa);
	if (t < 0)
		return _idr_rc_to_errno(t);

	if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
		return -ENOSPC;

	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
				(void *)bitmap);
		pa[0]->count++;
	}

	/* lookup for empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_ID_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have a small memory footprint.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
		if (p)
			kmem_cache_free(idr_layer_cache, p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);

/**
 * ida_get_new - allocate new ID
 * @ida:	ida handle
 * @p_id:	pointer to the allocated handle
 *
 * Allocate new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range 0 ... 0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
	return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);
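
/*
 * Usage sketch (illustrative): the ida allocate loop mirrors the idr
 * one; preallocate, then retry while ida_get_new() reports -EAGAIN.
 * GFP_KERNEL is a placeholder for this port's allocation flags.
 */
#if 0
static int example_ida_alloc(struct ida *ida)
{
	int id, err;

	do {
		if (!ida_pre_get(ida, GFP_KERNEL))
			return -ENOMEM;
		err = ida_get_new(ida, &id);
	} while (err == -EAGAIN);

	return err ? err : id;
}
#endif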

/**
 * ida_remove - remove the given ID
 * @ida:	ida handle
 * @id:	ID to free
 */
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		p = p->ary[n];
		shift -= IDR_BITS;
	}

	if (p == NULL)
		goto err;

	n = idr_id & IDR_MASK;
	__clear_bit(n, &p->bitmap);

	bitmap = (void *)p->ary[n];
	if (!test_bit(offset, bitmap->bitmap))
		goto err;

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, &p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);
	}

	return;

 err:
	printk(KERN_WARNING
	       "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida: ida handle
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_init - initialize ida handle
 * @ida: ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);


#endif