idr.c — diff between Rev 2966 and Rev 3391
@@ -18 +18 @@
  * pointer or what ever, we treat it as a (void *). You can pass this
  * id to a user for him to pass back at a later time. You then pass
  * that id to this code and it returns your pointer.

  * You can release ids at any time. When all ids are released, most of
- * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we
+ * the memory is returned (we keep MAX_IDR_FREE) in a local pool so we
  * don't need to go to the memory "store" during an id allocate, just
  * so you don't need to be too concerned about locking and conflicts
  * with the slab allocator.
  */
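The contract described above, end to end, looks roughly like the sketch below. This is illustrative only: my_obj and my_ids are hypothetical caller-side names, and the calls are the ones defined later in this file (idr_init(), idr_pre_get(), idr_get_new_above(), idr_find(), idr_remove()).

    /* Sketch of the id <-> pointer contract; my_obj is hypothetical,
     * and my_ids is assumed to have been set up once with idr_init(). */
    static struct idr my_ids;

    static int store(struct my_obj *obj)
    {
        int id;

        if (!idr_pre_get(&my_ids, GFP_KERNEL))  /* top up the layer pool */
            return -ENOMEM;
        if (idr_get_new_above(&my_ids, obj, 0, &id))
            return -EAGAIN;                     /* pool ran dry; see the
                                                   retry loop further down */
        return id;                              /* hand this id out */
    }

Later, idr_find(&my_ids, id) maps the id back to obj, and idr_remove(&my_ids, id) releases it.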
@@ -28 +28 @@
[#include block: the header names between angle brackets were lost when this page was scraped to text; the directives are kept without their targets]
+
 #include
 #include
 #include
 #include
 #include
-//#include
 //#include
@@ -35 +35 @@
-unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
-{
-    const unsigned long *p = addr;
-    unsigned long result = 0;
-    unsigned long tmp;
-
-    while (size & ~(BITS_PER_LONG-1)) {
-        if ((tmp = *(p++)))
-            goto found;
-        result += BITS_PER_LONG;
-        size -= BITS_PER_LONG;
-    }
-    if (!size)
-        return result;
-
-    tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
-    if (tmp == 0UL)        /* Are any bits set? */
-        return result + size;    /* Nope. */
-found:
-    return result + __ffs(tmp);
-}
-
-int find_next_bit(const unsigned long *addr, int size, int offset)
-{
-    const unsigned long *p = addr + (offset >> 5);
-    int set = 0, bit = offset & 31, res;
-
-    if (bit)
-    {
-        /*
-         * Look for nonzero in the first 32 bits:
-         */
-        __asm__("bsfl %1,%0\n\t"
-            "jne 1f\n\t"
-            "movl $32, %0\n"
-            "1:"
-            : "=r" (set)
-            : "r" (*p >> bit));
-        if (set < (32 - bit))
-            return set + offset;
-        set = 32 - bit;
-        p++;
-    }
-    /*
-     * No set bit yet, search remaining full words for a bit
-     */
-    res = find_first_bit (p, size - 32 * (p - addr));
-    return (offset + set + res);
-}
-
-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
-
-#define rcu_dereference(p)     ({ \
-    typeof(p) _________p1 = ACCESS_ONCE(p); \
-    (_________p1); \
-})
-
-#define rcu_assign_pointer(p, v) \
-    ({ \
-        if (!__builtin_constant_p(v) || \
-            ((v) != NULL)) \
-            (p) = (v); \
-    })
-
-//static struct kmem_cache *idr_layer_cache;
+
+unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
+                                 unsigned long offset);
+
+#define MAX_IDR_SHIFT (sizeof(int) * 8 - 1)
+#define MAX_IDR_BIT (1U << MAX_IDR_SHIFT)
+
+/* Leave the possibility of an incomplete final layer */
+#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)
+
+/* Number of id_layer structs to leave in free list */
+#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)
+
+static struct idr_layer *idr_preload_head;
+static int idr_preload_cnt;
+
+/* the maximum ID which can be allocated given idr->layers */
+static int idr_max(int layers)
+{
+    int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT);
+
+    return (1 << bits) - 1;
+}
+
+/*
+ * Prefix mask for an idr_layer at @layer. For layer 0, the prefix mask is
+ * all bits except for the lower IDR_BITS. For layer 1, 2 * IDR_BITS, and
+ * so on.
+ */
+static int idr_layer_prefix_mask(int layer)
+{
+    return ~idr_max(layer + 1);
+}
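To make the new helpers concrete: idr_max() and idr_layer_prefix_mask() are pure bit arithmetic over IDR_BITS, which lives in idr.h. The value 8 used below is an assumption taken from the kernel tree this file tracks, not something this file defines:

    /* Worked example, assuming IDR_BITS == 8:
     *   idr_max(1)               == (1 << 8)  - 1 == 0xff
     *   idr_max(2)               == (1 << 16) - 1 == 0xffff
     *   idr_layer_prefix_mask(0) == ~idr_max(1)   == ~0xff
     *   idr_layer_prefix_mask(1) == ~idr_max(2)   == ~0xffff
     * so an idr_layer at layer 0 covers ids that agree on every bit
     * above the low 8; its ->prefix field stores exactly those bits.
     */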
@@ -105 +71 @@
 static struct idr_layer *get_from_free_list(struct idr *idp)
 {
     struct idr_layer *p;
     unsigned long flags;

-//    spin_lock_irqsave(&idp->lock, flags);
+    spin_lock_irqsave(&idp->lock, flags);
     if ((p = idp->id_free)) {
         idp->id_free = p->ary[0];
         idp->id_free_cnt--;
         p->ary[0] = NULL;
     }
-//    spin_unlock_irqrestore(&idp->lock, flags);
+    spin_unlock_irqrestore(&idp->lock, flags);
     return(p);
 }
+
+/**
+ * idr_layer_alloc - allocate a new idr_layer
+ * @gfp_mask: allocation mask
+ * @layer_idr: optional idr to allocate from
+ *
+ * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch
+ * one from the per-cpu preload buffer. If @layer_idr is not %NULL, fetch
+ * an idr_layer from @idr->id_free.
+ *
+ * @layer_idr is to maintain backward compatibility with the old alloc
+ * interface - idr_pre_get() and idr_get_new*() - and will be removed
+ * together with per-pool preload buffer.
+ */
+static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
+{
+    struct idr_layer *new;
+
+    /* this is the old path, bypass to get_from_free_list() */
+    if (layer_idr)
+        return get_from_free_list(layer_idr);
+
+    /* try to allocate directly from kmem_cache */
+    new = kzalloc(sizeof(struct idr_layer), gfp_mask);
+    if (new)
+        return new;
+
+    new = idr_preload_head;
+    if (new) {
+        idr_preload_head = new->ary[0];
+        idr_preload_cnt--;
+        new->ary[0] = NULL;
+    }
+    preempt_enable();
+    return new;
+}
@@ -143 +148 @@
     unsigned long flags;

     /*
      * Depends on the return element being zeroed.
      */
-//    spin_lock_irqsave(&idp->lock, flags);
+    spin_lock_irqsave(&idp->lock, flags);
     __move_to_free_list(idp, p);
-//    spin_unlock_irqrestore(&idp->lock, flags);
+    spin_unlock_irqrestore(&idp->lock, flags);
 }

 static void idr_mark_full(struct idr_layer **pa, int id)
 {
     struct idr_layer *p = pa[0];
     int l = 0;

-    __set_bit(id & IDR_MASK, &p->bitmap);
+    __set_bit(id & IDR_MASK, p->bitmap);
     /*
      * If this layer is full mark the bit in the layer above to
      * show that this part of the radix tree is full. This may
      * complete the layer above and require walking up the radix
      * tree.
      */
-    while (p->bitmap == IDR_FULL) {
+    while (bitmap_full(p->bitmap, IDR_SIZE)) {
         if (!(p = pa[++l]))
             break;
         id = id >> IDR_BITS;
-        __set_bit((id & IDR_MASK), &p->bitmap);
+        __set_bit((id & IDR_MASK), p->bitmap);
     }
@@ -183 +188 @@
  * If the system is REALLY out of memory this function returns %0,
  * otherwise %1.
  */
 int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 {
-    while (idp->id_free_cnt < IDR_FREE_MAX) {
+    while (idp->id_free_cnt < MAX_IDR_FREE) {
         struct idr_layer *new;
         new = kzalloc(sizeof(struct idr_layer), gfp_mask);
         if (new == NULL)
             return (0);
         move_to_free_list(idp, new);
     }
     return 1;
 }
+EXPORT_SYMBOL(idr_pre_get);
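The %0/%1 return above pairs with -EAGAIN from idr_get_new_above(): callers loop until the free list holds enough layers. A hedged sketch of that classic retry loop (my_idr and obj are illustrative names, not part of this file):

    int id, err;

    do {
        if (!idr_pre_get(&my_idr, GFP_KERNEL))
            return -ENOMEM;           /* really out of memory */
        err = idr_get_new_above(&my_idr, obj, 0, &id);
    } while (err == -EAGAIN);         /* pool ran dry mid-allocation */

    if (err)
        return err;                   /* -ENOSPC: id space exhausted */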
@@ -197 +203 @@
+
+/**
+ * sub_alloc - try to allocate an id without growing the tree depth
+ * @idp: idr handle
+ * @starting_id: id to start search at
+ * @id: pointer to the allocated handle
+ * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
+ * @gfp_mask: allocation mask for idr_layer_alloc()
+ * @layer_idr: optional idr passed to idr_layer_alloc()
+ *
+ * Allocate an id in range [@starting_id, INT_MAX] from @idp without
+ * growing its depth. Returns
+ *
+ *  the allocated id >= 0 if successful,
+ *  -EAGAIN if the tree needs to grow for allocation to succeed,
+ *  -ENOSPC if the id space is exhausted,
+ *  -ENOMEM if more idr_layers need to be allocated.
+ */
-static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
+static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
+                     gfp_t gfp_mask, struct idr *layer_idr)
 {
     int n, m, sh;
     struct idr_layer *p, *new;
     int l, id, oid;
-    unsigned long bm;

     id = *starting_id;
 restart:
@@ -210 +233 @@
     while (1) {
         /*
          * We run around this while until we reach the leaf node...
          */
         n = (id >> (IDR_BITS*l)) & IDR_MASK;
-        bm = ~p->bitmap;
-        m = find_next_bit(&bm, IDR_SIZE, n);
+        m = find_next_zero_bit(p->bitmap, IDR_SIZE, n);
         if (m == IDR_SIZE) {
             /* no space available go back to previous layer. */
             l++;
             oid = id;
             id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

             /* if already at the top layer, we need to grow */
-            if (!(p = pa[l])) {
+            if (id >= 1 << (idp->layers * IDR_BITS)) {
                 *starting_id = id;
-                return IDR_NEED_TO_GROW;
+                return -EAGAIN;
             }
+            p = pa[l];
+            BUG_ON(!p);

             /* If we need to go up one layer, continue the
              * loop; otherwise, restart from the top.
@@ -237 +261 @@
         }
         if (m != n) {
             sh = IDR_BITS*l;
             id = ((id >> sh) ^ n ^ m) << sh;
         }
-        if ((id >= MAX_ID_BIT) || (id < 0))
-            return IDR_NOMORE_SPACE;
+        if ((id >= MAX_IDR_BIT) || (id < 0))
+            return -ENOSPC;
         if (l == 0)
             break;
         /*
          * Create the layer below if it is missing.
          */
         if (!p->ary[m]) {
-            new = get_from_free_list(idp);
+            new = idr_layer_alloc(gfp_mask, layer_idr);
             if (!new)
-                return -1;
+                return -ENOMEM;
             new->layer = l-1;
+            new->prefix = id & idr_layer_prefix_mask(new->layer);
             rcu_assign_pointer(p->ary[m], new);
             p->count++;
         }
         pa[l--] = p;
         p = p->ary[m];
     }

     pa[l] = p;
     return id;
 }
@@ -264 +289 @@

-static int idr_get_empty_slot(struct idr *idp, int starting_id,
-                  struct idr_layer **pa)
+static int idr_get_empty_slot(struct idr *idp, int starting_id,
+                  struct idr_layer **pa, gfp_t gfp_mask,
+                  struct idr *layer_idr)
 {
     struct idr_layer *p, *new;
     int layers, v, id;
     unsigned long flags;

     id = starting_id;
 build_up:
     p = idp->top;
     layers = idp->layers;
     if (unlikely(!p)) {
-        if (!(p = get_from_free_list(idp)))
-            return -1;
+        if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
+            return -ENOMEM;
         p->layer = 0;
         layers = 1;
     }
     /*
      * Add a new layer to the top of the tree if the requested
      * id is larger than the currently allocated space.
      */
-    while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
+    while (id > idr_max(layers)) {
         layers++;
         if (!p->count) {
             /* special case: if the tree is currently empty,
              * then we grow the tree by moving the top node
              * upwards.
              */
             p->layer++;
+            WARN_ON_ONCE(p->prefix);
             continue;
         }
-        if (!(new = get_from_free_list(idp))) {
+        if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
             /*
              * The allocation failed. If we built part of
              * the structure tear it down.
              */
-//            spin_lock_irqsave(&idp->lock, flags);
+            spin_lock_irqsave(&idp->lock, flags);
             for (new = p; p && p != idp->top; new = p) {
                 p = p->ary[0];
                 new->ary[0] = NULL;
-                new->bitmap = new->count = 0;
+                new->count = 0;
+                bitmap_clear(new->bitmap, 0, IDR_SIZE);
                 __move_to_free_list(idp, new);
             }
-//            spin_unlock_irqrestore(&idp->lock, flags);
-            return -1;
+            spin_unlock_irqrestore(&idp->lock, flags);
+            return -ENOMEM;
         }
         new->ary[0] = p;
         new->count = 1;
         new->layer = layers-1;
+        new->prefix = id & idr_layer_prefix_mask(new->layer);
-        if (p->bitmap == IDR_FULL)
-            __set_bit(0, &new->bitmap);
+        if (bitmap_full(p->bitmap, IDR_SIZE))
+            __set_bit(0, new->bitmap);
         p = new;
     }
     rcu_assign_pointer(idp->top, p);
     idp->layers = layers;
-    v = sub_alloc(idp, &id, pa);
-    if (v == IDR_NEED_TO_GROW)
+    v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
+    if (v == -EAGAIN)
         goto build_up;
     return(v);
 }

-static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
-{
-    struct idr_layer *pa[MAX_LEVEL];
-    int id;
-
-    id = idr_get_empty_slot(idp, starting_id, pa);
-    if (id >= 0) {
-        /*
-         * Successfully found an empty slot. Install the user
-         * pointer and mark the slot full.
-         */
-        rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
-                (struct idr_layer *)ptr);
-        pa[0]->count++;
-        idr_mark_full(pa, id);
-    }
-
-    return id;
-}
+/*
+ * @id and @pa are from a successful allocation from idr_get_empty_slot().
+ * Install the user pointer @ptr and mark the slot full.
+ */
+static void idr_fill_slot(struct idr *idr, void *ptr, int id,
+              struct idr_layer **pa)
+{
+    /* update hint used for lookup, cleared from free_layer() */
+    rcu_assign_pointer(idr->hint, pa[0]);
+
+    rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr);
+    pa[0]->count++;
+    idr_mark_full(pa, id);
+}
370 | /** |
Line 361... | Line 385... | ||
361 | * |
385 | * |
362 | * @id returns a value in the range @starting_id ... %0x7fffffff |
386 | * @id returns a value in the range @starting_id ... %0x7fffffff |
363 | */ |
387 | */ |
364 | int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) |
388 | int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) |
365 | { |
389 | { |
- | 390 | struct idr_layer *pa[MAX_IDR_LEVEL + 1]; |
|
366 | int rv; |
391 | int rv; |
Line 367... | Line 392... | ||
367 | 392 | ||
368 | rv = idr_get_new_above_int(idp, ptr, starting_id); |
- | |
369 | /* |
- | |
370 | * This is a cheap hack until the IDR code can be fixed to |
- | |
371 | * return proper error values. |
- | |
372 | */ |
393 | rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp); |
373 | if (rv < 0) |
- | |
374 | { |
394 | if (rv < 0) |
- | 395 | return rv == -ENOMEM ? -EAGAIN : rv; |
|
375 | dbgprintf("fail\n"); |
396 | |
376 | return _idr_rc_to_errno(rv); |
- | |
377 | }; |
397 | idr_fill_slot(idp, ptr, rv, pa); |
378 | *id = rv; |
398 | *id = rv; |
379 | return 0; |
399 | return 0; |
- | 400 | } |
|
Line 380... | Line 401... | ||
380 | } |
401 | EXPORT_SYMBOL(idr_get_new_above); |
@@ -382 +403 @@

-/**
- * idr_get_new - allocate new idr entry
- * @idp: idr handle
- * @ptr: pointer you want associated with the id
- * @id: pointer to the allocated handle
- *
- * If allocation from IDR's private freelist fails, idr_get_new_above() will
- * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill
- * IDR's preallocation and then retry the idr_get_new_above() call.
- *
- * If the idr is full idr_get_new_above() will return %-ENOSPC.
- *
- * @id returns a value in the range %0 ... %0x7fffffff
- */
-int idr_get_new(struct idr *idp, void *ptr, int *id)
-{
-    int rv;
-
-    rv = idr_get_new_above_int(idp, ptr, 0);
-    /*
-     * This is a cheap hack until the IDR code can be fixed to
-     * return proper error values.
-     */
-    if (rv < 0)
-        return _idr_rc_to_errno(rv);
-    *id = rv;
-    return 0;
-}
+/**
+ * idr_preload - preload for idr_alloc()
+ * @gfp_mask: allocation mask to use for preloading
+ *
+ * Preload per-cpu layer buffer for idr_alloc(). Can only be used from
+ * process context and each idr_preload() invocation should be matched with
+ * idr_preload_end(). Note that preemption is disabled while preloaded.
+ *
+ * The first idr_alloc() in the preloaded section can be treated as if it
+ * were invoked with @gfp_mask used for preloading. This allows using more
+ * permissive allocation masks for idrs protected by spinlocks.
+ *
+ * For example, if idr_alloc() below fails, the failure can be treated as
+ * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT.
+ *
+ *    idr_preload(GFP_KERNEL);
+ *    spin_lock(lock);
+ *
+ *    id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
+ *
+ *    spin_unlock(lock);
+ *    idr_preload_end();
+ *
+ *    if (id < 0)
+ *        error;
+ */
+void idr_preload(gfp_t gfp_mask)
+{
+    /*
+     * idr_alloc() is likely to succeed w/o full idr_layer buffer and
+     * return value from idr_alloc() needs to be checked for failure
+     * anyway. Silently give up if allocation fails. The caller can
+     * treat failures from idr_alloc() as if idr_alloc() were called
+     * with @gfp_mask which should be enough.
+     */
+    while (idr_preload_cnt < MAX_IDR_FREE) {
+        struct idr_layer *new;
+
+        new = kzalloc(sizeof(struct idr_layer), gfp_mask);
+        if (!new)
+            break;
+
+        /* link the new one to per-cpu preload list */
+        new->ary[0] = idr_preload_head;
+        idr_preload_head = new;
+        idr_preload_cnt++;
+    }
+}
+EXPORT_SYMBOL(idr_preload);
+
+/**
+ * idr_alloc - allocate new idr entry
+ * @idr: the (initialized) idr
+ * @ptr: pointer to be associated with the new id
+ * @start: the minimum id (inclusive)
+ * @end: the maximum id (exclusive, <= 0 for max)
+ * @gfp_mask: memory allocation flags
+ *
+ * Allocate an id in [start, end) and associate it with @ptr. If no ID is
+ * available in the specified range, returns -ENOSPC. On memory allocation
+ * failure, returns -ENOMEM.
+ *
+ * Note that @end is treated as max when <= 0. This is to always allow
+ * using @start + N as @end as long as N is inside integer range.
+ *
+ * The user is responsible for exclusively synchronizing all operations
+ * which may modify @idr. However, read-only accesses such as idr_find()
+ * or iteration can be performed under RCU read lock provided the user
+ * destroys @ptr in RCU-safe way after removal from idr.
+ */
+int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
+{
+    int max = end > 0 ? end - 1 : INT_MAX;    /* inclusive upper limit */
+    struct idr_layer *pa[MAX_IDR_LEVEL + 1];
+    int id;
+
+    /* sanity checks */
+    if (WARN_ON_ONCE(start < 0))
+        return -EINVAL;
+    if (unlikely(max < start))
+        return -ENOSPC;
+
+    /* allocate id */
+    id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL);
+    if (unlikely(id < 0))
+        return id;
+    if (unlikely(id > max))
+        return -ENOSPC;
+
+    idr_fill_slot(idr, ptr, id, pa);
+    return id;
+}
+EXPORT_SYMBOL_GPL(idr_alloc);
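For contrast with the old two-step API, here is a minimal sketch of the preload + alloc pattern the comments above describe. my_lock, my_idr and obj are illustrative names, and idr_preload_end() is the matching helper the idr_preload() comment requires (in the kernel tree this file tracks it is a small wrapper that re-enables preemption):

    int id;

    idr_preload(GFP_KERNEL);          /* may sleep; fills the preload list */
    spin_lock(&my_lock);

    id = idr_alloc(&my_idr, obj, 1, 0, GFP_NOWAIT);  /* any id >= 1 */

    spin_unlock(&my_lock);
    idr_preload_end();

    if (id < 0)
        return id;                    /* -ENOSPC or -ENOMEM, as documented */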
@@ -416 +502 @@
 }

 static void sub_remove(struct idr *idp, int shift, int id)
 {
     struct idr_layer *p = idp->top;
-    struct idr_layer **pa[MAX_LEVEL];
+    struct idr_layer **pa[MAX_IDR_LEVEL + 1];
     struct idr_layer ***paa = &pa[0];
     struct idr_layer *to_free;
     int n;

     *paa = NULL;
     *++paa = &idp->top;

     while ((shift > 0) && p) {
         n = (id >> shift) & IDR_MASK;
-        __clear_bit(n, &p->bitmap);
+        __clear_bit(n, p->bitmap);
         *++paa = &p->ary[n];
         p = p->ary[n];
         shift -= IDR_BITS;
     }
     n = id & IDR_MASK;
-    if (likely(p != NULL && test_bit(n, &p->bitmap))){
-        __clear_bit(n, &p->bitmap);
+    if (likely(p != NULL && test_bit(n, p->bitmap))) {
+        __clear_bit(n, p->bitmap);
         rcu_assign_pointer(p->ary[n], NULL);
         to_free = NULL;
         while(*paa && ! --((**paa)->count)){
             if (to_free)
-                free_layer(to_free);
+                free_layer(idp, to_free);
             to_free = **paa;
             **paa-- = NULL;
         }
         if (!*paa)
             idp->layers = 0;
         if (to_free)
-            free_layer(to_free);
+            free_layer(idp, to_free);
     } else
@@ -460 +546 @@
 void idr_remove(struct idr *idp, int id)
 {
     struct idr_layer *p;
     struct idr_layer *to_free;

-    /* Mask off upper bits we don't use for the search. */
-    id &= MAX_ID_MASK;
+    /* see comment in idr_find_slowpath() */
+    if (WARN_ON_ONCE(id < 0))
+        return;

     sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
     if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
@@ -476 +563 @@
      */
         to_free = idp->top;
         p = idp->top->ary[0];
         rcu_assign_pointer(idp->top, p);
         --idp->layers;
-        to_free->bitmap = to_free->count = 0;
-        free_layer(to_free);
+        to_free->count = 0;
+        bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
+        free_layer(idp, to_free);
     }
-    while (idp->id_free_cnt >= IDR_FREE_MAX) {
+    while (idp->id_free_cnt >= MAX_IDR_FREE) {
         p = get_from_free_list(idp);
         /*
          * Note: we don't call the rcu callback here, since the only
          * layers that fall into the freelist are those that have been
          * preallocated.
          */
         kfree(p);
     }
     return;
 }
+EXPORT_SYMBOL(idr_remove);
@@ -495 +584 @@

-/**
- * idr_remove_all - remove all ids from the given idr tree
- * @idp: idr handle
- *
- * idr_destroy() only frees up unused, cached idp_layers, but this
- * function will remove all id mappings and leave all idp_layers
- * unused.
- *
- * A typical clean-up sequence for objects stored in an idr tree will
- * use idr_for_each() to free all objects, if necessay, then
- * idr_remove_all() to remove all ids, and idr_destroy() to free
- * up the cached idr_layers.
- */
-void idr_remove_all(struct idr *idp)
+void __idr_remove_all(struct idr *idp)
 {
     int n, id, max;
     int bt_mask;
     struct idr_layer *p;
-    struct idr_layer *pa[MAX_LEVEL];
+    struct idr_layer *pa[MAX_IDR_LEVEL + 1];
     struct idr_layer **paa = &pa[0];

     n = idp->layers * IDR_BITS;
     p = idp->top;
     rcu_assign_pointer(idp->top, NULL);
-    max = 1 << n;
+    max = idr_max(idp->layers);

     id = 0;
-    while (id < max) {
+    while (id >= 0 && id <= max) {
         while (n > IDR_BITS && p) {
             n -= IDR_BITS;
@@ -531 +606 @@
         bt_mask = id;
         id += 1 << n;
         /* Get the highest bit that the above add changed from 0->1. */
         while (n < fls(id ^ bt_mask)) {
             if (p)
-                free_layer(p);
+                free_layer(idp, p);
             n += IDR_BITS;
             p = *--paa;
         }
     }
     idp->layers = 0;
 }
+EXPORT_SYMBOL(__idr_remove_all);
@@ -543 +619 @@

 /**
  * idr_destroy - release all cached layers within an idr tree
  * @idp: idr handle
+ *
+ * Free all id mappings and all idp_layers. After this function, @idp is
+ * completely unused and can be freed / recycled. The caller is
+ * responsible for ensuring that no one else accesses @idp during or after
+ * idr_destroy().
+ *
+ * A typical clean-up sequence for objects stored in an idr tree will use
+ * idr_for_each() to free all objects, if necessay, then idr_destroy() to
+ * free up the id mappings and cached idr_layers.
  */
 void idr_destroy(struct idr *idp)
 {
+    __idr_remove_all(idp);
+
     while (idp->id_free_cnt) {
         struct idr_layer *p = get_from_free_list(idp);
         kfree(p);
     }
 }
+EXPORT_SYMBOL(idr_destroy);
@@ -555 +642 @@

-/**
- * idr_find - return pointer for given id
- * @idp: idr handle
- * @id: lookup key
- *
- * Return the pointer given the id it has been registered with. A %NULL
- * return indicates that @id is not valid or you passed %NULL in
- * idr_get_new().
- *
- * This function can be called under rcu_read_lock(), given that the leaf
- * pointers lifetimes are correctly managed.
- */
-void *idr_find(struct idr *idp, int id)
+void *idr_find_slowpath(struct idr *idp, int id)
 {
     int n;
     struct idr_layer *p;

+    /*
+     * If @id is negative, idr_find() used to ignore the sign bit and
+     * performed lookup with the rest of bits, which is weird and can
+     * lead to very obscure bugs. We're now returning NULL for all
+     * negative IDs but just in case somebody was depending on the sign
+     * bit being ignored, let's trigger WARN_ON_ONCE() so that they can
+     * be detected and fixed. WARN_ON_ONCE() can later be removed.
+     */
+    if (WARN_ON_ONCE(id < 0))
+        return NULL;
+
-    p = rcu_dereference(idp->top);
+    p = rcu_dereference_raw(idp->top);
     if (!p)
         return NULL;
     n = (p->layer+1) * IDR_BITS;

-    /* Mask off upper bits we don't use for the search. */
-    id &= MAX_ID_MASK;
-
-    if (id >= (1 << n))
+    if (id > idr_max(p->layer + 1))
         return NULL;
     BUG_ON(n == 0);

     while (n > 0 && p) {
         n -= IDR_BITS;
         BUG_ON(n != p->layer*IDR_BITS);
-        p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
+        p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
     }
     return((void *)p);
 }
@@ -613 +697 @@
 int idr_for_each(struct idr *idp,
          int (*fn)(int id, void *p, void *data), void *data)
 {
     int n, id, max, error = 0;
     struct idr_layer *p;
-    struct idr_layer *pa[MAX_LEVEL];
+    struct idr_layer *pa[MAX_IDR_LEVEL + 1];
     struct idr_layer **paa = &pa[0];

     n = idp->layers * IDR_BITS;
-    p = rcu_dereference(idp->top);
-    max = 1 << n;
+    p = rcu_dereference_raw(idp->top);
+    max = idr_max(idp->layers);

     id = 0;
-    while (id < max) {
+    while (id >= 0 && id <= max) {
         while (n > 0 && p) {
             n -= IDR_BITS;
             *paa++ = p;
-            p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
+            p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
         }

@@ -653 +737 @@
  * @nextidp: pointer to lookup key
  *
  * Returns pointer to registered object with id, which is next number to
  * given id. After being looked up, *@nextidp will be updated for the next
  * iteration.
+ *
+ * This function can be called under rcu_read_lock(), given that the leaf
+ * pointers lifetimes are correctly managed.
  */
-
 void *idr_get_next(struct idr *idp, int *nextidp)
 {
-    struct idr_layer *p, *pa[MAX_LEVEL];
+    struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1];
     struct idr_layer **paa = &pa[0];
     int id = *nextidp;
     int n, max;

     /* find first ent */
-    n = idp->layers * IDR_BITS;
-    max = 1 << n;
-    p = rcu_dereference(idp->top);
+    p = rcu_dereference_raw(idp->top);
     if (!p)
         return NULL;
+    n = (p->layer + 1) * IDR_BITS;
+    max = idr_max(p->layer + 1);

-    while (id < max) {
+    while (id >= 0 && id <= max) {
         while (n > 0 && p) {
             n -= IDR_BITS;
             *paa++ = p;
-            p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
+            p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
         }

         if (p) {
             *nextidp = id;
             return p;
         }

-        id += 1 << n;
+        /*
+         * Proceed to the next layer at the current level. Unlike
+         * idr_for_each(), @id isn't guaranteed to be aligned to
+         * layer boundary at this point and adding 1 << n may
+         * incorrectly skip IDs. Make sure we jump to the
+         * beginning of the next layer using round_up().
+         */
+        id = round_up(id + 1, 1 << n);
         while (n < fls(id)) {
             n += IDR_BITS;
             p = *--paa;
         }
     }
     return NULL;
 }
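The two iteration interfaces above are callback- and cursor-style views of the same walk. An illustrative sketch (print_entry and my_idr are made-up names; dbgprintf() is the debug printk already used elsewhere in this file):

    static int print_entry(int id, void *p, void *data)
    {
        dbgprintf("id %d -> %p\n", id, p);
        return 0;                     /* non-zero would stop the walk */
    }

    /* callback style */
    idr_for_each(&my_idr, print_entry, NULL);

    /* cursor style */
    int id = 0;
    void *p;
    while ((p = idr_get_next(&my_idr, &id)) != NULL) {
        dbgprintf("id %d -> %p\n", id, p);
        id++;                         /* step past the id just returned */
    }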
@@ -709 +802 @@
 void *idr_replace(struct idr *idp, void *ptr, int id)
 {
     int n;
     struct idr_layer *p, *old_p;

+    /* see comment in idr_find_slowpath() */
+    if (WARN_ON_ONCE(id < 0))
+        return ERR_PTR(-EINVAL);
+
     p = idp->top;
     if (!p)
         return ERR_PTR(-EINVAL);

     n = (p->layer+1) * IDR_BITS;

-    id &= MAX_ID_MASK;
-
     if (id >= (1 << n))
         return ERR_PTR(-EINVAL);

     n -= IDR_BITS;
     while ((n > 0) && p) {
         p = p->ary[(id >> n) & IDR_MASK];
         n -= IDR_BITS;
     }

@@ -757 +852 @@
  * to the rest of the functions.
  */
 void idr_init(struct idr *idp)
 {
     memset(idp, 0, sizeof(struct idr));
-//    spin_lock_init(&idp->lock);
+    spin_lock_init(&idp->lock);
 }
+EXPORT_SYMBOL(idr_init);

@@ -765 +861 @@
 #if 0
+
-/*
+/**
+ * DOC: IDA description
  * IDA - IDR based ID allocator
  *
  * This is id allocator without id -> pointer translation. Memory
@@ -811 +908 @@

     /* allocate free_bitmap */
     if (!ida->free_bitmap) {
         struct ida_bitmap *bitmap;

-        bitmap = kzalloc(sizeof(struct ida_bitmap), gfp_mask);
+        bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
         if (!bitmap)
             return 0;

@@ -839 +936 @@
  *
  * @p_id returns a value in the range @starting_id ... %0x7fffffff.
  */
 int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
 {
-    struct idr_layer *pa[MAX_LEVEL];
+    struct idr_layer *pa[MAX_IDR_LEVEL + 1];
     struct ida_bitmap *bitmap;
     unsigned long flags;
     int idr_id = starting_id / IDA_BITMAP_BITS;
     int offset = starting_id % IDA_BITMAP_BITS;
     int t, id;

 restart:
     /* get vacant slot */
-    t = idr_get_empty_slot(&ida->idr, idr_id, pa);
+    t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr);
     if (t < 0)
-        return _idr_rc_to_errno(t);
+        return t == -ENOMEM ? -EAGAIN : t;

-    if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
+    if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
         return -ENOSPC;

@@ -886 +983 @@
         offset = 0;
         goto restart;
     }

     id = idr_id * IDA_BITMAP_BITS + t;
-    if (id >= MAX_ID_BIT)
+    if (id >= MAX_IDR_BIT)
         return -ENOSPC;

     __set_bit(t, bitmap->bitmap);
@@ -911 +1008 @@
     return 0;
 }
 EXPORT_SYMBOL(ida_get_new_above);

Line 914... | Line 1011... | ||
914 | 1011 | ||
915 | /** |
- | |
916 | * ida_get_new - allocate new ID |
- | |
917 | * @ida: idr handle |
- | |
918 | * @p_id: pointer to the allocated handle |
- | |
919 | * |
- | |
920 | * Allocate new ID. It should be called with any required locks. |
- | |
921 | * |
- | |
922 | * If memory is required, it will return %-EAGAIN, you should unlock |
- | |
923 | * and go back to the idr_pre_get() call. If the idr is full, it will |
- | |
924 | * return %-ENOSPC. |
- | |
925 | * |
- | |
926 | * @p_id returns a value in the range %0 ... %0x7fffffff. |
- | |
927 | */ |
- | |
928 | int ida_get_new(struct ida *ida, int *p_id) |
- | |
929 | { |
- | |
930 | return ida_get_new_above(ida, 0, p_id); |
- | |
931 | } |
- | |
932 | EXPORT_SYMBOL(ida_get_new); |
- | |
933 | - | ||
934 | /** |
1012 | /** |
935 | * ida_remove - remove the given ID |
1013 | * ida_remove - remove the given ID |
936 | * @ida: ida handle |
1014 | * @ida: ida handle |
937 | * @id: ID to free |
1015 | * @id: ID to free |
938 | */ |
1016 | */ |
@@ -946 +1024 @@
     struct ida_bitmap *bitmap;

     /* clear full bits while looking up the leaf idr_layer */
     while ((shift > 0) && p) {
         n = (idr_id >> shift) & IDR_MASK;
-        __clear_bit(n, &p->bitmap);
+        __clear_bit(n, p->bitmap);
         p = p->ary[n];
         shift -= IDR_BITS;
     }

     if (p == NULL)
         goto err;

     n = idr_id & IDR_MASK;
-    __clear_bit(n, &p->bitmap);
+    __clear_bit(n, p->bitmap);

     bitmap = (void *)p->ary[n];
     if (!test_bit(offset, bitmap->bitmap))
         goto err;

     /* update bitmap and remove it if empty */
     __clear_bit(offset, bitmap->bitmap);
     if (--bitmap->nr_busy == 0) {
@@ -1005 +1083 @@
 }
 EXPORT_SYMBOL(ida_init);

+
 #endif
+
+
+unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
+{
+    const unsigned long *p = addr;
+    unsigned long result = 0;
+    unsigned long tmp;
+
+    while (size & ~(BITS_PER_LONG-1)) {
+        if ((tmp = *(p++)))
+            goto found;
+        result += BITS_PER_LONG;
+        size -= BITS_PER_LONG;
+    }
+    if (!size)
+        return result;
+
+    tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
+    if (tmp == 0UL)        /* Are any bits set? */
+        return result + size;    /* Nope. */
+found:
+    return result + __ffs(tmp);
+}
+
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+                            unsigned long offset)
+{
+    const unsigned long *p = addr + BITOP_WORD(offset);
+    unsigned long result = offset & ~(BITS_PER_LONG-1);
+    unsigned long tmp;
+
+    if (offset >= size)
+        return size;
+    size -= result;
+    offset %= BITS_PER_LONG;
+    if (offset) {
+        tmp = *(p++);
+        tmp &= (~0UL << offset);
+        if (size < BITS_PER_LONG)
+            goto found_first;
+        if (tmp)
+            goto found_middle;
+        size -= BITS_PER_LONG;
+        result += BITS_PER_LONG;
+    }
+    while (size & ~(BITS_PER_LONG-1)) {
+        if ((tmp = *(p++)))
+            goto found_middle;
+        result += BITS_PER_LONG;
+        size -= BITS_PER_LONG;
+    }
+    if (!size)
+        return result;
+    tmp = *p;
+
+found_first:
+    tmp &= (~0UL >> (BITS_PER_LONG - size));
+    if (tmp == 0UL)        /* Are any bits set? */
+        return result + size;    /* Nope. */
+found_middle:
+    return result + __ffs(tmp);
+}
+
+unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
+                                 unsigned long offset)
+{
+    const unsigned long *p = addr + BITOP_WORD(offset);
+    unsigned long result = offset & ~(BITS_PER_LONG-1);
+    unsigned long tmp;
+
+    if (offset >= size)
+        return size;
+    size -= result;
+    offset %= BITS_PER_LONG;
+    if (offset) {
+        tmp = *(p++);
+        tmp |= ~0UL >> (BITS_PER_LONG - offset);
+        if (size < BITS_PER_LONG)
+            goto found_first;
+        if (~tmp)
+            goto found_middle;
+        size -= BITS_PER_LONG;
+        result += BITS_PER_LONG;
+    }
+    while (size & ~(BITS_PER_LONG-1)) {
+        if (~(tmp = *(p++)))
+            goto found_middle;
+        result += BITS_PER_LONG;
+        size -= BITS_PER_LONG;
+    }
+    if (!size)
+        return result;
+    tmp = *p;
+
+found_first:
+    tmp |= ~0UL << size;
+    if (tmp == ~0UL)    /* Are any bits zero? */
+        return result + size;    /* Nope. */
+found_middle:
+    return result + ffz(tmp);
+}
+
+unsigned int hweight32(unsigned int w)
+{
+    unsigned int res = w - ((w >> 1) & 0x55555555);
+    res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
+    res = (res + (res >> 4)) & 0x0F0F0F0F;
+    res = res + (res >> 8);
+    return (res + (res >> 16)) & 0x000000FF;
+}
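hweight32() above is the classic SWAR population count, folding pair sums into progressively wider fields. A worked trace for one input, using nothing beyond the code itself:

    /* w = 0xF0F0F0F0 (16 bits set):
     *   after step 1: each 2-bit field holds its own popcount
     *   after step 2: each 4-bit field holds a count in 0..4
     *   after step 3: each byte holds a count in 0..8 -> 0x04040404
     *   after steps 4-5: the four byte counts are summed and masked
     *                    -> (4 + 4 + 4 + 4) = 0x10 == 16
     */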