/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate a pointer, or
 * whatever, with that id; we treat it as a (void *).  You can pass this
 * id to a user for them to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep MAX_IDR_FREE) in a local pool so we
 * don't need to go to the memory "store" during an id allocate, and
 * so you don't need to be too concerned about locking and conflicts
 * with the slab allocator.
 */

#include
#include
#include
#include
#include
//#include

static inline void * __must_check ERR_PTR(long error)
{
        return (void *) error;
}

unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
                                 unsigned long offset);


#define MAX_IDR_SHIFT   (sizeof(int) * 8 - 1)
#define MAX_IDR_BIT     (1U << MAX_IDR_SHIFT)

/* Leave the possibility of an incomplete final layer */
#define MAX_IDR_LEVEL   ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)

/* Number of id_layer structs to leave in free list */
#define MAX_IDR_FREE    (MAX_IDR_LEVEL * 2)

static struct idr_layer *idr_preload_head;
static int idr_preload_cnt;

static DEFINE_SPINLOCK(simple_ida_lock);

/* the maximum ID which can be allocated given idr->layers */
static int idr_max(int layers)
{
        int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT);

        return (1 << bits) - 1;
}

/*
 * Prefix mask for an idr_layer at @layer.  For layer 0, the prefix mask is
 * all bits except for the lower IDR_BITS.  For layer 1, 2 * IDR_BITS, and
 * so on.
 */
static int idr_layer_prefix_mask(int layer)
{
        return ~idr_max(layer + 1);
}
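
/*
 * Worked example (illustrative, assuming the usual IDR_BITS == 8 from
 * the idr header): idr_max(1) == 0xff and idr_max(2) == 0xffff, so
 * idr_layer_prefix_mask(0) == ~0xff; two leaf slots share an idr_layer
 * exactly when their ids agree in all but the low IDR_BITS bits.
 */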

static struct idr_layer *get_from_free_list(struct idr *idp)
{
        struct idr_layer *p;
        unsigned long flags;

        spin_lock_irqsave(&idp->lock, flags);
        if ((p = idp->id_free)) {
                idp->id_free = p->ary[0];
                idp->id_free_cnt--;
                p->ary[0] = NULL;
        }
        spin_unlock_irqrestore(&idp->lock, flags);
        return p;
}

/**
 * idr_layer_alloc - allocate a new idr_layer
 * @gfp_mask: allocation mask
 * @layer_idr: optional idr to allocate from
 *
 * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch
 * one from the preload buffer.  If @layer_idr is not %NULL, fetch an
 * idr_layer from @layer_idr->id_free.
 *
 * @layer_idr is to maintain backward compatibility with the old alloc
 * interface - idr_pre_get() and idr_get_new*() - and will be removed
 * together with per-pool preload buffer.
 */
static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
{
        struct idr_layer *new;

        /* this is the old path, bypass to get_from_free_list() */
        if (layer_idr)
                return get_from_free_list(layer_idr);

        /* try to allocate directly from kmem_cache */
        new = kzalloc(sizeof(struct idr_layer), gfp_mask);
        if (new)
                return new;

        /* otherwise fall back to the preload buffer */
        new = idr_preload_head;
        if (new) {
                idr_preload_head = new->ary[0];
                idr_preload_cnt--;
                new->ary[0] = NULL;
        }
        preempt_enable();
        return new;
}

static void idr_layer_rcu_free(struct rcu_head *head)
{
        struct idr_layer *layer;

        layer = container_of(head, struct idr_layer, rcu_head);
        kfree(layer);
}

static inline void free_layer(struct idr *idr, struct idr_layer *p)
{
        if (idr->hint && idr->hint == p)
                RCU_INIT_POINTER(idr->hint, NULL);
        idr_layer_rcu_free(&p->rcu_head);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
        p->ary[0] = idp->id_free;
        idp->id_free = p;
        idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
        unsigned long flags;

        /*
         * Depends on the return element being zeroed.
         */
        spin_lock_irqsave(&idp->lock, flags);
        __move_to_free_list(idp, p);
        spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
        struct idr_layer *p = pa[0];
        int l = 0;

        __set_bit(id & IDR_MASK, p->bitmap);
        /*
         * If this layer is full mark the bit in the layer above to
         * show that this part of the radix tree is full.  This may
         * complete the layer above and require walking up the radix
         * tree.
         */
        while (bitmap_full(p->bitmap, IDR_SIZE)) {
                if (!(p = pa[++l]))
                        break;
                id = id >> IDR_BITS;
                __set_bit((id & IDR_MASK), p->bitmap);
        }
}

int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
        while (idp->id_free_cnt < MAX_IDR_FREE) {
                struct idr_layer *new;
                new = kzalloc(sizeof(struct idr_layer), gfp_mask);
                if (new == NULL)
                        return 0;
                move_to_free_list(idp, new);
        }
        return 1;
}
EXPORT_SYMBOL(__idr_pre_get);

/**
 * sub_alloc - try to allocate an id without growing the tree depth
 * @idp: idr handle
 * @starting_id: id to start search at
 * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
 * @gfp_mask: allocation mask for idr_layer_alloc()
 * @layer_idr: optional idr passed to idr_layer_alloc()
 *
 * Allocate an id in range [@starting_id, INT_MAX] from @idp without
 * growing its depth.  Returns
 *
 *  the allocated id >= 0 if successful,
 *  -EAGAIN if the tree needs to grow for allocation to succeed,
 *  -ENOSPC if the id space is exhausted,
 *  -ENOMEM if more idr_layers need to be allocated.
 */
static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
                     gfp_t gfp_mask, struct idr *layer_idr)
{
        int n, m, sh;
        struct idr_layer *p, *new;
        int l, id, oid;

        id = *starting_id;
restart:
        p = idp->top;
        l = idp->layers;
        pa[l--] = NULL;
        while (1) {
                /*
                 * We run around this while loop until we reach the leaf node...
                 */
                n = (id >> (IDR_BITS*l)) & IDR_MASK;
                m = find_next_zero_bit(p->bitmap, IDR_SIZE, n);
                if (m == IDR_SIZE) {
                        /* no space available, go back to the previous layer */
                        l++;
                        oid = id;
                        id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

                        /* if already at the top layer, we need to grow */
                        if (id >= 1 << (idp->layers * IDR_BITS)) {
                                *starting_id = id;
                                return -EAGAIN;
                        }
                        p = pa[l];
                        BUG_ON(!p);

                        /* If we need to go up one layer, continue the
                         * loop; otherwise, restart from the top.
                         */
                        sh = IDR_BITS * (l + 1);
                        if (oid >> sh == id >> sh)
                                continue;
                        else
                                goto restart;
                }
                if (m != n) {
                        sh = IDR_BITS*l;
                        id = ((id >> sh) ^ n ^ m) << sh;
                }
                if ((id >= MAX_IDR_BIT) || (id < 0))
                        return -ENOSPC;
                if (l == 0)
                        break;
                /*
                 * Create the layer below if it is missing.
                 */
                if (!p->ary[m]) {
                        new = idr_layer_alloc(gfp_mask, layer_idr);
                        if (!new)
                                return -ENOMEM;
                        new->layer = l-1;
                        new->prefix = id & idr_layer_prefix_mask(new->layer);
                        rcu_assign_pointer(p->ary[m], new);
                        p->count++;
                }
                pa[l--] = p;
                p = p->ary[m];
        }

        pa[l] = p;
        return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
                              struct idr_layer **pa, gfp_t gfp_mask,
                              struct idr *layer_idr)
{
        struct idr_layer *p, *new;
        int layers, v, id;
        unsigned long flags;

        id = starting_id;
build_up:
        p = idp->top;
        layers = idp->layers;
        if (unlikely(!p)) {
                if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
                        return -ENOMEM;
                p->layer = 0;
                layers = 1;
        }
        /*
         * Add a new layer to the top of the tree if the requested
         * id is larger than the currently allocated space.
         */
        while (id > idr_max(layers)) {
                layers++;
                if (!p->count) {
                        /* special case: if the tree is currently empty,
                         * then we grow the tree by moving the top node
                         * upwards.
                         */
                        p->layer++;
                        WARN_ON_ONCE(p->prefix);
                        continue;
                }
                if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
                        /*
                         * The allocation failed.  If we built part of
                         * the structure tear it down.
                         */
                        spin_lock_irqsave(&idp->lock, flags);
                        for (new = p; p && p != idp->top; new = p) {
                                p = p->ary[0];
                                new->ary[0] = NULL;
                                new->count = 0;
                                bitmap_clear(new->bitmap, 0, IDR_SIZE);
                                __move_to_free_list(idp, new);
                        }
                        spin_unlock_irqrestore(&idp->lock, flags);
                        return -ENOMEM;
                }
                new->ary[0] = p;
                new->count = 1;
                new->layer = layers-1;
                new->prefix = id & idr_layer_prefix_mask(new->layer);
                if (bitmap_full(p->bitmap, IDR_SIZE))
                        __set_bit(0, new->bitmap);
                p = new;
        }
        rcu_assign_pointer(idp->top, p);
        idp->layers = layers;
        v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
        if (v == -EAGAIN)
                goto build_up;
        return v;
}

/*
 * @id and @pa are from a successful allocation from idr_get_empty_slot().
 * Install the user pointer @ptr and mark the slot full.
 */
static void idr_fill_slot(struct idr *idr, void *ptr, int id,
                          struct idr_layer **pa)
{
        /* update hint used for lookup, cleared from free_layer() */
        rcu_assign_pointer(idr->hint, pa[0]);

        rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr);
        pa[0]->count++;
        idr_mark_full(pa, id);
}

int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
        struct idr_layer *pa[MAX_IDR_LEVEL + 1];
        int rv;

        rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp);
        if (rv < 0)
                return rv == -ENOMEM ? -EAGAIN : rv;

        idr_fill_slot(idp, ptr, rv, pa);
        *id = rv;
        return 0;
}
EXPORT_SYMBOL(__idr_get_new_above);
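
/*
 * Usage sketch for the legacy interface above (illustrative only; the
 * idr `my_idr`, lock `my_lock` and object `ptr` are assumptions):
 *
 *	int id, ret;
 *
 *	do {
 *		if (!__idr_pre_get(&my_idr, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		ret = __idr_get_new_above(&my_idr, ptr, 0, &id);
 *		spin_unlock(&my_lock);
 *	} while (ret == -EAGAIN);
 */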

/**
 * idr_preload - preload for idr_alloc()
 * @gfp_mask: allocation mask to use for preloading
 *
 * Preload per-cpu layer buffer for idr_alloc().  Can only be used from
 * process context and each idr_preload() invocation should be matched with
 * idr_preload_end().  Note that preemption is disabled while preloaded.
 *
 * The first idr_alloc() in the preloaded section can be treated as if it
 * were invoked with @gfp_mask used for preloading.  This allows using more
 * permissive allocation masks for idrs protected by spinlocks.
 *
 * For example, if idr_alloc() below fails, the failure can be treated as
 * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT.
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(lock);
 *
 *	id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
 *
 *	spin_unlock(lock);
 *	idr_preload_end();
 *	if (id < 0)
 *		error;
 */
void idr_preload(gfp_t gfp_mask)
{
        /*
         * idr_alloc() is likely to succeed w/o full idr_layer buffer and
         * return value from idr_alloc() needs to be checked for failure
         * anyway.  Silently give up if allocation fails.  The caller can
         * treat failures from idr_alloc() as if idr_alloc() were called
         * with @gfp_mask which should be enough.
         */
        while (idr_preload_cnt < MAX_IDR_FREE) {
                struct idr_layer *new;

                new = kzalloc(sizeof(struct idr_layer), gfp_mask);
                if (!new)
                        break;

                /* link the new one to per-cpu preload list */
                new->ary[0] = idr_preload_head;
                idr_preload_head = new;
                idr_preload_cnt++;
        }
}
EXPORT_SYMBOL(idr_preload);

/**
 * idr_alloc - allocate new idr entry
 * @idr: the (initialized) idr
 * @ptr: pointer to be associated with the new id
 * @start: the minimum id (inclusive)
 * @end: the maximum id (exclusive, <= 0 for max)
 * @gfp_mask: memory allocation flags
 *
 * Allocate an id in [start, end) and associate it with @ptr.  If no ID is
 * available in the specified range, returns -ENOSPC.  On memory allocation
 * failure, returns -ENOMEM.
 *
 * Note that @end is treated as max when <= 0.  This is to always allow
 * using @start + N as @end as long as N is inside integer range.
 *
 * The user is responsible for exclusively synchronizing all operations
 * which may modify @idr.  However, read-only accesses such as idr_find()
 * or iteration can be performed under RCU read lock provided the user
 * destroys @ptr in RCU-safe way after removal from idr.
 */
int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
        int max = end > 0 ? end - 1 : INT_MAX;  /* inclusive upper limit */
        struct idr_layer *pa[MAX_IDR_LEVEL + 1];
        int id;

        /* sanity checks */
        if (WARN_ON_ONCE(start < 0))
                return -EINVAL;
        if (unlikely(max < start))
                return -ENOSPC;

        /* allocate id */
        id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL);
        if (unlikely(id < 0))
                return id;
        if (unlikely(id > max))
                return -ENOSPC;

        idr_fill_slot(idr, ptr, id, pa);
        return id;
}
EXPORT_SYMBOL_GPL(idr_alloc);
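
/*
 * Minimal usage sketch (illustrative only; `my_idr` and `obj` are
 * assumptions).  A negative return is -ENOMEM or -ENOSPC; @end of 0
 * means "no upper bound":
 *
 *	int id = idr_alloc(&my_idr, obj, 1, 0, GFP_KERNEL);
 *	if (id < 0)
 *		return id;
 *	obj->id = id;
 */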

/**
 * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
 * @idr: the (initialized) idr
 * @ptr: pointer to be associated with the new id
 * @start: the minimum id (inclusive)
 * @end: the maximum id (exclusive, <= 0 for max)
 * @gfp_mask: memory allocation flags
 *
 * Essentially the same as idr_alloc, but prefers to allocate progressively
 * higher ids if it can.  If the "cur" counter wraps, then it will start again
 * at the "start" end of the range and allocate one that has already been used.
 */
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end,
                     gfp_t gfp_mask)
{
        int id;

        id = idr_alloc(idr, ptr, max(start, idr->cur), end, gfp_mask);
        if (id == -ENOSPC)
                id = idr_alloc(idr, ptr, start, end, gfp_mask);

        if (likely(id >= 0))
                idr->cur = id + 1;
        return id;
}
EXPORT_SYMBOL(idr_alloc_cyclic);

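/*
 * Cyclic allocation, worked through (illustrative): with @start == 0 and
 * @end == 4, successive calls hand out 0, 1, 2, 3, then wrap and reuse
 * the lowest id that has been freed in the meantime (or fail with
 * -ENOSPC if none has).  This is handy for ids that shouldn't be
 * recycled immediately, such as transaction or session numbers.
 */
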
static void idr_remove_warning(int id)
{
        WARN(1, "idr_remove called for id=%d which is not allocated.\n", id);
}

static void sub_remove(struct idr *idp, int shift, int id)
{
        struct idr_layer *p = idp->top;
        struct idr_layer **pa[MAX_IDR_LEVEL + 1];
        struct idr_layer ***paa = &pa[0];
        struct idr_layer *to_free;
        int n;

        *paa = NULL;
        *++paa = &idp->top;

        while ((shift > 0) && p) {
                n = (id >> shift) & IDR_MASK;
                __clear_bit(n, p->bitmap);
                *++paa = &p->ary[n];
                p = p->ary[n];
                shift -= IDR_BITS;
        }
        n = id & IDR_MASK;
        if (likely(p != NULL && test_bit(n, p->bitmap))) {
                __clear_bit(n, p->bitmap);
                rcu_assign_pointer(p->ary[n], NULL);
                to_free = NULL;
                while (*paa && !--((**paa)->count)) {
                        if (to_free)
                                free_layer(idp, to_free);
                        to_free = **paa;
                        **paa-- = NULL;
                }
                if (!*paa)
                        idp->layers = 0;
                if (to_free)
                        free_layer(idp, to_free);
        } else
                idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
        struct idr_layer *p;
        struct idr_layer *to_free;

        if (id < 0)
                return;

        sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
        if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
            idp->top->ary[0]) {
                /*
                 * Single child at leftmost slot: we can shrink the tree.
                 * This level is not needed anymore since when layers are
                 * inserted, they are inserted at the top of the existing
                 * tree.
                 */
                to_free = idp->top;
                p = idp->top->ary[0];
                rcu_assign_pointer(idp->top, p);
                --idp->layers;
                to_free->count = 0;
                bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
                free_layer(idp, to_free);
        }
        while (idp->id_free_cnt >= MAX_IDR_FREE) {
                p = get_from_free_list(idp);
                /*
                 * Note: we don't call the rcu callback here, since the only
                 * layers that fall into the freelist are those that have been
                 * preallocated.
                 */
                kfree(p);
        }
        return;
}
EXPORT_SYMBOL(idr_remove);
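
/*
 * Removal sketch (illustrative only; `my_idr`, `my_lock`, the usual
 * kfree_rcu() helper, and an object type embedding a struct rcu_head
 * named `rcu` are all assumptions).  Because readers may still be
 * walking the tree under RCU, the object must be freed RCU-safely:
 *
 *	spin_lock(&my_lock);
 *	obj = idr_find_slowpath(&my_idr, id);
 *	idr_remove(&my_idr, id);
 *	spin_unlock(&my_lock);
 *	if (obj)
 *		kfree_rcu(obj, rcu);
 */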

void __idr_remove_all(struct idr *idp)
{
        int n, id, max;
        int bt_mask;
        struct idr_layer *p;
        struct idr_layer *pa[MAX_IDR_LEVEL + 1];
        struct idr_layer **paa = &pa[0];

        n = idp->layers * IDR_BITS;
        p = idp->top;
        rcu_assign_pointer(idp->top, NULL);
        max = idr_max(idp->layers);

        id = 0;
        while (id >= 0 && id <= max) {
                while (n > IDR_BITS && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = p->ary[(id >> n) & IDR_MASK];
                }

                bt_mask = id;
                id += 1 << n;
                /* Get the highest bit that the above add changed from 0->1. */
                while (n < fls(id ^ bt_mask)) {
                        if (p)
                                free_layer(idp, p);
                        n += IDR_BITS;
                        p = *--paa;
                }
        }
        idp->layers = 0;
}
EXPORT_SYMBOL(__idr_remove_all);

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 *
 * Free all id mappings and all idr_layers.  After this function, @idp is
 * completely unused and can be freed / recycled.  The caller is
 * responsible for ensuring that no one else accesses @idp during or after
 * idr_destroy().
 *
 * A typical clean-up sequence for objects stored in an idr tree will use
 * idr_for_each() to free all objects, if necessary, then idr_destroy() to
 * free up the id mappings and cached idr_layers.
 */
void idr_destroy(struct idr *idp)
{
        __idr_remove_all(idp);

        while (idp->id_free_cnt) {
                struct idr_layer *p = get_from_free_list(idp);
                kfree(p);
        }
}
EXPORT_SYMBOL(idr_destroy);
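
/*
 * The typical clean-up sequence mentioned above, as a sketch (the idr
 * `my_idr` and the callback are illustrative assumptions):
 *
 *	static int free_one(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&my_idr, free_one, NULL);
 *	idr_destroy(&my_idr);
 */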

void *idr_find_slowpath(struct idr *idp, int id)
{
        int n;
        struct idr_layer *p;

        if (id < 0)
                return NULL;

        p = rcu_dereference_raw(idp->top);
        if (!p)
                return NULL;
        n = (p->layer+1) * IDR_BITS;

        if (id > idr_max(p->layer + 1))
                return NULL;
        BUG_ON(n == 0);

        while (n > 0 && p) {
                n -= IDR_BITS;
                BUG_ON(n != p->layer*IDR_BITS);
                p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
        }
        return (void *)p;
}
EXPORT_SYMBOL(idr_find_slowpath);

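/*
 * Lookup sketch under RCU (illustrative; `my_idr` and use() are
 * assumptions).  The returned pointer is only guaranteed valid while
 * the read-side lock is held:
 *
 *	rcu_read_lock();
 *	obj = idr_find_slowpath(&my_idr, id);
 *	if (obj)
 *		use(obj);
 *	rcu_read_unlock();
 */
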
/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
                 int (*fn)(int id, void *p, void *data), void *data)
{
        int n, id, max, error = 0;
        struct idr_layer *p;
        struct idr_layer *pa[MAX_IDR_LEVEL + 1];
        struct idr_layer **paa = &pa[0];

        n = idp->layers * IDR_BITS;
        p = rcu_dereference_raw(idp->top);
        max = idr_max(idp->layers);

        id = 0;
        while (id >= 0 && id <= max) {
                while (n > 0 && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
                }

                if (p) {
                        error = fn(id, (void *)p, data);
                        if (error)
                                break;
                }

                id += 1 << n;
                while (n < fls(id)) {
                        n += IDR_BITS;
                        p = *--paa;
                }
        }

        return error;
}
EXPORT_SYMBOL(idr_for_each);
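
/*
 * Since any nonzero return from @fn stops the walk, idr_for_each() can
 * double as a search.  Sketch (illustrative; `my_idr` and `target` are
 * assumptions); `found` is 1 if `target` is stored in the idr:
 *
 *	static int match_fn(int id, void *p, void *data)
 *	{
 *		return p == data;
 *	}
 *
 *	int found = idr_for_each(&my_idr, match_fn, target);
 */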

/**
 * idr_get_next - look up the next object, starting from a given id
 * @idp: idr handle
 * @nextidp: pointer to lookup key
 *
 * Returns the pointer to the registered object with the smallest id
 * greater than or equal to *@nextidp.  On success, *@nextidp is updated
 * to that id for the next iteration.
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers lifetimes are correctly managed.
 */
void *idr_get_next(struct idr *idp, int *nextidp)
{
        struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1];
        struct idr_layer **paa = &pa[0];
        int id = *nextidp;
        int n, max;

        /* find first ent */
        p = rcu_dereference_raw(idp->top);
        if (!p)
                return NULL;
        n = (p->layer + 1) * IDR_BITS;
        max = idr_max(p->layer + 1);

        while (id >= 0 && id <= max) {
                while (n > 0 && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
                }

                if (p) {
                        *nextidp = id;
                        return p;
                }

                /*
                 * Proceed to the next layer at the current level.  Unlike
                 * idr_for_each(), @id isn't guaranteed to be aligned to
                 * layer boundary at this point and adding 1 << n may
                 * incorrectly skip IDs.  Make sure we jump to the
                 * beginning of the next layer using round_up().
                 */
                id = round_up(id + 1, 1 << n);
                while (n < fls(id)) {
                        n += IDR_BITS;
                        p = *--paa;
                }
        }
        return NULL;
}
EXPORT_SYMBOL(idr_get_next);
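
/*
 * Iteration sketch built on idr_get_next() (illustrative; `my_idr` and
 * use() are assumptions), equivalent to the usual idr_for_each_entry()
 * pattern:
 *
 *	void *p;
 *	int id = 0;
 *
 *	rcu_read_lock();
 *	while ((p = idr_get_next(&my_idr, &id)) != NULL) {
 *		use(id, p);
 *		id++;
 *	}
 *	rcu_read_unlock();
 */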

/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A %-ENOENT return indicates that @id was not found.
 * A %-EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
        int n;
        struct idr_layer *p, *old_p;

        if (id < 0)
                return ERR_PTR(-EINVAL);

        p = idp->top;
        if (!p)
                return ERR_PTR(-EINVAL);

        n = (p->layer+1) * IDR_BITS;

        if (id >= (1 << n))
                return ERR_PTR(-EINVAL);

        n -= IDR_BITS;
        while ((n > 0) && p) {
                p = p->ary[(id >> n) & IDR_MASK];
                n -= IDR_BITS;
        }

        n = id & IDR_MASK;
        if (unlikely(p == NULL || !test_bit(n, p->bitmap)))
                return ERR_PTR(-ENOENT);

        old_p = p->ary[n];
        rcu_assign_pointer(p->ary[n], ptr);

        return old_p;
}
EXPORT_SYMBOL(idr_replace);
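
/*
 * Replace sketch (illustrative; `my_idr`, `new_obj`, release(), and the
 * IS_ERR()/PTR_ERR() helpers from the usual err header are assumptions):
 *
 *	old = idr_replace(&my_idr, new_obj, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);
 *	release(old);
 */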

void __init idr_init_cache(void)
{
        //idr_layer_cache = kmem_cache_create("idr_layer_cache",
        //                sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
        memset(idp, 0, sizeof(struct idr));
        spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);
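
/*
 * Initialization sketch (illustrative; whether a DEFINE_IDR() static
 * initializer is also available depends on the accompanying idr header):
 *
 *	static struct idr my_idr;
 *
 *	idr_init(&my_idr);
 */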


/**
 * DOC: IDA description
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than a full-blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
        unsigned long flags;

        if (!ida->free_bitmap) {
                spin_lock_irqsave(&ida->idr.lock, flags);
                if (!ida->free_bitmap) {
                        ida->free_bitmap = bitmap;
                        bitmap = NULL;
                }
                spin_unlock_irqrestore(&ida->idr.lock, flags);
        }

        kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida: ida handle
 * @gfp_mask: memory allocation flag
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
        /* allocate idr_layers */
        if (!__idr_pre_get(&ida->idr, gfp_mask))
                return 0;

        /* allocate free_bitmap */
        if (!ida->free_bitmap) {
                struct ida_bitmap *bitmap;

                bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
                if (!bitmap)
                        return 0;

                free_bitmap(ida, bitmap);
        }

        return 1;
}
EXPORT_SYMBOL(ida_pre_get);
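
/*
 * The pre-get/get-new retry pattern described in the comment below, as
 * a sketch (illustrative; `my_ida` and `my_lock` are assumptions):
 *
 *	int id, ret;
 *
 *	do {
 *		if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		ret = ida_get_new_above(&my_ida, 0, &id);
 *		spin_unlock(&my_lock);
 *	} while (ret == -EAGAIN);
 */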
878 | 906 | ||
/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @starting_id: id to start search at
 * @p_id: pointer to the allocated handle
 *
 * Allocate a new ID above or equal to @starting_id.  It should be
 * called with any required locks held.
 *
 * If memory is required, it will return %-EAGAIN; you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... %0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
        struct idr_layer *pa[MAX_IDR_LEVEL + 1];
        struct ida_bitmap *bitmap;
        unsigned long flags;
        int idr_id = starting_id / IDA_BITMAP_BITS;
        int offset = starting_id % IDA_BITMAP_BITS;
        int t, id;

restart:
        /* get vacant slot */
        t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr);
        if (t < 0)
                return t == -ENOMEM ? -EAGAIN : t;

        if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
                return -ENOSPC;

        if (t != idr_id)
                offset = 0;
        idr_id = t;

        /* if bitmap isn't there, create a new one */
        bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
        if (!bitmap) {
                spin_lock_irqsave(&ida->idr.lock, flags);
                bitmap = ida->free_bitmap;
                ida->free_bitmap = NULL;
                spin_unlock_irqrestore(&ida->idr.lock, flags);

                if (!bitmap)
                        return -EAGAIN;

                memset(bitmap, 0, sizeof(struct ida_bitmap));
                rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
                                   (void *)bitmap);
                pa[0]->count++;
        }

        /* look for an empty slot */
        t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
        if (t == IDA_BITMAP_BITS) {
                /* no empty slot after offset, continue to the next chunk */
                idr_id++;
                offset = 0;
                goto restart;
        }

        id = idr_id * IDA_BITMAP_BITS + t;
        if (id >= MAX_IDR_BIT)
                return -ENOSPC;

        __set_bit(t, bitmap->bitmap);
        if (++bitmap->nr_busy == IDA_BITMAP_BITS)
                idr_mark_full(pa, idr_id);

        *p_id = id;

        /* Each leaf node can handle nearly a thousand slots and the
         * whole idea of ida is to have a small memory footprint.
         * Throw away extra resources one by one after each successful
         * allocation.
         */
        if (ida->idr.id_free_cnt || ida->free_bitmap) {
                struct idr_layer *p = get_from_free_list(&ida->idr);
                if (p)
                        kfree(p);
        }

        return 0;
}
EXPORT_SYMBOL(ida_get_new_above);
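
/*
 * A minimal usage sketch of the retry protocol documented above,
 * assuming a hypothetical ida "my_ida" and caller-side spinlock
 * "my_lock" (illustrative names, not part of this file):
 *
 *      int id, ret;
 *
 *      do {
 *              if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *                      return -ENOMEM;
 *              spin_lock(&my_lock);
 *              ret = ida_get_new_above(&my_ida, 0, &id);
 *              spin_unlock(&my_lock);
 *      } while (ret == -EAGAIN);
 *
 * On success ret is 0 and id holds the new ID; -ENOSPC means the ida
 * is full.
 */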

/**
 * ida_remove - remove the given ID
 * @ida: ida handle
 * @id: ID to free
 */
void ida_remove(struct ida *ida, int id)
{
        struct idr_layer *p = ida->idr.top;
        int shift = (ida->idr.layers - 1) * IDR_BITS;
        int idr_id = id / IDA_BITMAP_BITS;
        int offset = id % IDA_BITMAP_BITS;
        int n;
        struct ida_bitmap *bitmap;

        /* clear full bits while looking up the leaf idr_layer */
        while ((shift > 0) && p) {
                n = (idr_id >> shift) & IDR_MASK;
                __clear_bit(n, p->bitmap);
                p = p->ary[n];
                shift -= IDR_BITS;
        }

        if (p == NULL)
                goto err;

        n = idr_id & IDR_MASK;
        __clear_bit(n, p->bitmap);

        bitmap = (void *)p->ary[n];
        /* guard against an id whose leaf bitmap was never allocated */
        if (!bitmap || !test_bit(offset, bitmap->bitmap))
                goto err;

        /* update bitmap and remove it if empty */
        __clear_bit(offset, bitmap->bitmap);
        if (--bitmap->nr_busy == 0) {
                __set_bit(n, p->bitmap);        /* to please idr_remove() */
                idr_remove(&ida->idr, idr_id);
                free_bitmap(ida, bitmap);
        }

        return;

err:
        WARN(1, "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);
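
/*
 * Arithmetic behind the idr_id/offset split used by ida_remove() and
 * ida_get_new_above(): an ida ID decomposes as
 *
 *      id = idr_id * IDA_BITMAP_BITS + offset
 *
 * Assuming the usual 64-bit value IDA_BITMAP_BITS == 960 (an assumption
 * about the headers, not something defined here), id 2000 lives in leaf
 * chunk idr_id == 2 at bit offset == 80, since 2 * 960 == 1920 and
 * 2000 - 1920 == 80.
 */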

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida: ida handle
 */
void ida_destroy(struct ida *ida)
{
        idr_destroy(&ida->idr);
        kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_simple_get - get a new id.
 * @ida: the (initialized) ida.
 * @start: the minimum id (inclusive, < 0x80000000)
 * @end: the maximum id (exclusive, < 0x80000000, or 0 for no limit)
 * @gfp_mask: memory allocation flags
 *
 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
 * On memory allocation failure, returns -ENOMEM.
 *
 * Use ida_simple_remove() to get rid of an id.
 */
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
                   gfp_t gfp_mask)
{
        int ret, id;
        unsigned int max;
        unsigned long flags;

        BUG_ON((int)start < 0);
        BUG_ON((int)end < 0);

        if (end == 0)
                max = 0x80000000;
        else {
                BUG_ON(end < start);
                max = end - 1;
        }

again:
        if (!ida_pre_get(ida, gfp_mask))
                return -ENOMEM;

        spin_lock_irqsave(&simple_ida_lock, flags);
        ret = ida_get_new_above(ida, start, &id);
        if (!ret) {
                if (id > max) {
                        ida_remove(ida, id);
                        ret = -ENOSPC;
                } else {
                        ret = id;
                }
        }
        spin_unlock_irqrestore(&simple_ida_lock, flags);

        if (unlikely(ret == -EAGAIN))
                goto again;

        return ret;
}
EXPORT_SYMBOL(ida_simple_get);

/**
 * ida_simple_remove - remove an allocated id.
 * @ida: the (initialized) ida.
 * @id: the id returned by ida_simple_get.
 */
void ida_simple_remove(struct ida *ida, unsigned int id)
{
        unsigned long flags;

        BUG_ON((int)id < 0);
        spin_lock_irqsave(&simple_ida_lock, flags);
        ida_remove(ida, id);
        spin_unlock_irqrestore(&simple_ida_lock, flags);
}
EXPORT_SYMBOL(ida_simple_remove);

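/*
 * A minimal sketch of the simple API above, assuming a hypothetical ida
 * "my_ida" (illustrative name only).  ida_simple_get() hides both the
 * pre_get()/-EAGAIN retry dance and the locking (via the global
 * simple_ida_lock):
 *
 *      int id = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);
 *      if (id < 0)
 *              return id;      (-ENOMEM or -ENOSPC)
 *      ...
 *      ida_simple_remove(&my_ida, id);
 */
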
/**
 * ida_init - initialize ida handle
 * @ida: ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
        memset(ida, 0, sizeof(struct ida));
        idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);
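
/*
 * Setup sketch: a struct ida must be initialized exactly once before
 * any other ida function touches it ("my_ida" is illustrative):
 *
 *      static struct ida my_ida;
 *      ...
 *      ida_init(&my_ida);
 */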

unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
{
        const unsigned long *p = addr;
        unsigned long result = 0;
        unsigned long tmp;

        while (size & ~(BITS_PER_LONG-1)) {
                if ((tmp = *(p++)))
                        goto found;
                result += BITS_PER_LONG;
                size -= BITS_PER_LONG;
        }
        if (!size)
                return result;

        tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
        if (tmp == 0UL)         /* Are any bits set? */
                return result + size;   /* Nope. */
found:
        return result + __ffs(tmp);
}
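
/*
 * Worked example for find_first_bit(): with addr[0] == 0x08 (only bit 3
 * set) and size == 32, tmp ends up as 0x08 and the function returns
 * __ffs(0x08) == 3.  If no bit is set within the first @size bits,
 * @size itself is returned.
 */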

unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
                            unsigned long offset)
{
        const unsigned long *p = addr + BITOP_WORD(offset);
        unsigned long result = offset & ~(BITS_PER_LONG-1);
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset %= BITS_PER_LONG;
        if (offset) {
                tmp = *(p++);
                tmp &= (~0UL << offset);
                if (size < BITS_PER_LONG)
                        goto found_first;
                if (tmp)
                        goto found_middle;
                size -= BITS_PER_LONG;
                result += BITS_PER_LONG;
        }
        while (size & ~(BITS_PER_LONG-1)) {
                if ((tmp = *(p++)))
                        goto found_middle;
                result += BITS_PER_LONG;
                size -= BITS_PER_LONG;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp &= (~0UL >> (BITS_PER_LONG - size));
        if (tmp == 0UL)         /* Are any bits set? */
                return result + size;   /* Nope. */
found_middle:
        return result + __ffs(tmp);
}
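
/*
 * Worked example for find_next_bit(): with addr[0] == 0x12 (bits 1 and
 * 4 set), size == 32 and offset == 2, the first partial word is masked
 * with ~0UL << 2, which drops bit 1 and leaves only bit 4, so the
 * function returns 4.
 */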

unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
                                 unsigned long offset)
{
        const unsigned long *p = addr + BITOP_WORD(offset);
        unsigned long result = offset & ~(BITS_PER_LONG-1);
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset %= BITS_PER_LONG;
        if (offset) {
                tmp = *(p++);
                tmp |= ~0UL >> (BITS_PER_LONG - offset);
                if (size < BITS_PER_LONG)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= BITS_PER_LONG;
                result += BITS_PER_LONG;
        }
        while (size & ~(BITS_PER_LONG-1)) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += BITS_PER_LONG;
                size -= BITS_PER_LONG;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp |= ~0UL << size;
        if (tmp == ~0UL)        /* Are any bits zero? */
                return result + size;   /* Nope. */
found_middle:
        return result + ffz(tmp);
}
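
/*
 * find_next_zero_bit() is the allocator's workhorse in
 * ida_get_new_above().  Worked example: with addr[0] == 0x07 (bits 0-2
 * set), size == 32 and offset == 0, ffz() picks the lowest clear bit
 * and the function returns 3; if every bit up to @size is set, @size is
 * returned, which is what triggers the "next chunk" restart above.
 */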

unsigned int hweight32(unsigned int w)
{
        unsigned int res = w - ((w >> 1) & 0x55555555);
        res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
        res = (res + (res >> 4)) & 0x0F0F0F0F;
        res = res + (res >> 8);
        return (res + (res >> 16)) & 0x000000FF;
}
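
/*
 * hweight32() is a branch-free SWAR population count: successive steps
 * fold 2-bit, 4-bit, 8-bit and 16-bit partial sums in parallel.  Worked
 * example: hweight32(0xDEADBEEF) == 24, since 0xDEADBEEF has 24 set
 * bits.
 */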