/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 * Copyright (C) 2002 by Concurrent Computer Corporation
 * Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate with that id a
 * pointer or whatever; we treat it as a (void *).  You can pass this
 * id to a user for him to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we
 * don't need to go to the memory "store" during an id allocate, just
 * so you don't need to be too concerned about locking and conflicts
 * with the slab allocator.
 */

/*
 * NOTE: the original header names were lost in extraction; the
 * includes below are plausible reconstructions, not verified.
 */
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/idr.h>
//#include <linux/module.h>

unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
{
    const unsigned long *p = addr;
    unsigned long result = 0;
    unsigned long tmp;

    while (size & ~(BITS_PER_LONG-1)) {
        if ((tmp = *(p++)))
            goto found;
        result += BITS_PER_LONG;
        size -= BITS_PER_LONG;
    }
    if (!size)
        return result;

    tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
    if (tmp == 0UL)             /* Are any bits set? */
        return result + size;   /* Nope. */
found:
    return result + __ffs(tmp);
}

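/*
 * Worked example (illustrative, not from the original source): for a
 * single 32-bit word 0x00000010, find_first_bit(&word, 32) sees the
 * word is non-zero and returns __ffs(0x10) = 4.  If no bit is set
 * within the first @size bits, the return value equals @size.
 */
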
int find_next_bit(const unsigned long *addr, int size, int offset)
{
    const unsigned long *p = addr + (offset >> 5);  /* assumes 32-bit longs */
    int set = 0, bit = offset & 31, res;

    if (bit)
    {
        /*
         * Look for nonzero in the first 32 bits:
         */
        __asm__("bsfl %1,%0\n\t"
            "jne 1f\n\t"
            "movl $32, %0\n"
            "1:"
            : "=r" (set)
            : "r" (*p >> bit));
        if (set < (32 - bit))
            return set + offset;
        set = 32 - bit;
        p++;
    }
    /*
     * No set bit yet, search remaining full words for a bit
     */
    res = find_first_bit (p, size - 32 * (p - addr));
    return (offset + set + res);
}

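/*
 * Example (illustrative, not from the original source): with
 * word = 0x00010000UL, find_next_bit(&word, 32, 10) skips bits 0..9
 * and returns 16.  Note the x86 "bsfl" path above hard-codes a 32-bit
 * word size, matching the ">> 5" / "& 31" arithmetic.
 */
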
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

/*
 * Local stand-ins for the kernel's RCU primitives.  Unlike the
 * mainline versions they contain no memory barriers, so they are only
 * safe where readers and writers are otherwise serialized.
 */
#define rcu_dereference(p)     ({ \
                    typeof(p) _________p1 = ACCESS_ONCE(p); \
                    (_________p1); \
                    })

#define rcu_assign_pointer(p, v) \
    ({ \
        if (!__builtin_constant_p(v) || \
            ((v) != NULL)) \
            (p) = (v); \
    })

//static struct kmem_cache *idr_layer_cache;

static struct idr_layer *get_from_free_list(struct idr *idp)
{
    struct idr_layer *p;
    unsigned long flags;

//    spin_lock_irqsave(&idp->lock, flags);
    if ((p = idp->id_free)) {
        idp->id_free = p->ary[0];
        idp->id_free_cnt--;
        p->ary[0] = NULL;
    }
//    spin_unlock_irqrestore(&idp->lock, flags);
    return(p);
}

static void idr_layer_rcu_free(struct rcu_head *head)
{
    struct idr_layer *layer;

    layer = container_of(head, struct idr_layer, rcu_head);
    kfree(layer);
}

static inline void free_layer(struct idr_layer *p)
{
    kfree(p);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
    p->ary[0] = idp->id_free;
    idp->id_free = p;
    idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
    unsigned long flags;

    /*
     * Depends on the return element being zeroed.
     */
//    spin_lock_irqsave(&idp->lock, flags);
    __move_to_free_list(idp, p);
//    spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
    struct idr_layer *p = pa[0];
    int l = 0;

    __set_bit(id & IDR_MASK, &p->bitmap);
    /*
     * If this layer is full mark the bit in the layer above to
     * show that this part of the radix tree is full.  This may
     * complete the layer above and require walking up the radix
     * tree.
     */
    while (p->bitmap == IDR_FULL) {
        if (!(p = pa[++l]))
            break;
        id = id >> IDR_BITS;
        __set_bit((id & IDR_MASK), &p->bitmap);
    }
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp: idr handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to calling the idr_get_new* functions.
 * It preallocates enough memory to satisfy the worst possible allocation. The
 * caller should pass in GFP_KERNEL if possible.  This of course requires that
 * no spinning locks be held.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
    while (idp->id_free_cnt < IDR_FREE_MAX) {
        struct idr_layer *new;
        new = kzalloc(sizeof(struct idr_layer), gfp_mask);
        if (new == NULL)
            return (0);
        move_to_free_list(idp, new);
    }
    return 1;
}

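/*
 * Typical allocation pattern (a sketch, not part of this file; my_idr,
 * my_obj and id are illustrative names):
 *
 *    again:
 *        if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *            return -ENOMEM;
 *        err = idr_get_new(&my_idr, my_obj, &id);
 *        if (err == -EAGAIN)
 *            goto again;    // freelist was drained, refill and retry
 */
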
static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
    int n, m, sh;
    struct idr_layer *p, *new;
    int l, id, oid;
    unsigned long bm;

    id = *starting_id;
 restart:
    p = idp->top;
    l = idp->layers;
    pa[l--] = NULL;
    while (1) {
        /*
         * We run around this while until we reach the leaf node...
         */
        n = (id >> (IDR_BITS*l)) & IDR_MASK;
        bm = ~p->bitmap;
        m = find_next_bit(&bm, IDR_SIZE, n);
        if (m == IDR_SIZE) {
            /* no space available go back to previous layer. */
            l++;
            oid = id;
            id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

            /* if already at the top layer, we need to grow */
            if (!(p = pa[l])) {
                *starting_id = id;
                return IDR_NEED_TO_GROW;
            }

            /* If we need to go up one layer, continue the
             * loop; otherwise, restart from the top.
             */
            sh = IDR_BITS * (l + 1);
            if (oid >> sh == id >> sh)
                continue;
            else
                goto restart;
        }
        if (m != n) {
            sh = IDR_BITS*l;
            id = ((id >> sh) ^ n ^ m) << sh;
        }
        if ((id >= MAX_ID_BIT) || (id < 0))
            return IDR_NOMORE_SPACE;
        if (l == 0)
            break;
        /*
         * Create the layer below if it is missing.
         */
        if (!p->ary[m]) {
            new = get_from_free_list(idp);
            if (!new)
                return -1;
            new->layer = l-1;
            rcu_assign_pointer(p->ary[m], new);
            p->count++;
        }
        pa[l--] = p;
        p = p->ary[m];
    }

    pa[l] = p;
    return id;
}

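/*
 * Id layout worked example (illustrative; assumes the usual
 * IDR_BITS = 5 on 32-bit builds): an id is consumed IDR_BITS at a
 * time, so layer l indexes with bits [IDR_BITS*l .. IDR_BITS*l + 4].
 * For id = 0x43 in a two-layer tree, the top layer uses
 * (0x43 >> 5) & 0x1f = 2 and the leaf uses 0x43 & 0x1f = 3.
 */
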
static int idr_get_empty_slot(struct idr *idp, int starting_id,
                  struct idr_layer **pa)
{
    struct idr_layer *p, *new;
    int layers, v, id;
    unsigned long flags;

    id = starting_id;
build_up:
    p = idp->top;
    layers = idp->layers;
    if (unlikely(!p)) {
        if (!(p = get_from_free_list(idp)))
            return -1;
        p->layer = 0;
        layers = 1;
    }
    /*
     * Add a new layer to the top of the tree if the requested
     * id is larger than the currently allocated space.
     */
    while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
        layers++;
        if (!p->count) {
            /* special case: if the tree is currently empty,
             * then we grow the tree by moving the top node
             * upwards.
             */
            p->layer++;
            continue;
        }
        if (!(new = get_from_free_list(idp))) {
            /*
             * The allocation failed.  If we built part of
             * the structure tear it down.
             */
//            spin_lock_irqsave(&idp->lock, flags);
            for (new = p; p && p != idp->top; new = p) {
                p = p->ary[0];
                new->ary[0] = NULL;
                new->bitmap = new->count = 0;
                __move_to_free_list(idp, new);
            }
//            spin_unlock_irqrestore(&idp->lock, flags);
            return -1;
        }
        new->ary[0] = p;
        new->count = 1;
        new->layer = layers-1;
        if (p->bitmap == IDR_FULL)
            __set_bit(0, &new->bitmap);
        p = new;
    }
    rcu_assign_pointer(idp->top, p);
    idp->layers = layers;
    v = sub_alloc(idp, &id, pa);
    if (v == IDR_NEED_TO_GROW)
        goto build_up;
    return(v);
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
    struct idr_layer *pa[MAX_LEVEL];
    int id;

    id = idr_get_empty_slot(idp, starting_id, pa);
    if (id >= 0) {
        /*
         * Successfully found an empty slot.  Install the user
         * pointer and mark the slot full.
         */
        rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
                (struct idr_layer *)ptr);
        pa[0]->count++;
        idr_mark_full(pa, id);
    }

    return id;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If allocation from IDR's private freelist fails, idr_get_new_above() will
 * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
 * IDR's preallocation and then retry the idr_get_new_above() call.
 *
 * If the idr is full idr_get_new_above() will return %-ENOSPC.
 *
 * @id returns a value in the range @starting_id ... %0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
    int rv;

    rv = idr_get_new_above_int(idp, ptr, starting_id);
    /*
     * This is a cheap hack until the IDR code can be fixed to
     * return proper error values.
     */
    if (rv < 0)
    {
        dbgprintf("fail\n");
        return _idr_rc_to_errno(rv);
    }
    *id = rv;
    return 0;
}

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * If allocation from IDR's private freelist fails, idr_get_new() will
 * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
 * IDR's preallocation and then retry the idr_get_new() call.
 *
 * If the idr is full idr_get_new() will return %-ENOSPC.
 *
 * @id returns a value in the range %0 ... %0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
    int rv;

    rv = idr_get_new_above_int(idp, ptr, 0);
    /*
     * This is a cheap hack until the IDR code can be fixed to
     * return proper error values.
     */
    if (rv < 0)
        return _idr_rc_to_errno(rv);
    *id = rv;
    return 0;
}

static void idr_remove_warning(int id)
{
    printk(KERN_WARNING
        "idr_remove called for id=%d which is not allocated.\n", id);
//    dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
    struct idr_layer *p = idp->top;
    struct idr_layer **pa[MAX_LEVEL];
    struct idr_layer ***paa = &pa[0];
    struct idr_layer *to_free;
    int n;

    *paa = NULL;
    *++paa = &idp->top;

    while ((shift > 0) && p) {
        n = (id >> shift) & IDR_MASK;
        __clear_bit(n, &p->bitmap);
        *++paa = &p->ary[n];
        p = p->ary[n];
        shift -= IDR_BITS;
    }
    n = id & IDR_MASK;
    if (likely(p != NULL && test_bit(n, &p->bitmap))) {
        __clear_bit(n, &p->bitmap);
        rcu_assign_pointer(p->ary[n], NULL);
        to_free = NULL;
        while (*paa && !--((**paa)->count)) {
            if (to_free)
                free_layer(to_free);
            to_free = **paa;
            **paa-- = NULL;
        }
        if (!*paa)
            idp->layers = 0;
        if (to_free)
            free_layer(to_free);
    } else
        idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
    struct idr_layer *p;
    struct idr_layer *to_free;

    /* Mask off upper bits we don't use for the search. */
    id &= MAX_ID_MASK;

    sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
    if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
        idp->top->ary[0]) {
        /*
         * Single child at leftmost slot: we can shrink the tree.
         * This level is not needed anymore since when layers are
         * inserted, they are inserted at the top of the existing
         * tree.
         */
        to_free = idp->top;
        p = idp->top->ary[0];
        rcu_assign_pointer(idp->top, p);
        --idp->layers;
        to_free->bitmap = to_free->count = 0;
        free_layer(to_free);
    }
    while (idp->id_free_cnt >= IDR_FREE_MAX) {
        p = get_from_free_list(idp);
        /*
         * Note: we don't call the rcu callback here, since the only
         * layers that fall into the freelist are those that have been
         * preallocated.
         */
        kfree(p);
    }
    return;
}

/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idp_layers, but this
 * function will remove all id mappings and leave all idp_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
    int n, id, max;
    int bt_mask;
    struct idr_layer *p;
    struct idr_layer *pa[MAX_LEVEL];
    struct idr_layer **paa = &pa[0];

    n = idp->layers * IDR_BITS;
    p = idp->top;
    rcu_assign_pointer(idp->top, NULL);
    max = 1 << n;

    id = 0;
    while (id < max) {
        while (n > IDR_BITS && p) {
            n -= IDR_BITS;
            *paa++ = p;
            p = p->ary[(id >> n) & IDR_MASK];
        }

        bt_mask = id;
        id += 1 << n;
        /* Get the highest bit that the above add changed from 0->1. */
        while (n < fls(id ^ bt_mask)) {
            if (p)
                free_layer(p);
            n += IDR_BITS;
            p = *--paa;
        }
    }
    idp->layers = 0;
}

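/*
 * Worked example of the fls() test above (illustrative; assumes
 * IDR_BITS = 5): stepping a leaf with n = 5 from id = 0x3e0 to 0x400,
 * id ^ bt_mask = 0x7e0 and fls(0x7e0) = 11 > 5, so the walk ascends
 * two levels (n becomes 15), freeing each finished layer on the way,
 * before descending again.  Every layer whose index bits rolled over
 * is thus freed exactly once.
 */
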
/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
    while (idp->id_free_cnt) {
        struct idr_layer *p = get_from_free_list(idp);
        kfree(p);
    }
}

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers lifetimes are correctly managed.
 */
void *idr_find(struct idr *idp, int id)
{
    int n;
    struct idr_layer *p;

    p = rcu_dereference(idp->top);
    if (!p)
        return NULL;
    n = (p->layer+1) * IDR_BITS;

    /* Mask off upper bits we don't use for the search. */
    id &= MAX_ID_MASK;

    if (id >= (1 << n))
        return NULL;
    BUG_ON(n == 0);

    while (n > 0 && p) {
        n -= IDR_BITS;
        BUG_ON(n != p->layer*IDR_BITS);
        p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
    }
    return((void *)p);
}

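/*
 * Lookup sketch (illustrative names): in mainline this runs under
 * rcu_read_lock(); in this port, where the RCU macros above are plain
 * loads, the caller must serialize lookups against removal itself.
 *
 *    struct my_obj *obj = idr_find(&my_idr, id);
 *    if (!obj)
 *        return -ENOENT;    // id never allocated or already removed
 */
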
#if 0
/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
         int (*fn)(int id, void *p, void *data), void *data)
{
    int n, id, max, error = 0;
    struct idr_layer *p;
    struct idr_layer *pa[MAX_LEVEL];
    struct idr_layer **paa = &pa[0];

    n = idp->layers * IDR_BITS;
    p = rcu_dereference(idp->top);
    max = 1 << n;

    id = 0;
    while (id < max) {
        while (n > 0 && p) {
            n -= IDR_BITS;
            *paa++ = p;
            p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
        }

        if (p) {
            error = fn(id, (void *)p, data);
            if (error)
                break;
        }

        id += 1 << n;
        while (n < fls(id)) {
            n += IDR_BITS;
            p = *--paa;
        }
    }

    return error;
}
EXPORT_SYMBOL(idr_for_each);

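/*
 * Callback sketch for the clean-up sequence described in
 * idr_remove_all() above (illustrative names):
 *
 *    static int free_one(int id, void *p, void *data)
 *    {
 *        kfree(p);
 *        return 0;    // nonzero would abort the walk
 *    }
 *
 *    idr_for_each(&my_idr, free_one, NULL);
 *    idr_remove_all(&my_idr);
 *    idr_destroy(&my_idr);
 */
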
/**
 * idr_get_next - lookup next object from the given id onwards
 * @idp: idr handle
 * @nextidp: pointer to lookup key
 *
 * Returns pointer to the registered object with the smallest id equal
 * to or greater than *@nextidp.  After being looked up, *@nextidp will
 * be updated for the next iteration.
 */

void *idr_get_next(struct idr *idp, int *nextidp)
{
    struct idr_layer *p, *pa[MAX_LEVEL];
    struct idr_layer **paa = &pa[0];
    int id = *nextidp;
    int n, max;

    /* find first ent */
    n = idp->layers * IDR_BITS;
    max = 1 << n;
    p = rcu_dereference(idp->top);
    if (!p)
        return NULL;

    while (id < max) {
        while (n > 0 && p) {
            n -= IDR_BITS;
            *paa++ = p;
            p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
        }

        if (p) {
            *nextidp = id;
            return p;
        }

        id += 1 << n;
        while (n < fls(id)) {
            n += IDR_BITS;
            p = *--paa;
        }
    }
    return NULL;
}

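/*
 * Iteration sketch (illustrative): visit every object in id order by
 * bumping the cursor past each hit.
 *
 *    int id = 0;
 *    void *p;
 *    while ((p = idr_get_next(&my_idr, &id)) != NULL) {
 *        use(p);
 *        id++;    // continue strictly after the id just returned
 *    }
 */
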
/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A %-ENOENT return indicates that @id was not found.
 * A %-EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
    int n;
    struct idr_layer *p, *old_p;

    p = idp->top;
    if (!p)
        return ERR_PTR(-EINVAL);

    n = (p->layer+1) * IDR_BITS;

    id &= MAX_ID_MASK;

    if (id >= (1 << n))
        return ERR_PTR(-EINVAL);

    n -= IDR_BITS;
    while ((n > 0) && p) {
        p = p->ary[(id >> n) & IDR_MASK];
        n -= IDR_BITS;
    }

    n = id & IDR_MASK;
    if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
        return ERR_PTR(-ENOENT);

    old_p = p->ary[n];
    rcu_assign_pointer(p->ary[n], ptr);

    return old_p;
}
EXPORT_SYMBOL(idr_replace);

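/*
 * Replace sketch (illustrative): the error is carried in the pointer
 * itself, so check with IS_ERR() rather than for NULL.
 *
 *    void *old = idr_replace(&my_idr, new_obj, id);
 *    if (IS_ERR(old))
 *        return PTR_ERR(old);    // -EINVAL or -ENOENT
 *    kfree(old);
 */
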
#endif

void idr_init_cache(void)
{
    //idr_layer_cache = kmem_cache_create("idr_layer_cache",
    //            sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
    memset(idp, 0, sizeof(struct idr));
//    spin_lock_init(&idp->lock);
}

#if 0

/*
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
    unsigned long flags;

    if (!ida->free_bitmap) {
        spin_lock_irqsave(&ida->idr.lock, flags);
        if (!ida->free_bitmap) {
            ida->free_bitmap = bitmap;
            bitmap = NULL;
        }
        spin_unlock_irqrestore(&ida->idr.lock, flags);
    }

    kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida: ida handle
 * @gfp_mask: memory allocation flag
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
    /* allocate idr_layers */
    if (!idr_pre_get(&ida->idr, gfp_mask))
        return 0;

    /* allocate free_bitmap */
    if (!ida->free_bitmap) {
        struct ida_bitmap *bitmap;

        bitmap = kzalloc(sizeof(struct ida_bitmap), gfp_mask);
        if (!bitmap)
            return 0;

        free_bitmap(ida, bitmap);
    }

    return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @starting_id: id to start search at
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return %-EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... %0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
    struct idr_layer *pa[MAX_LEVEL];
    struct ida_bitmap *bitmap;
    unsigned long flags;
    int idr_id = starting_id / IDA_BITMAP_BITS;
    int offset = starting_id % IDA_BITMAP_BITS;
    int t, id;

 restart:
    /* get vacant slot */
    t = idr_get_empty_slot(&ida->idr, idr_id, pa);
    if (t < 0)
        return _idr_rc_to_errno(t);

    if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
        return -ENOSPC;

    if (t != idr_id)
        offset = 0;
    idr_id = t;

    /* if bitmap isn't there, create a new one */
    bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
    if (!bitmap) {
        spin_lock_irqsave(&ida->idr.lock, flags);
        bitmap = ida->free_bitmap;
        ida->free_bitmap = NULL;
        spin_unlock_irqrestore(&ida->idr.lock, flags);

        if (!bitmap)
            return -EAGAIN;

        memset(bitmap, 0, sizeof(struct ida_bitmap));
        rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
                (void *)bitmap);
        pa[0]->count++;
    }

    /* lookup for empty slot */
    t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
    if (t == IDA_BITMAP_BITS) {
        /* no empty slot after offset, continue to the next chunk */
        idr_id++;
        offset = 0;
        goto restart;
    }

    id = idr_id * IDA_BITMAP_BITS + t;
    if (id >= MAX_ID_BIT)
        return -ENOSPC;

    __set_bit(t, bitmap->bitmap);
    if (++bitmap->nr_busy == IDA_BITMAP_BITS)
        idr_mark_full(pa, idr_id);

    *p_id = id;

    /* Each leaf node can handle nearly a thousand slots and the
     * whole idea of ida is to have small memory foot print.
     * Throw away extra resources one by one after each successful
     * allocation.
     */
    if (ida->idr.id_free_cnt || ida->free_bitmap) {
        struct idr_layer *p = get_from_free_list(&ida->idr);
        if (p)
            kmem_cache_free(idr_layer_cache, p);
    }

    return 0;
}
EXPORT_SYMBOL(ida_get_new_above);

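/*
 * IDA usage sketch (illustrative names; mirrors the idr pattern, with
 * the lock dropped around the preallocation step):
 *
 *    again:
 *        if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *            return -ENOMEM;
 *        spin_lock(&my_lock);
 *        err = ida_get_new(&my_ida, &id);
 *        spin_unlock(&my_lock);
 *        if (err == -EAGAIN)
 *            goto again;
 */
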
/**
 * ida_get_new - allocate new ID
 * @ida: idr handle
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return %-EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range %0 ... %0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
    return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);

/**
 * ida_remove - remove the given ID
 * @ida: ida handle
 * @id: ID to free
 */
void ida_remove(struct ida *ida, int id)
{
    struct idr_layer *p = ida->idr.top;
    int shift = (ida->idr.layers - 1) * IDR_BITS;
    int idr_id = id / IDA_BITMAP_BITS;
    int offset = id % IDA_BITMAP_BITS;
    int n;
    struct ida_bitmap *bitmap;

    /* clear full bits while looking up the leaf idr_layer */
    while ((shift > 0) && p) {
        n = (idr_id >> shift) & IDR_MASK;
        __clear_bit(n, &p->bitmap);
        p = p->ary[n];
        shift -= IDR_BITS;
    }

    if (p == NULL)
        goto err;

    n = idr_id & IDR_MASK;
    __clear_bit(n, &p->bitmap);

    bitmap = (void *)p->ary[n];
    if (!test_bit(offset, bitmap->bitmap))
        goto err;

    /* update bitmap and remove it if empty */
    __clear_bit(offset, bitmap->bitmap);
    if (--bitmap->nr_busy == 0) {
        __set_bit(n, &p->bitmap);    /* to please idr_remove() */
        idr_remove(&ida->idr, idr_id);
        free_bitmap(ida, bitmap);
    }

    return;

err:
    printk(KERN_WARNING
           "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida: ida handle
 */
void ida_destroy(struct ida *ida)
{
    idr_destroy(&ida->idr);
    kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_init - initialize ida handle
 * @ida: ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
    memset(ida, 0, sizeof(struct ida));
    idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);

#endif