/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate with that id a
 * pointer or whatever; we treat it as a (void *).  You can pass this
 * id to a user for him to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we
 * don't need to go to the memory "store" during an id allocate, just
 * so you don't need to be too concerned about locking and conflicts
 * with the slab allocator.
 */
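/*
 * Typical use, as an illustrative sketch only ("my_idr", "my_ptr" and
 * the gfp mask are caller-supplied placeholders, not names defined in
 * this file):
 *
 *	struct idr my_idr;
 *	int id, ret;
 *
 *	idr_init(&my_idr);
 *	if (idr_pre_get(&my_idr, GFP_KERNEL) == 0)
 *		return -ENOMEM;			 // preallocate layers
 *	ret = idr_get_new(&my_idr, my_ptr, &id); // map id -> my_ptr
 *	...
 *	my_ptr = idr_find(&my_idr, id);		 // translate id back
 *	idr_remove(&my_idr, id);		 // release the id
 */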

#include

#include
//#include
#include "drm.h"
#include "drmP.h"
#include "drm_crtc.h"

/* Find the first set bit in a 'size'-bit bitmap. */
unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
{
	const unsigned long *p = addr;
	unsigned long result = 0;
	unsigned long tmp;

	while (size & ~(BITS_PER_LONG-1)) {
		if ((tmp = *(p++)))
			goto found;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;

	tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
	if (tmp == 0UL)		/* Are any bits set? */
		return result + size;	/* Nope. */
found:
	return result + __ffs(tmp);
}

/* Find the first set bit at or above 'offset'.  This is the i386
 * variant: it assumes 32-bit longs and uses the bsfl instruction. */
int find_next_bit(const unsigned long *addr, int size, int offset)
{
	const unsigned long *p = addr + (offset >> 5);
	int set = 0, bit = offset & 31, res;

	if (bit)
	{
		/*
		 * Look for nonzero in the first 32 bits:
		 */
		__asm__("bsfl %1,%0\n\t"
			"jne 1f\n\t"
			"movl $32, %0\n"
			"1:"
			: "=r" (set)
			: "r" (*p >> bit));
		if (set < (32 - bit))
			return set + offset;
		set = 32 - bit;
		p++;
	}
	/*
	 * No set bit yet, search remaining full words for a bit
	 */
	res = find_first_bit (p, size - 32 * (p - addr));
	return (offset + set + res);
}

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

/*
 * Minimal stand-ins for the kernel's RCU primitives.  This port does
 * not use RCU: rcu_dereference() is just a volatile read and
 * rcu_assign_pointer() a plain store, with no memory barriers.
 */
#define rcu_dereference(p)	({ \
				typeof(p) _________p1 = ACCESS_ONCE(p); \
				(_________p1); \
				})

#define rcu_assign_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			(p) = (v); \
	})

//static struct kmem_cache *idr_layer_cache;

static struct idr_layer *get_from_free_list(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

//	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
//	spin_unlock_irqrestore(&idp->lock, flags);
	return(p);
}


static void idr_layer_rcu_free(struct rcu_head *head)
{
	struct idr_layer *layer;

	layer = container_of(head, struct idr_layer, rcu_head);
	kfree(layer);
}


static inline void free_layer(struct idr_layer *p)
{
	kfree(p);
}


/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
//	spin_lock_irqsave(&idp->lock, flags);
	__move_to_free_list(idp, p);
//	spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
	struct idr_layer *p = pa[0];
	int l = 0;

	__set_bit(id & IDR_MASK, &p->bitmap);
	/*
	 * If this layer is full mark the bit in the layer above to
	 * show that this part of the radix tree is full.  This may
	 * complete the layer above and require walking up the radix
	 * tree.
	 */
	while (p->bitmap == IDR_FULL) {
		if (!(p = pa[++l]))
			break;
		id = id >> IDR_BITS;
		__set_bit((id & IDR_MASK), &p->bitmap);
	}
}


/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp:	idr handle
 * @gfp_mask:	memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * idr_get_new* functions.  It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, u32_t gfp_mask)
{
	while (idp->id_free_cnt < IDR_FREE_MAX) {
		struct idr_layer *new;
		new = kzalloc(sizeof(struct idr_layer), gfp_mask);
		if (new == NULL)
			return (0);
		move_to_free_list(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(idr_pre_get);
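/*
 * Illustrative calling pattern (a sketch; "my_idr", "my_ptr" and the
 * gfp mask are placeholders supplied by the caller):
 *
 *	int id, ret;
 * again:
 *	if (idr_pre_get(&my_idr, GFP_KERNEL) == 0)
 *		return -ENOMEM;
 *	// take any lock protecting the idr here
 *	ret = idr_get_new(&my_idr, my_ptr, &id);
 *	// drop the lock
 *	if (ret == -EAGAIN)
 *		goto again;
 */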


static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;
	unsigned long bm;

	id = *starting_id;
 restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		bm = ~p->bitmap;
		m = find_next_bit(&bm, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available go back to previous layer. */
			l++;
			oid = id;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (!(p = pa[l])) {
				*starting_id = id;
				return IDR_NEED_TO_GROW;
			}

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_ID_BIT) || (id < 0))
			return IDR_NOMORE_SPACE;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			new = get_from_free_list(idp);
			if (!new)
				return -1;
			new->layer = l-1;
			rcu_assign_pointer(p->ary[m], new);
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}
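/*
 * Note on the index arithmetic above (assuming IDR_BITS == 5, i.e.
 * 32-bit longs, so each layer holds IDR_SIZE == 32 slots): level l
 * consumes IDR_BITS bits of the id, extracted by
 *
 *	n = (id >> (IDR_BITS*l)) & IDR_MASK;
 *
 * e.g. in a two-layer tree, id 100 = 3*32 + 4 walks slot 3 at level 1
 * and slot 4 at level 0; two layers cover ids 0..1023.
 */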


static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = get_from_free_list(idp)))
			return -1;
		p->layer = 0;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
		layers++;
		if (!p->count) {
			/* special case: if the tree is currently empty,
			 * then we grow the tree by moving the top node
			 * upwards.
			 */
			p->layer++;
			continue;
		}
		if (!(new = get_from_free_list(idp))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
//			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->bitmap = new->count = 0;
				__move_to_free_list(idp, new);
			}
//			spin_unlock_irqrestore(&idp->lock, flags);
			return -1;
		}
		new->ary[0] = p;
		new->count = 1;
		new->layer = layers-1;
		if (p->bitmap == IDR_FULL)
			__set_bit(0, &new->bitmap);
		p = new;
	}
	rcu_assign_pointer(idp->top, p);
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa);
	if (v == IDR_NEED_TO_GROW)
		goto build_up;
	return(v);
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	int id;

	id = idr_get_empty_slot(idp, starting_id, pa);
	if (id >= 0) {
		/*
		 * Successfully found an empty slot.  Install the user
		 * pointer and mark the slot full.
		 */
		rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
				(struct idr_layer *)ptr);
		pa[0]->count++;
		idr_mark_full(pa, id);
	}

	return id;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp:	idr handle
 * @ptr:	pointer you want associated with the id
 * @starting_id: id to start search at
 * @id:		pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	int rv;
	rv = idr_get_new_above_int(idp, ptr, starting_id);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
	{
		dbgprintf("fail\n");
		return _idr_rc_to_errno(rv);
	}
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new_above);

/**
 * idr_get_new - allocate new idr entry
 * @idp:	idr handle
 * @ptr:	pointer you want associated with the id
 * @id:		pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, 0);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new);

static void idr_remove_warning(int id)
{
	printk(KERN_WARNING
		"idr_remove called for id=%d which is not allocated.\n", id);
//	dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_LEVEL];
	struct idr_layer ***paa = &pa[0];
	struct idr_layer *to_free;
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, &p->bitmap))){
		__clear_bit(n, &p->bitmap);
		rcu_assign_pointer(p->ary[n], NULL);
		to_free = NULL;
		while(*paa && ! --((**paa)->count)){
			if (to_free)
				free_layer(to_free);
			to_free = **paa;
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
		if (to_free)
			free_layer(to_free);
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp:	idr handle
 * @id:		unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;
	struct idr_layer *to_free;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/*
		 * Single child at leftmost slot: we can shrink the tree.
		 * This level is not needed anymore since when layers are
		 * inserted, they are inserted at the top of the existing
		 * tree.
		 */
		to_free = idp->top;
		p = idp->top->ary[0];
		rcu_assign_pointer(idp->top, p);
		--idp->layers;
		to_free->bitmap = to_free->count = 0;
		free_layer(to_free);
	}
	while (idp->id_free_cnt >= IDR_FREE_MAX) {
		p = get_from_free_list(idp);
		/*
		 * Note: we don't call the rcu callback here, since the only
		 * layers that fall into the freelist are those that have been
		 * preallocated.
		 */
		kfree(p);
	}
	return;
}
EXPORT_SYMBOL(idr_remove);


/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp:	idr handle
 *
 * idr_destroy() only frees up unused, cached idr_layers, but this
 * function will remove all id mappings and leave all idr_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
	int n, id, max;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	rcu_assign_pointer(idp->top, NULL);
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		id += 1 << n;
		while (n < fls(id)) {
			if (p)
				free_layer(p);
			n += IDR_BITS;
			p = *--paa;
		}
	}
	idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
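/*
 * Sketch of the clean-up sequence described above (illustrative only;
 * "free_cb" and "my_idr" are placeholder names, and note that
 * idr_for_each() is compiled out under "#if 0" in this port):
 *
 *	static int free_cb(int id, void *p, void *data)
 *	{
 *		kfree(p);	// release the stored object
 *		return 0;	// keep iterating
 *	}
 *	...
 *	idr_for_each(&my_idr, free_cb, NULL);
 *	idr_remove_all(&my_idr);
 *	idr_destroy(&my_idr);
 */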

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp:	idr handle
 */
void idr_destroy(struct idr *idp)
{
	while (idp->id_free_cnt) {
		struct idr_layer *p = get_from_free_list(idp);
		kfree(p);
	}
}
EXPORT_SYMBOL(idr_destroy);


/**
 * idr_find - return pointer for given id
 * @idp:	idr handle
 * @id:		lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
void *idr_find(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	p = rcu_dereference(idp->top);
	if (!p)
		return NULL;
	n = (p->layer+1) * IDR_BITS;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return NULL;
	BUG_ON(n == 0);

	while (n > 0 && p) {
		n -= IDR_BITS;
		BUG_ON(n != p->layer*IDR_BITS);
		p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
	}
	return((void *)p);
}
EXPORT_SYMBOL(idr_find);
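/*
 * Illustrative lookup (a sketch; "struct my_obj" and "my_idr" are
 * placeholders for caller-side types and state):
 *
 *	struct my_obj *obj = (struct my_obj *)idr_find(&my_idr, id);
 *	if (obj == NULL)
 *		return -ENOENT;	// id was never allocated, or was removed
 */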

#if 0
/**
 * idr_for_each - iterate through all stored pointers
 * @idp:	idr handle
 * @fn:		function to be called for each pointer
 * @data:	data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than 0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = rcu_dereference(idp->top);
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);

/**
 * idr_get_next - look up the next object starting from the given id
 * @idp:	idr handle
 * @nextidp:	pointer to lookup key
 *
 * Returns a pointer to the registered object with the smallest id
 * greater than or equal to *@nextidp, updating *@nextidp to that id;
 * returns NULL when no further object exists.
 */

void *idr_get_next(struct idr *idp, int *nextidp)
{
	struct idr_layer *p, *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];
	int id = *nextidp;
	int n, max;

	/* find first ent */
	n = idp->layers * IDR_BITS;
	max = 1 << n;
	p = rcu_dereference(idp->top);
	if (!p)
		return NULL;

	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			*nextidp = id;
			return p;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}
	return NULL;
}


/**
 * idr_replace - replace pointer for given id
 * @idp:	idr handle
 * @ptr:	pointer you want associated with the id
 * @id:		lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A -ENOENT return indicates that @id was not found.
 * A -EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	p = idp->top;
	if (!p)
		return ERR_PTR(-EINVAL);

	n = (p->layer+1) * IDR_BITS;

	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	rcu_assign_pointer(p->ary[n], ptr);

	return old_p;
}
EXPORT_SYMBOL(idr_replace);


#endif


void idr_init_cache(void)
{
	//idr_layer_cache = kmem_cache_create("idr_layer_cache",
	//			sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp:	idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	memset(idp, 0, sizeof(struct idr));
//	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);

#if 0

/*
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida:	ida handle
 * @gfp_mask:	memory allocation flag
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kzalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);
788 | 837 | ||
789 | /** |
838 | /** |
790 | * ida_get_new_above - allocate new ID above or equal to a start id |
839 | * ida_get_new_above - allocate new ID above or equal to a start id |
791 | * @ida: ida handle |
840 | * @ida: ida handle |
792 | * @staring_id: id to start search at |
841 | * @staring_id: id to start search at |
793 | * @p_id: pointer to the allocated handle |
842 | * @p_id: pointer to the allocated handle |
794 | * |
843 | * |
795 | * Allocate new ID above or equal to @ida. It should be called with |
844 | * Allocate new ID above or equal to @ida. It should be called with |
796 | * any required locks. |
845 | * any required locks. |
797 | * |
846 | * |
798 | * If memory is required, it will return -EAGAIN, you should unlock |
847 | * If memory is required, it will return -EAGAIN, you should unlock |
799 | * and go back to the ida_pre_get() call. If the ida is full, it will |
848 | * and go back to the ida_pre_get() call. If the ida is full, it will |
800 | * return -ENOSPC. |
849 | * return -ENOSPC. |
801 | * |
850 | * |
802 | * @p_id returns a value in the range @starting_id ... 0x7fffffff. |
851 | * @p_id returns a value in the range @starting_id ... 0x7fffffff. |
803 | */ |
852 | */ |
804 | int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) |
853 | int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) |
805 | { |
854 | { |
806 | struct idr_layer *pa[MAX_LEVEL]; |
855 | struct idr_layer *pa[MAX_LEVEL]; |
807 | struct ida_bitmap *bitmap; |
856 | struct ida_bitmap *bitmap; |
808 | unsigned long flags; |
857 | unsigned long flags; |
809 | int idr_id = starting_id / IDA_BITMAP_BITS; |
858 | int idr_id = starting_id / IDA_BITMAP_BITS; |
810 | int offset = starting_id % IDA_BITMAP_BITS; |
859 | int offset = starting_id % IDA_BITMAP_BITS; |
811 | int t, id; |
860 | int t, id; |
812 | 861 | ||
813 | restart: |
862 | restart: |
814 | /* get vacant slot */ |
863 | /* get vacant slot */ |
815 | t = idr_get_empty_slot(&ida->idr, idr_id, pa); |
864 | t = idr_get_empty_slot(&ida->idr, idr_id, pa); |
816 | if (t < 0) |
865 | if (t < 0) |
817 | return _idr_rc_to_errno(t); |
866 | return _idr_rc_to_errno(t); |
818 | 867 | ||
819 | if (t * IDA_BITMAP_BITS >= MAX_ID_BIT) |
868 | if (t * IDA_BITMAP_BITS >= MAX_ID_BIT) |
820 | return -ENOSPC; |
869 | return -ENOSPC; |
821 | 870 | ||
822 | if (t != idr_id) |
871 | if (t != idr_id) |
823 | offset = 0; |
872 | offset = 0; |
824 | idr_id = t; |
873 | idr_id = t; |
825 | 874 | ||
826 | /* if bitmap isn't there, create a new one */ |
875 | /* if bitmap isn't there, create a new one */ |
827 | bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK]; |
876 | bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK]; |
828 | if (!bitmap) { |
877 | if (!bitmap) { |
829 | spin_lock_irqsave(&ida->idr.lock, flags); |
878 | spin_lock_irqsave(&ida->idr.lock, flags); |
830 | bitmap = ida->free_bitmap; |
879 | bitmap = ida->free_bitmap; |
831 | ida->free_bitmap = NULL; |
880 | ida->free_bitmap = NULL; |
832 | spin_unlock_irqrestore(&ida->idr.lock, flags); |
881 | spin_unlock_irqrestore(&ida->idr.lock, flags); |
833 | 882 | ||
834 | if (!bitmap) |
883 | if (!bitmap) |
835 | return -EAGAIN; |
884 | return -EAGAIN; |
836 | 885 | ||
837 | memset(bitmap, 0, sizeof(struct ida_bitmap)); |
886 | memset(bitmap, 0, sizeof(struct ida_bitmap)); |
838 | rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK], |
887 | rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK], |
839 | (void *)bitmap); |
888 | (void *)bitmap); |
840 | pa[0]->count++; |
889 | pa[0]->count++; |
841 | } |
890 | } |
842 | 891 | ||
843 | /* lookup for empty slot */ |
892 | /* lookup for empty slot */ |
844 | t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset); |
893 | t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset); |
845 | if (t == IDA_BITMAP_BITS) { |
894 | if (t == IDA_BITMAP_BITS) { |
846 | /* no empty slot after offset, continue to the next chunk */ |
895 | /* no empty slot after offset, continue to the next chunk */ |
847 | idr_id++; |
896 | idr_id++; |
848 | offset = 0; |
897 | offset = 0; |
849 | goto restart; |
898 | goto restart; |
850 | } |
899 | } |
851 | 900 | ||
852 | id = idr_id * IDA_BITMAP_BITS + t; |
901 | id = idr_id * IDA_BITMAP_BITS + t; |
853 | if (id >= MAX_ID_BIT) |
902 | if (id >= MAX_ID_BIT) |
854 | return -ENOSPC; |
903 | return -ENOSPC; |
855 | 904 | ||
856 | __set_bit(t, bitmap->bitmap); |
905 | __set_bit(t, bitmap->bitmap); |
857 | if (++bitmap->nr_busy == IDA_BITMAP_BITS) |
906 | if (++bitmap->nr_busy == IDA_BITMAP_BITS) |
858 | idr_mark_full(pa, idr_id); |
907 | idr_mark_full(pa, idr_id); |
859 | 908 | ||
860 | *p_id = id; |
909 | *p_id = id; |
861 | 910 | ||
862 | /* Each leaf node can handle nearly a thousand slots and the |
911 | /* Each leaf node can handle nearly a thousand slots and the |
863 | * whole idea of ida is to have small memory foot print. |
912 | * whole idea of ida is to have small memory foot print. |
864 | * Throw away extra resources one by one after each successful |
913 | * Throw away extra resources one by one after each successful |
865 | * allocation. |
914 | * allocation. |
866 | */ |
915 | */ |
867 | if (ida->idr.id_free_cnt || ida->free_bitmap) { |
916 | if (ida->idr.id_free_cnt || ida->free_bitmap) { |
868 | struct idr_layer *p = get_from_free_list(&ida->idr); |
917 | struct idr_layer *p = get_from_free_list(&ida->idr); |
869 | if (p) |
918 | if (p) |
870 | kmem_cache_free(idr_layer_cache, p); |
919 | kmem_cache_free(idr_layer_cache, p); |
871 | } |
920 | } |
872 | 921 | ||
873 | return 0; |
922 | return 0; |
874 | } |
923 | } |
875 | EXPORT_SYMBOL(ida_get_new_above); |
924 | EXPORT_SYMBOL(ida_get_new_above); |

/**
 * ida_get_new - allocate new ID
 * @ida:	ida handle
 * @p_id:	pointer to the allocated handle
 *
 * Allocate new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range 0 ... 0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
	return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);
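/*
 * Illustrative IDA usage (a sketch; "my_ida" and the gfp mask are
 * placeholders, and note this whole IDA section is compiled out under
 * "#if 0" in this port):
 *
 *	struct ida my_ida;
 *	int id, ret;
 *
 *	ida_init(&my_ida);
 *	do {
 *		if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		ret = ida_get_new(&my_ida, &id);
 *	} while (ret == -EAGAIN);
 *	...
 *	ida_remove(&my_ida, id);
 */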

/**
 * ida_remove - remove the given ID
 * @ida:	ida handle
 * @id:		ID to free
 */
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		p = p->ary[n];
		shift -= IDR_BITS;
	}

	if (p == NULL)
		goto err;

	n = idr_id & IDR_MASK;
	__clear_bit(n, &p->bitmap);

	bitmap = (void *)p->ary[n];
	if (!test_bit(offset, bitmap->bitmap))
		goto err;

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, &p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);
	}

	return;

 err:
	printk(KERN_WARNING
	       "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida:	ida handle
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_init - initialize ida handle
 * @ida:	ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);

}
EXPORT_SYMBOL(ida_init);


#endif