/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate a pointer, or
 * whatever, with that id; we treat it as a (void *).  You can pass this
 * id to a user for them to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 */
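
/*
 * A minimal usage sketch (not part of the original file).  It assumes
 * the usual helpers declared in linux/idr.h (DEFINE_IDR(), idr_find(),
 * idr_preload_end()); "struct my_dev" and "dev_idr" are hypothetical
 * names used only for illustration:
 *
 *	static DEFINE_IDR(dev_idr);
 *
 *	int my_dev_register(struct my_dev *dev)
 *	{
 *		int id;
 *
 *		idr_preload(GFP_KERNEL);
 *		id = idr_alloc(&dev_idr, dev, 0, 0, GFP_NOWAIT);
 *		idr_preload_end();
 *		if (id < 0)
 *			return id;		// -ENOMEM or -ENOSPC
 *		dev->id = id;
 *		return 0;
 *	}
 *
 *	struct my_dev *my_dev_lookup(int id)
 *	{
 *		return idr_find(&dev_idr, id);	// NULL if not allocated
 *	}
 */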

#ifndef TEST                        // to test in user space...
#include <linux/slab.h>
#include <linux/export.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

#define MAX_IDR_SHIFT		(sizeof(int) * 8 - 1)
#define MAX_IDR_BIT		(1U << MAX_IDR_SHIFT)

/* Leave the possibility of an incomplete final layer */
#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)

/* Number of idr_layer structs to leave in free list */
#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)

static struct idr_layer *idr_preload_head;
static int idr_preload_cnt;

static DEFINE_SPINLOCK(simple_ida_lock);

/* the maximum ID which can be allocated given idr->layers */
static int idr_max(int layers)
{
	int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT);

	return (1 << bits) - 1;
}

/*
 * Prefix mask for an idr_layer at @layer.  For layer 0, the prefix mask is
 * all bits except for the lower IDR_BITS.  For layer 1, 2 * IDR_BITS, and
 * so on.
 */
static int idr_layer_prefix_mask(int layer)
{
	return ~idr_max(layer + 1);
}
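
/*
 * Worked example (added for illustration; it assumes IDR_BITS == 8 as in
 * the Linux idr.h this file follows -- the actual value comes from idr.h):
 *
 *	idr_max(1) == (1 << 8) - 1  == 0xff     (one layer: ids 0..255)
 *	idr_max(2) == (1 << 16) - 1 == 0xffff   (two layers: ids 0..65535)
 *
 *	idr_layer_prefix_mask(0) == ~0xff       (leaf: the id minus its low 8 bits)
 *	idr_layer_prefix_mask(1) == ~0xffff
 */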

static struct idr_layer *get_from_free_list(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock_irqrestore(&idp->lock, flags);
	return p;
}

/**
 * idr_layer_alloc - allocate a new idr_layer
 * @gfp_mask: allocation mask
 * @layer_idr: optional idr to allocate from
 *
 * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch
 * one from the preload buffer.  If @layer_idr is not %NULL, fetch an
 * idr_layer from @layer_idr->id_free.
 *
 * @layer_idr is to maintain backward compatibility with the old alloc
 * interface - idr_pre_get() and idr_get_new*() - and will be removed
 * together with per-pool preload buffer.
 */
static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
{
	struct idr_layer *new;

	/* this is the old path, bypass to get_from_free_list() */
	if (layer_idr)
		return get_from_free_list(layer_idr);

	/* try to allocate directly from kmem_cache */
	new = kzalloc(sizeof(struct idr_layer), gfp_mask);
	if (new)
		return new;

	/* otherwise fall back to the preload buffer */
	new = idr_preload_head;
	if (new) {
		idr_preload_head = new->ary[0];
		idr_preload_cnt--;
		new->ary[0] = NULL;
	}
	preempt_enable();
	return new;
}

static void idr_layer_rcu_free(struct rcu_head *head)
{
	struct idr_layer *layer;

	layer = container_of(head, struct idr_layer, rcu_head);
	kfree(layer);
}

static inline void free_layer(struct idr *idr, struct idr_layer *p)
{
	if (idr->hint == p)
		RCU_INIT_POINTER(idr->hint, NULL);
	call_rcu(&p->rcu_head, idr_layer_rcu_free);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__move_to_free_list(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
	struct idr_layer *p = pa[0];
	int l = 0;

	__set_bit(id & IDR_MASK, p->bitmap);
	/*
	 * If this layer is full mark the bit in the layer above to
	 * show that this part of the radix tree is full.  This may
	 * complete the layer above and require walking up the radix
	 * tree.
	 */
	while (bitmap_full(p->bitmap, IDR_SIZE)) {
		if (!(p = pa[++l]))
			break;
		id = id >> IDR_BITS;
		__set_bit((id & IDR_MASK), p->bitmap);
	}
}

static int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < MAX_IDR_FREE) {
		struct idr_layer *new;

		new = kzalloc(sizeof(struct idr_layer), gfp_mask);
		if (new == NULL)
			return 0;
		move_to_free_list(idp, new);
	}
	return 1;
}

/**
 * sub_alloc - try to allocate an id without growing the tree depth
 * @idp: idr handle
 * @starting_id: id to start search at
 * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
 * @gfp_mask: allocation mask for idr_layer_alloc()
 * @layer_idr: optional idr passed to idr_layer_alloc()
 *
 * Allocate an id in range [@starting_id, INT_MAX] from @idp without
 * growing its depth.  Returns
 *
 *  the allocated id >= 0 if successful,
 *  -EAGAIN if the tree needs to grow for allocation to succeed,
 *  -ENOSPC if the id space is exhausted,
 *  -ENOMEM if more idr_layers need to be allocated.
 */
static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
		     gfp_t gfp_mask, struct idr *layer_idr)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;

	id = *starting_id;
 restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		m = find_next_zero_bit(p->bitmap, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available go back to previous layer. */
			l++;
			oid = id;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (id > idr_max(idp->layers)) {
				*starting_id = id;
				return -EAGAIN;
			}
			p = pa[l];
			BUG_ON(!p);

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_IDR_BIT) || (id < 0))
			return -ENOSPC;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			new = idr_layer_alloc(gfp_mask, layer_idr);
			if (!new)
				return -ENOMEM;
			new->layer = l-1;
			new->prefix = id & idr_layer_prefix_mask(new->layer);
			rcu_assign_pointer(p->ary[m], new);
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}
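
/*
 * Illustration (added; assumes IDR_BITS == 8 / IDR_MASK == 0xff as in the
 * Linux idr.h this file follows).  sub_alloc() walks one radix-tree index
 * per layer, taking IDR_BITS of the id at each level.  For id 0x12345:
 *
 *	layer 2 index: (0x12345 >> 16) & 0xff == 0x01
 *	layer 1 index: (0x12345 >>  8) & 0xff == 0x23
 *	layer 0 index:  0x12345        & 0xff == 0x45
 *
 * The "id = ((id >> sh) ^ n ^ m) << sh" step replaces the index n that was
 * tried at the current layer with the free slot m found in the bitmap,
 * clearing all lower bits so the search resumes at the start of that slot.
 */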

static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa, gfp_t gfp_mask,
			      struct idr *layer_idr)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
			return -ENOMEM;
		p->layer = 0;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while (id > idr_max(layers)) {
		layers++;
		if (!p->count) {
			/* special case: if the tree is currently empty,
			 * then we grow the tree by moving the top node
			 * upwards.
			 */
			p->layer++;
			WARN_ON_ONCE(p->prefix);
			continue;
		}
		if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->count = 0;
				bitmap_clear(new->bitmap, 0, IDR_SIZE);
				__move_to_free_list(idp, new);
			}
			spin_unlock_irqrestore(&idp->lock, flags);
			return -ENOMEM;
		}
		new->ary[0] = p;
		new->count = 1;
		new->layer = layers-1;
		new->prefix = id & idr_layer_prefix_mask(new->layer);
		if (bitmap_full(p->bitmap, IDR_SIZE))
			__set_bit(0, new->bitmap);
		p = new;
	}
	rcu_assign_pointer(idp->top, p);
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
	if (v == -EAGAIN)
		goto build_up;
	return v;
}

/*
 * @id and @pa are from a successful allocation from idr_get_empty_slot().
 * Install the user pointer @ptr and mark the slot full.
 */
static void idr_fill_slot(struct idr *idr, void *ptr, int id,
			  struct idr_layer **pa)
{
	/* update hint used for lookup, cleared from free_layer() */
	rcu_assign_pointer(idr->hint, pa[0]);

	rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr);
	pa[0]->count++;
	idr_mark_full(pa, id);
}

/**
 * idr_preload - preload for idr_alloc()
 * @gfp_mask: allocation mask to use for preloading
 *
 * Preload the layer buffer for idr_alloc().  Can only be used from
 * process context and each idr_preload() invocation should be matched with
 * idr_preload_end().  Note that preemption is disabled while preloaded.
 *
 * The first idr_alloc() in the preloaded section can be treated as if it
 * were invoked with @gfp_mask used for preloading.  This allows using more
 * permissive allocation masks for idrs protected by spinlocks.
 *
 * For example, if idr_alloc() below fails, the failure can be treated as
 * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT.
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(lock);
 *
 *	id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
 *
 *	spin_unlock(lock);
 *	idr_preload_end();
 *	if (id < 0)
 *		error;
 */
void idr_preload(gfp_t gfp_mask)
{
	/*
	 * idr_alloc() is likely to succeed w/o full idr_layer buffer and
	 * return value from idr_alloc() needs to be checked for failure
	 * anyway.  Silently give up if allocation fails.  The caller can
	 * treat failures from idr_alloc() as if idr_alloc() were called
	 * with @gfp_mask which should be enough.
	 */
	while (idr_preload_cnt < MAX_IDR_FREE) {
		struct idr_layer *new;

		new = kzalloc(sizeof(struct idr_layer), gfp_mask);
		if (!new)
			break;

		/* link the new one to the preload list */
		new->ary[0] = idr_preload_head;
		idr_preload_head = new;
		idr_preload_cnt++;
	}
}
EXPORT_SYMBOL(idr_preload);

/**
 * idr_alloc - allocate new idr entry
 * @idr: the (initialized) idr
 * @ptr: pointer to be associated with the new id
 * @start: the minimum id (inclusive)
 * @end: the maximum id (exclusive, <= 0 for max)
 * @gfp_mask: memory allocation flags
 *
 * Allocate an id in [start, end) and associate it with @ptr.  If no ID is
 * available in the specified range, returns -ENOSPC.  On memory allocation
 * failure, returns -ENOMEM.
 *
 * Note that @end is treated as max when <= 0.  This is to always allow
 * using @start + N as @end as long as N is inside integer range.
 *
 * The user is responsible for exclusively synchronizing all operations
 * which may modify @idr.  However, read-only accesses such as idr_find()
 * or iteration can be performed under RCU read lock provided the user
 * destroys @ptr in RCU-safe way after removal from idr.
 */
int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int max = end > 0 ? end - 1 : INT_MAX;	/* inclusive upper limit */
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	int id;

	/* sanity checks */
	if (WARN_ON_ONCE(start < 0))
		return -EINVAL;
	if (unlikely(max < start))
		return -ENOSPC;

	/* allocate id */
	id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL);
	if (unlikely(id < 0))
		return id;
	if (unlikely(id > max))
		return -ENOSPC;

	idr_fill_slot(idr, ptr, id, pa);
	return id;
}
EXPORT_SYMBOL_GPL(idr_alloc);
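
/*
 * Range examples (added for illustration; "idr" and "ptr" are assumed to
 * be an initialized idr and a valid pointer):
 *
 *	idr_alloc(idr, ptr, 0, 0, GFP_KERNEL);    // any id: 0..INT_MAX
 *	idr_alloc(idr, ptr, 42, 43, GFP_KERNEL);  // exactly 42, or -ENOSPC
 *	idr_alloc(idr, ptr, 10, 0, GFP_KERNEL);   // any id >= 10
 */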

/**
 * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
 * @idr: the (initialized) idr
 * @ptr: pointer to be associated with the new id
 * @start: the minimum id (inclusive)
 * @end: the maximum id (exclusive, <= 0 for max)
 * @gfp_mask: memory allocation flags
 *
 * Essentially the same as idr_alloc, but prefers to allocate progressively
 * higher ids if it can.  If the "cur" counter wraps, then it will start
 * again at the "start" end of the range and allocate one that has already
 * been used.
 */
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end,
			gfp_t gfp_mask)
{
	int id;

	id = idr_alloc(idr, ptr, max(start, idr->cur), end, gfp_mask);
	if (id == -ENOSPC)
		id = idr_alloc(idr, ptr, start, end, gfp_mask);

	if (likely(id >= 0))
		idr->cur = id + 1;
	return id;
}
EXPORT_SYMBOL(idr_alloc_cyclic);
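
/*
 * Behaviour sketch (added; assumes an empty idr and a valid "ptr"): with
 * start == 0 and end == 3, successive calls return 0, 1 and 2.  If id 0 is
 * then removed, the next call first tries ids >= idr->cur (== 3), gets
 * -ENOSPC, retries from start, and returns 0.  Recently freed ids are thus
 * not reused until the range wraps -- handy for handles where immediate
 * reuse could mask races.
 */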

static void idr_remove_warning(int id)
{
	WARN(1, "idr_remove called for id=%d which is not allocated.\n", id);
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_IDR_LEVEL + 1];
	struct idr_layer ***paa = &pa[0];
	struct idr_layer *to_free;
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, p->bitmap))) {
		__clear_bit(n, p->bitmap);
		RCU_INIT_POINTER(p->ary[n], NULL);
		to_free = NULL;
		while (*paa && !--((**paa)->count)) {
			if (to_free)
				free_layer(idp, to_free);
			to_free = **paa;
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
		if (to_free)
			free_layer(idp, to_free);
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;
	struct idr_layer *to_free;

	if (id < 0)
		return;

	if (id > idr_max(idp->layers)) {
		idr_remove_warning(id);
		return;
	}

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/*
		 * Single child at leftmost slot: we can shrink the tree.
		 * This level is not needed anymore since when layers are
		 * inserted, they are inserted at the top of the existing
		 * tree.
		 */
		to_free = idp->top;
		p = idp->top->ary[0];
		rcu_assign_pointer(idp->top, p);
		--idp->layers;
		to_free->count = 0;
		bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
		free_layer(idp, to_free);
	}
}
EXPORT_SYMBOL(idr_remove);
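
/*
 * Removal sketch (added).  Because lookups may run under RCU, the object
 * must not be freed until a grace period has passed.  Assuming the
 * hypothetical "struct my_dev" from the sketch at the top of the file has
 * a struct rcu_head member named "rcu":
 *
 *	idr_remove(&dev_idr, dev->id);
 *	kfree_rcu(dev, rcu);		// instead of an immediate kfree()
 */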

static void __idr_remove_all(struct idr *idp)
{
	int n, id, max;
	int bt_mask;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	*paa = idp->top;
	RCU_INIT_POINTER(idp->top, NULL);
	max = idr_max(idp->layers);

	id = 0;
	while (id >= 0 && id <= max) {
		p = *paa;
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			p = p->ary[(id >> n) & IDR_MASK];
			*++paa = p;
		}

		bt_mask = id;
		id += 1 << n;
		/* Get the highest bit that the above add changed from 0->1. */
		while (n < fls(id ^ bt_mask)) {
			if (*paa)
				free_layer(idp, *paa);
			n += IDR_BITS;
			--paa;
		}
	}
	idp->layers = 0;
}

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 *
 * Free all id mappings and all idr_layers.  After this function, @idp is
 * completely unused and can be freed / recycled.  The caller is
 * responsible for ensuring that no one else accesses @idp during or after
 * idr_destroy().
 *
 * A typical clean-up sequence for objects stored in an idr tree will use
 * idr_for_each() to free all objects, if necessary, then idr_destroy() to
 * free up the id mappings and cached idr_layers.
 */
void idr_destroy(struct idr *idp)
{
	__idr_remove_all(idp);

	while (idp->id_free_cnt) {
		struct idr_layer *p = get_from_free_list(idp);
		kfree(p);
	}
}
EXPORT_SYMBOL(idr_destroy);

void *idr_find_slowpath(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	if (id < 0)
		return NULL;

	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer+1) * IDR_BITS;

	if (id > idr_max(p->layer + 1))
		return NULL;
	BUG_ON(n == 0);

	while (n > 0 && p) {
		n -= IDR_BITS;
		BUG_ON(n != p->layer*IDR_BITS);
		p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
	}
	return (void *)p;
}
EXPORT_SYMBOL(idr_find_slowpath);

/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time. If it returns anything other
 * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	*paa = rcu_dereference_raw(idp->top);
	max = idr_max(idp->layers);

	id = 0;
	while (id >= 0 && id <= max) {
		p = *paa;
		while (n > 0 && p) {
			n -= IDR_BITS;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
			*++paa = p;
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);
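
/*
 * Callback sketch (added; "struct my_dev" is the hypothetical type from
 * the earlier sketches).  A non-zero return stops the walk:
 *
 *	static int free_one_dev(int id, void *p, void *data)
 *	{
 *		kfree((struct my_dev *)p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&dev_idr, free_one_dev, NULL);
 *	idr_destroy(&dev_idr);
 */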

/**
 * idr_get_next - lookup next object starting from the given id
 * @idp: idr handle
 * @nextidp: pointer to lookup key
 *
 * Returns a pointer to the registered object with the smallest id that is
 * equal to or greater than *@nextidp.  After lookup, *@nextidp is updated
 * to that id so the caller can advance to the next iteration.
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers lifetimes are correctly managed.
 */
void *idr_get_next(struct idr *idp, int *nextidp)
{
	struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];
	int id = *nextidp;
	int n, max;

	/* find first ent */
	p = *paa = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer + 1) * IDR_BITS;
	max = idr_max(p->layer + 1);

	while (id >= 0 && id <= max) {
		p = *paa;
		while (n > 0 && p) {
			n -= IDR_BITS;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
			*++paa = p;
		}

		if (p) {
			*nextidp = id;
			return p;
		}

		/*
		 * Proceed to the next layer at the current level.  Unlike
		 * idr_for_each(), @id isn't guaranteed to be aligned to
		 * layer boundary at this point and adding 1 << n may
		 * incorrectly skip IDs.  Make sure we jump to the
		 * beginning of the next layer using round_up().
		 */
		id = round_up(id + 1, 1 << n);
		while (n < fls(id)) {
			n += IDR_BITS;
			--paa;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(idr_get_next);
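
/*
 * Iteration sketch (added; equivalent to the idr_for_each_entry() helper
 * that newer idr.h headers provide -- whether this port's idr.h has that
 * macro is an assumption, so the open-coded loop is shown):
 *
 *	struct my_dev *dev;
 *	int id = 0;
 *
 *	while ((dev = idr_get_next(&dev_idr, &id)) != NULL) {
 *		do_something(dev);
 *		id++;		// advance past the id just returned
 *	}
 */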

/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A %-ENOENT return indicates that @id was not found.
 * A %-EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	if (id < 0)
		return ERR_PTR(-EINVAL);

	p = idp->top;
	if (!p)
		return ERR_PTR(-ENOENT);

	if (id > idr_max(p->layer + 1))
		return ERR_PTR(-ENOENT);

	n = p->layer * IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	rcu_assign_pointer(p->ary[n], ptr);

	return old_p;
}
EXPORT_SYMBOL(idr_replace);
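
/*
 * Replace sketch (added).  The return value is the old pointer on success
 * or an ERR_PTR() on failure, so it must be checked with IS_ERR():
 *
 *	struct my_dev *old;
 *
 *	old = idr_replace(&dev_idr, new_dev, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);	// -ENOENT or -EINVAL
 *	kfree_rcu(old, rcu);		// hypothetical rcu_head, as above
 */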

void __init idr_init_cache(void)
{
	/* no kmem_cache in this port; layers come straight from kzalloc() */
	//idr_layer_cache = kmem_cache_create("idr_layer_cache",
	//           sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp:	idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);

static int idr_has_entry(int id, void *p, void *data)
{
	return 1;
}

bool idr_is_empty(struct idr *idp)
{
	return !idr_for_each(idp, idr_has_entry, NULL);
}
EXPORT_SYMBOL(idr_is_empty);

/**
 * DOC: IDA description
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida:	ida handle
 * @gfp_mask:	memory allocation flag
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!__idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida:	ida handle
 * @starting_id: id to start search at
 * @p_id:	pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return %-EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... %0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

 restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr);
	if (t < 0)
		return t == -ENOMEM ? -EAGAIN : t;

	if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
		return -ENOSPC;

	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
				(void *)bitmap);
		pa[0]->count++;
	}

	/* lookup for empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_IDR_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have small memory foot print.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
		if (p)
			kfree(p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);
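
/*
 * Allocation sketch (added): the classic ida_pre_get()/ida_get_new_above()
 * retry loop, with "my_ida" and "my_lock" standing in for the caller's ida
 * and whatever lock serializes it:
 *
 *	int id, ret;
 *
 *	do {
 *		if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		ret = ida_get_new_above(&my_ida, 0, &id);
 *		spin_unlock(&my_lock);
 *	} while (ret == -EAGAIN);
 *
 *	if (ret)
 *		return ret;		// -ENOSPC
 */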

/**
 * ida_remove - remove the given ID
 * @ida:	ida handle
 * @id:		ID to free
 */
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

	if (idr_id > idr_max(ida->idr.layers))
		goto err;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, p->bitmap);
		p = p->ary[n];
		shift -= IDR_BITS;
	}

	if (p == NULL)
		goto err;

	n = idr_id & IDR_MASK;
	__clear_bit(n, p->bitmap);

	bitmap = (void *)p->ary[n];
	if (!bitmap || !test_bit(offset, bitmap->bitmap))
		goto err;

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);
	}

	return;

 err:
	WARN(1, "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida:		ida handle
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_simple_get - get a new id.
 * @ida: the (initialized) ida.
 * @start: the minimum id (inclusive, < 0x8000000)
 * @end: the maximum id (exclusive, < 0x8000000 or 0)
 * @gfp_mask: memory allocation flags
 *
 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
 * On memory allocation failure, returns -ENOMEM.
 *
 * Use ida_simple_remove() to get rid of an id.
 */
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
		   gfp_t gfp_mask)
{
	int ret, id;
	unsigned int max;
	unsigned long flags;

	BUG_ON((int)start < 0);
	BUG_ON((int)end < 0);

	if (end == 0)
		max = 0x80000000;
	else {
		BUG_ON(end < start);
		max = end - 1;
	}

again:
	if (!ida_pre_get(ida, gfp_mask))
		return -ENOMEM;

	spin_lock_irqsave(&simple_ida_lock, flags);
	ret = ida_get_new_above(ida, start, &id);
	if (!ret) {
		if (id > max) {
			ida_remove(ida, id);
			ret = -ENOSPC;
		} else {
			ret = id;
		}
	}
	spin_unlock_irqrestore(&simple_ida_lock, flags);

	if (unlikely(ret == -EAGAIN))
		goto again;

	return ret;
}
EXPORT_SYMBOL(ida_simple_get);
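
/*
 * Simple-interface sketch (added): ida_simple_get() hides the
 * pre-get/retry dance and the locking behind simple_ida_lock:
 *
 *	static DEFINE_IDA(my_ida);	// assuming idr.h provides DEFINE_IDA
 *
 *	int nr = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);
 *	if (nr < 0)
 *		return nr;		// -ENOMEM or -ENOSPC
 *	...
 *	ida_simple_remove(&my_ida, nr);
 */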

/**
 * ida_simple_remove - remove an allocated id.
 * @ida: the (initialized) ida.
 * @id: the id returned by ida_simple_get.
 */
void ida_simple_remove(struct ida *ida, unsigned int id)
{
	unsigned long flags;

	BUG_ON((int)id < 0);
	spin_lock_irqsave(&simple_ida_lock, flags);
	ida_remove(ida, id);
	spin_unlock_irqrestore(&simple_ida_lock, flags);
}
EXPORT_SYMBOL(ida_simple_remove);

/**
 * ida_init - initialize ida handle
 * @ida:	ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);