/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix-tree-like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate a pointer (or
 * whatever, it is treated as a (void *)) with that id.  You can pass
 * this id to a user to hand back at a later time.  You then pass that
 * id to this code and it returns your pointer.
 */
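
/*
 * A minimal usage sketch of this service (my_idr, my_ptr and the GFP flag
 * are illustrative; idr_find() is the lookup wrapper expected from
 * <linux/idr.h>):
 *
 *	struct idr my_idr;
 *	int id;
 *
 *	idr_init(&my_idr);
 *
 *	id = idr_alloc(&my_idr, my_ptr, 1, 0, GFP_KERNEL);
 *	if (id < 0)
 *		error;			(-ENOMEM or -ENOSPC)
 *
 *	my_ptr = idr_find(&my_idr, id);	(id -> pointer lookup)
 *
 *	idr_remove(&my_idr, id);
 *	idr_destroy(&my_idr);
 */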

#ifndef TEST                        // to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/export.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

#define MAX_IDR_SHIFT		(sizeof(int) * 8 - 1)
#define MAX_IDR_BIT		(1U << MAX_IDR_SHIFT)

/* Leave the possibility of an incomplete final layer */
#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)

/* Number of id_layer structs to leave in free list */
#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)

static struct idr_layer *idr_preload_head;
static int idr_preload_cnt;

static DEFINE_SPINLOCK(simple_ida_lock);

/* the maximum ID which can be allocated given idr->layers */
static int idr_max(int layers)
{
	int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT);

	return (1 << bits) - 1;
}

/*
 * Prefix mask for an idr_layer at @layer.  For layer 0, the prefix mask is
 * all bits except for the lower IDR_BITS.  For layer 1, 2 * IDR_BITS, and
 * so on.
 */
static int idr_layer_prefix_mask(int layer)
{
	return ~idr_max(layer + 1);
}
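
/*
 * A worked example of the layer arithmetic above (a sketch; the real
 * IDR_BITS value comes from <linux/idr.h>): with IDR_BITS = 8 each
 * idr_layer has IDR_SIZE = 256 slots, so idr_max(1) = 255 and
 * idr_max(2) = 65535, while idr_layer_prefix_mask(0) = ~255, i.e. a
 * layer-0 node covers the 256 consecutive ids sharing all bits above
 * the low 8.
 */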

static struct idr_layer *get_from_free_list(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock_irqrestore(&idp->lock, flags);
	return(p);
}

/**
 * idr_layer_alloc - allocate a new idr_layer
 * @gfp_mask: allocation mask
 * @layer_idr: optional idr to allocate from
 *
 * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch
 * one from the per-cpu preload buffer.  If @layer_idr is not %NULL, fetch
 * an idr_layer from @idr->id_free.
 *
 * @layer_idr is to maintain backward compatibility with the old alloc
 * interface - idr_pre_get() and idr_get_new*() - and will be removed
 * together with per-pool preload buffer.
 */
static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
{
	struct idr_layer *new;

	/* this is the old path, bypass to get_from_free_list() */
	if (layer_idr)
		return get_from_free_list(layer_idr);

	/* try to allocate directly from kmem_cache */
	new = kzalloc(sizeof(struct idr_layer), gfp_mask);
	if (new)
		return new;

	new = idr_preload_head;
	if (new) {
		idr_preload_head = new->ary[0];
		idr_preload_cnt--;
		new->ary[0] = NULL;
	}
	preempt_enable();
	return new;
}

static void idr_layer_rcu_free(struct rcu_head *head)
{
	struct idr_layer *layer;

	layer = container_of(head, struct idr_layer, rcu_head);
	kfree(layer);
}

static inline void free_layer(struct idr *idr, struct idr_layer *p)
{
	if (idr->hint == p)
		RCU_INIT_POINTER(idr->hint, NULL);
	call_rcu(&p->rcu_head, idr_layer_rcu_free);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__move_to_free_list(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
	struct idr_layer *p = pa[0];
	int l = 0;

	__set_bit(id & IDR_MASK, p->bitmap);
	/*
	 * If this layer is full mark the bit in the layer above to
	 * show that this part of the radix tree is full.  This may
	 * complete the layer above and require walking up the radix
	 * tree.
	 */
	while (bitmap_full(p->bitmap, IDR_SIZE)) {
		if (!(p = pa[++l]))
			break;
		id = id >> IDR_BITS;
		__set_bit((id & IDR_MASK), p->bitmap);
	}
}

static int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < MAX_IDR_FREE) {
		struct idr_layer *new;
		new = kzalloc(sizeof(struct idr_layer), gfp_mask);
		if (new == NULL)
			return (0);
		move_to_free_list(idp, new);
	}
	return 1;
}

/**
 * sub_alloc - try to allocate an id without growing the tree depth
 * @idp: idr handle
 * @starting_id: id to start search at
 * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
 * @gfp_mask: allocation mask for idr_layer_alloc()
 * @layer_idr: optional idr passed to idr_layer_alloc()
 *
 * Allocate an id in range [@starting_id, INT_MAX] from @idp without
 * growing its depth.  Returns
 *
 *  the allocated id >= 0 if successful,
 *  -EAGAIN if the tree needs to grow for allocation to succeed,
 *  -ENOSPC if the id space is exhausted,
 *  -ENOMEM if more idr_layers need to be allocated.
 */
static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
		     gfp_t gfp_mask, struct idr *layer_idr)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;

	id = *starting_id;
 restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		m = find_next_zero_bit(p->bitmap, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available go back to previous layer. */
			l++;
			oid = id;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (id > idr_max(idp->layers)) {
				*starting_id = id;
				return -EAGAIN;
			}
			p = pa[l];
			BUG_ON(!p);

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_IDR_BIT) || (id < 0))
			return -ENOSPC;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			new = idr_layer_alloc(gfp_mask, layer_idr);
			if (!new)
				return -ENOMEM;
			new->layer = l-1;
			new->prefix = id & idr_layer_prefix_mask(new->layer);
			rcu_assign_pointer(p->ary[m], new);
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa, gfp_t gfp_mask,
			      struct idr *layer_idr)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
			return -ENOMEM;
		p->layer = 0;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while (id > idr_max(layers)) {
		layers++;
		if (!p->count) {
			/* special case: if the tree is currently empty,
			 * then we grow the tree by moving the top node
			 * upwards.
			 */
			p->layer++;
			WARN_ON_ONCE(p->prefix);
			continue;
		}
		if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->count = 0;
				bitmap_clear(new->bitmap, 0, IDR_SIZE);
				__move_to_free_list(idp, new);
			}
			spin_unlock_irqrestore(&idp->lock, flags);
			return -ENOMEM;
		}
		new->ary[0] = p;
		new->count = 1;
		new->layer = layers-1;
		new->prefix = id & idr_layer_prefix_mask(new->layer);
		if (bitmap_full(p->bitmap, IDR_SIZE))
			__set_bit(0, new->bitmap);
		p = new;
	}
	rcu_assign_pointer(idp->top, p);
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
	if (v == -EAGAIN)
		goto build_up;
	return(v);
}

/*
 * @id and @pa are from a successful allocation from idr_get_empty_slot().
 * Install the user pointer @ptr and mark the slot full.
 */
static void idr_fill_slot(struct idr *idr, void *ptr, int id,
			  struct idr_layer **pa)
{
	/* update hint used for lookup, cleared from free_layer() */
	rcu_assign_pointer(idr->hint, pa[0]);

	rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr);
	pa[0]->count++;
	idr_mark_full(pa, id);
}

/**
 * idr_preload - preload for idr_alloc()
 * @gfp_mask: allocation mask to use for preloading
 *
 * Preload per-cpu layer buffer for idr_alloc().  Can only be used from
 * process context and each idr_preload() invocation should be matched with
 * idr_preload_end().  Note that preemption is disabled while preloaded.
 *
 * The first idr_alloc() in the preloaded section can be treated as if it
 * were invoked with @gfp_mask used for preloading.  This allows using more
 * permissive allocation masks for idrs protected by spinlocks.
 *
 * For example, if idr_alloc() below fails, the failure can be treated as
 * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT.
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(lock);
 *
 *	id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
 *
 *	spin_unlock(lock);
 *	idr_preload_end();
 *	if (id < 0)
 *		error;
 */
void idr_preload(gfp_t gfp_mask)
{

	/*
	 * idr_alloc() is likely to succeed w/o full idr_layer buffer and
	 * return value from idr_alloc() needs to be checked for failure
	 * anyway.  Silently give up if allocation fails.  The caller can
	 * treat failures from idr_alloc() as if idr_alloc() were called
	 * with @gfp_mask which should be enough.
	 */
	while (idr_preload_cnt < MAX_IDR_FREE) {
		struct idr_layer *new;

		new = kzalloc(sizeof(struct idr_layer), gfp_mask);
		if (!new)
			break;

		/* link the new one to per-cpu preload list */
		new->ary[0] = idr_preload_head;
		idr_preload_head = new;
		idr_preload_cnt++;
	}
}
EXPORT_SYMBOL(idr_preload);

/**
 * idr_alloc - allocate new idr entry
 * @idr: the (initialized) idr
 * @ptr: pointer to be associated with the new id
 * @start: the minimum id (inclusive)
 * @end: the maximum id (exclusive, <= 0 for max)
 * @gfp_mask: memory allocation flags
 *
 * Allocate an id in [start, end) and associate it with @ptr.  If no ID is
 * available in the specified range, returns -ENOSPC.  On memory allocation
 * failure, returns -ENOMEM.
 *
 * Note that @end is treated as max when <= 0.  This is to always allow
 * using @start + N as @end as long as N is inside integer range.
 *
 * The user is responsible for exclusively synchronizing all operations
 * which may modify @idr.  However, read-only accesses such as idr_find()
 * or iteration can be performed under RCU read lock provided the user
 * destroys @ptr in an RCU-safe way after removal from the idr.
 */
int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int max = end > 0 ? end - 1 : INT_MAX;	/* inclusive upper limit */
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	int id;

	/* sanity checks */
	if (WARN_ON_ONCE(start < 0))
		return -EINVAL;
	if (unlikely(max < start))
		return -ENOSPC;

	/* allocate id */
	id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL);
	if (unlikely(id < 0))
		return id;
	if (unlikely(id > max))
		return -ENOSPC;

	idr_fill_slot(idr, ptr, id, pa);
	return id;
}
EXPORT_SYMBOL_GPL(idr_alloc);
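
/*
 * Sketch of a bounded allocation with the conventions documented above
 * (my_idr/my_ptr are illustrative): request an id in [1, 100) and tell
 * "range exhausted" apart from "allocation failure".
 *
 *	id = idr_alloc(&my_idr, my_ptr, 1, 100, GFP_KERNEL);
 *	if (id == -ENOSPC)
 *		no free id in [1, 100);
 *	else if (id == -ENOMEM)
 *		idr_layer allocation failed;
 *	else
 *		use id, guaranteed 1 <= id < 100;
 */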

/**
 * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
 * @idr: the (initialized) idr
 * @ptr: pointer to be associated with the new id
 * @start: the minimum id (inclusive)
 * @end: the maximum id (exclusive, <= 0 for max)
 * @gfp_mask: memory allocation flags
 *
 * Essentially the same as idr_alloc, but prefers to allocate progressively
 * higher ids if it can. If the "cur" counter wraps, then it will start again
 * at the "start" end of the range and allocate one that has already been used.
 */
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end,
			gfp_t gfp_mask)
{
	int id;

	id = idr_alloc(idr, ptr, max(start, idr->cur), end, gfp_mask);
	if (id == -ENOSPC)
		id = idr_alloc(idr, ptr, start, end, gfp_mask);

	if (likely(id >= 0))
		idr->cur = id + 1;
	return id;
}
EXPORT_SYMBOL(idr_alloc_cyclic);
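
/*
 * Sketch: idr_alloc_cyclic() suits callers that prefer not to reuse an
 * id immediately after it is freed (e.g. request or transaction handles).
 * With the same illustrative my_idr/my_ptr as above:
 *
 *	id = idr_alloc_cyclic(&my_idr, my_ptr, 1, 0, GFP_KERNEL);
 *
 * Successive calls hand out increasing ids until @idr->cur wraps, after
 * which allocation restarts from @start.
 */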

static void idr_remove_warning(int id)
{
	WARN(1, "idr_remove called for id=%d which is not allocated.\n", id);
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_IDR_LEVEL + 1];
	struct idr_layer ***paa = &pa[0];
	struct idr_layer *to_free;
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, p->bitmap))) {
		__clear_bit(n, p->bitmap);
		RCU_INIT_POINTER(p->ary[n], NULL);
		to_free = NULL;
		while (*paa && !--((**paa)->count)) {
			if (to_free)
				free_layer(idp, to_free);
			to_free = **paa;
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
		if (to_free)
			free_layer(idp, to_free);
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;
	struct idr_layer *to_free;

	if (id < 0)
		return;

	if (id > idr_max(idp->layers)) {
		idr_remove_warning(id);
		return;
	}

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/*
		 * Single child at leftmost slot: we can shrink the tree.
		 * This level is not needed anymore since when layers are
		 * inserted, they are inserted at the top of the existing
		 * tree.
		 */
		to_free = idp->top;
		p = idp->top->ary[0];
		rcu_assign_pointer(idp->top, p);
		--idp->layers;
		to_free->count = 0;
		bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
		free_layer(idp, to_free);
	}
}
EXPORT_SYMBOL(idr_remove);

static void __idr_remove_all(struct idr *idp)
{
	int n, id, max;
	int bt_mask;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	*paa = idp->top;
	RCU_INIT_POINTER(idp->top, NULL);
	max = idr_max(idp->layers);

	id = 0;
	while (id >= 0 && id <= max) {
		p = *paa;
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			p = p->ary[(id >> n) & IDR_MASK];
			*++paa = p;
		}

		bt_mask = id;
		id += 1 << n;
		/* Get the highest bit that the above add changed from 0->1. */
		while (n < fls(id ^ bt_mask)) {
			if (*paa)
				free_layer(idp, *paa);
			n += IDR_BITS;
			--paa;
		}
	}
	idp->layers = 0;
}

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 *
 * Free all id mappings and all idr_layers.  After this function, @idp is
 * completely unused and can be freed / recycled.  The caller is
 * responsible for ensuring that no one else accesses @idp during or after
 * idr_destroy().
 *
 * A typical clean-up sequence for objects stored in an idr tree will use
 * idr_for_each() to free all objects, if necessary, then idr_destroy() to
 * free up the id mappings and cached idr_layers.
 */
void idr_destroy(struct idr *idp)
{
	__idr_remove_all(idp);

	while (idp->id_free_cnt) {
		struct idr_layer *p = get_from_free_list(idp);
		kfree(p);
	}
}
EXPORT_SYMBOL(idr_destroy);

void *idr_find_slowpath(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	if (id < 0)
		return NULL;

	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer+1) * IDR_BITS;

	if (id > idr_max(p->layer + 1))
		return NULL;
	BUG_ON(n == 0);

	while (n > 0 && p) {
		n -= IDR_BITS;
		BUG_ON(n != p->layer*IDR_BITS);
		p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
	}
	return((void *)p);
}
EXPORT_SYMBOL(idr_find_slowpath);
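
/*
 * Lookup sketch, assuming the usual idr_find() fast-path wrapper from
 * <linux/idr.h> which falls back to idr_find_slowpath() above.  As noted
 * in the idr_alloc() comment, read-only lookups may run under the RCU
 * read lock:
 *
 *	rcu_read_lock();
 *	obj = idr_find(&my_idr, id);
 *	if (obj)
 *		use obj (do not dereference it after rcu_read_unlock()
 *		unless its lifetime is guaranteed by other means);
 *	rcu_read_unlock();
 */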

/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time. If it returns anything other
 * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	*paa = rcu_dereference_raw(idp->top);
	max = idr_max(idp->layers);

	id = 0;
	while (id >= 0 && id <= max) {
		p = *paa;
		while (n > 0 && p) {
			n -= IDR_BITS;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
			*++paa = p;
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);
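
/*
 * Sketch of the clean-up sequence mentioned in the idr_destroy() comment,
 * assuming the stored objects were obtained with kmalloc() (the callback
 * name is illustrative):
 *
 *	static int free_cb(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;	(keep iterating)
 *	}
 *
 *	idr_for_each(&my_idr, free_cb, NULL);
 *	idr_destroy(&my_idr);
 */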

/**
 * idr_get_next - look up the next object at or after the given id
 * @idp: idr handle
 * @nextidp: pointer to the lookup key
 *
 * Returns a pointer to the registered object with the smallest id greater
 * than or equal to *@nextidp.  On success, *@nextidp is updated to that id
 * for the next iteration.
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
void *idr_get_next(struct idr *idp, int *nextidp)
{
	struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];
	int id = *nextidp;
	int n, max;

	/* find first ent */
	p = *paa = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer + 1) * IDR_BITS;
	max = idr_max(p->layer + 1);

	while (id >= 0 && id <= max) {
		p = *paa;
		while (n > 0 && p) {
			n -= IDR_BITS;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
			*++paa = p;
		}

		if (p) {
			*nextidp = id;
			return p;
		}

		/*
		 * Proceed to the next layer at the current level.  Unlike
		 * idr_for_each(), @id isn't guaranteed to be aligned to
		 * layer boundary at this point and adding 1 << n may
		 * incorrectly skip IDs.  Make sure we jump to the
		 * beginning of the next layer using round_up().
		 */
		id = round_up(id + 1, 1 << n);
		while (n < fls(id)) {
			n += IDR_BITS;
			--paa;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(idr_get_next);
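
/*
 * Iteration sketch using idr_get_next() directly (the idr_for_each_entry()
 * helper in <linux/idr.h> expands to roughly this; my_idr is illustrative):
 *
 *	void *obj;
 *	int id = 0;
 *
 *	while ((obj = idr_get_next(&my_idr, &id)) != NULL) {
 *		process obj, whose id is in id;
 *		id++;		(continue after the entry just found)
 *	}
 */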

/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A %-ENOENT return indicates that @id was not found.
 * A %-EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	if (id < 0)
		return ERR_PTR(-EINVAL);

	p = idp->top;
	if (!p)
		return ERR_PTR(-ENOENT);

	if (id > idr_max(p->layer + 1))
		return ERR_PTR(-ENOENT);

	n = p->layer * IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	rcu_assign_pointer(p->ary[n], ptr);

	return old_p;
}
EXPORT_SYMBOL(idr_replace);
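
/*
 * Sketch of the error convention above: idr_replace() returns either the
 * old pointer or an ERR_PTR() value, so check with IS_ERR() before using
 * it (names are illustrative):
 *
 *	old = idr_replace(&my_idr, new_ptr, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);	(-ENOENT or -EINVAL)
 *	free or reuse old as appropriate;
 */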

void __init idr_init_cache(void)
{
    //idr_layer_cache = kmem_cache_create("idr_layer_cache",
    //           sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp:	idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);

static int idr_has_entry(int id, void *p, void *data)
{
	return 1;
}

bool idr_is_empty(struct idr *idp)
{
	return !idr_for_each(idp, idr_has_entry, NULL);
}
EXPORT_SYMBOL(idr_is_empty);

/**
 * DOC: IDA description
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida:	ida handle
 * @gfp_mask:	memory allocation flag
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!__idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida:	ida handle
 * @starting_id: id to start search at
 * @p_id:	pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return %-EAGAIN; you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... %0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

 restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr);
	if (t < 0)
		return t == -ENOMEM ? -EAGAIN : t;

	if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
		return -ENOSPC;

	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
				(void *)bitmap);
		pa[0]->count++;
	}

	/* look up an empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_IDR_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have a small memory footprint.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
		if (p)
			kfree(p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);
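
/*
 * Sketch of the pre-get/retry pattern described above, assuming my_ida is
 * protected by an illustrative my_lock (ida_simple_get() below wraps
 * essentially this sequence):
 *
 *	int id, ret;
 *
 *	do {
 *		if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		ret = ida_get_new_above(&my_ida, 1, &id);
 *		spin_unlock(&my_lock);
 *	} while (ret == -EAGAIN);
 *
 *	if (ret)
 *		return ret;	(-ENOSPC: id space exhausted)
 */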

/**
 * ida_remove - remove the given ID
 * @ida:	ida handle
 * @id:		ID to free
 */
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

	if (idr_id > idr_max(ida->idr.layers))
		goto err;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, p->bitmap);
		p = p->ary[n];
		shift -= IDR_BITS;
	}

	if (p == NULL)
		goto err;

	n = idr_id & IDR_MASK;
	__clear_bit(n, p->bitmap);

	bitmap = (void *)p->ary[n];
	if (!bitmap || !test_bit(offset, bitmap->bitmap))
		goto err;

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);
	}

	return;

 err:
	WARN(1, "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida:		ida handle
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_simple_get - get a new id.
 * @ida: the (initialized) ida.
 * @start: the minimum id (inclusive, < 0x80000000)
 * @end: the maximum id (exclusive, < 0x80000000 or 0 for max)
 * @gfp_mask: memory allocation flags
 *
 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
 * On memory allocation failure, returns -ENOMEM.
 *
 * Use ida_simple_remove() to get rid of an id.
 */
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
		   gfp_t gfp_mask)
{
	int ret, id;
	unsigned int max;
	unsigned long flags;

	BUG_ON((int)start < 0);
	BUG_ON((int)end < 0);

	if (end == 0)
		max = 0x80000000;
	else {
		BUG_ON(end < start);
		max = end - 1;
	}

again:
	if (!ida_pre_get(ida, gfp_mask))
		return -ENOMEM;

	spin_lock_irqsave(&simple_ida_lock, flags);
	ret = ida_get_new_above(ida, start, &id);
	if (!ret) {
		if (id > max) {
			ida_remove(ida, id);
			ret = -ENOSPC;
		} else {
			ret = id;
		}
	}
	spin_unlock_irqrestore(&simple_ida_lock, flags);

	if (unlikely(ret == -EAGAIN))
		goto again;

	return ret;
}
EXPORT_SYMBOL(ida_simple_get);

/**
 * ida_simple_remove - remove an allocated id.
 * @ida: the (initialized) ida.
 * @id: the id returned by ida_simple_get.
 */
void ida_simple_remove(struct ida *ida, unsigned int id)
{
	unsigned long flags;

	BUG_ON((int)id < 0);
	spin_lock_irqsave(&simple_ida_lock, flags);
	ida_remove(ida, id);
	spin_unlock_irqrestore(&simple_ida_lock, flags);
}
EXPORT_SYMBOL(ida_simple_remove);
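
/*
 * Sketch of the simple interface for instance numbering, assuming the
 * DEFINE_IDA() initializer from <linux/idr.h> (my_ida is illustrative):
 *
 *	static DEFINE_IDA(my_ida);
 *
 *	int nr = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);
 *	if (nr < 0)
 *		return nr;		(-ENOMEM or -ENOSPC)
 *	...
 *	ida_simple_remove(&my_ida, nr);
 */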

/**
 * ida_init - initialize ida handle
 * @ida:	ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);

}
EXPORT_SYMBOL(ida_init);