/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix-tree-like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate a pointer,
 * treated as a (void *), with that id.  You can hand the id to a
 * user to pass back at a later time; feeding that id back to this
 * code returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most
 * of the memory is returned (we keep MAX_IDR_FREE idr_layer structs
 * in a local pool), so an id allocation rarely has to go to the
 * memory "store" and you need not be too concerned about locking
 * and conflicts with the slab allocator.
 */
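/*
 * Illustrative usage sketch (not from the original source).  It assumes
 * idr_find() is provided as an inline in the idr header on top of the
 * idr_find_slowpath() exported below; "my_ptr" is hypothetical:
 *
 *	struct idr ids;
 *	int id;
 *
 *	idr_init(&ids);
 *	id = idr_alloc(&ids, my_ptr, 0, 0, GFP_KERNEL);
 *	if (id < 0)
 *		return id;			// -ENOMEM or -ENOSPC
 *	...
 *	my_ptr = idr_find(&ids, id);		// id -> pointer translation
 *	idr_remove(&ids, id);
 *	idr_destroy(&ids);
 */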

#include 
#include 
#include 
#include 
#include 
//#include 

unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
                                 unsigned long offset);


#define MAX_IDR_SHIFT		(sizeof(int) * 8 - 1)
#define MAX_IDR_BIT		(1U << MAX_IDR_SHIFT)

/* Leave the possibility of an incomplete final layer */
#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)

/* Number of idr_layer structs to leave in free list */
#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)

static struct idr_layer *idr_preload_head;
static int idr_preload_cnt;


/* the maximum ID which can be allocated given idr->layers */
static int idr_max(int layers)
{
	int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT);

	return (1 << bits) - 1;
}

/*
 * Prefix mask for an idr_layer at @layer.  For layer 0, the prefix mask is
 * all bits except for the lower IDR_BITS.  For layer 1, 2 * IDR_BITS, and
 * so on.
 */
static int idr_layer_prefix_mask(int layer)
{
	return ~idr_max(layer + 1);
}
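
/*
 * Worked example (illustrative, assuming IDR_BITS == 8 as in the 256-slot
 * layer layout this code is derived from): a layer-0 node covers ids that
 * share all but the low 8 bits, so idr_layer_prefix_mask(0) ==
 * ~idr_max(1) == ~0xff.  For layer 1 it is ~idr_max(2) == ~0xffff,
 * i.e. all bits above the low 2 * IDR_BITS.
 */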

static struct idr_layer *get_from_free_list(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock_irqrestore(&idp->lock, flags);
	return p;
}

/**
 * idr_layer_alloc - allocate a new idr_layer
 * @gfp_mask: allocation mask
 * @layer_idr: optional idr to allocate from
 *
 * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch
 * one from the per-cpu preload buffer.  If @layer_idr is not %NULL, fetch
 * an idr_layer from @idr->id_free.
 *
 * @layer_idr is to maintain backward compatibility with the old alloc
 * interface - idr_pre_get() and idr_get_new*() - and will be removed
 * together with per-pool preload buffer.
 */
static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
{
	struct idr_layer *new;

	/* this is the old path, bypass to get_from_free_list() */
	if (layer_idr)
		return get_from_free_list(layer_idr);

	/* try to allocate directly from kmem_cache */
	new = kzalloc(sizeof(struct idr_layer), gfp_mask);
	if (new)
		return new;

	/* fall back to the preload buffer */
	new = idr_preload_head;
	if (new) {
		idr_preload_head = new->ary[0];
		idr_preload_cnt--;
		new->ary[0] = NULL;
	}
	preempt_enable();
	return new;
}

static void idr_layer_rcu_free(struct rcu_head *head)
{
	struct idr_layer *layer;

	layer = container_of(head, struct idr_layer, rcu_head);
	kfree(layer);
}

static inline void free_layer(struct idr *idr, struct idr_layer *p)
{
	if (idr->hint && idr->hint == p)
		RCU_INIT_POINTER(idr->hint, NULL);
	idr_layer_rcu_free(&p->rcu_head);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__move_to_free_list(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
	struct idr_layer *p = pa[0];
	int l = 0;

	__set_bit(id & IDR_MASK, p->bitmap);
	/*
	 * If this layer is full mark the bit in the layer above to
	 * show that this part of the radix tree is full.  This may
	 * complete the layer above and require walking up the radix
	 * tree.
	 */
	while (bitmap_full(p->bitmap, IDR_SIZE)) {
		if (!(p = pa[++l]))
			break;
		id = id >> IDR_BITS;
		__set_bit((id & IDR_MASK), p->bitmap);
	}
}

int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < MAX_IDR_FREE) {
		struct idr_layer *new;
		new = kzalloc(sizeof(struct idr_layer), gfp_mask);
		if (new == NULL)
			return 0;
		move_to_free_list(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(__idr_pre_get);

/**
 * sub_alloc - try to allocate an id without growing the tree depth
 * @idp: idr handle
 * @starting_id: id to start search at
 * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
 * @gfp_mask: allocation mask for idr_layer_alloc()
 * @layer_idr: optional idr passed to idr_layer_alloc()
 *
 * Allocate an id in range [@starting_id, INT_MAX] from @idp without
 * growing its depth.  Returns
 *
 *  the allocated id >= 0 if successful,
 *  -EAGAIN if the tree needs to grow for allocation to succeed,
 *  -ENOSPC if the id space is exhausted,
 *  -ENOMEM if more idr_layers need to be allocated.
 */
static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
		     gfp_t gfp_mask, struct idr *layer_idr)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;

	id = *starting_id;
 restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		m = find_next_zero_bit(p->bitmap, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available go back to previous layer. */
			l++;
			oid = id;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (id >= 1 << (idp->layers * IDR_BITS)) {
				*starting_id = id;
				return -EAGAIN;
			}
			p = pa[l];
			BUG_ON(!p);

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_IDR_BIT) || (id < 0))
			return -ENOSPC;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			new = idr_layer_alloc(gfp_mask, layer_idr);
			if (!new)
				return -ENOMEM;
			new->layer = l-1;
			new->prefix = id & idr_layer_prefix_mask(new->layer);
			rcu_assign_pointer(p->ary[m], new);
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa, gfp_t gfp_mask,
			      struct idr *layer_idr)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
			return -ENOMEM;
		p->layer = 0;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while (id > idr_max(layers)) {
		layers++;
		if (!p->count) {
			/* special case: if the tree is currently empty,
			 * then we grow the tree by moving the top node
			 * upwards.
			 */
			p->layer++;
			WARN_ON_ONCE(p->prefix);
			continue;
		}
		if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->count = 0;
				bitmap_clear(new->bitmap, 0, IDR_SIZE);
				__move_to_free_list(idp, new);
			}
			spin_unlock_irqrestore(&idp->lock, flags);
			return -ENOMEM;
		}
		new->ary[0] = p;
		new->count = 1;
		new->layer = layers-1;
		new->prefix = id & idr_layer_prefix_mask(new->layer);
		if (bitmap_full(p->bitmap, IDR_SIZE))
			__set_bit(0, new->bitmap);
		p = new;
	}
	rcu_assign_pointer(idp->top, p);
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
	if (v == -EAGAIN)
		goto build_up;
	return v;
}

/*
 * @id and @pa are from a successful allocation from idr_get_empty_slot().
 * Install the user pointer @ptr and mark the slot full.
 */
static void idr_fill_slot(struct idr *idr, void *ptr, int id,
			  struct idr_layer **pa)
{
	/* update hint used for lookup, cleared from free_layer() */
	rcu_assign_pointer(idr->hint, pa[0]);

	rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr);
	pa[0]->count++;
	idr_mark_full(pa, id);
}

int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	int rv;

	rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp);
	if (rv < 0)
		return rv == -ENOMEM ? -EAGAIN : rv;

	idr_fill_slot(idp, ptr, rv, pa);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(__idr_get_new_above);

/**
 * idr_preload - preload for idr_alloc()
 * @gfp_mask: allocation mask to use for preloading
 *
 * Preload per-cpu layer buffer for idr_alloc().  Can only be used from
 * process context and each idr_preload() invocation should be matched with
 * idr_preload_end().  Note that preemption is disabled while preloaded.
 *
 * The first idr_alloc() in the preloaded section can be treated as if it
 * were invoked with @gfp_mask used for preloading.  This allows using more
 * permissive allocation masks for idrs protected by spinlocks.
 *
 * For example, if idr_alloc() below fails, the failure can be treated as
 * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT.
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(lock);
 *
 *	id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
 *
 *	spin_unlock(lock);
 *	idr_preload_end();
 *	if (id < 0)
 *		error;
 */
void idr_preload(gfp_t gfp_mask)
{
	/*
	 * idr_alloc() is likely to succeed w/o full idr_layer buffer and
	 * return value from idr_alloc() needs to be checked for failure
	 * anyway.  Silently give up if allocation fails.  The caller can
	 * treat failures from idr_alloc() as if idr_alloc() were called
	 * with @gfp_mask which should be enough.
	 */
	while (idr_preload_cnt < MAX_IDR_FREE) {
		struct idr_layer *new;

		new = kzalloc(sizeof(struct idr_layer), gfp_mask);
		if (!new)
			break;

		/* link the new one to per-cpu preload list */
		new->ary[0] = idr_preload_head;
		idr_preload_head = new;
		idr_preload_cnt++;
	}
}
EXPORT_SYMBOL(idr_preload);

/**
 * idr_alloc - allocate new idr entry
 * @idr: the (initialized) idr
 * @ptr: pointer to be associated with the new id
 * @start: the minimum id (inclusive)
 * @end: the maximum id (exclusive, <= 0 for max)
 * @gfp_mask: memory allocation flags
 *
 * Allocate an id in [start, end) and associate it with @ptr.  If no ID is
 * available in the specified range, returns -ENOSPC.  On memory allocation
 * failure, returns -ENOMEM.
 *
 * Note that @end is treated as max when <= 0.  This is to always allow
 * using @start + N as @end as long as N is inside integer range.
 *
 * The user is responsible for exclusively synchronizing all operations
 * which may modify @idr.  However, read-only accesses such as idr_find()
 * or iteration can be performed under RCU read lock provided the user
 * destroys @ptr in RCU-safe way after removal from idr.
 */
int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int max = end > 0 ? end - 1 : INT_MAX;	/* inclusive upper limit */
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	int id;

	/* sanity checks */
	if (WARN_ON_ONCE(start < 0))
		return -EINVAL;
	if (unlikely(max < start))
		return -ENOSPC;

	/* allocate id */
	id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL);
	if (unlikely(id < 0))
		return id;
	if (unlikely(id > max))
		return -ENOSPC;

	idr_fill_slot(idr, ptr, id, pa);
	return id;
}
EXPORT_SYMBOL_GPL(idr_alloc);
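
/*
 * Illustrative sketch (not from the original source) of the range
 * semantics above; "my_idr" and "ptr" are hypothetical:
 *
 *	id = idr_alloc(&my_idr, ptr, 1, 0, GFP_KERNEL);	 // any id >= 1
 *	id = idr_alloc(&my_idr, ptr, 16, 32, GFP_KERNEL); // id in [16, 32)
 *
 * Both return the new id on success, -ENOSPC when the range is full and
 * -ENOMEM on allocation failure.
 */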

static void idr_remove_warning(int id)
{
	WARN(1, "idr_remove called for id=%d which is not allocated.\n", id);
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_IDR_LEVEL + 1];
	struct idr_layer ***paa = &pa[0];
	struct idr_layer *to_free;
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, p->bitmap))) {
		__clear_bit(n, p->bitmap);
		rcu_assign_pointer(p->ary[n], NULL);
		to_free = NULL;
		while (*paa && !--((**paa)->count)) {
			if (to_free)
				free_layer(idp, to_free);
			to_free = **paa;
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
		if (to_free)
			free_layer(idp, to_free);
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;
	struct idr_layer *to_free;

	if (id < 0)
		return;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/*
		 * Single child at leftmost slot: we can shrink the tree.
		 * This level is not needed anymore since when layers are
		 * inserted, they are inserted at the top of the existing
		 * tree.
		 */
		to_free = idp->top;
		p = idp->top->ary[0];
		rcu_assign_pointer(idp->top, p);
		--idp->layers;
		to_free->count = 0;
		bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
		free_layer(idp, to_free);
	}
	while (idp->id_free_cnt >= MAX_IDR_FREE) {
		p = get_from_free_list(idp);
		/*
		 * Note: we don't call the rcu callback here, since the only
		 * layers that fall into the freelist are those that have been
		 * preallocated.
		 */
		kfree(p);
	}
	return;
}
EXPORT_SYMBOL(idr_remove);

void __idr_remove_all(struct idr *idp)
{
	int n, id, max;
	int bt_mask;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	rcu_assign_pointer(idp->top, NULL);
	max = idr_max(idp->layers);

	id = 0;
	while (id >= 0 && id <= max) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		bt_mask = id;
		id += 1 << n;
		/* Get the highest bit that the above add changed from 0->1. */
		while (n < fls(id ^ bt_mask)) {
			if (p)
				free_layer(idp, p);
			n += IDR_BITS;
			p = *--paa;
		}
	}
	idp->layers = 0;
}
EXPORT_SYMBOL(__idr_remove_all);

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 *
 * Free all id mappings and all idr_layers.  After this function, @idp is
 * completely unused and can be freed / recycled.  The caller is
 * responsible for ensuring that no one else accesses @idp during or after
 * idr_destroy().
 *
 * A typical clean-up sequence for objects stored in an idr tree will use
 * idr_for_each() to free all objects, if necessary, then idr_destroy() to
 * free up the id mappings and cached idr_layers.
 */
void idr_destroy(struct idr *idp)
{
	__idr_remove_all(idp);

	while (idp->id_free_cnt) {
		struct idr_layer *p = get_from_free_list(idp);
		kfree(p);
	}
}
EXPORT_SYMBOL(idr_destroy);
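
/*
 * Illustrative clean-up sketch (not from the original source; it assumes
 * idr_for_each() is compiled in, which is under #if 0 in this port, and a
 * hypothetical free_object() callback):
 *
 *	static int free_object(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&my_idr, free_object, NULL);
 *	idr_destroy(&my_idr);
 */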

void *idr_find_slowpath(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	if (id < 0)
		return NULL;

	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer+1) * IDR_BITS;

	if (id > idr_max(p->layer + 1))
		return NULL;
	BUG_ON(n == 0);

	while (n > 0 && p) {
		n -= IDR_BITS;
		BUG_ON(n != p->layer*IDR_BITS);
		p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
	}
	return (void *)p;
}
EXPORT_SYMBOL(idr_find_slowpath);

#if 0
/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time. If it returns anything other
 * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = rcu_dereference_raw(idp->top);
	max = idr_max(idp->layers);

	id = 0;
	while (id >= 0 && id <= max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);

/**
 * idr_get_next - look up the next object at or above the given id
 * @idp: idr handle
 * @nextidp: pointer to lookup key
 *
 * Returns a pointer to the registered object with the smallest id greater
 * than or equal to *@nextidp.  On success, *@nextidp is updated to that
 * id, so the caller can continue the iteration from there.
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers lifetimes are correctly managed.
 */
void *idr_get_next(struct idr *idp, int *nextidp)
{
	struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];
	int id = *nextidp;
	int n, max;

	/* find first ent */
	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer + 1) * IDR_BITS;
	max = idr_max(p->layer + 1);

	while (id >= 0 && id <= max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			*nextidp = id;
			return p;
		}

		/*
		 * Proceed to the next layer at the current level.  Unlike
		 * idr_for_each(), @id isn't guaranteed to be aligned to
		 * layer boundary at this point and adding 1 << n may
		 * incorrectly skip IDs.  Make sure we jump to the
		 * beginning of the next layer using round_up().
		 */
		id = round_up(id + 1, 1 << n);
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(idr_get_next);
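
/*
 * Illustrative iteration sketch (not from the original source; like the
 * function above, it is compiled out in this port):
 *
 *	int id = 0;
 *	void *p;
 *
 *	while ((p = idr_get_next(&my_idr, &id)) != NULL) {
 *		// ... use the object registered at "id" ...
 *		id++;	// step past the entry just returned
 *	}
 */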

/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A %-ENOENT return indicates that @id was not found.
 * A %-EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	if (id < 0)
		return ERR_PTR(-EINVAL);

	p = idp->top;
	if (!p)
		return ERR_PTR(-EINVAL);

	n = (p->layer+1) * IDR_BITS;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	rcu_assign_pointer(p->ary[n], ptr);

	return old_p;
}
EXPORT_SYMBOL(idr_replace);


#endif

void __init idr_init_cache(void)
{
	//idr_layer_cache = kmem_cache_create("idr_layer_cache",
	//           sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp:	idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);


/**
 * DOC: IDA description
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida:	ida handle
 * @gfp_mask:	memory allocation flag
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida:	ida handle
 * @starting_id: id to start search at
 * @p_id:	pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return %-EAGAIN; you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... %0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

 restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr);
	if (t < 0)
		return t == -ENOMEM ? -EAGAIN : t;

	if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
		return -ENOSPC;

	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
				(void *)bitmap);
		pa[0]->count++;
	}

	/* lookup for empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_IDR_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have a small memory footprint.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
		if (p)
			kfree(p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);
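
/*
 * Illustrative allocation loop (not from the original source), showing
 * the ida_pre_get()/-EAGAIN retry convention documented above; "my_ida"
 * and "my_lock" are hypothetical:
 *
 *	int id, ret;
 *
 *	do {
 *		if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		ret = ida_get_new_above(&my_ida, 0, &id);
 *		spin_unlock(&my_lock);
 *	} while (ret == -EAGAIN);
 *
 *	if (ret)
 *		return ret;	// -ENOSPC
 */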

/**
 * ida_remove - remove the given ID
 * @ida:	ida handle
 * @id:		ID to free
 */
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, p->bitmap);
		p = p->ary[n];
		shift -= IDR_BITS;
	}

	if (p == NULL)
		goto err;

	n = idr_id & IDR_MASK;
	__clear_bit(n, p->bitmap);

	bitmap = (void *)p->ary[n];
	if (!test_bit(offset, bitmap->bitmap))
		goto err;

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);
	}

	return;

 err:
	WARN(1, "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida:	ida handle
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_init - initialize ida handle
 * @ida:	ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);


/*
 * find_first_bit - find the first set bit in a memory region
 * @addr: base of the bitmap
 * @size: number of bits to search
 *
 * Returns the bit number of the first set bit, or @size if none is set.
 */
unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
{
        const unsigned long *p = addr;
        unsigned long result = 0;
        unsigned long tmp;

        while (size & ~(BITS_PER_LONG-1)) {
                if ((tmp = *(p++)))
                        goto found;
                result += BITS_PER_LONG;
                size -= BITS_PER_LONG;
        }
        if (!size)
                return result;

        tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
        if (tmp == 0UL)         /* Are any bits set? */
                return result + size;   /* Nope. */
found:
        return result + __ffs(tmp);
}

/*
 * find_next_bit - find the next set bit in a memory region
 * @addr: base of the bitmap
 * @size: number of bits to search
 * @offset: bit number to start searching at
 *
 * Returns the bit number of the next set bit at or after @offset, or
 * @size if there is none.
 */
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
                            unsigned long offset)
{
        const unsigned long *p = addr + BITOP_WORD(offset);
        unsigned long result = offset & ~(BITS_PER_LONG-1);
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset %= BITS_PER_LONG;
        if (offset) {
                tmp = *(p++);
                tmp &= (~0UL << offset);
                if (size < BITS_PER_LONG)
                        goto found_first;
                if (tmp)
                        goto found_middle;
                size -= BITS_PER_LONG;
                result += BITS_PER_LONG;
        }
        while (size & ~(BITS_PER_LONG-1)) {
                if ((tmp = *(p++)))
                        goto found_middle;
                result += BITS_PER_LONG;
                size -= BITS_PER_LONG;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp &= (~0UL >> (BITS_PER_LONG - size));
        if (tmp == 0UL)         /* Are any bits set? */
                return result + size;   /* Nope. */
found_middle:
        return result + __ffs(tmp);
}

/*
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: base of the bitmap
 * @size: number of bits to search
 * @offset: bit number to start searching at
 *
 * Returns the bit number of the next zero bit at or after @offset, or
 * @size if there is none.
 */
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
                                 unsigned long offset)
{
        const unsigned long *p = addr + BITOP_WORD(offset);
        unsigned long result = offset & ~(BITS_PER_LONG-1);
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset %= BITS_PER_LONG;
        if (offset) {
                tmp = *(p++);
                tmp |= ~0UL >> (BITS_PER_LONG - offset);
                if (size < BITS_PER_LONG)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= BITS_PER_LONG;
                result += BITS_PER_LONG;
        }
        while (size & ~(BITS_PER_LONG-1)) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += BITS_PER_LONG;
                size -= BITS_PER_LONG;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp |= ~0UL << size;
        if (tmp == ~0UL)        /* Are any bits zero? */
                return result + size;   /* Nope. */
found_middle:
        return result + ffz(tmp);
}
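
/*
 * Illustrative sketch (not from the original source) of the find_*_bit
 * convention used throughout this file: the functions scan @size bits
 * and return @size itself when nothing is found.
 *
 *	unsigned long map[2] = { ~0UL, ~0UL };	// every bit busy
 *	unsigned long free;
 *
 *	free = find_next_zero_bit(map, 2 * BITS_PER_LONG, 0);
 *	if (free == 2 * BITS_PER_LONG)
 *		; // no zero bit: the bitmap is full
 */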

/*
 * hweight32 - count the set bits in a 32-bit word ("Hamming weight")
 * using a branch-free parallel (SWAR) reduction.
 */
unsigned int hweight32(unsigned int w)
{
        unsigned int res = w - ((w >> 1) & 0x55555555);        /* 2-bit sums */
        res = (res & 0x33333333) + ((res >> 2) & 0x33333333);  /* 4-bit sums */
        res = (res + (res >> 4)) & 0x0F0F0F0F;                 /* 8-bit sums */
        res = res + (res >> 8);                                /* 16-bit sums */
        return (res + (res >> 16)) & 0x000000FF;               /* total */
}