/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate with that id a
 * pointer or whatever, we treat it as a (void *).  You can pass this
 * id to a user for him to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time. When all ids are released, most of
 * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we
 * don't need to go to the memory "store" during an id allocate, just
 * so you don't need to be too concerned about locking and conflicts
 * with the slab allocator.
 */
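
/*
 * Typical usage, as an illustrative sketch only (my_idr and my_obj are
 * hypothetical caller-side names, and the allocation flag depends on the
 * environment this file is built in):
 *
 *	struct idr my_idr;
 *	int id, ret;
 *
 *	idr_init(&my_idr);
 *
 *	if (!idr_pre_get(&my_idr, GFP_KERNEL))	 // preallocate idr_layers
 *		return -ENOMEM;
 *	ret = idr_get_new(&my_idr, my_obj, &id); // map a new id to my_obj
 *	if (ret < 0)
 *		return ret;
 *	...
 *	my_obj = idr_find(&my_idr, id);		 // translate the id back
 *	...
 *	idr_remove(&my_idr, id);		 // release the id
 */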

#include 
#include 
#include "drm.h"
#include "drmP.h"
#include "drm_crtc.h"

#define ADDR "=m" (*(volatile long *) addr)

static inline void __set_bit(int nr, volatile void *addr)
{
        asm volatile("bts %1,%0"
                     : ADDR
                     : "Ir" (nr) : "memory");
}

static inline void __clear_bit(int nr, volatile void *addr)
{
        asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}

static inline int constant_test_bit(int nr, const volatile void *addr)
{
        return ((1UL << (nr % 32)) &
                (((unsigned long *)addr)[nr / 32])) != 0;
}

static inline int variable_test_bit(int nr, volatile const void *addr)
{
        int oldbit;

        asm volatile("bt %2,%1\n\t"
                     "sbb %0,%0"
                     : "=r" (oldbit)
                     : "m" (*(unsigned long *)addr), "Ir" (nr));

        return oldbit;
}


#define test_bit(nr,addr)                       \
        (__builtin_constant_p(nr) ?             \
         constant_test_bit((nr),(addr)) :       \
         variable_test_bit((nr),(addr)))

static inline int fls(int x)
{
        int r;

        __asm__("bsrl %1,%0\n\t"
                "jnz 1f\n\t"
                "movl $-1,%0\n"
                "1:" : "=r" (r) : "rm" (x));
        return r+1;
}

static inline unsigned long __ffs(unsigned long word)
{
        __asm__("bsfl %1,%0"
                :"=r" (word)
                :"rm" (word));
        return word;
}


static inline unsigned find_first_bit(const unsigned long *addr, unsigned size)
{
        unsigned x = 0;

        while (x < size) {
                unsigned long val = *addr++;
                if (val)
                        return __ffs(val) + x;
                x += (sizeof(*addr)<<3);
        }
        return x;
}

int find_next_bit(const unsigned long *addr, int size, int offset)
{
    const unsigned long *p = addr + (offset >> 5);
    int set = 0, bit = offset & 31, res;

    if (bit)
    {
        /*
         * Look for nonzero in the first 32 bits:
         */
        __asm__("bsfl %1,%0\n\t"
                "jne 1f\n\t"
                "movl $32, %0\n"
                "1:"
                : "=r" (set)
                : "r" (*p >> bit));
        if (set < (32 - bit))
                return set + offset;
        set = 32 - bit;
        p++;
    }
    /*
     * No set bit yet, search remaining full words for a bit
     */
    res = find_first_bit (p, size - 32 * (p - addr));
    return (offset + set + res);
}

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

#define rcu_dereference(p)     ({ \
                                typeof(p) _________p1 = ACCESS_ONCE(p); \
                                (_________p1); \
                                })

#define rcu_assign_pointer(p, v) \
        ({ \
                if (!__builtin_constant_p(v) || \
                    ((v) != NULL)) \
                (p) = (v); \
        })

//static struct kmem_cache *idr_layer_cache;


static struct idr_layer *get_from_free_list(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

//   spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
//   spin_unlock_irqrestore(&idp->lock, flags);
	return(p);
}


static void idr_layer_rcu_free(struct rcu_head *head)
{
	struct idr_layer *layer;

    layer = container_of(head, struct idr_layer, rcu_head);
    kfree(layer);
}


static inline void free_layer(struct idr_layer *p)
{
    kfree(p);
}


/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
//   spin_lock_irqsave(&idp->lock, flags);
	__move_to_free_list(idp, p);
//   spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
	struct idr_layer *p = pa[0];
	int l = 0;

	__set_bit(id & IDR_MASK, &p->bitmap);
	/*
	 * If this layer is full mark the bit in the layer above to
	 * show that this part of the radix tree is full.  This may
	 * complete the layer above and require walking up the radix
	 * tree.
	 */
	while (p->bitmap == IDR_FULL) {
		if (!(p = pa[++l]))
			break;
		id = id >> IDR_BITS;
		__set_bit((id & IDR_MASK), &p->bitmap);
	}
}


/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp:	idr handle
 * @gfp_mask:	memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * idr_get_new* functions. It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, u32_t gfp_mask)
{
   while (idp->id_free_cnt < IDR_FREE_MAX) {
       struct idr_layer *new;
       new = kzalloc(sizeof(struct idr_layer), gfp_mask);
       if (new == NULL)
           return (0);
       move_to_free_list(idp, new);
   }
   return 1;
}
EXPORT_SYMBOL(idr_pre_get);
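
/*
 * The usual calling pattern, as a sketch (my_idr, my_obj and my_lock are
 * hypothetical caller-side names; the preallocation happens outside the
 * caller's lock, the allocation inside it):
 *
 *  again:
 *	if (!idr_pre_get(&my_idr, GFP_KERNEL))	// refill the free list
 *		return -ENOMEM;
 *	spin_lock(&my_lock);
 *	ret = idr_get_new(&my_idr, my_obj, &id);
 *	spin_unlock(&my_lock);
 *	if (ret == -EAGAIN)			// free list ran dry: retry
 *		goto again;
 */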


static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;
	unsigned long bm;

	id = *starting_id;
 restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		bm = ~p->bitmap;
		m = find_next_bit(&bm, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available go back to previous layer. */
			l++;
			oid = id;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (!(p = pa[l])) {
				*starting_id = id;
				return IDR_NEED_TO_GROW;
			}

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_ID_BIT) || (id < 0))
			return IDR_NOMORE_SPACE;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			new = get_from_free_list(idp);
			if (!new)
				return -1;
			new->layer = l-1;
			rcu_assign_pointer(p->ary[m], new);
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}


static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = get_from_free_list(idp)))
			return -1;
		p->layer = 0;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
		layers++;
		if (!p->count) {
			/* special case: if the tree is currently empty,
			 * then we grow the tree by moving the top node
			 * upwards.
			 */
			p->layer++;
			continue;
		}
		if (!(new = get_from_free_list(idp))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
//           spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->bitmap = new->count = 0;
				__move_to_free_list(idp, new);
			}
//           spin_unlock_irqrestore(&idp->lock, flags);
			return -1;
		}
		new->ary[0] = p;
		new->count = 1;
		new->layer = layers-1;
		if (p->bitmap == IDR_FULL)
			__set_bit(0, &new->bitmap);
		p = new;
	}
	rcu_assign_pointer(idp->top, p);
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa);
	if (v == IDR_NEED_TO_GROW)
		goto build_up;
	return(v);
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	int id;

	id = idr_get_empty_slot(idp, starting_id, pa);
	if (id >= 0) {
		/*
		 * Successfully found an empty slot.  Install the user
		 * pointer and mark the slot full.
		 */
		rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
				(struct idr_layer *)ptr);
		pa[0]->count++;
		idr_mark_full(pa, id);
	}

	return id;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	int rv;
    rv = idr_get_new_above_int(idp, ptr, starting_id);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
    {
        dbgprintf("fail\n");
        return _idr_rc_to_errno(rv);
    }
	*id = rv;
    return 0;
}
EXPORT_SYMBOL(idr_get_new_above);

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, 0);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new);

static void idr_remove_warning(int id)
{
	printk(KERN_WARNING
		"idr_remove called for id=%d which is not allocated.\n", id);
//   dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_LEVEL];
	struct idr_layer ***paa = &pa[0];
	struct idr_layer *to_free;
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, &p->bitmap))){
		__clear_bit(n, &p->bitmap);
		rcu_assign_pointer(p->ary[n], NULL);
		to_free = NULL;
		while(*paa && ! --((**paa)->count)){
			if (to_free)
				free_layer(to_free);
			to_free = **paa;
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
		if (to_free)
			free_layer(to_free);
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;
	struct idr_layer *to_free;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/*
		 * Single child at leftmost slot: we can shrink the tree.
		 * This level is not needed anymore since when layers are
		 * inserted, they are inserted at the top of the existing
		 * tree.
		 */
		to_free = idp->top;
		p = idp->top->ary[0];
		rcu_assign_pointer(idp->top, p);
		--idp->layers;
		to_free->bitmap = to_free->count = 0;
		free_layer(to_free);
	}
	while (idp->id_free_cnt >= IDR_FREE_MAX) {
		p = get_from_free_list(idp);
		/*
		 * Note: we don't call the rcu callback here, since the only
		 * layers that fall into the freelist are those that have been
		 * preallocated.
		 */
        kfree(p);
	}
	return;
}
EXPORT_SYMBOL(idr_remove);


/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idp_layers, but this
 * function will remove all id mappings and leave all idp_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
	int n, id, max;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	rcu_assign_pointer(idp->top, NULL);
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		id += 1 << n;
		while (n < fls(id)) {
			if (p)
				free_layer(p);
			n += IDR_BITS;
			p = *--paa;
		}
	}
	idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
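
/*
 * The clean-up sequence described above, as a sketch (free_obj and my_idr
 * are hypothetical caller-side names; note that idr_for_each() is compiled
 * out in this port, see the #if 0 block below):
 *
 *	static int free_obj(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *	...
 *	idr_for_each(&my_idr, free_obj, NULL);	// free the stored objects
 *	idr_remove_all(&my_idr);		// drop all id mappings
 *	idr_destroy(&my_idr);			// release cached idr_layers
 */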

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
	while (idp->id_free_cnt) {
		struct idr_layer *p = get_from_free_list(idp);
        kfree(p);
	}
}
EXPORT_SYMBOL(idr_destroy);


/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
void *idr_find(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	p = rcu_dereference(idp->top);
	if (!p)
		return NULL;
	n = (p->layer+1) * IDR_BITS;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return NULL;
	BUG_ON(n == 0);

	while (n > 0 && p) {
		n -= IDR_BITS;
		BUG_ON(n != p->layer*IDR_BITS);
		p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
	}
	return((void *)p);
}
EXPORT_SYMBOL(idr_find);
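
/*
 * Lookup sketch: in the original kernel code this runs under
 * rcu_read_lock(); in this port the RCU helpers above are plain macros,
 * so the caller just has to serialize lookups with removal itself
 * (my_idr and struct my_obj are hypothetical):
 *
 *	struct my_obj *obj = idr_find(&my_idr, id);
 *	if (!obj)
 *		return -ENOENT;		// id not allocated, or NULL was stored
 */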

#if 0
/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time. If it returns anything other
 * than 0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = rcu_dereference(idp->top);
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);
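
/*
 * Callback sketch for idr_for_each() (count_obj, my_idr and the counter
 * are hypothetical):
 *
 *	static int count_obj(int id, void *p, void *data)
 *	{
 *		int *count = data;
 *		(*count)++;
 *		return 0;		// non-zero would stop the iteration
 *	}
 *	...
 *	int count = 0;
 *	idr_for_each(&my_idr, count_obj, &count);
 */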

/**
 * idr_get_next - lookup next object above or equal to the given id
 * @idp: idr handle
 * @nextidp: pointer to lookup key
 *
 * Returns a pointer to the registered object with the smallest id greater
 * than or equal to *@nextidp, and updates *@nextidp to that id.
 */

void *idr_get_next(struct idr *idp, int *nextidp)
{
	struct idr_layer *p, *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];
	int id = *nextidp;
	int n, max;

	/* find first ent */
	n = idp->layers * IDR_BITS;
	max = 1 << n;
	p = rcu_dereference(idp->top);
	if (!p)
		return NULL;

	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			*nextidp = id;
			return p;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}
	return NULL;
}


/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A -ENOENT return indicates that @id was not found.
 * A -EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	p = idp->top;
	if (!p)
		return ERR_PTR(-EINVAL);

	n = (p->layer+1) * IDR_BITS;

	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	rcu_assign_pointer(p->ary[n], ptr);

	return old_p;
}
EXPORT_SYMBOL(idr_replace);
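
/*
 * Replace sketch: the return value is either the old pointer or an
 * ERR_PTR()-encoded errno, so it has to be checked with the IS_ERR()
 * and PTR_ERR() error-pointer helpers (my_idr and new_obj are
 * hypothetical):
 *
 *	void *old = idr_replace(&my_idr, new_obj, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);	// -EINVAL or -ENOENT
 *	// old now points to the object previously stored under id
 */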


#endif


void idr_init_cache(void)
{
    //idr_layer_cache = kmem_cache_create("idr_layer_cache",
    //           sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp:	idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	memset(idp, 0, sizeof(struct idr));
 //  spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);

#if 0

/*
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo 
 */
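
/*
 * IDA usage sketch (my_ida and the retry label are hypothetical; the
 * pre-get/retry convention is the same as for the idr functions above):
 *
 *	struct ida my_ida;
 *	int id, ret;
 *
 *	ida_init(&my_ida);
 *  again:
 *	if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *		return -ENOMEM;
 *	ret = ida_get_new(&my_ida, &id);
 *	if (ret == -EAGAIN)
 *		goto again;
 *	...
 *	ida_remove(&my_ida, id);
 *	ida_destroy(&my_ida);
 */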

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida:	ida handle
 * @gfp_mask:	memory allocation flag
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida:	ida handle
 * @starting_id:	id to start search at
 * @p_id:	pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... 0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

 restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa);
	if (t < 0)
		return _idr_rc_to_errno(t);

	if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
		return -ENOSPC;

	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
				(void *)bitmap);
		pa[0]->count++;
	}

	/* lookup for empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_ID_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have small memory foot print.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
		if (p)
			kmem_cache_free(idr_layer_cache, p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);

/**
 * ida_get_new - allocate new ID
 * @ida:	ida handle
 * @p_id:	pointer to the allocated handle
 *
 * Allocate new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range 0 ... 0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
	return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);

/**
 * ida_remove - remove the given ID
 * @ida:	ida handle
 * @id:		ID to free
 */
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		p = p->ary[n];
		shift -= IDR_BITS;
	}

	if (p == NULL)
		goto err;

	n = idr_id & IDR_MASK;
	__clear_bit(n, &p->bitmap);

	bitmap = (void *)p->ary[n];
	if (!test_bit(offset, bitmap->bitmap))
		goto err;

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, &p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);
	}

	return;

 err:
	printk(KERN_WARNING
	       "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida:	ida handle
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_init - initialize ida handle
 * @ida:	ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);

}
EXPORT_SYMBOL(ida_init);


#endif