Subversion Repositories Kolibri OS

Rev 1412 → Rev 2966 (lines marked '-' appear only in Rev 1412, lines marked '+' only in Rev 2966; unmarked lines are common to both)
Line 115... Line 115...
 	}
 //   spin_unlock_irqrestore(&idp->lock, flags);
 	return(p);
 }
-
 
 static void idr_layer_rcu_free(struct rcu_head *head)
 {
 	struct idr_layer *layer;
 
     layer = container_of(head, struct idr_layer, rcu_head);
     kfree(layer);
 }
-
-
 
 static inline void free_layer(struct idr_layer *p)
 {
     kfree(p);
 }
-
 
 /* only called when idp->lock is held */
Line 172... Line 168...
 		id = id >> IDR_BITS;
 		__set_bit((id & IDR_MASK), &p->bitmap);
 	}
 }
-
-
 
 /**
- * idr_pre_get - reserver resources for idr allocation
+ * idr_pre_get - reserve resources for idr allocation
  * @idp:	idr handle
  * @gfp_mask:	memory allocation flags
  *
- * This function should be called prior to locking and calling the
- * idr_get_new* functions. It preallocates enough memory to satisfy
- * the worst possible allocation.
+ * This function should be called prior to calling the idr_get_new* functions.
+ * It preallocates enough memory to satisfy the worst possible allocation. The
+ * caller should pass in GFP_KERNEL if possible.  This of course requires that
+ * no spinning locks be held.
  *
- * If the system is REALLY out of memory this function returns 0,
- * otherwise 1.
+ * If the system is REALLY out of memory this function returns %0,
+ * otherwise %1.
  */
-int idr_pre_get(struct idr *idp, u32_t gfp_mask)
+int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 {
    while (idp->id_free_cnt < IDR_FREE_MAX) {
        struct idr_layer *new;
        new = kzalloc(sizeof(struct idr_layer), gfp_mask);
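For reference, a minimal caller-side sketch of the preallocation protocol described in the comment above (not part of either revision; my_idr, my_object and the GFP_KERNEL flag are illustrative):

    struct idr my_idr;
    int id, ret;

    idr_init(&my_idr);

    /* Refill the idr's private free list first; no spinning locks may be
     * held if a sleeping flag such as GFP_KERNEL is passed. */
    if (!idr_pre_get(&my_idr, GFP_KERNEL))
        return -ENOMEM;              /* 0 return: really out of memory */

    /* The allocation itself; it can still return -EAGAIN if the free
     * list was drained in the meantime (see the retry loop sketched
     * after the idr_get_new_above() hunk below). */
    ret = idr_get_new(&my_idr, my_object, &id);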
Line 265... Line 260...
 
 	pa[l] = p;
 	return id;
 }
-
 
 static int idr_get_empty_slot(struct idr *idp, int starting_id,
 			      struct idr_layer **pa)
 {
Line 350... Line 344...
 }
 
 /**
  * idr_get_new_above - allocate new idr entry above or equal to a start id
  * @idp: idr handle
- * @ptr: pointer you want associated with the ide
- * @start_id: id to start search at
+ * @ptr: pointer you want associated with the id
+ * @starting_id: id to start search at
  * @id: pointer to the allocated handle
  *
  * This is the allocate id function.  It should be called with any
  * required locks.
  *
- * If memory is required, it will return -EAGAIN, you should unlock
- * and go back to the idr_pre_get() call.  If the idr is full, it will
- * return -ENOSPC.
+ * If allocation from IDR's private freelist fails, idr_get_new_above() will
+ * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
+ * IDR's preallocation and then retry the idr_get_new_above() call.
+ *
+ * If the idr is full idr_get_new_above() will return %-ENOSPC.
  *
- * @id returns a value in the range @starting_id ... 0x7fffffff
+ * @id returns a value in the range @starting_id ... %0x7fffffff
  */
 int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 {
 	int rv;
+
     rv = idr_get_new_above_int(idp, ptr, starting_id);
 	/*
 	 * This is a cheap hack until the IDR code can be fixed to
 	 * return proper error values.
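The kernel-doc above spells out a pre-get/allocate retry cycle; a compact sketch of that loop (names are illustrative, and it assumes the caller may sleep for the idr_pre_get() refill):

    int id, ret;

    do {
        if (!idr_pre_get(&my_idr, GFP_KERNEL))
            return -ENOMEM;                    /* preallocation failed */
        ret = idr_get_new_above(&my_idr, my_object, 100, &id);
    } while (ret == -EAGAIN);                  /* free list ran dry: refill, retry */

    if (ret)
        return ret;                            /* -ENOSPC: the idr is full */
    /* success: 100 <= id <= 0x7fffffff and id now maps to my_object */

Here 100 is just an arbitrary starting_id chosen for the example.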
Line 383... Line 380...
 }
 
 /**
  * idr_get_new - allocate new idr entry
  * @idp: idr handle
- * @ptr: pointer you want associated with the ide
+ * @ptr: pointer you want associated with the id
  * @id: pointer to the allocated handle
  *
- * This is the allocate id function.  It should be called with any
- * required locks.
- *
- * If memory is required, it will return -EAGAIN, you should unlock
- * and go back to the idr_pre_get() call.  If the idr is full, it will
- * return -ENOSPC.
+ * If allocation from IDR's private freelist fails, idr_get_new_above() will
+ * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
+ * IDR's preallocation and then retry the idr_get_new_above() call.
+ *
+ * If the idr is full idr_get_new_above() will return %-ENOSPC.
  *
- * @id returns a value in the range 0 ... 0x7fffffff
+ * @id returns a value in the range %0 ... %0x7fffffff
  */
 int idr_get_new(struct idr *idp, void *ptr, int *id)
 {
Line 455... Line 451...
 	} else
 		idr_remove_warning(id);
 }
 
 /**
- * idr_remove - remove the given id and free it's slot
+ * idr_remove - remove the given id and free its slot
  * @idp: idr handle
  * @id: unique key
  */
 void idr_remove(struct idr *idp, int id)
Line 504... Line 500...
  *
  * idr_destroy() only frees up unused, cached idp_layers, but this
  * function will remove all id mappings and leave all idp_layers
  * unused.
  *
- * A typical clean-up sequence for objects stored in an idr tree, will
+ * A typical clean-up sequence for objects stored in an idr tree will
  * use idr_for_each() to free all objects, if necessay, then
  * idr_remove_all() to remove all ids, and idr_destroy() to free
  * up the cached idr_layers.
  */
 void idr_remove_all(struct idr *idp)
 {
 	int n, id, max;
+	int bt_mask;
 	struct idr_layer *p;
 	struct idr_layer *pa[MAX_LEVEL];
 	struct idr_layer **paa = &pa[0];
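A compact sketch of the clean-up sequence the comment above recommends, with an illustrative callback that simply frees each stored object (free_cb and my_idr are made-up names; the callback signature matches idr_for_each() as declared later in this file):

    static int free_cb(int id, void *p, void *data)
    {
        kfree(p);       /* release the object stored under this id */
        return 0;       /* keep walking; non-zero would abort the loop */
    }

    /* at teardown time: */
    idr_for_each(&my_idr, free_cb, NULL);   /* free every stored object    */
    idr_remove_all(&my_idr);                /* drop all id mappings        */
    idr_destroy(&my_idr);                   /* free the cached idr_layers  */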
Line 520... Line 517...
 
Line 529... Line 526...
 			n -= IDR_BITS;
 			*paa++ = p;
 			p = p->ary[(id >> n) & IDR_MASK];
 		}
 
+		bt_mask = id;
 		id += 1 << n;
-		while (n < fls(id)) {
+		/* Get the highest bit that the above add changed from 0->1. */
+		while (n < fls(id ^ bt_mask)) {
 			if (p)
 				free_layer(p);
 			n += IDR_BITS;
 			p = *--paa;
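A worked example of why the new test uses fls(id ^ bt_mask) rather than fls(id): only the bits actually flipped by the increment should drive the backtracking. The numbers below are illustrative, not taken from the file:

    int bt_mask = 0x400;                /* id before the step                       */
    int id      = bt_mask + (1 << 0);   /* id += 1 << n with n == 0, so id == 0x401 */

    /* Old test: fls(id) == 11, so the loop keeps popping and freeing layers
     * far above the point where anything changed, layers the walk may still
     * need.  New test: id ^ bt_mask == 0x001, fls(...) == 1, so only the
     * level whose index bits the increment really touched is backtracked.  */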
Line 542... Line 541...
 	idp->layers = 0;
 }
 
 /**
  * idr_destroy - release all cached layers within an idr tree
- * idp: idr handle
+ * @idp: idr handle
  */
 void idr_destroy(struct idr *idp)
 {
 	while (idp->id_free_cnt) {
Line 605... Line 604...
  * to this function.  It is not safe to modify the idr tree while in
  * the callback, so functions such as idr_get_new and idr_remove are
  * not allowed.
  *
  * We check the return of @fn each time. If it returns anything other
- * than 0, we break out and return that value.
+ * than %0, we break out and return that value.
  *
  * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
  */
 int idr_for_each(struct idr *idp,
 		 int (*fn)(int id, void *p, void *data), void *data)
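As the comment says, a non-zero return from @fn stops the walk and becomes idr_for_each()'s return value; a small sketch that uses this to look up the id of a known pointer (find_cb and struct find_ctx are illustrative):

    struct find_ctx {
        void *needle;
        int   found_id;
    };

    static int find_cb(int id, void *p, void *data)
    {
        struct find_ctx *ctx = data;

        if (p != ctx->needle)
            return 0;       /* keep walking */
        ctx->found_id = id;
        return 1;           /* stop: idr_for_each() returns 1 */
    }

idr_for_each(&my_idr, find_cb, &ctx) then returns 1 when the pointer was found and 0 after a full, unsuccessful walk.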
Line 649... Line 648...
 EXPORT_SYMBOL(idr_for_each);
 
 /**
  * idr_get_next - lookup next object of id to given id.
  * @idp: idr handle
- * @id:  pointer to lookup key
+ * @nextidp:  pointer to lookup key
  *
  * Returns pointer to registered object with id, which is next number to
- * given id.
+ * given id. After being looked up, *@nextidp will be updated for the next
+ * iteration.
  */
 
 void *idr_get_next(struct idr *idp, int *nextidp)
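Because *@nextidp is updated in place, idr_get_next() composes into a simple cursor-style scan; a sketch with illustrative names (handle_object is a made-up helper):

    void *p;
    int id = 0;                           /* start scanning at id 0         */

    while ((p = idr_get_next(&my_idr, &id)) != NULL) {
        handle_object(id, p);             /* per-object work                */
        id++;                             /* step past the id just returned */
    }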
Line 699... Line 699...
  * @idp: idr handle
  * @ptr: pointer you want associated with the id
  * @id: lookup key
  *
  * Replace the pointer registered with an id and return the old value.
- * A -ENOENT return indicates that @id was not found.
- * A -EINVAL return indicates that @id was not within valid constraints.
+ * A %-ENOENT return indicates that @id was not found.
+ * A %-EINVAL return indicates that @id was not within valid constraints.
  *
  * The caller must serialize with writers.
  */
 void *idr_replace(struct idr *idp, void *ptr, int id)
 {
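Since the errors come back through the pointer return value, callers normally test it with the error-pointer helpers; a sketch assuming the usual ERR_PTR()/IS_ERR() convention (new_obj and id are illustrative):

    void *old;

    old = idr_replace(&my_idr, new_obj, id);
    if (IS_ERR(old))
        return PTR_ERR(old);   /* -ENOENT: no such id; -EINVAL: id out of range */

    kfree(old);                /* the previous pointer now belongs to the caller */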
Line 765... Line 765...
 #if 0
 
 /*
  * IDA - IDR based ID allocator
  *
- * this is id allocator without id -> pointer translation.  Memory
+ * This is id allocator without id -> pointer translation.  Memory
  * usage is much lower than full blown idr because each id only
  * occupies a bit.  ida uses a custom leaf node which contains
  * IDA_BITMAP_BITS slots.
  *
Line 798... Line 798...
  *
  * This function should be called prior to locking and calling the
  * following function.  It preallocates enough memory to satisfy the
  * worst possible allocation.
  *
- * If the system is REALLY out of memory this function returns 0,
- * otherwise 1.
+ * If the system is REALLY out of memory this function returns %0,
+ * otherwise %1.
  */
 int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
 {
 	/* allocate idr_layers */
 	if (!idr_pre_get(&ida->idr, gfp_mask))
Line 825... Line 825...
 EXPORT_SYMBOL(ida_pre_get);
 
 /**
  * ida_get_new_above - allocate new ID above or equal to a start id
  * @ida:	ida handle
- * @staring_id:	id to start search at
+ * @starting_id: id to start search at
  * @p_id:	pointer to the allocated handle
  *
- * Allocate new ID above or equal to @ida.  It should be called with
- * any required locks.
+ * Allocate new ID above or equal to @starting_id.  It should be called
+ * with any required locks.
  *
- * If memory is required, it will return -EAGAIN, you should unlock
+ * If memory is required, it will return %-EAGAIN, you should unlock
  * and go back to the ida_pre_get() call.  If the ida is full, it will
- * return -ENOSPC.
+ * return %-ENOSPC.
  *
- * @p_id returns a value in the range @starting_id ... 0x7fffffff.
+ * @p_id returns a value in the range @starting_id ... %0x7fffffff.
  */
 int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
 {
 	struct idr_layer *pa[MAX_LEVEL];
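For completeness (note that this whole IDA block sits under the #if 0 above in this revision), the documented protocol mirrors the IDR one; a sketch with an illustrative ida instance, assuming ida_init() is available as in the upstream API:

    struct ida my_ida;
    int id, ret;

    ida_init(&my_ida);

    do {
        if (!ida_pre_get(&my_ida, GFP_KERNEL))
            return -ENOMEM;
        ret = ida_get_new_above(&my_ida, 1, &id);   /* illustrative starting_id of 1 */
    } while (ret == -EAGAIN);

    if (ret)
        return ret;                /* -ENOSPC: the ida is full */

    /* ... use id ...; when finished: */
    ida_remove(&my_ida, id);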
Line 917... Line 917...
  * @ida:	idr handle
  * @p_id:	pointer to the allocated handle
  *
  * Allocate new ID.  It should be called with any required locks.
  *
- * If memory is required, it will return -EAGAIN, you should unlock
+ * If memory is required, it will return %-EAGAIN, you should unlock
  * and go back to the idr_pre_get() call.  If the idr is full, it will
- * return -ENOSPC.
+ * return %-ENOSPC.
  *
- * @id returns a value in the range 0 ... 0x7fffffff.
+ * @p_id returns a value in the range %0 ... %0x7fffffff.
  */
 int ida_get_new(struct ida *ida, int *p_id)
 {
 	return ida_get_new_above(ida, 0, p_id);
 }
Line 979... Line 979...
 }
 EXPORT_SYMBOL(ida_remove);
 
 /**
  * ida_destroy - release all cached layers within an ida tree
- * ida:		ida handle
+ * @ida:		ida handle
  */
 void ida_destroy(struct ida *ida)
 {
 	idr_destroy(&ida->idr);