33,6 → 33,11 |
#include <linux/idr.h> |
//#include <stdlib.h> |
|
static inline void * __must_check ERR_PTR(long error) |
{ |
return (void *) error; |
} |
|
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, |
unsigned long offset); |
|
49,6 → 54,7 |
static struct idr_layer *idr_preload_head; |
static int idr_preload_cnt; |
|
static DEFINE_SPINLOCK(simple_ida_lock); |
|
/* the maximum ID which can be allocated given idr->layers */ |
static int idr_max(int layers) |
462,6 → 468,33 |
} |
EXPORT_SYMBOL_GPL(idr_alloc); |
|
/** |
* idr_alloc_cyclic - allocate new idr entry in a cyclical fashion |
* @idr: the (initialized) idr |
* @ptr: pointer to be associated with the new id |
* @start: the minimum id (inclusive) |
* @end: the maximum id (exclusive, <= 0 for max) |
* @gfp_mask: memory allocation flags |
* |
* Essentially the same as idr_alloc, but prefers to allocate progressively |
* higher ids if it can. If the "cur" counter wraps, then it will start again |
* at the "start" end of the range and allocate one that has already been used. |
*/ |
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, |
gfp_t gfp_mask) |
{ |
int id; |
|
id = idr_alloc(idr, ptr, max(start, idr->cur), end, gfp_mask); |
if (id == -ENOSPC) |
id = idr_alloc(idr, ptr, start, end, gfp_mask); |
|
if (likely(id >= 0)) |
idr->cur = id + 1; |
return id; |
} |
EXPORT_SYMBOL(idr_alloc_cyclic); |
|
static void idr_remove_warning(int id) |
{ |
WARN(1, "idr_remove called for id=%d which is not allocated.\n", id); |
632,7 → 665,6 |
} |
EXPORT_SYMBOL(idr_find_slowpath); |
|
#if 0 |
/** |
* idr_for_each - iterate through all stored pointers |
* @idp: idr handle |
790,10 → 822,6 |
} |
EXPORT_SYMBOL(idr_replace); |
|
|
#endif |
|
|
void __init idr_init_cache(void) |
{ |
//idr_layer_cache = kmem_cache_create("idr_layer_cache", |
858,7 → 886,7 |
int ida_pre_get(struct ida *ida, gfp_t gfp_mask) |
{ |
/* allocate idr_layers */ |
if (!idr_pre_get(&ida->idr, gfp_mask)) |
if (!__idr_pre_get(&ida->idr, gfp_mask)) |
return 0; |
|
/* allocate free_bitmap */ |
1023,6 → 1051,74 |
EXPORT_SYMBOL(ida_destroy); |
|
/** |
* ida_simple_get - get a new id. |
* @ida: the (initialized) ida. |
* @start: the minimum id (inclusive, < 0x8000000) |
* @end: the maximum id (exclusive, < 0x8000000 or 0) |
* @gfp_mask: memory allocation flags |
* |
* Allocates an id in the range start <= id < end, or returns -ENOSPC. |
* On memory allocation failure, returns -ENOMEM. |
* |
* Use ida_simple_remove() to get rid of an id. |
*/ |
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end, |
gfp_t gfp_mask) |
{ |
int ret, id; |
unsigned int max; |
unsigned long flags; |
|
BUG_ON((int)start < 0); |
BUG_ON((int)end < 0); |
|
if (end == 0) |
max = 0x80000000; |
else { |
BUG_ON(end < start); |
max = end - 1; |
} |
|
again: |
if (!ida_pre_get(ida, gfp_mask)) |
return -ENOMEM; |
|
spin_lock_irqsave(&simple_ida_lock, flags); |
ret = ida_get_new_above(ida, start, &id); |
if (!ret) { |
if (id > max) { |
ida_remove(ida, id); |
ret = -ENOSPC; |
} else { |
ret = id; |
} |
} |
spin_unlock_irqrestore(&simple_ida_lock, flags); |
|
if (unlikely(ret == -EAGAIN)) |
goto again; |
|
return ret; |
} |
EXPORT_SYMBOL(ida_simple_get); |
|
/**
 * ida_simple_remove - remove an allocated id.
 * @ida: the (initialized) ida.
 * @id: the id returned by ida_simple_get.
 */
void ida_simple_remove(struct ida *ida, unsigned int id)
{
	unsigned long flags;

	/* ids live in the non-negative int range only */
	BUG_ON((int)id < 0);
	/* simple_ida_lock serializes all ida_simple_* callers on this ida */
	spin_lock_irqsave(&simple_ida_lock, flags);
	ida_remove(ida, id);
	spin_unlock_irqrestore(&simple_ida_lock, flags);
}
EXPORT_SYMBOL(ida_simple_remove);
|
/** |
* ida_init - initialize ida handle |
* @ida: ida handle |
* |