* that id to this code and it returns your pointer. |
|
* You can release ids at any time. When all ids are released, most of |
* the memory is returned (we keep MAX_IDR_FREE) in a local pool so we |
* don't need to go to the memory "store" during an id allocate, just |
* so you don't need to be too concerned about locking and conflicts |
* with the slab allocator. |
*/ |
|
#include <linux/kernel.h> |
#include <linux/export.h> |
#include <linux/string.h> |
#include <linux/bitops.h> |
#include <linux/idr.h> |
|
#define MAX_IDR_SHIFT	(sizeof(int) * 8 - 1)
#define MAX_IDR_BIT	(1U << MAX_IDR_SHIFT)

/* Leave the possibility of an incomplete final layer */
#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)

/* Number of id_layer structs to leave in free list */
#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)

static struct idr_layer *idr_preload_head;
static int idr_preload_cnt;

/* the maximum ID which can be allocated given idr->layers */
static int idr_max(int layers)
{
	int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT);

	return (1 << bits) - 1;
}

/*
 * Prefix mask for an idr_layer at @layer. For layer 0, the prefix mask is
 * all bits except for the lower IDR_BITS. For layer 1, 2 * IDR_BITS, and
 * so on.
 */
static int idr_layer_prefix_mask(int layer)
{
	return ~idr_max(layer + 1);
}
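
/*
 * Worked example (a sketch; assumes IDR_BITS == 8, i.e. 256-way nodes as
 * in mainline): a one-layer tree covers ids 0..255, so idr_max(1) == 255
 * and idr_layer_prefix_mask(0) == ~255 - a layer-0 node's prefix is the
 * id with its low IDR_BITS cleared.  idr_max(2) == 65535, and so on until
 * the min_t() clamp caps the shift at MAX_IDR_SHIFT.
 */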
|
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

#define rcu_dereference(p) ({ \
	typeof(p) _________p1 = ACCESS_ONCE(p); \
	(_________p1); \
})

/* this port runs single-threaded without RCU, so raw access is identical */
#define rcu_dereference_raw(p)	rcu_dereference(p)

/*
 * The store must be unconditional (the conditional kernel form only guards
 * a write barrier, which this port does not need); otherwise assignments
 * of constant NULL, e.g. clearing idp->top, would be dropped.
 */
#define rcu_assign_pointer(p, v)	((p) = (v))

#define RCU_INIT_POINTER(p, v)		((p) = (v))
|
static struct idr_layer *get_from_free_list(struct idr *idp) |
{ |
struct idr_layer *p; |
unsigned long flags; |
|
spin_lock_irqsave(&idp->lock, flags); |
if ((p = idp->id_free)) { |
idp->id_free = p->ary[0]; |
idp->id_free_cnt--; |
p->ary[0] = NULL; |
} |
spin_unlock_irqrestore(&idp->lock, flags); |
return(p); |
} |
|
/** |
* idr_layer_alloc - allocate a new idr_layer |
* @gfp_mask: allocation mask |
* @layer_idr: optional idr to allocate from |
* |
 * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch
 * one from the preload buffer.  If @layer_idr is not %NULL, fetch an
 * idr_layer from @layer_idr->id_free.
* |
* @layer_idr is to maintain backward compatibility with the old alloc |
* interface - idr_pre_get() and idr_get_new*() - and will be removed |
* together with per-pool preload buffer. |
*/ |
static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr) |
{ |
struct idr_layer *new; |
|
/* this is the old path, bypass to get_from_free_list() */ |
if (layer_idr) |
return get_from_free_list(layer_idr); |
|
	/* try to allocate one directly */
new = kzalloc(sizeof(struct idr_layer), gfp_mask); |
if (new) |
return new; |
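	/* otherwise take one from the list refilled by idr_preload() */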
new = idr_preload_head; |
if (new) { |
idr_preload_head = new->ary[0]; |
idr_preload_cnt--; |
new->ary[0] = NULL; |
} |
return new; |
} |
|
static void idr_layer_rcu_free(struct rcu_head *head) |
{ |
struct idr_layer *layer; |

	layer = container_of(head, struct idr_layer, rcu_head);
kfree(layer); |
} |
|
static inline void free_layer(struct idr *idr, struct idr_layer *p) |
{ |
if (idr->hint && idr->hint == p) |
RCU_INIT_POINTER(idr->hint, NULL); |
idr_layer_rcu_free(&p->rcu_head); |
} |
|
/* only called when idp->lock is held */ |
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

/* |
* Depends on the return element being zeroed. |
*/ |
spin_lock_irqsave(&idp->lock, flags); |
__move_to_free_list(idp, p); |
spin_unlock_irqrestore(&idp->lock, flags); |
} |
|
static void idr_mark_full(struct idr_layer **pa, int id) |
{
struct idr_layer *p = pa[0]; |
int l = 0; |
|
__set_bit(id & IDR_MASK, p->bitmap); |
/* |
* If this layer is full mark the bit in the layer above to |
* show that this part of the radix tree is full. This may |
* complete the layer above and require walking up the radix |
* tree. |
*/ |
while (bitmap_full(p->bitmap, IDR_SIZE)) { |
if (!(p = pa[++l])) |
break; |
id = id >> IDR_BITS; |
__set_bit((id & IDR_MASK), p->bitmap); |
} |
} |
|
/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp:	idr handle
 * @gfp_mask:	memory allocation flags
 *
 * This function should be called prior to calling the idr_get_new* functions.
 * It preallocates enough memory to satisfy the worst possible allocation. The
 * caller should pass in GFP_KERNEL if possible.  This of course requires that
 * no spinning locks be held.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
*/ |
int idr_pre_get(struct idr *idp, gfp_t gfp_mask) |
{ |
while (idp->id_free_cnt < MAX_IDR_FREE) { |
struct idr_layer *new; |
new = kzalloc(sizeof(struct idr_layer), gfp_mask); |
if (new == NULL) |
			return 0;
		move_to_free_list(idp, new);
} |
return 1; |
} |
EXPORT_SYMBOL(idr_pre_get); |
|
/** |
* sub_alloc - try to allocate an id without growing the tree depth |
* @idp: idr handle |
* @starting_id: id to start search at |
* @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer |
* @gfp_mask: allocation mask for idr_layer_alloc() |
* @layer_idr: optional idr passed to idr_layer_alloc() |
* |
* Allocate an id in range [@starting_id, INT_MAX] from @idp without |
* growing its depth. Returns |
* |
* the allocated id >= 0 if successful, |
* -EAGAIN if the tree needs to grow for allocation to succeed, |
* -ENOSPC if the id space is exhausted, |
* -ENOMEM if more idr_layers need to be allocated. |
*/ |
static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa, |
gfp_t gfp_mask, struct idr *layer_idr) |
{ |
int n, m, sh; |
struct idr_layer *p, *new; |
int l, id, oid; |
|
id = *starting_id; |
restart: |
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
* We run around this while until we reach the leaf node... |
*/ |
n = (id >> (IDR_BITS*l)) & IDR_MASK; |
m = find_next_zero_bit(p->bitmap, IDR_SIZE, n); |
if (m == IDR_SIZE) { |
/* no space available go back to previous layer. */ |
l++; |
			oid = id;
id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; |
|
/* if already at the top layer, we need to grow */ |
if (id >= 1 << (idp->layers * IDR_BITS)) { |
*starting_id = id; |
return -EAGAIN; |
} |
p = pa[l]; |
BUG_ON(!p); |
|
/* If we need to go up one layer, continue the |
* loop; otherwise, restart from the top. |
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
sh = IDR_BITS*l; |
id = ((id >> sh) ^ n ^ m) << sh; |
} |
if ((id >= MAX_IDR_BIT) || (id < 0)) |
return -ENOSPC; |
if (l == 0) |
break; |
/* |
* Create the layer below if it is missing. |
*/ |
if (!p->ary[m]) { |
new = idr_layer_alloc(gfp_mask, layer_idr); |
if (!new) |
return -ENOMEM; |
new->layer = l-1; |
new->prefix = id & idr_layer_prefix_mask(new->layer); |
rcu_assign_pointer(p->ary[m], new); |
p->count++; |
} |
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
} |
|
static int idr_get_empty_slot(struct idr *idp, int starting_id, |
struct idr_layer **pa, gfp_t gfp_mask, |
struct idr *layer_idr) |
{ |
struct idr_layer *p, *new; |
int layers, v, id; |
	unsigned long flags;

	id = starting_id;
build_up:
p = idp->top; |
layers = idp->layers; |
if (unlikely(!p)) { |
if (!(p = idr_layer_alloc(gfp_mask, layer_idr))) |
return -ENOMEM; |
p->layer = 0; |
layers = 1; |
} |
	/*
* Add a new layer to the top of the tree if the requested |
* id is larger than the currently allocated space. |
*/ |
while (id > idr_max(layers)) { |
layers++; |
if (!p->count) { |
/* special case: if the tree is currently empty, |
			 * then we grow the tree by moving the top node
* upwards. |
*/ |
p->layer++; |
WARN_ON_ONCE(p->prefix); |
continue; |
} |
if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) { |
/* |
* The allocation failed. If we built part of |
* the structure tear it down. |
*/ |
spin_lock_irqsave(&idp->lock, flags); |
for (new = p; p && p != idp->top; new = p) { |
p = p->ary[0]; |
new->ary[0] = NULL; |
new->count = 0; |
bitmap_clear(new->bitmap, 0, IDR_SIZE); |
__move_to_free_list(idp, new); |
} |
spin_unlock_irqrestore(&idp->lock, flags); |
return -ENOMEM; |
} |
new->ary[0] = p; |
new->count = 1; |
new->layer = layers-1; |
new->prefix = id & idr_layer_prefix_mask(new->layer); |
if (bitmap_full(p->bitmap, IDR_SIZE)) |
__set_bit(0, new->bitmap); |
p = new; |
} |
rcu_assign_pointer(idp->top, p); |
idp->layers = layers; |
v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr); |
if (v == -EAGAIN) |
goto build_up; |
return(v); |
} |
|
/* |
* @id and @pa are from a successful allocation from idr_get_empty_slot(). |
* Install the user pointer @ptr and mark the slot full. |
*/ |
static void idr_fill_slot(struct idr *idr, void *ptr, int id, |
struct idr_layer **pa) |
{ |
/* update hint used for lookup, cleared from free_layer() */ |
rcu_assign_pointer(idr->hint, pa[0]); |
|
rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr); |
pa[0]->count++; |
idr_mark_full(pa, id); |
} |
|
/** |
* idr_get_new_above - allocate new idr entry above or equal to a start id |
* @idp: idr handle |
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If allocation from IDR's private freelist fails, idr_get_new_above() will
 * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
 * IDR's preallocation and then retry the idr_get_new_above() call.
 *
 * If the idr is full idr_get_new_above() will return %-ENOSPC.
 *
 * @id returns a value in the range @starting_id ... %0x7fffffff
*/ |
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) |
{ |
struct idr_layer *pa[MAX_IDR_LEVEL + 1]; |
int rv; |
|
rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp); |
if (rv < 0) |
return rv == -ENOMEM ? -EAGAIN : rv; |
|
idr_fill_slot(idp, ptr, rv, pa); |
*id = rv; |
return 0; |
} |
EXPORT_SYMBOL(idr_get_new_above); |
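
/*
 * Usage sketch for the legacy pair above (my_idr, my_lock and obj are
 * hypothetical names, not part of this file):
 *
 *	int id, err;
 *
 *	do {
 *		if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		err = idr_get_new_above(&my_idr, obj, 1, &id);
 *		spin_unlock(&my_lock);
 *	} while (err == -EAGAIN);
 */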
|
/** |
* idr_preload - preload for idr_alloc() |
* @gfp_mask: allocation mask to use for preloading |
* |
 * Preload the layer buffer for idr_alloc().  Can only be used from
 * process context and each idr_preload() invocation should be matched with
 * idr_preload_end().
* |
* The first idr_alloc() in the preloaded section can be treated as if it |
* were invoked with @gfp_mask used for preloading. This allows using more |
* permissive allocation masks for idrs protected by spinlocks. |
* |
* For example, if idr_alloc() below fails, the failure can be treated as |
* if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT. |
* |
* idr_preload(GFP_KERNEL); |
* spin_lock(lock); |
* |
* id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT); |
* |
* spin_unlock(lock); |
* idr_preload_end(); |
* if (id < 0) |
* error; |
*/ |
void idr_preload(gfp_t gfp_mask) |
{ |
/* |
* idr_alloc() is likely to succeed w/o full idr_layer buffer and |
* return value from idr_alloc() needs to be checked for failure |
* anyway. Silently give up if allocation fails. The caller can |
* treat failures from idr_alloc() as if idr_alloc() were called |
* with @gfp_mask which should be enough. |
*/ |
while (idr_preload_cnt < MAX_IDR_FREE) { |
struct idr_layer *new; |
|
new = kzalloc(sizeof(struct idr_layer), gfp_mask); |
if (!new) |
break; |
|
		/* link the new one to the preload list */
new->ary[0] = idr_preload_head; |
idr_preload_head = new; |
idr_preload_cnt++; |
} |
} |
EXPORT_SYMBOL(idr_preload); |
|
/** |
* idr_alloc - allocate new idr entry |
* @idr: the (initialized) idr |
* @ptr: pointer to be associated with the new id |
* @start: the minimum id (inclusive) |
* @end: the maximum id (exclusive, <= 0 for max) |
* @gfp_mask: memory allocation flags |
* |
* Allocate an id in [start, end) and associate it with @ptr. If no ID is |
* available in the specified range, returns -ENOSPC. On memory allocation |
* failure, returns -ENOMEM. |
* |
* Note that @end is treated as max when <= 0. This is to always allow |
* using @start + N as @end as long as N is inside integer range. |
* |
* The user is responsible for exclusively synchronizing all operations |
* which may modify @idr. However, read-only accesses such as idr_find() |
* or iteration can be performed under RCU read lock provided the user |
* destroys @ptr in RCU-safe way after removal from idr. |
*/ |
int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask) |
{ |
int max = end > 0 ? end - 1 : INT_MAX; /* inclusive upper limit */ |
struct idr_layer *pa[MAX_IDR_LEVEL + 1]; |
int id; |
|
/* sanity checks */ |
if (WARN_ON_ONCE(start < 0)) |
return -EINVAL; |
if (unlikely(max < start)) |
return -ENOSPC; |
|
/* allocate id */ |
id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL); |
if (unlikely(id < 0)) |
return id; |
if (unlikely(id > max)) |
return -ENOSPC; |
|
idr_fill_slot(idr, ptr, id, pa); |
return id; |
} |
EXPORT_SYMBOL_GPL(idr_alloc); |
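
/*
 * Sketch of the [start, end) convention (my_idr and obj are hypothetical):
 * the first call below can only return an id in 1..99, since @end is
 * exclusive; the second, with @end == 0, allows 1..INT_MAX.
 *
 *	id = idr_alloc(&my_idr, obj, 1, 100, GFP_KERNEL);
 *	id = idr_alloc(&my_idr, obj, 1, 0, GFP_KERNEL);
 */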
|
static void idr_remove_warning(int id) |
{ |
printk(KERN_WARNING |
	       "idr_remove called for id=%d which is not allocated.\n", id);
}

static void sub_remove(struct idr *idp, int shift, int id) |
{ |
struct idr_layer *p = idp->top; |
struct idr_layer **pa[MAX_IDR_LEVEL + 1]; |
struct idr_layer ***paa = &pa[0]; |
struct idr_layer *to_free; |
int n; |

	*paa = NULL;
	*++paa = &idp->top;
|
while ((shift > 0) && p) { |
n = (id >> shift) & IDR_MASK; |
__clear_bit(n, p->bitmap); |
*++paa = &p->ary[n]; |
p = p->ary[n]; |
shift -= IDR_BITS; |
} |
n = id & IDR_MASK; |
if (likely(p != NULL && test_bit(n, p->bitmap))) { |
__clear_bit(n, p->bitmap); |
rcu_assign_pointer(p->ary[n], NULL); |
to_free = NULL; |
while(*paa && ! --((**paa)->count)){ |
if (to_free) |
free_layer(idp, to_free); |
to_free = **paa; |
**paa-- = NULL; |
} |
if (!*paa) |
idp->layers = 0; |
if (to_free) |
free_layer(idp, to_free); |
} else |
idr_remove_warning(id); |
} |

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
struct idr_layer *p; |
struct idr_layer *to_free; |
|
/* see comment in idr_find_slowpath() */ |
if (WARN_ON_ONCE(id < 0)) |
return; |
|
sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); |
if (idp->top && idp->top->count == 1 && (idp->layers > 1) && |
	    idp->top->ary[0]) {
		/*
		 * Single child at leftmost slot: we can shrink the tree.
		 * This level is not needed anymore since when layers are
		 * inserted, they are inserted at the top of the existing
		 * tree.
		 */
		to_free = idp->top;
p = idp->top->ary[0]; |
rcu_assign_pointer(idp->top, p); |
--idp->layers; |
to_free->count = 0; |
bitmap_clear(to_free->bitmap, 0, IDR_SIZE); |
free_layer(idp, to_free); |
} |
while (idp->id_free_cnt >= MAX_IDR_FREE) { |
p = get_from_free_list(idp); |
/* |
* Note: we don't call the rcu callback here, since the only |
		 * memory being pointed to is structure itself.
		 */
		kfree(p);	/* Hope it's not in use by anybody */
} |
return; |
} |
EXPORT_SYMBOL(idr_remove); |
|
void __idr_remove_all(struct idr *idp) |
{ |
int n, id, max; |
int bt_mask; |
struct idr_layer *p; |
struct idr_layer *pa[MAX_IDR_LEVEL + 1]; |
struct idr_layer **paa = &pa[0]; |
|
n = idp->layers * IDR_BITS; |
p = idp->top; |
rcu_assign_pointer(idp->top, NULL); |
max = idr_max(idp->layers); |
|
id = 0; |
while (id >= 0 && id <= max) { |
while (n > IDR_BITS && p) { |
n -= IDR_BITS; |
*paa++ = p; |
			p = p->ary[(id >> n) & IDR_MASK];
		}
		bt_mask = id;
		id += 1 << n;
/* Get the highest bit that the above add changed from 0->1. */ |
while (n < fls(id ^ bt_mask)) { |
if (p) |
free_layer(idp, p); |
n += IDR_BITS; |
p = *--paa; |
} |
		}
} |
idp->layers = 0; |
} |
EXPORT_SYMBOL(__idr_remove_all); |
|
/** |
* idr_destroy - release all cached layers within an idr tree |
* @idp: idr handle |
* |
* Free all id mappings and all idp_layers. After this function, @idp is |
* completely unused and can be freed / recycled. The caller is |
* responsible for ensuring that no one else accesses @idp during or after |
* idr_destroy(). |
* |
* A typical clean-up sequence for objects stored in an idr tree will use |
 * idr_for_each() to free all objects, if necessary, then idr_destroy() to
* free up the id mappings and cached idr_layers. |
*/ |
void idr_destroy(struct idr *idp) |
{ |
__idr_remove_all(idp); |
|
while (idp->id_free_cnt) { |
struct idr_layer *p = get_from_free_list(idp); |
kfree(p); |
} |
} |
EXPORT_SYMBOL(idr_destroy); |
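
/*
 * Clean-up sketch matching the comment above (free_cb and my_idr are
 * hypothetical; note that idr_for_each() is compiled out under #if 0
 * further below in this port):
 *
 *	static int free_cb(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&my_idr, free_cb, NULL);
 *	idr_destroy(&my_idr);
 */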
|
|
/** |
* idr_find - return pointer for given id |
* @idp: idr handle |
* @id: lookup key |
* |
* Return the pointer given the id it has been registered with. A %NULL |
* return indicates that @id is not valid or you passed %NULL in |
* idr_get_new(). |
* |
* This function can be called under rcu_read_lock(), given that the leaf |
* pointers lifetimes are correctly managed. |
*/ |
void *idr_find_slowpath(struct idr *idp, int id) |
{ |
int n; |
struct idr_layer *p; |
|
/* |
* If @id is negative, idr_find() used to ignore the sign bit and |
* performed lookup with the rest of bits, which is weird and can |
* lead to very obscure bugs. We're now returning NULL for all |
* negative IDs but just in case somebody was depending on the sign |
* bit being ignored, let's trigger WARN_ON_ONCE() so that they can |
* be detected and fixed. WARN_ON_ONCE() can later be removed. |
*/ |
if (WARN_ON_ONCE(id < 0)) |
return NULL; |
|
p = rcu_dereference_raw(idp->top); |
if (!p) |
return NULL; |
n = (p->layer+1) * IDR_BITS; |
|
if (id > idr_max(p->layer + 1)) |
return NULL; |
BUG_ON(n == 0); |
|
while (n > 0 && p) { |
n -= IDR_BITS; |
BUG_ON(n != p->layer*IDR_BITS); |
p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); |
} |
return((void *)p); |
} |
EXPORT_SYMBOL(idr_find_slowpath); |
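
/*
 * Lookup sketch (my_idr is hypothetical).  In mainline, idr_find() is a
 * static-inline fast path in the header that checks idr->hint before
 * falling back to idr_find_slowpath(); under RCU the usual pattern is:
 *
 *	rcu_read_lock();
 *	obj = idr_find(&my_idr, id);
 *	rcu_read_unlock();
 */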
|
#if 0 |
/** |
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback functions
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time. If it returns anything other
 * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{ |
int n, id, max, error = 0; |
struct idr_layer *p; |
struct idr_layer *pa[MAX_IDR_LEVEL + 1]; |
struct idr_layer **paa = &pa[0]; |
|
n = idp->layers * IDR_BITS; |
p = rcu_dereference_raw(idp->top); |
max = idr_max(idp->layers); |
|
id = 0; |
while (id >= 0 && id <= max) { |
while (n > 0 && p) { |
n -= IDR_BITS; |
*paa++ = p; |
p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); |
} |
|
if (p) { |
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);
#endif

/**
 * idr_get_next - lookup next object of id to given id.
 * @idp: idr handle
 * @nextidp:  pointer to lookup key
 *
* Returns pointer to registered object with id, which is next number to |
* given id. After being looked up, *@nextidp will be updated for the next |
* iteration. |
* |
* This function can be called under rcu_read_lock(), given that the leaf |
* pointers lifetimes are correctly managed. |
*/ |
|
void *idr_get_next(struct idr *idp, int *nextidp) |
{ |
struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1]; |
struct idr_layer **paa = &pa[0]; |
int id = *nextidp; |
int n, max; |
|
/* find first ent */ |
p = rcu_dereference_raw(idp->top); |
if (!p) |
return NULL; |
n = (p->layer + 1) * IDR_BITS; |
max = idr_max(p->layer + 1); |
|
while (id >= 0 && id <= max) { |
while (n > 0 && p) { |
n -= IDR_BITS; |
*paa++ = p; |
p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); |
} |
|
if (p) { |
			*nextidp = id;
return p; |
} |
|
/* |
* Proceed to the next layer at the current level. Unlike |
* idr_for_each(), @id isn't guaranteed to be aligned to |
* layer boundary at this point and adding 1 << n may |
* incorrectly skip IDs. Make sure we jump to the |
* beginning of the next layer using round_up(). |
*/ |
id = round_up(id + 1, 1 << n); |
while (n < fls(id)) { |
n += IDR_BITS; |
p = *--paa; |
		}
} |
return NULL; |
} |
EXPORT_SYMBOL(idr_get_next); |
|
/** |
* idr_replace - replace pointer for given id |
* @idp: idr handle |
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A %-ENOENT return indicates that @id was not found.
 * A %-EINVAL indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
int n; |
struct idr_layer *p, *old_p; |
|
/* see comment in idr_find_slowpath() */ |
if (WARN_ON_ONCE(id < 0)) |
return ERR_PTR(-EINVAL); |
|
p = idp->top; |
if (!p) |
return ERR_PTR(-EINVAL); |
|
n = (p->layer+1) * IDR_BITS; |
|
if (id >= (1 << n)) |
return ERR_PTR(-EINVAL); |
|
	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
} |
|
n = id & IDR_MASK; |
if (unlikely(p == NULL || !test_bit(n, p->bitmap))) |
return ERR_PTR(-ENOENT); |
|
old_p = p->ary[n]; |
	rcu_assign_pointer(p->ary[n], ptr);

	return old_p;
}
EXPORT_SYMBOL(idr_replace);

/**
 * idr_init - initialize idr handle
 * @idp:	idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp) |
{ |
memset(idp, 0, sizeof(struct idr)); |
spin_lock_init(&idp->lock); |
} |
EXPORT_SYMBOL(idr_init); |
|
#if 0 |
|
/** |
* DOC: IDA description |
* IDA - IDR based ID allocator |
* |
* This is id allocator without id -> pointer translation. Memory |
 * usage is much lower than full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida:	ida handle
 * @gfp_mask:	memory allocation flag
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
if (!ida->free_bitmap) { |
struct ida_bitmap *bitmap; |
|
bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask); |
if (!bitmap) |
return 0; |
|
		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida:	ida handle
 * @starting_id: id to start search at
 * @p_id:	pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return %-EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... %0x7fffffff.
*/ |
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) |
{ |
struct idr_layer *pa[MAX_IDR_LEVEL + 1]; |
struct ida_bitmap *bitmap; |
unsigned long flags; |
int idr_id = starting_id / IDA_BITMAP_BITS; |
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;
|
restart: |
/* get vacant slot */ |
t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr); |
if (t < 0) |
return t == -ENOMEM ? -EAGAIN : t; |
|
if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT) |
return -ENOSPC; |
|
if (t != idr_id) |
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
				(void *)bitmap);
		pa[0]->count++;
	}

	/* lookup for empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
} |
|
id = idr_id * IDA_BITMAP_BITS + t; |
if (id >= MAX_IDR_BIT) |
return -ENOSPC; |
|
__set_bit(t, bitmap->bitmap); |
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have small memory foot print.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
		kfree(p);
	}

	*p_id = id;
	return 0;
}
EXPORT_SYMBOL(ida_get_new_above); |
|
/** |
* ida_remove - remove the given ID |
* @ida: ida handle |
* @id: ID to free |
 */
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

/* clear full bits while looking up the leaf idr_layer */ |
while ((shift > 0) && p) { |
n = (idr_id >> shift) & IDR_MASK; |
__clear_bit(n, p->bitmap); |
p = p->ary[n]; |
shift -= IDR_BITS; |
} |

	if (p == NULL)
goto err; |
|
n = idr_id & IDR_MASK; |
__clear_bit(n, p->bitmap); |
|
bitmap = (void *)p->ary[n]; |
if (!test_bit(offset, bitmap->bitmap)) |
		goto err;

/* update bitmap and remove it if empty */ |
__clear_bit(offset, bitmap->bitmap); |
if (--bitmap->nr_busy == 0) { |
__set_bit(n, p->bitmap); /* to please idr_remove() */ |
idr_remove(&ida->idr, idr_id); |
free_bitmap(ida, bitmap); |
} |

	return;

 err:
	printk(KERN_WARNING
	       "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida:	ida handle
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_init - initialize ida handle
 * @ida:	ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);
|
|
#endif |
|
|
/* the bitops helpers below index full words; define BITOP_WORD here in
 * case the port's headers don't already provide it */
#ifndef BITOP_WORD
#define BITOP_WORD(nr)		((nr) / BITS_PER_LONG)
#endif

unsigned long find_first_bit(const unsigned long *addr, unsigned long size) |
{ |
const unsigned long *p = addr; |
unsigned long result = 0; |
unsigned long tmp; |
|
while (size & ~(BITS_PER_LONG-1)) { |
if ((tmp = *(p++))) |
goto found; |
result += BITS_PER_LONG; |
size -= BITS_PER_LONG; |
} |
if (!size) |
return result; |
|
tmp = (*p) & (~0UL >> (BITS_PER_LONG - size)); |
if (tmp == 0UL) /* Are any bits set? */ |
return result + size; /* Nope. */ |
found: |
return result + __ffs(tmp); |
} |
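
/*
 * Example: for a word with only bit 4 set, find_first_bit(&word,
 * BITS_PER_LONG) returns 4; if no bit is set it returns the @size
 * argument, which callers must treat as "not found".
 */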
|
unsigned long find_next_bit(const unsigned long *addr, unsigned long size, |
unsigned long offset) |
{ |
const unsigned long *p = addr + BITOP_WORD(offset); |
unsigned long result = offset & ~(BITS_PER_LONG-1); |
unsigned long tmp; |
|
if (offset >= size) |
return size; |
size -= result; |
offset %= BITS_PER_LONG; |
if (offset) { |
tmp = *(p++); |
tmp &= (~0UL << offset); |
if (size < BITS_PER_LONG) |
goto found_first; |
if (tmp) |
goto found_middle; |
size -= BITS_PER_LONG; |
result += BITS_PER_LONG; |
} |
while (size & ~(BITS_PER_LONG-1)) { |
if ((tmp = *(p++))) |
goto found_middle; |
result += BITS_PER_LONG; |
size -= BITS_PER_LONG; |
} |
if (!size) |
return result; |
tmp = *p; |
|
found_first: |
tmp &= (~0UL >> (BITS_PER_LONG - size)); |
if (tmp == 0UL) /* Are any bits set? */ |
return result + size; /* Nope. */ |
found_middle: |
return result + __ffs(tmp); |
} |
|
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, |
unsigned long offset) |
{ |
const unsigned long *p = addr + BITOP_WORD(offset); |
unsigned long result = offset & ~(BITS_PER_LONG-1); |
unsigned long tmp; |
|
if (offset >= size) |
return size; |
size -= result; |
offset %= BITS_PER_LONG; |
if (offset) { |
tmp = *(p++); |
tmp |= ~0UL >> (BITS_PER_LONG - offset); |
if (size < BITS_PER_LONG) |
goto found_first; |
if (~tmp) |
goto found_middle; |
size -= BITS_PER_LONG; |
result += BITS_PER_LONG; |
} |
while (size & ~(BITS_PER_LONG-1)) { |
if (~(tmp = *(p++))) |
goto found_middle; |
result += BITS_PER_LONG; |
size -= BITS_PER_LONG; |
} |
if (!size) |
return result; |
tmp = *p; |
|
found_first: |
tmp |= ~0UL << size; |
if (tmp == ~0UL) /* Are any bits zero? */ |
return result + size; /* Nope. */ |
found_middle: |
return result + ffz(tmp); |
} |
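
/*
 * Worked example: with the single word 0x7 (bits 0-2 set),
 * find_next_zero_bit(&word, BITS_PER_LONG, 0) returns 3.  This is
 * exactly how sub_alloc() locates the first vacant slot at or after
 * offset @n in an idr_layer bitmap.
 */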
|
unsigned int hweight32(unsigned int w) |
{ |
unsigned int res = w - ((w >> 1) & 0x55555555); |
res = (res & 0x33333333) + ((res >> 2) & 0x33333333); |
res = (res + (res >> 4)) & 0x0F0F0F0F; |
res = res + (res >> 8); |
return (res + (res >> 16)) & 0x000000FF; |
} |
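
/*
 * Sanity check for the SWAR reduction above: hweight32(0xF0F0F0F0) == 16
 * (four bytes of 0xF0, four set bits each).
 */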
|