Subversion Repositories Kolibri OS

Compare Revisions

Regard whitespace — comparing Rev 5055 → Rev 5056

/drivers/ddk/linux/idr.c
18,12 → 18,6
* pointer or what ever, we treat it as a (void *). You can pass this
* id to a user for him to pass back at a later time. You then pass
* that id to this code and it returns your pointer.
 
* You can release ids at any time. When all ids are released, most of
* the memory is returned (we keep MAX_IDR_FREE) in a local pool so we
* don't need to go to the memory "store" during an id allocate, just
* so you don't need to be too concerned about locking and conflicts
* with the slab allocator.
*/
 
#include <linux/kernel.h>
136,7 → 130,7
 
/*
 * free_layer - hand an idr_layer to the RCU-deferred free path.
 * @idr: tree the layer belongs to
 * @p:   layer being released
 *
 * Clears the cached lookup hint when it points at the layer being freed,
 * so no stale pointer survives the release, then queues the layer for
 * RCU reclamation via idr_layer_rcu_free().
 *
 * Fix: the diff residue here carried both the old condition
 * "idr->hint && idr->hint == p" and the new "idr->hint == p" as nested
 * ifs; only the simplified rev-5056 condition is kept. The extra NULL
 * check was redundant — a NULL hint can never equal a layer being freed
 * in a way that matters, and re-initialising a NULL hint is harmless.
 */
static inline void free_layer(struct idr *idr, struct idr_layer *p)
{
	if (idr->hint == p)
		RCU_INIT_POINTER(idr->hint, NULL);
	idr_layer_rcu_free(&p->rcu_head);
}
181,7 → 175,7
}
}
 
int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
static int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
while (idp->id_free_cnt < MAX_IDR_FREE) {
struct idr_layer *new;
192,7 → 186,6
}
return 1;
}
EXPORT_SYMBOL(__idr_pre_get);
 
/**
* sub_alloc - try to allocate an id without growing the tree depth
235,7 → 228,7
id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
 
/* if already at the top layer, we need to grow */
if (id >= 1 << (idp->layers * IDR_BITS)) {
if (id > idr_max(idp->layers)) {
*starting_id = id;
return -EAGAIN;
}
359,21 → 352,7
idr_mark_full(pa, id);
}
 
/**
 * __idr_get_new_above - allocate a new id starting at or above @starting_id
 * @idp: idr handle
 * @ptr: pointer to be associated with the new id
 * @starting_id: lowest id value acceptable to the caller
 * @id: output; receives the allocated id on success
 *
 * Finds an empty slot at or above @starting_id, installs @ptr there via
 * idr_fill_slot() and stores the chosen id through @id.
 *
 * Returns 0 on success; -EAGAIN when the internal allocation fails
 * (-ENOMEM from idr_get_empty_slot() is translated so the caller can
 * retry after __idr_pre_get()); any other negative error is passed
 * through unchanged. @id is left untouched on failure.
 */
int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
/* pa[] records the layer path walked down to the slot; one entry per level. */
struct idr_layer *pa[MAX_IDR_LEVEL + 1];
int rv;

/* NOTE(review): last argument reuses @idp as the per-cpu-layer context;
 * matches idr_get_empty_slot()'s contract elsewhere in this file — confirm. */
rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp);
if (rv < 0)
return rv == -ENOMEM ? -EAGAIN : rv;

idr_fill_slot(idp, ptr, rv, pa);
*id = rv;
return 0;
}
EXPORT_SYMBOL(__idr_get_new_above);
 
/**
* idr_preload - preload for idr_alloc()
* @gfp_mask: allocation mask to use for preloading
550,6 → 529,11
if (id < 0)
return;
 
if (id > idr_max(idp->layers)) {
idr_remove_warning(id);
return;
}
 
sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
idp->top->ary[0]) {
567,20 → 551,10
bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
free_layer(idp, to_free);
}
while (idp->id_free_cnt >= MAX_IDR_FREE) {
p = get_from_free_list(idp);
/*
* Note: we don't call the rcu callback here, since the only
* layers that fall into the freelist are those that have been
* preallocated.
*/
kfree(p);
}
return;
}
EXPORT_SYMBOL(idr_remove);
 
void __idr_remove_all(struct idr *idp)
static void __idr_remove_all(struct idr *idp)
{
int n, id, max;
int bt_mask;
589,16 → 563,17
struct idr_layer **paa = &pa[0];
 
n = idp->layers * IDR_BITS;
p = idp->top;
*paa = idp->top;
rcu_assign_pointer(idp->top, NULL);
max = idr_max(idp->layers);
 
id = 0;
while (id >= 0 && id <= max) {
p = *paa;
while (n > IDR_BITS && p) {
n -= IDR_BITS;
*paa++ = p;
p = p->ary[(id >> n) & IDR_MASK];
*++paa = p;
}
 
bt_mask = id;
605,15 → 580,14
id += 1 << n;
/* Get the highest bit that the above add changed from 0->1. */
while (n < fls(id ^ bt_mask)) {
if (p)
free_layer(idp, p);
if (*paa)
free_layer(idp, *paa);
n += IDR_BITS;
p = *--paa;
--paa;
}
}
idp->layers = 0;
}
EXPORT_SYMBOL(__idr_remove_all);
 
/**
* idr_destroy - release all cached layers within an idr tree
692,15 → 666,16
struct idr_layer **paa = &pa[0];
 
n = idp->layers * IDR_BITS;
p = rcu_dereference_raw(idp->top);
*paa = rcu_dereference_raw(idp->top);
max = idr_max(idp->layers);
 
id = 0;
while (id >= 0 && id <= max) {
p = *paa;
while (n > 0 && p) {
n -= IDR_BITS;
*paa++ = p;
p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
*++paa = p;
}
 
if (p) {
712,7 → 687,7
id += 1 << n;
while (n < fls(id)) {
n += IDR_BITS;
p = *--paa;
--paa;
}
}
 
740,7 → 715,7
int n, max;
 
/* find first ent */
p = rcu_dereference_raw(idp->top);
p = *paa = rcu_dereference_raw(idp->top);
if (!p)
return NULL;
n = (p->layer + 1) * IDR_BITS;
747,10 → 722,11
max = idr_max(p->layer + 1);
 
while (id >= 0 && id <= max) {
p = *paa;
while (n > 0 && p) {
n -= IDR_BITS;
*paa++ = p;
p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
*++paa = p;
}
 
if (p) {
768,7 → 744,7
id = round_up(id + 1, 1 << n);
while (n < fls(id)) {
n += IDR_BITS;
p = *--paa;
--paa;
}
}
return NULL;
798,14 → 774,12
 
p = idp->top;
if (!p)
return ERR_PTR(-EINVAL);
return ERR_PTR(-ENOENT);
 
n = (p->layer+1) * IDR_BITS;
if (id > idr_max(p->layer + 1))
return ERR_PTR(-ENOENT);
 
if (id >= (1 << n))
return ERR_PTR(-EINVAL);
 
n -= IDR_BITS;
n = p->layer * IDR_BITS;
while ((n > 0) && p) {
p = p->ary[(id >> n) & IDR_MASK];
n -= IDR_BITS;
842,7 → 816,17
}
EXPORT_SYMBOL(idr_init);
 
/*
 * idr_has_entry - probe callback used by idr_is_empty().
 * All arguments are ignored; it unconditionally reports "found".
 * NOTE(review): usefulness relies on idr_for_each() stopping on a
 * non-zero callback return — confirm against idr_for_each's contract.
 */
static int idr_has_entry(int id, void *p, void *data)
{
	(void)id;
	(void)p;
	(void)data;
	return 1;
}
 
/*
 * idr_is_empty - check whether an idr tree holds any installed id.
 * @idp: idr handle
 *
 * Walks the tree with idr_for_each() using a probe callback that
 * reports the first visited entry; a zero walk result therefore
 * means no entry exists.
 */
bool idr_is_empty(struct idr *idp)
{
	int walk_result = idr_for_each(idp, idr_has_entry, NULL);

	return walk_result == 0;
}
EXPORT_SYMBOL(idr_is_empty);
 
/**
* DOC: IDA description
* IDA - IDR based ID allocator
1006,6 → 990,9
int n;
struct ida_bitmap *bitmap;
 
if (idr_id > idr_max(ida->idr.layers))
goto err;
 
/* clear full bits while looking up the leaf idr_layer */
while ((shift > 0) && p) {
n = (idr_id >> shift) & IDR_MASK;
1021,7 → 1008,7
__clear_bit(n, p->bitmap);
 
bitmap = (void *)p->ary[n];
if (!test_bit(offset, bitmap->bitmap))
if (!bitmap || !test_bit(offset, bitmap->bitmap))
goto err;
 
/* update bitmap and remove it if empty */
1244,3 → 1231,17
return (res + (res >> 16)) & 0x000000FF;
}
 
/*
 * hweight64 - Hamming weight (number of set bits) of a 64-bit value.
 * @w: value to count bits in
 *
 * On 32-bit builds the count is delegated to hweight32() on each half.
 * On 64-bit builds Kernighan's bit-clearing loop is used: each
 * iteration of "w &= w - 1" clears the lowest set bit, so the loop
 * runs exactly once per set bit and yields the same count as the
 * parallel (SWAR) formulation.
 */
unsigned long hweight64(__u64 w)
{
#if BITS_PER_LONG == 32
	return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
#elif BITS_PER_LONG == 64
	unsigned long bits = 0;

	while (w) {
		w &= w - 1;	/* drop the lowest set bit */
		bits++;
	}
	return bits;
#endif
}