Subversion Repositories Kolibri OS


/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *      Copyright (C) 2002 by Concurrent Computer Corporation
 *      Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate a pointer, or
 * whatever, with that id; we treat it as a (void *).  You can pass this
 * id to a user to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep MAX_IDR_FREE id_layer structs) in a
 * local pool so we don't need to go to the memory "store" during an id
 * allocate; this way you don't need to be too concerned about locking
 * and conflicts with the slab allocator.
 */
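
/*
 * A minimal usage sketch of the service described above, assuming the
 * caller serializes all modifications (error handling elided; @ptr is a
 * placeholder):
 *
 *      struct idr idr;
 *
 *      idr_init(&idr);
 *      id = idr_alloc(&idr, ptr, 0, 0, GFP_KERNEL);
 *      if (id < 0)
 *              error;
 *      ptr = idr_find(&idr, id);
 *      idr_remove(&idr, id);
 *      idr_destroy(&idr);
 */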

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/idr.h>
//#include <stdlib.h>

static inline void * __must_check ERR_PTR(long error)
{
        return (void *) error;
}

unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
                                 unsigned long offset);

#define MAX_IDR_SHIFT           (sizeof(int) * 8 - 1)
#define MAX_IDR_BIT             (1U << MAX_IDR_SHIFT)

/* Leave the possibility of an incomplete final layer */
#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)

/* Number of id_layer structs to leave in free list */
#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)

static struct idr_layer *idr_preload_head;
static int idr_preload_cnt;

static DEFINE_SPINLOCK(simple_ida_lock);

/* the maximum ID which can be allocated given idr->layers */
static int idr_max(int layers)
{
        int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT);

        return (1 << bits) - 1;
}

/*
 * Prefix mask for an idr_layer at @layer.  For layer 0, the prefix mask is
 * all bits except for the lower IDR_BITS.  For layer 1, 2 * IDR_BITS, and
 * so on.
 */
static int idr_layer_prefix_mask(int layer)
{
        return ~idr_max(layer + 1);
}

static struct idr_layer *get_from_free_list(struct idr *idp)
{
        struct idr_layer *p;
        unsigned long flags;

        spin_lock_irqsave(&idp->lock, flags);
        if ((p = idp->id_free)) {
                idp->id_free = p->ary[0];
                idp->id_free_cnt--;
                p->ary[0] = NULL;
        }
        spin_unlock_irqrestore(&idp->lock, flags);
        return p;
}

/**
 * idr_layer_alloc - allocate a new idr_layer
 * @gfp_mask: allocation mask
 * @layer_idr: optional idr to allocate from
 *
 * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch
 * one from the preload buffer.  If @layer_idr is not %NULL, fetch an
 * idr_layer from @layer_idr->id_free.
 *
 * @layer_idr is to maintain backward compatibility with the old alloc
 * interface - idr_pre_get() and idr_get_new*() - and will be removed
 * together with per-pool preload buffer.
 */
static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
{
        struct idr_layer *new;

        /* this is the old path, bypass to get_from_free_list() */
        if (layer_idr)
                return get_from_free_list(layer_idr);

        /* try to allocate directly from kmem_cache */
        new = kzalloc(sizeof(struct idr_layer), gfp_mask);
        if (new)
                return new;

        /* fall back to the preload buffer filled by idr_preload() */
        new = idr_preload_head;
        if (new) {
                idr_preload_head = new->ary[0];
                idr_preload_cnt--;
                new->ary[0] = NULL;
        }
        return new;
}

static void idr_layer_rcu_free(struct rcu_head *head)
{
        struct idr_layer *layer;

        layer = container_of(head, struct idr_layer, rcu_head);
        kfree(layer);
}

static inline void free_layer(struct idr *idr, struct idr_layer *p)
{
        if (idr->hint && idr->hint == p)
                RCU_INIT_POINTER(idr->hint, NULL);
        idr_layer_rcu_free(&p->rcu_head);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
        p->ary[0] = idp->id_free;
        idp->id_free = p;
        idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
        unsigned long flags;

        /*
         * Depends on the return element being zeroed.
         */
        spin_lock_irqsave(&idp->lock, flags);
        __move_to_free_list(idp, p);
        spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
        struct idr_layer *p = pa[0];
        int l = 0;

        __set_bit(id & IDR_MASK, p->bitmap);
        /*
         * If this layer is full mark the bit in the layer above to
         * show that this part of the radix tree is full.  This may
         * complete the layer above and require walking up the radix
         * tree.
         */
        while (bitmap_full(p->bitmap, IDR_SIZE)) {
                if (!(p = pa[++l]))
                        break;
                id = id >> IDR_BITS;
                __set_bit((id & IDR_MASK), p->bitmap);
        }
}

int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
        while (idp->id_free_cnt < MAX_IDR_FREE) {
                struct idr_layer *new;

                new = kzalloc(sizeof(struct idr_layer), gfp_mask);
                if (new == NULL)
                        return 0;
                move_to_free_list(idp, new);
        }
        return 1;
}
EXPORT_SYMBOL(__idr_pre_get);

/**
 * sub_alloc - try to allocate an id without growing the tree depth
 * @idp: idr handle
 * @starting_id: id to start search at
 * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
 * @gfp_mask: allocation mask for idr_layer_alloc()
 * @layer_idr: optional idr passed to idr_layer_alloc()
 *
 * Allocate an id in range [@starting_id, INT_MAX] from @idp without
 * growing its depth.  Returns
 *
 *  the allocated id >= 0 if successful,
 *  -EAGAIN if the tree needs to grow for allocation to succeed,
 *  -ENOSPC if the id space is exhausted,
 *  -ENOMEM if more idr_layers need to be allocated.
 */
static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
                     gfp_t gfp_mask, struct idr *layer_idr)
{
        int n, m, sh;
        struct idr_layer *p, *new;
        int l, id, oid;

        id = *starting_id;
 restart:
        p = idp->top;
        l = idp->layers;
        pa[l--] = NULL;
        while (1) {
                /*
                 * We run around this while loop until we reach the leaf node...
                 */
                n = (id >> (IDR_BITS*l)) & IDR_MASK;
                m = find_next_zero_bit(p->bitmap, IDR_SIZE, n);
                if (m == IDR_SIZE) {
                        /* no space available, go back to previous layer. */
                        l++;
                        oid = id;
                        id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

                        /* if already at the top layer, we need to grow */
                        if (id >= 1 << (idp->layers * IDR_BITS)) {
                                *starting_id = id;
                                return -EAGAIN;
                        }
                        p = pa[l];
                        BUG_ON(!p);

                        /* If we need to go up one layer, continue the
                         * loop; otherwise, restart from the top.
                         */
                        sh = IDR_BITS * (l + 1);
                        if (oid >> sh == id >> sh)
                                continue;
                        else
                                goto restart;
                }
                if (m != n) {
                        sh = IDR_BITS*l;
                        id = ((id >> sh) ^ n ^ m) << sh;
                }
                if ((id >= MAX_IDR_BIT) || (id < 0))
                        return -ENOSPC;
                if (l == 0)
                        break;
                /*
                 * Create the layer below if it is missing.
                 */
                if (!p->ary[m]) {
                        new = idr_layer_alloc(gfp_mask, layer_idr);
                        if (!new)
                                return -ENOMEM;
                        new->layer = l-1;
                        new->prefix = id & idr_layer_prefix_mask(new->layer);
                        rcu_assign_pointer(p->ary[m], new);
                        p->count++;
                }
                pa[l--] = p;
                p = p->ary[m];
        }

        pa[l] = p;
        return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
                              struct idr_layer **pa, gfp_t gfp_mask,
                              struct idr *layer_idr)
{
        struct idr_layer *p, *new;
        int layers, v, id;
        unsigned long flags;

        id = starting_id;
build_up:
        p = idp->top;
        layers = idp->layers;
        if (unlikely(!p)) {
                if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
                        return -ENOMEM;
                p->layer = 0;
                layers = 1;
        }
        /*
         * Add a new layer to the top of the tree if the requested
         * id is larger than the currently allocated space.
         */
        while (id > idr_max(layers)) {
                layers++;
                if (!p->count) {
                        /* special case: if the tree is currently empty,
                         * then we grow the tree by moving the top node
                         * upwards.
                         */
                        p->layer++;
                        WARN_ON_ONCE(p->prefix);
                        continue;
                }
                if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
                        /*
                         * The allocation failed.  If we built part of
                         * the structure tear it down.
                         */
                        spin_lock_irqsave(&idp->lock, flags);
                        for (new = p; p && p != idp->top; new = p) {
                                p = p->ary[0];
                                new->ary[0] = NULL;
                                new->count = 0;
                                bitmap_clear(new->bitmap, 0, IDR_SIZE);
                                __move_to_free_list(idp, new);
                        }
                        spin_unlock_irqrestore(&idp->lock, flags);
                        return -ENOMEM;
                }
                new->ary[0] = p;
                new->count = 1;
                new->layer = layers-1;
                new->prefix = id & idr_layer_prefix_mask(new->layer);
                if (bitmap_full(p->bitmap, IDR_SIZE))
                        __set_bit(0, new->bitmap);
                p = new;
        }
        rcu_assign_pointer(idp->top, p);
        idp->layers = layers;
        v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
        if (v == -EAGAIN)
                goto build_up;
        return v;
}

/*
 * @id and @pa are from a successful allocation from idr_get_empty_slot().
 * Install the user pointer @ptr and mark the slot full.
 */
static void idr_fill_slot(struct idr *idr, void *ptr, int id,
                          struct idr_layer **pa)
{
        /* update hint used for lookup, cleared from free_layer() */
        rcu_assign_pointer(idr->hint, pa[0]);

        rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr);
        pa[0]->count++;
        idr_mark_full(pa, id);
}

int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
        struct idr_layer *pa[MAX_IDR_LEVEL + 1];
        int rv;

        rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp);
        if (rv < 0)
                return rv == -ENOMEM ? -EAGAIN : rv;

        idr_fill_slot(idp, ptr, rv, pa);
        *id = rv;
        return 0;
}
EXPORT_SYMBOL(__idr_get_new_above);

/**
 * idr_preload - preload for idr_alloc()
 * @gfp_mask: allocation mask to use for preloading
 *
 * Preload the idr_layer buffer for idr_alloc().  Can only be used from
 * process context and each idr_preload() invocation should be matched with
 * idr_preload_end().
 *
 * The first idr_alloc() in the preloaded section can be treated as if it
 * were invoked with @gfp_mask used for preloading.  This allows using more
 * permissive allocation masks for idrs protected by spinlocks.
 *
 * For example, if idr_alloc() below fails, the failure can be treated as
 * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT.
 *
 *      idr_preload(GFP_KERNEL);
 *      spin_lock(lock);
 *
 *      id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
 *
 *      spin_unlock(lock);
 *      idr_preload_end();
 *      if (id < 0)
 *              error;
 */
void idr_preload(gfp_t gfp_mask)
{
        /*
         * idr_alloc() is likely to succeed w/o full idr_layer buffer and
         * return value from idr_alloc() needs to be checked for failure
         * anyway.  Silently give up if allocation fails.  The caller can
         * treat failures from idr_alloc() as if idr_alloc() were called
         * with @gfp_mask which should be enough.
         */
        while (idr_preload_cnt < MAX_IDR_FREE) {
                struct idr_layer *new;

                new = kzalloc(sizeof(struct idr_layer), gfp_mask);
                if (!new)
                        break;

                /* link the new one to the preload list */
                new->ary[0] = idr_preload_head;
                idr_preload_head = new;
                idr_preload_cnt++;
        }
}
EXPORT_SYMBOL(idr_preload);

/**
 * idr_alloc - allocate new idr entry
 * @idr: the (initialized) idr
 * @ptr: pointer to be associated with the new id
 * @start: the minimum id (inclusive)
 * @end: the maximum id (exclusive, <= 0 for max)
 * @gfp_mask: memory allocation flags
 *
 * Allocate an id in [start, end) and associate it with @ptr.  If no ID is
 * available in the specified range, returns -ENOSPC.  On memory allocation
 * failure, returns -ENOMEM.
 *
 * Note that @end is treated as max when <= 0.  This is to always allow
 * using @start + N as @end as long as N is inside integer range.
 *
 * The user is responsible for exclusively synchronizing all operations
 * which may modify @idr.  However, read-only accesses such as idr_find()
 * or iteration can be performed under RCU read lock provided the user
 * destroys @ptr in RCU-safe way after removal from idr.
 */
int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
        int max = end > 0 ? end - 1 : INT_MAX;  /* inclusive upper limit */
        struct idr_layer *pa[MAX_IDR_LEVEL + 1];
        int id;

        /* sanity checks */
        if (WARN_ON_ONCE(start < 0))
                return -EINVAL;
        if (unlikely(max < start))
                return -ENOSPC;

        /* allocate id */
        id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL);
        if (unlikely(id < 0))
                return id;
        if (unlikely(id > max))
                return -ENOSPC;

        idr_fill_slot(idr, ptr, id, pa);
        return id;
}
EXPORT_SYMBOL_GPL(idr_alloc);
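
/*
 * For example, allocating an id anywhere at or above 16 with no upper
 * bound (a sketch; @my_idr and @ptr are placeholders and modifications
 * are assumed to be serialized by the caller):
 *
 *      id = idr_alloc(my_idr, ptr, 16, 0, GFP_KERNEL);
 *      if (id < 0)
 *              error;
 */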

/**
 * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
 * @idr: the (initialized) idr
 * @ptr: pointer to be associated with the new id
 * @start: the minimum id (inclusive)
 * @end: the maximum id (exclusive, <= 0 for max)
 * @gfp_mask: memory allocation flags
 *
 * Essentially the same as idr_alloc, but prefers to allocate progressively
 * higher ids if it can.  If the "cur" counter wraps, then it will start again
 * at the "start" end of the range and allocate one that has already been used.
 */
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end,
                        gfp_t gfp_mask)
{
        int id;

        id = idr_alloc(idr, ptr, max(start, idr->cur), end, gfp_mask);
        if (id == -ENOSPC)
                id = idr_alloc(idr, ptr, start, end, gfp_mask);

        if (likely(id >= 0))
                idr->cur = id + 1;
        return id;
}
EXPORT_SYMBOL(idr_alloc_cyclic);
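
/*
 * A sketch of cyclic allocation (same placeholder @my_idr as above):
 * successive calls tend to return increasing ids, wrapping back to
 * @start only after the internal "cur" counter passes the end of the
 * range:
 *
 *      id = idr_alloc_cyclic(my_idr, ptr, 0, 0, GFP_KERNEL);
 */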

static void idr_remove_warning(int id)
{
        WARN(1, "idr_remove called for id=%d which is not allocated.\n", id);
}

static void sub_remove(struct idr *idp, int shift, int id)
{
        struct idr_layer *p = idp->top;
        struct idr_layer **pa[MAX_IDR_LEVEL + 1];
        struct idr_layer ***paa = &pa[0];
        struct idr_layer *to_free;
        int n;

        *paa = NULL;
        *++paa = &idp->top;

        while ((shift > 0) && p) {
                n = (id >> shift) & IDR_MASK;
                __clear_bit(n, p->bitmap);
                *++paa = &p->ary[n];
                p = p->ary[n];
                shift -= IDR_BITS;
        }
        n = id & IDR_MASK;
        if (likely(p != NULL && test_bit(n, p->bitmap))) {
                __clear_bit(n, p->bitmap);
                rcu_assign_pointer(p->ary[n], NULL);
                to_free = NULL;
                while (*paa && !--((**paa)->count)) {
                        if (to_free)
                                free_layer(idp, to_free);
                        to_free = **paa;
                        **paa-- = NULL;
                }
                if (!*paa)
                        idp->layers = 0;
                if (to_free)
                        free_layer(idp, to_free);
        } else
                idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
        struct idr_layer *p;
        struct idr_layer *to_free;

        if (id < 0)
                return;

        sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
        if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
            idp->top->ary[0]) {
                /*
                 * Single child at leftmost slot: we can shrink the tree.
                 * This level is not needed anymore since when layers are
                 * inserted, they are inserted at the top of the existing
                 * tree.
                 */
                to_free = idp->top;
                p = idp->top->ary[0];
                rcu_assign_pointer(idp->top, p);
                --idp->layers;
                to_free->count = 0;
                bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
                free_layer(idp, to_free);
        }
        while (idp->id_free_cnt >= MAX_IDR_FREE) {
                p = get_from_free_list(idp);
                /*
                 * Note: we don't call the rcu callback here, since the only
                 * layers that fall into the freelist are those that have been
                 * preallocated.
                 */
                kfree(p);
        }
}
EXPORT_SYMBOL(idr_remove);

void __idr_remove_all(struct idr *idp)
{
        int n, id, max;
        int bt_mask;
        struct idr_layer *p;
        struct idr_layer *pa[MAX_IDR_LEVEL + 1];
        struct idr_layer **paa = &pa[0];

        n = idp->layers * IDR_BITS;
        p = idp->top;
        rcu_assign_pointer(idp->top, NULL);
        max = idr_max(idp->layers);

        id = 0;
        while (id >= 0 && id <= max) {
                while (n > IDR_BITS && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = p->ary[(id >> n) & IDR_MASK];
                }

                bt_mask = id;
                id += 1 << n;
                /* Get the highest bit that the above add changed from 0->1. */
                while (n < fls(id ^ bt_mask)) {
                        if (p)
                                free_layer(idp, p);
                        n += IDR_BITS;
                        p = *--paa;
                }
        }
        idp->layers = 0;
}
EXPORT_SYMBOL(__idr_remove_all);

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 *
 * Free all id mappings and all idp_layers.  After this function, @idp is
 * completely unused and can be freed / recycled.  The caller is
 * responsible for ensuring that no one else accesses @idp during or after
 * idr_destroy().
 *
 * A typical clean-up sequence for objects stored in an idr tree will use
 * idr_for_each() to free all objects, if necessary, then idr_destroy() to
 * free up the id mappings and cached idr_layers.
 */
void idr_destroy(struct idr *idp)
{
        __idr_remove_all(idp);

        while (idp->id_free_cnt) {
                struct idr_layer *p = get_from_free_list(idp);
                kfree(p);
        }
}
EXPORT_SYMBOL(idr_destroy);

void *idr_find_slowpath(struct idr *idp, int id)
{
        int n;
        struct idr_layer *p;

        if (id < 0)
                return NULL;

        p = rcu_dereference_raw(idp->top);
        if (!p)
                return NULL;
        n = (p->layer+1) * IDR_BITS;

        if (id > idr_max(p->layer + 1))
                return NULL;
        BUG_ON(n == 0);

        while (n > 0 && p) {
                n -= IDR_BITS;
                BUG_ON(n != p->layer*IDR_BITS);
                p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
        }
        return (void *)p;
}
EXPORT_SYMBOL(idr_find_slowpath);

/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
                 int (*fn)(int id, void *p, void *data), void *data)
{
        int n, id, max, error = 0;
        struct idr_layer *p;
        struct idr_layer *pa[MAX_IDR_LEVEL + 1];
        struct idr_layer **paa = &pa[0];

        n = idp->layers * IDR_BITS;
        p = rcu_dereference_raw(idp->top);
        max = idr_max(idp->layers);

        id = 0;
        while (id >= 0 && id <= max) {
                while (n > 0 && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
                }

                if (p) {
                        error = fn(id, (void *)p, data);
                        if (error)
                                break;
                }

                id += 1 << n;
                while (n < fls(id)) {
                        n += IDR_BITS;
                        p = *--paa;
                }
        }

        return error;
}
EXPORT_SYMBOL(idr_for_each);
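
/*
 * A sketch of a typical clean-up pass built on idr_for_each(), using a
 * hypothetical callback my_free_cb() (the caller is assumed to have
 * serialized all modifications):
 *
 *      static int my_free_cb(int id, void *p, void *data)
 *      {
 *              kfree(p);
 *              return 0;
 *      }
 *
 *      idr_for_each(my_idr, my_free_cb, NULL);
 *      idr_destroy(my_idr);
 */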

/**
 * idr_get_next - lookup next object starting from the given id
 * @idp: idr handle
 * @nextidp: pointer to lookup key
 *
 * Returns a pointer to the registered object with the smallest id that is
 * greater than or equal to *@nextidp.  On success, *@nextidp is updated to
 * that id, ready to be advanced for the next iteration.
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers lifetimes are correctly managed.
 */
void *idr_get_next(struct idr *idp, int *nextidp)
{
        struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1];
        struct idr_layer **paa = &pa[0];
        int id = *nextidp;
        int n, max;

        /* find first ent */
        p = rcu_dereference_raw(idp->top);
        if (!p)
                return NULL;
        n = (p->layer + 1) * IDR_BITS;
        max = idr_max(p->layer + 1);

        while (id >= 0 && id <= max) {
                while (n > 0 && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
                }

                if (p) {
                        *nextidp = id;
                        return p;
                }

                /*
                 * Proceed to the next layer at the current level.  Unlike
                 * idr_for_each(), @id isn't guaranteed to be aligned to
                 * layer boundary at this point and adding 1 << n may
                 * incorrectly skip IDs.  Make sure we jump to the
                 * beginning of the next layer using round_up().
                 */
                id = round_up(id + 1, 1 << n);
                while (n < fls(id)) {
                        n += IDR_BITS;
                        p = *--paa;
                }
        }
        return NULL;
}
EXPORT_SYMBOL(idr_get_next);
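
/*
 * A sketch of iterating with idr_get_next() (assumes the leaf pointers
 * remain valid throughout, e.g. under rcu_read_lock()):
 *
 *      int id = 0;
 *      void *p;
 *
 *      while ((p = idr_get_next(my_idr, &id)) != NULL) {
 *              use(p);
 *              id++;
 *      }
 */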

/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A %-ENOENT return indicates that @id was not found.
 * A %-EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
        int n;
        struct idr_layer *p, *old_p;

        if (id < 0)
                return ERR_PTR(-EINVAL);

        p = idp->top;
        if (!p)
                return ERR_PTR(-EINVAL);

        n = (p->layer+1) * IDR_BITS;

        if (id >= (1 << n))
                return ERR_PTR(-EINVAL);

        n -= IDR_BITS;
        while ((n > 0) && p) {
                p = p->ary[(id >> n) & IDR_MASK];
                n -= IDR_BITS;
        }

        n = id & IDR_MASK;
        if (unlikely(p == NULL || !test_bit(n, p->bitmap)))
                return ERR_PTR(-ENOENT);

        old_p = p->ary[n];
        rcu_assign_pointer(p->ary[n], ptr);

        return old_p;
}
EXPORT_SYMBOL(idr_replace);
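
/*
 * A sketch of swapping in a new pointer for an existing id (assuming the
 * usual IS_ERR() helper is available to decode the ERR_PTR() returns):
 *
 *      old = idr_replace(my_idr, new_ptr, id);
 *      if (IS_ERR(old))
 *              error;
 */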

void __init idr_init_cache(void)
{
        //idr_layer_cache = kmem_cache_create("idr_layer_cache",
        //           sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp:        idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
        memset(idp, 0, sizeof(struct idr));
        spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);

/**
 * DOC: IDA description
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
        unsigned long flags;

        if (!ida->free_bitmap) {
                spin_lock_irqsave(&ida->idr.lock, flags);
                if (!ida->free_bitmap) {
                        ida->free_bitmap = bitmap;
                        bitmap = NULL;
                }
                spin_unlock_irqrestore(&ida->idr.lock, flags);
        }

        kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida:        ida handle
 * @gfp_mask:   memory allocation flag
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
        /* allocate idr_layers */
        if (!__idr_pre_get(&ida->idr, gfp_mask))
                return 0;

        /* allocate free_bitmap */
        if (!ida->free_bitmap) {
                struct ida_bitmap *bitmap;

                bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
                if (!bitmap)
                        return 0;

                free_bitmap(ida, bitmap);
        }

        return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida:        ida handle
 * @starting_id: id to start search at
 * @p_id:       pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return %-EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... %0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
        struct idr_layer *pa[MAX_IDR_LEVEL + 1];
        struct ida_bitmap *bitmap;
        unsigned long flags;
        int idr_id = starting_id / IDA_BITMAP_BITS;
        int offset = starting_id % IDA_BITMAP_BITS;
        int t, id;

 restart:
        /* get vacant slot */
        t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr);
        if (t < 0)
                return t == -ENOMEM ? -EAGAIN : t;

        if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
                return -ENOSPC;

        if (t != idr_id)
                offset = 0;
        idr_id = t;

        /* if bitmap isn't there, create a new one */
        bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
        if (!bitmap) {
                spin_lock_irqsave(&ida->idr.lock, flags);
                bitmap = ida->free_bitmap;
                ida->free_bitmap = NULL;
                spin_unlock_irqrestore(&ida->idr.lock, flags);

                if (!bitmap)
                        return -EAGAIN;

                memset(bitmap, 0, sizeof(struct ida_bitmap));
                rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
                                (void *)bitmap);
                pa[0]->count++;
        }

        /* lookup for empty slot */
        t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
        if (t == IDA_BITMAP_BITS) {
                /* no empty slot after offset, continue to the next chunk */
                idr_id++;
                offset = 0;
                goto restart;
        }

        id = idr_id * IDA_BITMAP_BITS + t;
        if (id >= MAX_IDR_BIT)
                return -ENOSPC;

        __set_bit(t, bitmap->bitmap);
        if (++bitmap->nr_busy == IDA_BITMAP_BITS)
                idr_mark_full(pa, idr_id);

        *p_id = id;

        /* Each leaf node can handle nearly a thousand slots and the
         * whole idea of ida is to have a small memory footprint.
         * Throw away extra resources one by one after each successful
         * allocation.
         */
        if (ida->idr.id_free_cnt || ida->free_bitmap) {
                struct idr_layer *p = get_from_free_list(&ida->idr);
                if (p)
                        kfree(p);
        }

        return 0;
}
EXPORT_SYMBOL(ida_get_new_above);
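
/*
 * The retry pattern described above, as a sketch (assumes @my_ida is
 * protected by a caller-provided @my_lock):
 *
 *  again:
 *      if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *              error;
 *      spin_lock(&my_lock);
 *      ret = ida_get_new_above(&my_ida, 0, &id);
 *      spin_unlock(&my_lock);
 *      if (ret == -EAGAIN)
 *              goto again;
 */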

/**
 * ida_remove - remove the given ID
 * @ida:        ida handle
 * @id:         ID to free
 */
void ida_remove(struct ida *ida, int id)
{
        struct idr_layer *p = ida->idr.top;
        int shift = (ida->idr.layers - 1) * IDR_BITS;
        int idr_id = id / IDA_BITMAP_BITS;
        int offset = id % IDA_BITMAP_BITS;
        int n;
        struct ida_bitmap *bitmap;

        /* clear full bits while looking up the leaf idr_layer */
        while ((shift > 0) && p) {
                n = (idr_id >> shift) & IDR_MASK;
                __clear_bit(n, p->bitmap);
                p = p->ary[n];
                shift -= IDR_BITS;
        }

        if (p == NULL)
                goto err;

        n = idr_id & IDR_MASK;
        __clear_bit(n, p->bitmap);

        bitmap = (void *)p->ary[n];
        if (!bitmap || !test_bit(offset, bitmap->bitmap))
                goto err;

        /* update bitmap and remove it if empty */
        __clear_bit(offset, bitmap->bitmap);
        if (--bitmap->nr_busy == 0) {
                __set_bit(n, p->bitmap);        /* to please idr_remove() */
                idr_remove(&ida->idr, idr_id);
                free_bitmap(ida, bitmap);
        }

        return;

 err:
        WARN(1, "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida:                ida handle
 */
void ida_destroy(struct ida *ida)
{
        idr_destroy(&ida->idr);
        kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_simple_get - get a new id.
 * @ida: the (initialized) ida.
 * @start: the minimum id (inclusive, < 0x80000000)
 * @end: the maximum id (exclusive, < 0x80000000 or 0)
 * @gfp_mask: memory allocation flags
 *
 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
 * On memory allocation failure, returns -ENOMEM.
 *
 * Use ida_simple_remove() to get rid of an id.
 */
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
                   gfp_t gfp_mask)
{
        int ret, id;
        unsigned int max;
        unsigned long flags;

        BUG_ON((int)start < 0);
        BUG_ON((int)end < 0);

        if (end == 0)
                max = 0x80000000;
        else {
                BUG_ON(end < start);
                max = end - 1;
        }

again:
        if (!ida_pre_get(ida, gfp_mask))
                return -ENOMEM;

        spin_lock_irqsave(&simple_ida_lock, flags);
        ret = ida_get_new_above(ida, start, &id);
        if (!ret) {
                if (id > max) {
                        ida_remove(ida, id);
                        ret = -ENOSPC;
                } else {
                        ret = id;
                }
        }
        spin_unlock_irqrestore(&simple_ida_lock, flags);

        if (unlikely(ret == -EAGAIN))
                goto again;

        return ret;
}
EXPORT_SYMBOL(ida_simple_get);
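
/*
 * A sketch pairing ida_simple_get() with ida_simple_remove(); locking is
 * handled internally via simple_ida_lock:
 *
 *      id = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);
 *      if (id < 0)
 *              error;
 *      ...
 *      ida_simple_remove(&my_ida, id);
 */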

/**
 * ida_simple_remove - remove an allocated id.
 * @ida: the (initialized) ida.
 * @id: the id returned by ida_simple_get.
 */
void ida_simple_remove(struct ida *ida, unsigned int id)
{
        unsigned long flags;

        BUG_ON((int)id < 0);
        spin_lock_irqsave(&simple_ida_lock, flags);
        ida_remove(ida, id);
        spin_unlock_irqrestore(&simple_ida_lock, flags);
}
EXPORT_SYMBOL(ida_simple_remove);

/**
 * ida_init - initialize ida handle
 * @ida:        ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
        memset(ida, 0, sizeof(struct ida));
        idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);

/* find_first_bit - find the first set bit in a memory region of @size bits */
unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
{
        const unsigned long *p = addr;
        unsigned long result = 0;
        unsigned long tmp;

        while (size & ~(BITS_PER_LONG-1)) {
                if ((tmp = *(p++)))
                        goto found;
                result += BITS_PER_LONG;
                size -= BITS_PER_LONG;
        }
        if (!size)
                return result;

        tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
        if (tmp == 0UL)         /* Are any bits set? */
                return result + size;   /* Nope. */
found:
        return result + __ffs(tmp);
}

/* find_next_bit - find the next set bit at or above @offset */
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
                            unsigned long offset)
{
        const unsigned long *p = addr + BITOP_WORD(offset);
        unsigned long result = offset & ~(BITS_PER_LONG-1);
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset %= BITS_PER_LONG;
        if (offset) {
                tmp = *(p++);
                tmp &= (~0UL << offset);
                if (size < BITS_PER_LONG)
                        goto found_first;
                if (tmp)
                        goto found_middle;
                size -= BITS_PER_LONG;
                result += BITS_PER_LONG;
        }
        while (size & ~(BITS_PER_LONG-1)) {
                if ((tmp = *(p++)))
                        goto found_middle;
                result += BITS_PER_LONG;
                size -= BITS_PER_LONG;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp &= (~0UL >> (BITS_PER_LONG - size));
        if (tmp == 0UL)         /* Are any bits set? */
                return result + size;   /* Nope. */
found_middle:
        return result + __ffs(tmp);
}

/* find_next_zero_bit - find the next clear bit at or above @offset */
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
                                 unsigned long offset)
{
        const unsigned long *p = addr + BITOP_WORD(offset);
        unsigned long result = offset & ~(BITS_PER_LONG-1);
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset %= BITS_PER_LONG;
        if (offset) {
                tmp = *(p++);
                tmp |= ~0UL >> (BITS_PER_LONG - offset);
                if (size < BITS_PER_LONG)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= BITS_PER_LONG;
                result += BITS_PER_LONG;
        }
        while (size & ~(BITS_PER_LONG-1)) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += BITS_PER_LONG;
                size -= BITS_PER_LONG;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp |= ~0UL << size;
        if (tmp == ~0UL)        /* Are any bits zero? */
                return result + size;   /* Nope. */
found_middle:
        return result + ffz(tmp);
}
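
/*
 * A small worked example of the bit-scan helpers above.  With a word
 * where only bits 0-3 are set (map = 0x0000000f):
 *
 *      find_first_bit(&map, 32)          returns 0
 *      find_next_bit(&map, 32, 1)        returns 1
 *      find_next_zero_bit(&map, 32, 0)   returns 4
 */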

/*
 * hweight32 - count the set bits in a 32-bit word ("Hamming weight")
 * using parallel bit summing: each step adds adjacent groups of counts.
 */
unsigned int hweight32(unsigned int w)
{
        unsigned int res = w - ((w >> 1) & 0x55555555);         /* 2-bit sums */
        res = (res & 0x33333333) + ((res >> 2) & 0x33333333);   /* 4-bit sums */
        res = (res + (res >> 4)) & 0x0F0F0F0F;                  /* 8-bit sums */
        res = res + (res >> 8);                                 /* 16-bit sums */
        return (res + (res >> 16)) & 0x000000FF;                /* total */
}