
  1. /*
  2.  * 2002-10-18  written by Jim Houston jim.houston@ccur.com
  3.  *      Copyright (C) 2002 by Concurrent Computer Corporation
  4.  *      Distributed under the GNU GPL license version 2.
  5.  *
  6.  * Modified by George Anzinger to reuse immediately and to use
  7.  * find bit instructions.  Also removed _irq on spinlocks.
  8.  *
  9.  * Modified by Nadia Derbey to make it RCU safe.
  10.  *
  11.  * Small id to pointer translation service.
  12.  *
  13.  * It uses a radix tree like structure as a sparse array indexed
  14.  * by the id to obtain the pointer.  The bitmap makes allocating
  15.  * a new id quick.
  16.  *
  17.  * You call it to allocate an id (an int) and associate a pointer (or
  18.  * whatever, we treat it as a (void *)) with that id.  You can pass this
  19.  * id to a user to pass back at a later time.  You then pass that id to
  20.  * this code and it returns your pointer.
  21.  *
  22.  * You can release ids at any time.  When all ids are released, most of
  23.  * the memory is returned (we keep MAX_IDR_FREE layers) in a local pool
  24.  * so we don't need to go to the memory "store" during an id allocation;
  25.  * this way you don't need to be too concerned about locking and
  26.  * conflicts with the slab allocator.
  27.  */
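
/*
 * A minimal usage sketch of the allocate/lookup/release cycle described
 * above.  This is hypothetical caller code, not part of this file, and it
 * assumes the idr_find() fast-path wrapper from <linux/idr.h>:
 *
 *      static struct idr my_idr;
 *      int id;
 *
 *      idr_init(&my_idr);
 *      id = idr_alloc(&my_idr, my_ptr, 0, 0, GFP_KERNEL);
 *      if (id < 0)
 *              return id;                      // -ENOMEM or -ENOSPC
 *      ...
 *      my_ptr = idr_find(&my_idr, id);         // id -> pointer
 *      ...
 *      idr_remove(&my_idr, id);                // release the id
 */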
  28.  
  29. #include <linux/kernel.h>
  30. #include <linux/export.h>
  31. #include <linux/string.h>
  32. #include <linux/bitops.h>
  33. #include <linux/idr.h>
  34. //#include <stdlib.h>
  35.  
  36. unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
  37.                                  unsigned long offset);
  38.  
  39.  
  40. #define MAX_IDR_SHIFT           (sizeof(int) * 8 - 1)
  41. #define MAX_IDR_BIT             (1U << MAX_IDR_SHIFT)
  42.  
  43. /* Leave the possibility of an incomplete final layer */
  44. #define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)
  45.  
  46. /* Number of id_layer structs to leave in free list */
  47. #define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)
  48.  
  49. static struct idr_layer *idr_preload_head;
  50. static int idr_preload_cnt;
  51.  
  52.  
  53. /* the maximum ID which can be allocated given idr->layers */
  54. static int idr_max(int layers)
  55. {
  56.         int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT);
  57.  
  58.         return (1 << bits) - 1;
  59. }
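
/*
 * Example values (assuming IDR_BITS == 8, the usual value from
 * <linux/idr.h>): idr_max(1) == 0xff, idr_max(2) == 0xffff,
 * idr_max(3) == 0xffffff, and idr_max(4) is clamped by MAX_IDR_SHIFT
 * to INT_MAX rather than 2^32 - 1.
 */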
  60.  
  61. /*
  62.  * Prefix mask for an idr_layer at @layer.  For layer 0, the prefix mask is
  63.  * all bits except for the lower IDR_BITS.  For layer 1, 2 * IDR_BITS, and
  64.  * so on.
  65.  */
  66. static int idr_layer_prefix_mask(int layer)
  67. {
  68.         return ~idr_max(layer + 1);
  69. }
  70.  
  71. static struct idr_layer *get_from_free_list(struct idr *idp)
  72. {
  73.         struct idr_layer *p;
  74.         unsigned long flags;
  75.  
  76.         spin_lock_irqsave(&idp->lock, flags);
  77.         if ((p = idp->id_free)) {
  78.                 idp->id_free = p->ary[0];
  79.                 idp->id_free_cnt--;
  80.                 p->ary[0] = NULL;
  81.         }
  82.         spin_unlock_irqrestore(&idp->lock, flags);
  83.         return(p);
  84. }
  85.  
  86. /**
  87.  * idr_layer_alloc - allocate a new idr_layer
  88.  * @gfp_mask: allocation mask
  89.  * @layer_idr: optional idr to allocate from
  90.  *
  91.  * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch
  92.  * one from the per-cpu preload buffer.  If @layer_idr is not %NULL, fetch
  93.  * an idr_layer from @layer_idr->id_free.
  94.  *
  95.  * @layer_idr is to maintain backward compatibility with the old alloc
  96.  * interface - idr_pre_get() and idr_get_new*() - and will be removed
  97.  * together with per-pool preload buffer.
  98.  */
  99. static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
  100. {
  101.         struct idr_layer *new;
  102.  
  103.         /* this is the old path, bypass to get_from_free_list() */
  104.         if (layer_idr)
  105.                 return get_from_free_list(layer_idr);
  106.  
  107.         /* try to allocate one directly (kzalloc() in this port) */
  108.         new = kzalloc(sizeof(struct idr_layer), gfp_mask);
  109.         if (new)
  110.                 return new;
  111.  
  112.  
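        /*
         * Fall back to the preload buffer.  Note that in this port the
         * preload list is a single global (idr_preload_head) rather than
         * the per-cpu buffer used upstream, and the preempt_enable()
         * below appears to be left over from that per-cpu path.
         */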
  113.         new = idr_preload_head;
  114.         if (new) {
  115.                 idr_preload_head = new->ary[0];
  116.                 idr_preload_cnt--;
  117.                 new->ary[0] = NULL;
  118.         }
  119.         preempt_enable();
  120.         return new;
  121. }
  122.  
  123. static void idr_layer_rcu_free(struct rcu_head *head)
  124. {
  125.         struct idr_layer *layer;
  126.  
  127.         layer = container_of(head, struct idr_layer, rcu_head);
  128.         kfree(layer);
  129. }
  130.  
  131. static inline void free_layer(struct idr *idr, struct idr_layer *p)
  132. {
  133.         if (idr->hint && idr->hint == p)
  134.                 RCU_INIT_POINTER(idr->hint, NULL);
  135.         idr_layer_rcu_free(&p->rcu_head);
  136. }
  137.  
  138. /* only called when idp->lock is held */
  139. static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
  140. {
  141.         p->ary[0] = idp->id_free;
  142.         idp->id_free = p;
  143.         idp->id_free_cnt++;
  144. }
  145.  
  146. static void move_to_free_list(struct idr *idp, struct idr_layer *p)
  147. {
  148.         unsigned long flags;
  149.  
  150.         /*
  151.          * Depends on the return element being zeroed.
  152.          */
  153.         spin_lock_irqsave(&idp->lock, flags);
  154.         __move_to_free_list(idp, p);
  155.         spin_unlock_irqrestore(&idp->lock, flags);
  156. }
  157.  
  158. static void idr_mark_full(struct idr_layer **pa, int id)
  159. {
  160.         struct idr_layer *p = pa[0];
  161.         int l = 0;
  162.  
  163.         __set_bit(id & IDR_MASK, p->bitmap);
  164.         /*
  165.          * If this layer is full mark the bit in the layer above to
  166.          * show that this part of the radix tree is full.  This may
  167.          * complete the layer above and require walking up the radix
  168.          * tree.
  169.          */
  170.         while (bitmap_full(p->bitmap, IDR_SIZE)) {
  171.                 if (!(p = pa[++l]))
  172.                         break;
  173.                 id = id >> IDR_BITS;
  174.                 __set_bit((id & IDR_MASK), p->bitmap);
  175.         }
  176. }
  177.  
  178. /**
  179.  * idr_pre_get - reserve resources for idr allocation
  180.  * @idp:        idr handle
  181.  * @gfp_mask:   memory allocation flags
  182.  *
  183.  * This function should be called prior to calling the idr_get_new* functions.
  184.  * It preallocates enough memory to satisfy the worst possible allocation. The
  185.  * caller should pass in GFP_KERNEL if possible.  This of course requires that
  186.  * no spinning locks be held.
  187.  *
  188.  * If the system is REALLY out of memory this function returns %0,
  189.  * otherwise %1.
  190.  */
  191. int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
  192. {
  193.         while (idp->id_free_cnt < MAX_IDR_FREE) {
  194.                 struct idr_layer *new;
  195.                 new = kzalloc(sizeof(struct idr_layer), gfp_mask);
  196.                 if (new == NULL)
  197.                         return 0;
  198.                 move_to_free_list(idp, new);
  199.         }
  200.         return 1;
  201. }
  202. EXPORT_SYMBOL(idr_pre_get);
  203.  
  204. /**
  205.  * sub_alloc - try to allocate an id without growing the tree depth
  206.  * @idp: idr handle
  207.  * @starting_id: id to start search at
  208.  * @id: pointer to the allocated handle
  209.  * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
  210.  * @gfp_mask: allocation mask for idr_layer_alloc()
  211.  * @layer_idr: optional idr passed to idr_layer_alloc()
  212.  *
  213.  * Allocate an id in range [@starting_id, INT_MAX] from @idp without
  214.  * growing its depth.  Returns
  215.  *
  216.  *  the allocated id >= 0 if successful,
  217.  *  -EAGAIN if the tree needs to grow for allocation to succeed,
  218.  *  -ENOSPC if the id space is exhausted,
  219.  *  -ENOMEM if more idr_layers need to be allocated.
  220.  */
  221. static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
  222.                      gfp_t gfp_mask, struct idr *layer_idr)
  223. {
  224.         int n, m, sh;
  225.         struct idr_layer *p, *new;
  226.         int l, id, oid;
  227.  
  228.         id = *starting_id;
  229.  restart:
  230.         p = idp->top;
  231.         l = idp->layers;
  232.         pa[l--] = NULL;
  233.         while (1) {
  234.                 /*
  235.                  * We run around this while until we reach the leaf node...
  236.                  */
  237.                 n = (id >> (IDR_BITS*l)) & IDR_MASK;
  238.                 m = find_next_zero_bit(p->bitmap, IDR_SIZE, n);
  239.                 if (m == IDR_SIZE) {
  240.                         /* no space available go back to previous layer. */
  241.                         l++;
  242.                         oid = id;
  243.                         id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
  244.  
  245.                         /* if already at the top layer, we need to grow */
  246.                         if (id >= 1 << (idp->layers * IDR_BITS)) {
  247.                                 *starting_id = id;
  248.                                 return -EAGAIN;
  249.                         }
  250.                         p = pa[l];
  251.                         BUG_ON(!p);
  252.  
  253.                         /* If we need to go up one layer, continue the
  254.                          * loop; otherwise, restart from the top.
  255.                          */
  256.                         sh = IDR_BITS * (l + 1);
  257.                         if (oid >> sh == id >> sh)
  258.                                 continue;
  259.                         else
  260.                                 goto restart;
  261.                 }
  262.                 if (m != n) {
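                        /*
                         * Replace this level's slot index (n) with the
                         * first free slot (m) and clear all lower-level
                         * bits, i.e. restart at the first id of that
                         * free subtree.
                         */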
  263.                         sh = IDR_BITS*l;
  264.                         id = ((id >> sh) ^ n ^ m) << sh;
  265.                 }
  266.                 if ((id >= MAX_IDR_BIT) || (id < 0))
  267.                         return -ENOSPC;
  268.                 if (l == 0)
  269.                         break;
  270.                 /*
  271.                  * Create the layer below if it is missing.
  272.                  */
  273.                 if (!p->ary[m]) {
  274.                         new = idr_layer_alloc(gfp_mask, layer_idr);
  275.                         if (!new)
  276.                                 return -ENOMEM;
  277.                         new->layer = l-1;
  278.                         new->prefix = id & idr_layer_prefix_mask(new->layer);
  279.                         rcu_assign_pointer(p->ary[m], new);
  280.                         p->count++;
  281.                 }
  282.                 pa[l--] = p;
  283.                 p = p->ary[m];
  284.         }
  285.  
  286.         pa[l] = p;
  287.         return id;
  288. }
  289.  
  290. static int idr_get_empty_slot(struct idr *idp, int starting_id,
  291.                               struct idr_layer **pa, gfp_t gfp_mask,
  292.                               struct idr *layer_idr)
  293. {
  294.         struct idr_layer *p, *new;
  295.         int layers, v, id;
  296.         unsigned long flags;
  297.  
  298.         id = starting_id;
  299. build_up:
  300.         p = idp->top;
  301.         layers = idp->layers;
  302.         if (unlikely(!p)) {
  303.                 if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
  304.                         return -ENOMEM;
  305.                 p->layer = 0;
  306.                 layers = 1;
  307.         }
  308.         /*
  309.          * Add a new layer to the top of the tree if the requested
  310.          * id is larger than the currently allocated space.
  311.          */
  312.         while (id > idr_max(layers)) {
  313.                 layers++;
  314.                 if (!p->count) {
  315.                         /* special case: if the tree is currently empty,
  316.                          * then we grow the tree by moving the top node
  317.                          * upwards.
  318.                          */
  319.                         p->layer++;
  320.                         WARN_ON_ONCE(p->prefix);
  321.                         continue;
  322.                 }
  323.                 if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
  324.                         /*
  325.                          * The allocation failed.  If we built part of
  326.                          * the structure tear it down.
  327.                          */
  328.                         spin_lock_irqsave(&idp->lock, flags);
  329.                         for (new = p; p && p != idp->top; new = p) {
  330.                                 p = p->ary[0];
  331.                                 new->ary[0] = NULL;
  332.                                 new->count = 0;
  333.                                 bitmap_clear(new->bitmap, 0, IDR_SIZE);
  334.                                 __move_to_free_list(idp, new);
  335.                         }
  336.                         spin_unlock_irqrestore(&idp->lock, flags);
  337.                         return -ENOMEM;
  338.                 }
  339.                 new->ary[0] = p;
  340.                 new->count = 1;
  341.                 new->layer = layers-1;
  342.                 new->prefix = id & idr_layer_prefix_mask(new->layer);
  343.                 if (bitmap_full(p->bitmap, IDR_SIZE))
  344.                         __set_bit(0, new->bitmap);
  345.                 p = new;
  346.         }
  347.         rcu_assign_pointer(idp->top, p);
  348.         idp->layers = layers;
  349.         v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
  350.         if (v == -EAGAIN)
  351.                 goto build_up;
  352.         return(v);
  353. }
  354.  
  355. /*
  356.  * @id and @pa are from a successful allocation from idr_get_empty_slot().
  357.  * Install the user pointer @ptr and mark the slot full.
  358.  */
  359. static void idr_fill_slot(struct idr *idr, void *ptr, int id,
  360.                           struct idr_layer **pa)
  361. {
  362.         /* update hint used for lookup, cleared from free_layer() */
  363.         rcu_assign_pointer(idr->hint, pa[0]);
  364.  
  365.         rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr);
  366.         pa[0]->count++;
  367.         idr_mark_full(pa, id);
  368. }
  369.  
  370. /**
  371.  * idr_get_new_above - allocate new idr entry above or equal to a start id
  372.  * @idp: idr handle
  373.  * @ptr: pointer you want associated with the id
  374.  * @starting_id: id to start search at
  375.  * @id: pointer to the allocated handle
  376.  *
  377.  * This is the allocate id function.  It should be called with any
  378.  * required locks.
  379.  *
  380.  * If allocation from IDR's private freelist fails, idr_get_new_above() will
  381.  * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
  382.  * IDR's preallocation and then retry the idr_get_new_above() call.
  383.  *
  384.  * If the idr is full idr_get_new_above() will return %-ENOSPC.
  385.  *
  386.  * @id returns a value in the range @starting_id ... %0x7fffffff
  387.  */
  388. int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
  389. {
  390.         struct idr_layer *pa[MAX_IDR_LEVEL + 1];
  391.         int rv;
  392.  
  393.         rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp);
  394.         if (rv < 0)
  395.                 return rv == -ENOMEM ? -EAGAIN : rv;
  396.  
  397.         idr_fill_slot(idp, ptr, rv, pa);
  398.         *id = rv;
  399.         return 0;
  400. }
  401. EXPORT_SYMBOL(idr_get_new_above);
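
/*
 * A sketch of the legacy retry pattern referred to above.  This is
 * hypothetical caller code; my_idr, my_ptr and my_lock stand in for the
 * caller's own objects:
 *
 *      int id, err;
 *
 *      do {
 *              if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *                      return -ENOMEM;
 *              spin_lock(&my_lock);
 *              err = idr_get_new_above(&my_idr, my_ptr, 0, &id);
 *              spin_unlock(&my_lock);
 *      } while (err == -EAGAIN);
 *
 *      if (err)
 *              return err;             // -ENOSPC: id space exhausted
 */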
  402.  
  403. /**
  404.  * idr_preload - preload for idr_alloc()
  405.  * @gfp_mask: allocation mask to use for preloading
  406.  *
  407.  * Preload per-cpu layer buffer for idr_alloc().  Can only be used from
  408.  * process context and each idr_preload() invocation should be matched with
  409.  * idr_preload_end().  Note that preemption is disabled while preloaded.
  410.  *
  411.  * The first idr_alloc() in the preloaded section can be treated as if it
  412.  * were invoked with @gfp_mask used for preloading.  This allows using more
  413.  * permissive allocation masks for idrs protected by spinlocks.
  414.  *
  415.  * For example, if idr_alloc() below fails, the failure can be treated as
  416.  * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT.
  417.  *
  418.  *      idr_preload(GFP_KERNEL);
  419.  *      spin_lock(lock);
  420.  *
  421.  *      id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
  422.  *
  423.  *      spin_unlock(lock);
  424.  *      idr_preload_end();
  425.  *      if (id < 0)
  426.  *              error;
  427.  */
  428. void idr_preload(gfp_t gfp_mask)
  429. {
  430.  
  431.         /*
  432.          * idr_alloc() is likely to succeed w/o full idr_layer buffer and
  433.          * return value from idr_alloc() needs to be checked for failure
  434.          * anyway.  Silently give up if allocation fails.  The caller can
  435.          * treat failures from idr_alloc() as if idr_alloc() were called
  436.          * with @gfp_mask which should be enough.
  437.          */
  438.         while (idr_preload_cnt < MAX_IDR_FREE) {
  439.                 struct idr_layer *new;
  440.  
  441.                 new = kzalloc(sizeof(struct idr_layer), gfp_mask);
  442.                 if (!new)
  443.                         break;
  444.  
  445.                 /* link the new one to per-cpu preload list */
  446.                 new->ary[0] = idr_preload_head;
  447.                 idr_preload_head = new;
  448.                 idr_preload_cnt++;
  449.         }
  450. }
  451. EXPORT_SYMBOL(idr_preload);
  452.  
  453. /**
  454.  * idr_alloc - allocate new idr entry
  455.  * @idr: the (initialized) idr
  456.  * @ptr: pointer to be associated with the new id
  457.  * @start: the minimum id (inclusive)
  458.  * @end: the maximum id (exclusive, <= 0 for max)
  459.  * @gfp_mask: memory allocation flags
  460.  *
  461.  * Allocate an id in [start, end) and associate it with @ptr.  If no ID is
  462.  * available in the specified range, returns -ENOSPC.  On memory allocation
  463.  * failure, returns -ENOMEM.
  464.  *
  465.  * Note that @end is treated as max when <= 0.  This is to always allow
  466.  * using @start + N as @end as long as N is inside integer range.
  467.  *
  468.  * The user is responsible for exclusively synchronizing all operations
  469.  * which may modify @idr.  However, read-only accesses such as idr_find()
  470.  * or iteration can be performed under RCU read lock provided the user
  471.  * destroys @ptr in RCU-safe way after removal from idr.
  472.  */
  473. int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
  474. {
  475.         int max = end > 0 ? end - 1 : INT_MAX;  /* inclusive upper limit */
  476.         struct idr_layer *pa[MAX_IDR_LEVEL + 1];
  477.         int id;
  478.  
  479.         /* sanity checks */
  480.         if (WARN_ON_ONCE(start < 0))
  481.                 return -EINVAL;
  482.         if (unlikely(max < start))
  483.                 return -ENOSPC;
  484.  
  485.         /* allocate id */
  486.         id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL);
  487.         if (unlikely(id < 0))
  488.                 return id;
  489.         if (unlikely(id > max))
  490.                 return -ENOSPC;
  491.  
  492.         idr_fill_slot(idr, ptr, id, pa);
  493.         return id;
  494. }
  495. EXPORT_SYMBOL_GPL(idr_alloc);
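
/*
 * Example of the [start, end) semantics described above (hypothetical
 * caller code): allocate an id in the range 1..255 inclusive.
 *
 *      int id = idr_alloc(&my_idr, my_ptr, 1, 256, GFP_KERNEL);
 *      if (id == -ENOSPC)
 *              ...                     // every id in [1, 256) is in use
 *      else if (id < 0)
 *              ...                     // -ENOMEM
 */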
  496.  
  497. static void idr_remove_warning(int id)
  498. {
  499.         printk(KERN_WARNING
  500.                 "idr_remove called for id=%d which is not allocated.\n", id);
  501. //   dump_stack();
  502. }
  503.  
  504. static void sub_remove(struct idr *idp, int shift, int id)
  505. {
  506.         struct idr_layer *p = idp->top;
  507.         struct idr_layer **pa[MAX_IDR_LEVEL + 1];
  508.         struct idr_layer ***paa = &pa[0];
  509.         struct idr_layer *to_free;
  510.         int n;
  511.  
  512.         *paa = NULL;
  513.         *++paa = &idp->top;
  514.  
  515.         while ((shift > 0) && p) {
  516.                 n = (id >> shift) & IDR_MASK;
  517.                 __clear_bit(n, p->bitmap);
  518.                 *++paa = &p->ary[n];
  519.                 p = p->ary[n];
  520.                 shift -= IDR_BITS;
  521.         }
  522.         n = id & IDR_MASK;
  523.         if (likely(p != NULL && test_bit(n, p->bitmap))) {
  524.                 __clear_bit(n, p->bitmap);
  525.                 rcu_assign_pointer(p->ary[n], NULL);
  526.                 to_free = NULL;
  527.                 while(*paa && ! --((**paa)->count)){
  528.                         if (to_free)
  529.                                 free_layer(idp, to_free);
  530.                         to_free = **paa;
  531.                         **paa-- = NULL;
  532.                 }
  533.                 if (!*paa)
  534.                         idp->layers = 0;
  535.                 if (to_free)
  536.                         free_layer(idp, to_free);
  537.         } else
  538.                 idr_remove_warning(id);
  539. }
  540.  
  541. /**
  542.  * idr_remove - remove the given id and free its slot
  543.  * @idp: idr handle
  544.  * @id: unique key
  545.  */
  546. void idr_remove(struct idr *idp, int id)
  547. {
  548.         struct idr_layer *p;
  549.         struct idr_layer *to_free;
  550.  
  551.         /* see comment in idr_find_slowpath() */
  552.         if (WARN_ON_ONCE(id < 0))
  553.                 return;
  554.  
  555.         sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
  556.         if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
  557.             idp->top->ary[0]) {
  558.                 /*
  559.                  * Single child at leftmost slot: we can shrink the tree.
  560.                  * This level is not needed anymore since when layers are
  561.                  * inserted, they are inserted at the top of the existing
  562.                  * tree.
  563.                  */
  564.                 to_free = idp->top;
  565.                 p = idp->top->ary[0];
  566.                 rcu_assign_pointer(idp->top, p);
  567.                 --idp->layers;
  568.                 to_free->count = 0;
  569.                 bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
  570.                 free_layer(idp, to_free);
  571.         }
  572.         while (idp->id_free_cnt >= MAX_IDR_FREE) {
  573.                 p = get_from_free_list(idp);
  574.                 /*
  575.                  * Note: we don't call the rcu callback here, since the only
  576.                  * layers that fall into the freelist are those that have been
  577.                  * preallocated.
  578.                  */
  579.                 kfree(p);
  580.         }
  581.         return;
  582. }
  583. EXPORT_SYMBOL(idr_remove);
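
/*
 * As noted in idr_alloc(), lockless readers may still be using a pointer
 * returned by idr_find() when idr_remove() returns, so the object itself
 * must be freed in an RCU-safe way.  A minimal sketch (hypothetical
 * caller code; assumes kfree_rcu() and a struct with an embedded
 * rcu_head member named 'rcu'):
 *
 *      spin_lock(&my_lock);
 *      obj = idr_find(&my_idr, id);
 *      idr_remove(&my_idr, id);
 *      spin_unlock(&my_lock);
 *      if (obj)
 *              kfree_rcu(obj, rcu);
 */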
  584.  
  585. void __idr_remove_all(struct idr *idp)
  586. {
  587.         int n, id, max;
  588.         int bt_mask;
  589.         struct idr_layer *p;
  590.         struct idr_layer *pa[MAX_IDR_LEVEL + 1];
  591.         struct idr_layer **paa = &pa[0];
  592.  
  593.         n = idp->layers * IDR_BITS;
  594.         p = idp->top;
  595.         rcu_assign_pointer(idp->top, NULL);
  596.         max = idr_max(idp->layers);
  597.  
  598.         id = 0;
  599.         while (id >= 0 && id <= max) {
  600.                 while (n > IDR_BITS && p) {
  601.                         n -= IDR_BITS;
  602.                         *paa++ = p;
  603.                         p = p->ary[(id >> n) & IDR_MASK];
  604.                 }
  605.  
  606.                 bt_mask = id;
  607.                 id += 1 << n;
  608.                 /* Get the highest bit that the above add changed from 0->1. */
  609.                 while (n < fls(id ^ bt_mask)) {
  610.                         if (p)
  611.                                 free_layer(idp, p);
  612.                         n += IDR_BITS;
  613.                         p = *--paa;
  614.                 }
  615.         }
  616.         idp->layers = 0;
  617. }
  618. EXPORT_SYMBOL(__idr_remove_all);
  619.  
  620. /**
  621.  * idr_destroy - release all cached layers within an idr tree
  622.  * @idp: idr handle
  623.  *
  624.  * Free all id mappings and all idr_layers.  After this function, @idp is
  625.  * completely unused and can be freed / recycled.  The caller is
  626.  * responsible for ensuring that no one else accesses @idp during or after
  627.  * idr_destroy().
  628.  *
  629.  * A typical clean-up sequence for objects stored in an idr tree will use
  630.  * idr_for_each() to free all objects, if necessary, then idr_destroy() to
  631.  * free up the id mappings and cached idr_layers.
  632.  */
  633. void idr_destroy(struct idr *idp)
  634. {
  635.         __idr_remove_all(idp);
  636.  
  637.         while (idp->id_free_cnt) {
  638.                 struct idr_layer *p = get_from_free_list(idp);
  639.                 kfree(p);
  640.         }
  641. }
  642. EXPORT_SYMBOL(idr_destroy);
  643.  
  644. void *idr_find_slowpath(struct idr *idp, int id)
  645. {
  646.         int n;
  647.         struct idr_layer *p;
  648.  
  649.         /*
  650.          * If @id is negative, idr_find() used to ignore the sign bit and
  651.          * performed lookup with the rest of bits, which is weird and can
  652.          * lead to very obscure bugs.  We're now returning NULL for all
  653.          * negative IDs but just in case somebody was depending on the sign
  654.          * bit being ignored, let's trigger WARN_ON_ONCE() so that they can
  655.          * be detected and fixed.  WARN_ON_ONCE() can later be removed.
  656.          */
  657.         if (WARN_ON_ONCE(id < 0))
  658.                 return NULL;
  659.  
  660.         p = rcu_dereference_raw(idp->top);
  661.         if (!p)
  662.                 return NULL;
  663.         n = (p->layer+1) * IDR_BITS;
  664.  
  665.         if (id > idr_max(p->layer + 1))
  666.                 return NULL;
  667.         BUG_ON(n == 0);
  668.  
  669.         while (n > 0 && p) {
  670.                 n -= IDR_BITS;
  671.                 BUG_ON(n != p->layer*IDR_BITS);
  672.                 p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
  673.         }
  674.         return((void *)p);
  675. }
  676. EXPORT_SYMBOL(idr_find_slowpath);
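
/*
 * Read-only lookups may run under rcu_read_lock() as long as removal
 * frees objects in an RCU-safe way (see idr_alloc() above).  A minimal
 * sketch of such a read-side lookup (hypothetical caller code, using the
 * idr_find() fast-path wrapper from <linux/idr.h>):
 *
 *      rcu_read_lock();
 *      obj = idr_find(&my_idr, id);
 *      if (obj)
 *              ...                     // use obj; do not sleep here
 *      rcu_read_unlock();
 */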
  677.  
  678. #if 0
  679. /**
  680.  * idr_for_each - iterate through all stored pointers
  681.  * @idp: idr handle
  682.  * @fn: function to be called for each pointer
  683.  * @data: data passed back to callback function
  684.  *
  685.  * Iterate over the pointers registered with the given idr.  The
  686.  * callback function will be called for each pointer currently
  687.  * registered, passing the id, the pointer and the data pointer passed
  688.  * to this function.  It is not safe to modify the idr tree while in
  689.  * the callback, so functions such as idr_get_new and idr_remove are
  690.  * not allowed.
  691.  *
  692.  * We check the return of @fn each time. If it returns anything other
  693.  * than %0, we break out and return that value.
  694.  *
  695.  * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
  696.  */
  697. int idr_for_each(struct idr *idp,
  698.                  int (*fn)(int id, void *p, void *data), void *data)
  699. {
  700.         int n, id, max, error = 0;
  701.         struct idr_layer *p;
  702.         struct idr_layer *pa[MAX_IDR_LEVEL + 1];
  703.         struct idr_layer **paa = &pa[0];
  704.  
  705.         n = idp->layers * IDR_BITS;
  706.         p = rcu_dereference_raw(idp->top);
  707.         max = idr_max(idp->layers);
  708.  
  709.         id = 0;
  710.         while (id >= 0 && id <= max) {
  711.                 while (n > 0 && p) {
  712.                         n -= IDR_BITS;
  713.                         *paa++ = p;
  714.                         p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
  715.                 }
  716.  
  717.                 if (p) {
  718.                         error = fn(id, (void *)p, data);
  719.                         if (error)
  720.                                 break;
  721.                 }
  722.  
  723.                 id += 1 << n;
  724.                 while (n < fls(id)) {
  725.                         n += IDR_BITS;
  726.                         p = *--paa;
  727.                 }
  728.         }
  729.  
  730.         return error;
  731. }
  732. EXPORT_SYMBOL(idr_for_each);
  733.  
  734. /**
  735.  * idr_get_next - look up the next object at or above the given id
  736.  * @idp: idr handle
  737.  * @nextidp:  pointer to lookup key
  738.  *
  739.  * Returns a pointer to the registered object whose id is the smallest one
  740.  * greater than or equal to *@nextidp.  On success, *@nextidp is updated to
  741.  * that id for the next iteration.
  742.  *
  743.  * This function can be called under rcu_read_lock(), provided that the
  744.  * lifetimes of the leaf pointers are correctly managed.
  745.  */
  746. void *idr_get_next(struct idr *idp, int *nextidp)
  747. {
  748.         struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1];
  749.         struct idr_layer **paa = &pa[0];
  750.         int id = *nextidp;
  751.         int n, max;
  752.  
  753.         /* find first ent */
  754.         p = rcu_dereference_raw(idp->top);
  755.         if (!p)
  756.                 return NULL;
  757.         n = (p->layer + 1) * IDR_BITS;
  758.         max = idr_max(p->layer + 1);
  759.  
  760.         while (id >= 0 && id <= max) {
  761.                 while (n > 0 && p) {
  762.                         n -= IDR_BITS;
  763.                         *paa++ = p;
  764.                         p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
  765.                 }
  766.  
  767.                 if (p) {
  768.                         *nextidp = id;
  769.                         return p;
  770.                 }
  771.  
  772.                 /*
  773.                  * Proceed to the next layer at the current level.  Unlike
  774.                  * idr_for_each(), @id isn't guaranteed to be aligned to
  775.                  * layer boundary at this point and adding 1 << n may
  776.                  * incorrectly skip IDs.  Make sure we jump to the
  777.                  * beginning of the next layer using round_up().
  778.                  */
  779.                 id = round_up(id + 1, 1 << n);
  780.                 while (n < fls(id)) {
  781.                         n += IDR_BITS;
  782.                         p = *--paa;
  783.                 }
  784.         }
  785.         return NULL;
  786. }
  787. EXPORT_SYMBOL(idr_get_next);
  788.  
  789.  
  790. /**
  791.  * idr_replace - replace pointer for given id
  792.  * @idp: idr handle
  793.  * @ptr: pointer you want associated with the id
  794.  * @id: lookup key
  795.  *
  796.  * Replace the pointer registered with an id and return the old value.
  797.  * A %-ENOENT return indicates that @id was not found.
  798.  * A %-EINVAL return indicates that @id was not within valid constraints.
  799.  *
  800.  * The caller must serialize with writers.
  801.  */
  802. void *idr_replace(struct idr *idp, void *ptr, int id)
  803. {
  804.         int n;
  805.         struct idr_layer *p, *old_p;
  806.  
  807.         /* see comment in idr_find_slowpath() */
  808.         if (WARN_ON_ONCE(id < 0))
  809.                 return ERR_PTR(-EINVAL);
  810.  
  811.         p = idp->top;
  812.         if (!p)
  813.                 return ERR_PTR(-EINVAL);
  814.  
  815.         n = (p->layer+1) * IDR_BITS;
  816.  
  817.         if (id >= (1 << n))
  818.                 return ERR_PTR(-EINVAL);
  819.  
  820.         n -= IDR_BITS;
  821.         while ((n > 0) && p) {
  822.                 p = p->ary[(id >> n) & IDR_MASK];
  823.                 n -= IDR_BITS;
  824.         }
  825.  
  826.         n = id & IDR_MASK;
  827.         if (unlikely(p == NULL || !test_bit(n, p->bitmap)))
  828.                 return ERR_PTR(-ENOENT);
  829.  
  830.         old_p = p->ary[n];
  831.         rcu_assign_pointer(p->ary[n], ptr);
  832.  
  833.         return old_p;
  834. }
  835. EXPORT_SYMBOL(idr_replace);
  836.  
  837.  
  838. #endif
  839.  
  840.  
  841. void idr_init_cache(void)
  842. {
  843.     //idr_layer_cache = kmem_cache_create("idr_layer_cache",
  844.     //           sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
  845. }
  846.  
  847. /**
  848.  * idr_init - initialize idr handle
  849.  * @idp:        idr handle
  850.  *
  851.  * This function is used to set up the handle (@idp) that you will pass
  852.  * to the rest of the functions.
  853.  */
  854. void idr_init(struct idr *idp)
  855. {
  856.         memset(idp, 0, sizeof(struct idr));
  857.         spin_lock_init(&idp->lock);
  858. }
  859. EXPORT_SYMBOL(idr_init);
  860.  
  861. #if 0
  862.  
  863. /**
  864.  * DOC: IDA description
  865.  * IDA - IDR based ID allocator
  866.  *
  867.  * This is an id allocator without id -> pointer translation.  Memory
  868.  * usage is much lower than full blown idr because each id only
  869.  * occupies a bit.  ida uses a custom leaf node which contains
  870.  * IDA_BITMAP_BITS slots.
  871.  *
  872.  * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
  873.  */
  874.  
  875. static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
  876. {
  877.         unsigned long flags;
  878.  
  879.         if (!ida->free_bitmap) {
  880.                 spin_lock_irqsave(&ida->idr.lock, flags);
  881.                 if (!ida->free_bitmap) {
  882.                         ida->free_bitmap = bitmap;
  883.                         bitmap = NULL;
  884.                 }
  885.                 spin_unlock_irqrestore(&ida->idr.lock, flags);
  886.         }
  887.  
  888.         kfree(bitmap);
  889. }
  890.  
  891. /**
  892.  * ida_pre_get - reserve resources for ida allocation
  893.  * @ida:        ida handle
  894.  * @gfp_mask:   memory allocation flag
  895.  *
  896.  * This function should be called prior to locking and calling the
  897.  * following function.  It preallocates enough memory to satisfy the
  898.  * worst possible allocation.
  899.  *
  900.  * If the system is REALLY out of memory this function returns %0,
  901.  * otherwise %1.
  902.  */
  903. int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
  904. {
  905.         /* allocate idr_layers */
  906.         if (!idr_pre_get(&ida->idr, gfp_mask))
  907.                 return 0;
  908.  
  909.         /* allocate free_bitmap */
  910.         if (!ida->free_bitmap) {
  911.                 struct ida_bitmap *bitmap;
  912.  
  913.                 bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
  914.                 if (!bitmap)
  915.                         return 0;
  916.  
  917.                 free_bitmap(ida, bitmap);
  918.         }
  919.  
  920.         return 1;
  921. }
  922. EXPORT_SYMBOL(ida_pre_get);
  923.  
  924. /**
  925.  * ida_get_new_above - allocate new ID above or equal to a start id
  926.  * @ida:        ida handle
  927.  * @starting_id: id to start search at
  928.  * @p_id:       pointer to the allocated handle
  929.  *
  930.  * Allocate new ID above or equal to @starting_id.  It should be called
  931.  * with any required locks.
  932.  *
  933.  * If memory is required, it will return %-EAGAIN; you should unlock
  934.  * and go back to the ida_pre_get() call.  If the ida is full, it will
  935.  * return %-ENOSPC.
  936.  *
  937.  * @p_id returns a value in the range @starting_id ... %0x7fffffff.
  938.  */
  939. int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
  940. {
  941.         struct idr_layer *pa[MAX_IDR_LEVEL + 1];
  942.         struct ida_bitmap *bitmap;
  943.         unsigned long flags;
  944.         int idr_id = starting_id / IDA_BITMAP_BITS;
  945.         int offset = starting_id % IDA_BITMAP_BITS;
  946.         int t, id;
  947.  
  948.  restart:
  949.         /* get vacant slot */
  950.         t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr);
  951.         if (t < 0)
  952.                 return t == -ENOMEM ? -EAGAIN : t;
  953.  
  954.         if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
  955.                 return -ENOSPC;
  956.  
  957.         if (t != idr_id)
  958.                 offset = 0;
  959.         idr_id = t;
  960.  
  961.         /* if bitmap isn't there, create a new one */
  962.         bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
  963.         if (!bitmap) {
  964.                 spin_lock_irqsave(&ida->idr.lock, flags);
  965.                 bitmap = ida->free_bitmap;
  966.                 ida->free_bitmap = NULL;
  967.                 spin_unlock_irqrestore(&ida->idr.lock, flags);
  968.  
  969.                 if (!bitmap)
  970.                         return -EAGAIN;
  971.  
  972.                 memset(bitmap, 0, sizeof(struct ida_bitmap));
  973.                 rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
  974.                                 (void *)bitmap);
  975.                 pa[0]->count++;
  976.         }
  977.  
  978.         /* lookup for empty slot */
  979.         t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
  980.         if (t == IDA_BITMAP_BITS) {
  981.                 /* no empty slot after offset, continue to the next chunk */
  982.                 idr_id++;
  983.                 offset = 0;
  984.                 goto restart;
  985.         }
  986.  
  987.         id = idr_id * IDA_BITMAP_BITS + t;
  988.         if (id >= MAX_IDR_BIT)
  989.                 return -ENOSPC;
  990.  
  991.         __set_bit(t, bitmap->bitmap);
  992.         if (++bitmap->nr_busy == IDA_BITMAP_BITS)
  993.                 idr_mark_full(pa, idr_id);
  994.  
  995.         *p_id = id;
  996.  
  997.         /* Each leaf node can handle nearly a thousand slots and the
  998.          * whole idea of ida is to have small memory foot print.
  999.          * Throw away extra resources one by one after each successful
  1000.          * allocation.
  1001.          */
  1002.         if (ida->idr.id_free_cnt || ida->free_bitmap) {
  1003.                 struct idr_layer *p = get_from_free_list(&ida->idr);
  1004.                 if (p)
  1005.                         kmem_cache_free(idr_layer_cache, p);
  1006.         }
  1007.  
  1008.         return 0;
  1009. }
  1010. EXPORT_SYMBOL(ida_get_new_above);
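
/*
 * The same pre_get/retry pattern shown after idr_get_new_above() applies
 * here; a minimal sketch (hypothetical caller code):
 *
 *      again:
 *      if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *              return -ENOMEM;
 *      err = ida_get_new_above(&my_ida, 0, &id);
 *      if (err == -EAGAIN)
 *              goto again;
 */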
  1011.  
  1012. /**
  1013.  * ida_remove - remove the given ID
  1014.  * @ida:        ida handle
  1015.  * @id:         ID to free
  1016.  */
  1017. void ida_remove(struct ida *ida, int id)
  1018. {
  1019.         struct idr_layer *p = ida->idr.top;
  1020.         int shift = (ida->idr.layers - 1) * IDR_BITS;
  1021.         int idr_id = id / IDA_BITMAP_BITS;
  1022.         int offset = id % IDA_BITMAP_BITS;
  1023.         int n;
  1024.         struct ida_bitmap *bitmap;
  1025.  
  1026.         /* clear full bits while looking up the leaf idr_layer */
  1027.         while ((shift > 0) && p) {
  1028.                 n = (idr_id >> shift) & IDR_MASK;
  1029.                 __clear_bit(n, p->bitmap);
  1030.                 p = p->ary[n];
  1031.                 shift -= IDR_BITS;
  1032.         }
  1033.  
  1034.         if (p == NULL)
  1035.                 goto err;
  1036.  
  1037.         n = idr_id & IDR_MASK;
  1038.         __clear_bit(n, p->bitmap);
  1039.  
  1040.         bitmap = (void *)p->ary[n];
  1041.         if (!test_bit(offset, bitmap->bitmap))
  1042.                 goto err;
  1043.  
  1044.         /* update bitmap and remove it if empty */
  1045.         __clear_bit(offset, bitmap->bitmap);
  1046.         if (--bitmap->nr_busy == 0) {
  1047.                 __set_bit(n, p->bitmap);        /* to please idr_remove() */
  1048.                 idr_remove(&ida->idr, idr_id);
  1049.                 free_bitmap(ida, bitmap);
  1050.         }
  1051.  
  1052.         return;
  1053.  
  1054.  err:
  1055.         printk(KERN_WARNING
  1056.                "ida_remove called for id=%d which is not allocated.\n", id);
  1057. }
  1058. EXPORT_SYMBOL(ida_remove);
  1059.  
  1060. /**
  1061.  * ida_destroy - release all cached layers within an ida tree
  1062.  * @ida:                ida handle
  1063.  */
  1064. void ida_destroy(struct ida *ida)
  1065. {
  1066.         idr_destroy(&ida->idr);
  1067.         kfree(ida->free_bitmap);
  1068. }
  1069. EXPORT_SYMBOL(ida_destroy);
  1070.  
  1071. /**
  1072.  * ida_init - initialize ida handle
  1073.  * @ida:        ida handle
  1074.  *
  1075.  * This function is used to set up the handle (@ida) that you will pass
  1076.  * to the rest of the functions.
  1077.  */
  1078. void ida_init(struct ida *ida)
  1079. {
  1080.         memset(ida, 0, sizeof(struct ida));
  1081.         idr_init(&ida->idr);
  1082.  
  1083. }
  1084. EXPORT_SYMBOL(ida_init);
  1085.  
  1086.  
  1087. #endif
  1088.  
  1089.  
  1090. unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
  1091. {
  1092.         const unsigned long *p = addr;
  1093.         unsigned long result = 0;
  1094.         unsigned long tmp;
  1095.  
  1096.         while (size & ~(BITS_PER_LONG-1)) {
  1097.                 if ((tmp = *(p++)))
  1098.                         goto found;
  1099.                 result += BITS_PER_LONG;
  1100.                 size -= BITS_PER_LONG;
  1101.         }
  1102.         if (!size)
  1103.                 return result;
  1104.  
  1105.         tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
  1106.         if (tmp == 0UL)         /* Are any bits set? */
  1107.                 return result + size;   /* Nope. */
  1108. found:
  1109.         return result + __ffs(tmp);
  1110. }
  1111.  
  1112. unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
  1113.                             unsigned long offset)
  1114. {
  1115.         const unsigned long *p = addr + BITOP_WORD(offset);
  1116.         unsigned long result = offset & ~(BITS_PER_LONG-1);
  1117.         unsigned long tmp;
  1118.  
  1119.         if (offset >= size)
  1120.                 return size;
  1121.         size -= result;
  1122.         offset %= BITS_PER_LONG;
  1123.         if (offset) {
  1124.                 tmp = *(p++);
  1125.                 tmp &= (~0UL << offset);
  1126.                 if (size < BITS_PER_LONG)
  1127.                         goto found_first;
  1128.                 if (tmp)
  1129.                         goto found_middle;
  1130.                 size -= BITS_PER_LONG;
  1131.                 result += BITS_PER_LONG;
  1132.         }
  1133.         while (size & ~(BITS_PER_LONG-1)) {
  1134.                 if ((tmp = *(p++)))
  1135.                         goto found_middle;
  1136.                 result += BITS_PER_LONG;
  1137.                 size -= BITS_PER_LONG;
  1138.         }
  1139.         if (!size)
  1140.                 return result;
  1141.         tmp = *p;
  1142.  
  1143. found_first:
  1144.         tmp &= (~0UL >> (BITS_PER_LONG - size));
  1145.         if (tmp == 0UL)         /* Are any bits set? */
  1146.                 return result + size;   /* Nope. */
  1147. found_middle:
  1148.         return result + __ffs(tmp);
  1149. }
  1150.  
  1151. unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
  1152.                                  unsigned long offset)
  1153. {
  1154.         const unsigned long *p = addr + BITOP_WORD(offset);
  1155.         unsigned long result = offset & ~(BITS_PER_LONG-1);
  1156.         unsigned long tmp;
  1157.  
  1158.         if (offset >= size)
  1159.                 return size;
  1160.         size -= result;
  1161.         offset %= BITS_PER_LONG;
  1162.         if (offset) {
  1163.                 tmp = *(p++);
  1164.                 tmp |= ~0UL >> (BITS_PER_LONG - offset);
  1165.                 if (size < BITS_PER_LONG)
  1166.                         goto found_first;
  1167.                 if (~tmp)
  1168.                         goto found_middle;
  1169.                 size -= BITS_PER_LONG;
  1170.                 result += BITS_PER_LONG;
  1171.         }
  1172.         while (size & ~(BITS_PER_LONG-1)) {
  1173.                 if (~(tmp = *(p++)))
  1174.                         goto found_middle;
  1175.                 result += BITS_PER_LONG;
  1176.                 size -= BITS_PER_LONG;
  1177.         }
  1178.         if (!size)
  1179.                 return result;
  1180.         tmp = *p;
  1181.  
  1182. found_first:
  1183.         tmp |= ~0UL << size;
  1184.         if (tmp == ~0UL)        /* Are any bits zero? */
  1185.                 return result + size;   /* Nope. */
  1186. found_middle:
  1187.         return result + ffz(tmp);
  1188. }
  1189.  
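/*
 * Population count via parallel (SWAR) bit summation: each step folds
 * pairs of adjacent 1-, 2-, 4-, 8- and 16-bit partial counts into wider
 * fields, so e.g. hweight32(0xF0F0F0F0) == 16 and hweight32(7) == 3.
 */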
  1190. unsigned int hweight32(unsigned int w)
  1191. {
  1192.         unsigned int res = w - ((w >> 1) & 0x55555555);
  1193.         res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
  1194.         res = (res + (res >> 4)) & 0x0F0F0F0F;
  1195.         res = res + (res >> 8);
  1196.         return (res + (res >> 16)) & 0x000000FF;
  1197. }
  1198.  
  1199.