  1. /*
  2.  * 2002-10-18  written by Jim Houston jim.houston@ccur.com
  3.  *      Copyright (C) 2002 by Concurrent Computer Corporation
  4.  *      Distributed under the GNU GPL license version 2.
  5.  *
  6.  * Modified by George Anzinger to reuse immediately and to use
  7.  * find bit instructions.  Also removed _irq on spinlocks.
  8.  *
  9.  * Modified by Nadia Derbey to make it RCU safe.
  10.  *
  11.  * Small id to pointer translation service.
  12.  *
  13.  * It uses a radix tree like structure as a sparse array indexed
  14.  * by the id to obtain the pointer.  The bitmap makes allocating
  15.  * a new id quick.
  16.  *
  17.  * You call it to allocate an id (an int) and associate a pointer (or
  18.  * whatever; we treat it as a (void *)) with that id.  You can hand the
  19.  * id to a user to pass back later; you then pass that id to this code
  20.  * and it returns your pointer.  (See the usage sketch after the includes.)
  21.  *
  22.  * You can release ids at any time.  When all ids are released, most of
  23.  * the memory is returned (we keep IDR_FREE_MAX layers in a local pool),
  24.  * so we don't need to go to the memory "store" during an id allocate
  25.  * and you don't need to be too concerned about locking and conflicts
  26.  * with the slab allocator.
  27.  */
  28.  
  29. #include <linux/kernel.h>
  30. #include <linux/string.h>
  31. #include <linux/bitops.h>
  32. #include <linux/idr.h>
  33. //#include <stdlib.h>
  34.  
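/*
 * Minimal usage sketch of the allocate/lookup/release cycle described in
 * the header comment (not compiled in).  The gfp_mask value of 0 and the
 * error-handling style are illustrative assumptions, not part of this port.
 */
#if 0
static struct idr demo_idr;

static int demo_store(void *object)
{
        int id;

        idr_init(&demo_idr);

        if (!idr_pre_get(&demo_idr, 0))          /* preallocate idr_layers */
                return -1;
        if (idr_get_new(&demo_idr, object, &id)) /* associate object with an id */
                return -1;

        if (idr_find(&demo_idr, id) != object)   /* id -> pointer translation */
                return -1;

        idr_remove(&demo_idr, id);               /* release the id ... */
        idr_destroy(&demo_idr);                  /* ... and the cached layers */
        return id;
}
#endif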
  35. unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
  36. {
  37.     const unsigned long *p = addr;
  38.     unsigned long result = 0;
  39.     unsigned long tmp;
  40.  
  41.     while (size & ~(BITS_PER_LONG-1)) {
  42.         if ((tmp = *(p++)))
  43.             goto found;
  44.         result += BITS_PER_LONG;
  45.         size -= BITS_PER_LONG;
  46.     }
  47.     if (!size)
  48.         return result;
  49.  
  50.     tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
  51.     if (tmp == 0UL)     /* Are any bits set? */
  52.         return result + size;   /* Nope. */
  53. found:
  54.     return result + __ffs(tmp);
  55. }
  56.  
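/*
 * Note: unlike find_first_bit() above, this helper is hard-coded for
 * 32-bit longs (word size 32) and uses the x86 BSF instruction, which
 * matches the 32-bit x86 target this file is built for.
 */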
  57. int find_next_bit(const unsigned long *addr, int size, int offset)
  58. {
  59.     const unsigned long *p = addr + (offset >> 5);
  60.     int set = 0, bit = offset & 31, res;
  61.  
  62.     if (bit)
  63.     {
  64.         /*
  65.          * Look for nonzero in the first 32 bits:
  66.          */
  67.         __asm__("bsfl %1,%0\n\t"
  68.                 "jne 1f\n\t"
  69.                 "movl $32, %0\n"
  70.                 "1:"
  71.                 : "=r" (set)
  72.                 : "r" (*p >> bit));
  73.         if (set < (32 - bit))
  74.                 return set + offset;
  75.         set = 32 - bit;
  76.         p++;
  77.     }
  78.     /*
  79.      * No set bit yet, search remaining full words for a bit
  80.      */
  81.     res = find_first_bit (p, size - 32 * (p - addr));
  82.     return (offset + set + res);
  83. }
  84.  
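/*
 * Simplified stand-ins for the kernel RCU primitives: rcu_dereference()
 * is reduced to an ACCESS_ONCE() read and rcu_assign_pointer() to a plain
 * store, with no memory barriers, which appears to be enough for this port.
 */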
  85. #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
  86.  
  87. #define rcu_dereference(p)     ({ \
  88.                                 typeof(p) _________p1 = ACCESS_ONCE(p); \
  89.                                 (_________p1); \
  90.                                 })
  91.  
  92. #define rcu_assign_pointer(p, v) \
  93.         ({ \
  94.                 if (!__builtin_constant_p(v) || \
  95.                     ((v) != NULL)) \
  96.                 (p) = (v); \
  97.         })
  98.  
  99. //static struct kmem_cache *idr_layer_cache;
  100.  
  101.  
  102.  
  103.  
  104.  
  105. static struct idr_layer *get_from_free_list(struct idr *idp)
  106. {
  107.         struct idr_layer *p;
  108.         unsigned long flags;
  109.  
  110. //   spin_lock_irqsave(&idp->lock, flags);
  111.         if ((p = idp->id_free)) {
  112.                 idp->id_free = p->ary[0];
  113.                 idp->id_free_cnt--;
  114.                 p->ary[0] = NULL;
  115.         }
  116. //   spin_unlock_irqrestore(&idp->lock, flags);
  117.         return(p);
  118. }
  119.  
  120.  
  121. static void idr_layer_rcu_free(struct rcu_head *head)
  122. {
  123.         struct idr_layer *layer;
  124.  
  125.     layer = container_of(head, struct idr_layer, rcu_head);
  126.     kfree(layer);
  127. }
  128.  
  129.  
  130.  
  131. static inline void free_layer(struct idr_layer *p)
  132. {
  133.     kfree(p);
  134. }
  135.  
  136.  
  137. /* only called when idp->lock is held */
  138. static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
  139. {
  140.         p->ary[0] = idp->id_free;
  141.         idp->id_free = p;
  142.         idp->id_free_cnt++;
  143. }
  144.  
  145. static void move_to_free_list(struct idr *idp, struct idr_layer *p)
  146. {
  147.         unsigned long flags;
  148.  
  149.         /*
  150.          * Depends on the return element being zeroed.
  151.          */
  152. //   spin_lock_irqsave(&idp->lock, flags);
  153.         __move_to_free_list(idp, p);
  154. //   spin_unlock_irqrestore(&idp->lock, flags);
  155. }
  156.  
  157. static void idr_mark_full(struct idr_layer **pa, int id)
  158. {
  159.         struct idr_layer *p = pa[0];
  160.         int l = 0;
  161.  
  162.         __set_bit(id & IDR_MASK, &p->bitmap);
  163.         /*
  164.          * If this layer is full mark the bit in the layer above to
  165.          * show that this part of the radix tree is full.  This may
  166.          * complete the layer above and require walking up the radix
  167.          * tree.
  168.          */
  169.         while (p->bitmap == IDR_FULL) {
  170.                 if (!(p = pa[++l]))
  171.                         break;
  172.                 id = id >> IDR_BITS;
  173.                 __set_bit((id & IDR_MASK), &p->bitmap);
  174.         }
  175. }
  176.  
  177.  
  178.  
  179. /**
  180.  * idr_pre_get - reserve resources for idr allocation
  181.  * @idp:        idr handle
  182.  * @gfp_mask:   memory allocation flags
  183.  *
  184.  * This function should be called prior to locking and calling the
  185.  * idr_get_new* functions. It preallocates enough memory to satisfy
  186.  * the worst possible allocation.
  187.  *
  188.  * If the system is REALLY out of memory this function returns 0,
  189.  * otherwise 1.
  190.  */
  191. int idr_pre_get(struct idr *idp, u32_t gfp_mask)
  192. {
  193.         while (idp->id_free_cnt < IDR_FREE_MAX) {
  194.                 struct idr_layer *new;
  195.                 new = kzalloc(sizeof(struct idr_layer), gfp_mask);
  196.                 if (new == NULL)
  197.                         return (0);
  198.                 move_to_free_list(idp, new);
  199.         }
  200.         return 1;
  201. }
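/*
 * Sketch of the calling pattern the comments above and below describe:
 * preallocate outside any lock, then retry if idr_get_new() reports
 * -EAGAIN.  The lock placement is only indicated in comments, and the
 * -ENOMEM/-EAGAIN/-ENOSPC constants are assumed to come from the port's
 * headers; this block is not compiled in.
 */
#if 0
static int demo_alloc_id(struct idr *idp, void *ptr, int *id)
{
        int ret;

        do {
                if (!idr_pre_get(idp, 0))       /* refill the free-layer pool */
                        return -ENOMEM;
                /* take the caller's lock here */
                ret = idr_get_new(idp, ptr, id);
                /* drop the caller's lock here */
        } while (ret == -EAGAIN);               /* pool drained; refill and retry */

        return ret;                             /* 0, or -ENOSPC if the idr is full */
}
#endif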
  202.  
  203. static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
  204. {
  205.         int n, m, sh;
  206.         struct idr_layer *p, *new;
  207.         int l, id, oid;
  208.         unsigned long bm;
  209.  
  210.         id = *starting_id;
  211.  restart:
  212.         p = idp->top;
  213.         l = idp->layers;
  214.         pa[l--] = NULL;
  215.         while (1) {
  216.                 /*
  217.                  * We run around this while until we reach the leaf node...
  218.                  */
  219.                 n = (id >> (IDR_BITS*l)) & IDR_MASK;
  220.                 bm = ~p->bitmap;
  221.                 m = find_next_bit(&bm, IDR_SIZE, n);
  222.                 if (m == IDR_SIZE) {
  223.                         /* no space available go back to previous layer. */
  224.                         l++;
  225.                         oid = id;
  226.                         id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
  227.  
  228.                         /* if already at the top layer, we need to grow */
  229.                         if (!(p = pa[l])) {
  230.                                 *starting_id = id;
  231.                                 return IDR_NEED_TO_GROW;
  232.                         }
  233.  
  234.                         /* If we need to go up one layer, continue the
  235.                          * loop; otherwise, restart from the top.
  236.                          */
  237.                         sh = IDR_BITS * (l + 1);
  238.                         if (oid >> sh == id >> sh)
  239.                                 continue;
  240.                         else
  241.                                 goto restart;
  242.                 }
  243.                 if (m != n) {
  244.                         sh = IDR_BITS*l;
  245.                         id = ((id >> sh) ^ n ^ m) << sh;
  246.                 }
  247.                 if ((id >= MAX_ID_BIT) || (id < 0))
  248.                         return IDR_NOMORE_SPACE;
  249.                 if (l == 0)
  250.                         break;
  251.                 /*
  252.                  * Create the layer below if it is missing.
  253.                  */
  254.                 if (!p->ary[m]) {
  255.                         new = get_from_free_list(idp);
  256.                         if (!new)
  257.                                 return -1;
  258.                         new->layer = l-1;
  259.                         rcu_assign_pointer(p->ary[m], new);
  260.                         p->count++;
  261.                 }
  262.                 pa[l--] = p;
  263.                 p = p->ary[m];
  264.         }
  265.  
  266.         pa[l] = p;
  267.         return id;
  268. }
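/*
 * Worked example of the id decomposition used above, assuming the usual
 * 32-bit definitions (IDR_BITS == 5, IDR_SIZE == 32, IDR_MASK == 31):
 * a two-layer tree covers ids 0..1023, and id 1000 lives at slot
 * (1000 >> 5) & 31 == 31 of the top layer and slot 1000 & 31 == 8 of the
 * leaf, whose bitmap bit 8 is later set by idr_mark_full().
 */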
  269.  
  270.  
  271. static int idr_get_empty_slot(struct idr *idp, int starting_id,
  272.                               struct idr_layer **pa)
  273. {
  274.         struct idr_layer *p, *new;
  275.         int layers, v, id;
  276.         unsigned long flags;
  277.  
  278.         id = starting_id;
  279. build_up:
  280.         p = idp->top;
  281.         layers = idp->layers;
  282.         if (unlikely(!p)) {
  283.                 if (!(p = get_from_free_list(idp)))
  284.                         return -1;
  285.                 p->layer = 0;
  286.                 layers = 1;
  287.         }
  288.         /*
  289.          * Add a new layer to the top of the tree if the requested
  290.          * id is larger than the currently allocated space.
  291.          */
  292.         while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
  293.                 layers++;
  294.                 if (!p->count) {
  295.                         /* special case: if the tree is currently empty,
  296.                          * then we grow the tree by moving the top node
  297.                          * upwards.
  298.                          */
  299.                         p->layer++;
  300.                         continue;
  301.                 }
  302.                 if (!(new = get_from_free_list(idp))) {
  303.                         /*
  304.                          * The allocation failed.  If we built part of
  305.                          * the structure tear it down.
  306.                          */
  307. //           spin_lock_irqsave(&idp->lock, flags);
  308.                         for (new = p; p && p != idp->top; new = p) {
  309.                                 p = p->ary[0];
  310.                                 new->ary[0] = NULL;
  311.                                 new->bitmap = new->count = 0;
  312.                                 __move_to_free_list(idp, new);
  313.                         }
  314. //           spin_unlock_irqrestore(&idp->lock, flags);
  315.                         return -1;
  316.                 }
  317.                 new->ary[0] = p;
  318.                 new->count = 1;
  319.                 new->layer = layers-1;
  320.                 if (p->bitmap == IDR_FULL)
  321.                         __set_bit(0, &new->bitmap);
  322.                 p = new;
  323.         }
  324.         rcu_assign_pointer(idp->top, p);
  325.         idp->layers = layers;
  326.         v = sub_alloc(idp, &id, pa);
  327.         if (v == IDR_NEED_TO_GROW)
  328.                 goto build_up;
  329.         return(v);
  330. }
  331.  
  332. static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
  333. {
  334.         struct idr_layer *pa[MAX_LEVEL];
  335.         int id;
  336.  
  337.         id = idr_get_empty_slot(idp, starting_id, pa);
  338.         if (id >= 0) {
  339.                 /*
  340.                  * Successfully found an empty slot.  Install the user
  341.                  * pointer and mark the slot full.
  342.                  */
  343.                 rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
  344.                                 (struct idr_layer *)ptr);
  345.                 pa[0]->count++;
  346.                 idr_mark_full(pa, id);
  347.         }
  348.  
  349.         return id;
  350. }
  351.  
  352. /**
  353.  * idr_get_new_above - allocate new idr entry above or equal to a start id
  354.  * @idp: idr handle
  355.  * @ptr: pointer you want associated with the id
  356.  * @starting_id: id to start search at
  357.  * @id: pointer to the allocated handle
  358.  *
  359.  * This is the allocate id function.  It should be called with any
  360.  * required locks.
  361.  *
  362.  * If memory is required, it will return -EAGAIN, you should unlock
  363.  * and go back to the idr_pre_get() call.  If the idr is full, it will
  364.  * return -ENOSPC.
  365.  *
  366.  * @id returns a value in the range @starting_id ... 0x7fffffff
  367.  */
  368. int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
  369. {
  370.         int rv;
  371. 
  372.         rv = idr_get_new_above_int(idp, ptr, starting_id);
  373.         /*
  374.          * This is a cheap hack until the IDR code can be fixed to
  375.          * return proper error values.
  376.          */
  377.         if (rv < 0) {
  378.                 dbgprintf("fail\n");
  379.                 return _idr_rc_to_errno(rv);
  380.         }
  381.         *id = rv;
  382.         return 0;
  383. }
  384.  
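/*
 * Sketch: allocating handles at or above 1 so that 0 stays free to mean
 * "no handle".  The function and variable names are illustrative only;
 * the block is not compiled in.
 */
#if 0
static int demo_alloc_nonzero(struct idr *idp, void *object)
{
        int ret, handle;

        ret = idr_get_new_above(idp, object, 1, &handle);
        if (ret)
                return ret;     /* -EAGAIN (call idr_pre_get() and retry) or -ENOSPC */
        return handle;          /* guaranteed to be >= 1 */
}
#endif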
  385. /**
  386.  * idr_get_new - allocate new idr entry
  387.  * @idp: idr handle
  388.  * @ptr: pointer you want associated with the id
  389.  * @id: pointer to the allocated handle
  390.  *
  391.  * This is the allocate id function.  It should be called with any
  392.  * required locks.
  393.  *
  394.  * If memory is required, it will return -EAGAIN, you should unlock
  395.  * and go back to the idr_pre_get() call.  If the idr is full, it will
  396.  * return -ENOSPC.
  397.  *
  398.  * @id returns a value in the range 0 ... 0x7fffffff
  399.  */
  400. int idr_get_new(struct idr *idp, void *ptr, int *id)
  401. {
  402.         int rv;
  403.  
  404.         rv = idr_get_new_above_int(idp, ptr, 0);
  405.         /*
  406.          * This is a cheap hack until the IDR code can be fixed to
  407.          * return proper error values.
  408.          */
  409.         if (rv < 0)
  410.                 return _idr_rc_to_errno(rv);
  411.         *id = rv;
  412.         return 0;
  413. }
  414.  
  415. static void idr_remove_warning(int id)
  416. {
  417.         printk(KERN_WARNING
  418.                 "idr_remove called for id=%d which is not allocated.\n", id);
  419. //   dump_stack();
  420. }
  421.  
  422. static void sub_remove(struct idr *idp, int shift, int id)
  423. {
  424.         struct idr_layer *p = idp->top;
  425.         struct idr_layer **pa[MAX_LEVEL];
  426.         struct idr_layer ***paa = &pa[0];
  427.         struct idr_layer *to_free;
  428.         int n;
  429.  
  430.         *paa = NULL;
  431.         *++paa = &idp->top;
  432.  
  433.         while ((shift > 0) && p) {
  434.                 n = (id >> shift) & IDR_MASK;
  435.                 __clear_bit(n, &p->bitmap);
  436.                 *++paa = &p->ary[n];
  437.                 p = p->ary[n];
  438.                 shift -= IDR_BITS;
  439.         }
  440.         n = id & IDR_MASK;
  441.         if (likely(p != NULL && test_bit(n, &p->bitmap))){
  442.                 __clear_bit(n, &p->bitmap);
  443.                 rcu_assign_pointer(p->ary[n], NULL);
  444.                 to_free = NULL;
  445.                 while(*paa && ! --((**paa)->count)){
  446.                         if (to_free)
  447.                                 free_layer(to_free);
  448.                         to_free = **paa;
  449.                         **paa-- = NULL;
  450.                 }
  451.                 if (!*paa)
  452.                         idp->layers = 0;
  453.                 if (to_free)
  454.                         free_layer(to_free);
  455.         } else
  456.                 idr_remove_warning(id);
  457. }
  458.  
  459. /**
  460.  * idr_remove - remove the given id and free its slot
  461.  * @idp: idr handle
  462.  * @id: unique key
  463.  */
  464. void idr_remove(struct idr *idp, int id)
  465. {
  466.         struct idr_layer *p;
  467.         struct idr_layer *to_free;
  468.  
  469.         /* Mask off upper bits we don't use for the search. */
  470.         id &= MAX_ID_MASK;
  471.  
  472.         sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
  473.         if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
  474.             idp->top->ary[0]) {
  475.                 /*
  476.                  * Single child at leftmost slot: we can shrink the tree.
  477.                  * This level is not needed anymore since when layers are
  478.                  * inserted, they are inserted at the top of the existing
  479.                  * tree.
  480.                  */
  481.                 to_free = idp->top;
  482.                 p = idp->top->ary[0];
  483.                 rcu_assign_pointer(idp->top, p);
  484.                 --idp->layers;
  485.                 to_free->bitmap = to_free->count = 0;
  486.                 free_layer(to_free);
  487.         }
  488.         while (idp->id_free_cnt >= IDR_FREE_MAX) {
  489.                 p = get_from_free_list(idp);
  490.                 /*
  491.                  * Note: we don't call the rcu callback here, since the only
  492.                  * layers that fall into the freelist are those that have been
  493.                  * preallocated.
  494.                  */
  495.                 kfree(p);
  496.         }
  497.         return;
  498. }
  499.  
  500.  
  501. /**
  502.  * idr_remove_all - remove all ids from the given idr tree
  503.  * @idp: idr handle
  504.  *
  505.  * idr_destroy() only frees up unused, cached idp_layers, but this
  506.  * function will remove all id mappings and leave all idp_layers
  507.  * unused.
  508.  *
  509.  * A typical clean-up sequence for objects stored in an idr tree will
  510.  * use idr_for_each() to free all objects, if necessary, then
  511.  * idr_remove_all() to remove all ids, and idr_destroy() to free
  512.  * up the cached idr_layers.
  513.  */
  514. void idr_remove_all(struct idr *idp)
  515. {
  516.         int n, id, max;
  517.         struct idr_layer *p;
  518.         struct idr_layer *pa[MAX_LEVEL];
  519.         struct idr_layer **paa = &pa[0];
  520.  
  521.         n = idp->layers * IDR_BITS;
  522.         p = idp->top;
  523.         rcu_assign_pointer(idp->top, NULL);
  524.         max = 1 << n;
  525.  
  526.         id = 0;
  527.         while (id < max) {
  528.                 while (n > IDR_BITS && p) {
  529.                         n -= IDR_BITS;
  530.                         *paa++ = p;
  531.                         p = p->ary[(id >> n) & IDR_MASK];
  532.                 }
  533.  
  534.                 id += 1 << n;
  535.                 while (n < fls(id)) {
  536.                         if (p)
  537.                                 free_layer(p);
  538.                         n += IDR_BITS;
  539.                         p = *--paa;
  540.                 }
  541.         }
  542.         idp->layers = 0;
  543. }
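/*
 * The clean-up sequence described in the comment above, as a sketch.
 * idr_for_each() is compiled out (#if 0) further down in this file, so a
 * real caller here would have to free its objects some other way first;
 * the block is not compiled in.
 */
#if 0
static int demo_free_one(int id, void *p, void *data)
{
        kfree(p);               /* release the object stored under this id */
        return 0;
}

static void demo_teardown(struct idr *idp)
{
        idr_for_each(idp, demo_free_one, NULL); /* free every stored object */
        idr_remove_all(idp);                    /* drop all id mappings */
        idr_destroy(idp);                       /* free the cached idr_layers */
}
#endif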
  544.  
  545. /**
  546.  * idr_destroy - release all cached layers within an idr tree
  547.  * @idp: idr handle
  548.  */
  549. void idr_destroy(struct idr *idp)
  550. {
  551.         while (idp->id_free_cnt) {
  552.                 struct idr_layer *p = get_from_free_list(idp);
  553.                 kfree(p);
  554.         }
  555. }
  556.  
  557.  
  558. /**
  559.  * idr_find - return pointer for given id
  560.  * @idp: idr handle
  561.  * @id: lookup key
  562.  *
  563.  * Return the pointer given the id it has been registered with.  A %NULL
  564.  * return indicates that @id is not valid or you passed %NULL in
  565.  * idr_get_new().
  566.  *
  567.  * This function can be called under rcu_read_lock(), given that the leaf
  568.  * pointers' lifetimes are correctly managed.
  569.  */
  570. void *idr_find(struct idr *idp, int id)
  571. {
  572.         int n;
  573.         struct idr_layer *p;
  574.  
  575.         p = rcu_dereference(idp->top);
  576.         if (!p)
  577.                 return NULL;
  578.         n = (p->layer+1) * IDR_BITS;
  579.  
  580.         /* Mask off upper bits we don't use for the search. */
  581.         id &= MAX_ID_MASK;
  582.  
  583.         if (id >= (1 << n))
  584.                 return NULL;
  585.         BUG_ON(n == 0);
  586.  
  587.         while (n > 0 && p) {
  588.                 n -= IDR_BITS;
  589.                 BUG_ON(n != p->layer*IDR_BITS);
  590.                 p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
  591.         }
  592.         return((void *)p);
  593. }
  594.  
  595. #if 0
  596. /**
  597.  * idr_for_each - iterate through all stored pointers
  598.  * @idp: idr handle
  599.  * @fn: function to be called for each pointer
  600.  * @data: data passed back to callback function
  601.  *
  602.  * Iterate over the pointers registered with the given idr.  The
  603.  * callback function will be called for each pointer currently
  604.  * registered, passing the id, the pointer and the data pointer passed
  605.  * to this function.  It is not safe to modify the idr tree while in
  606.  * the callback, so functions such as idr_get_new and idr_remove are
  607.  * not allowed.
  608.  *
  609.  * We check the return of @fn each time. If it returns anything other
  610.  * than 0, we break out and return that value.
  611.  *
  612.  * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
  613.  */
  614. int idr_for_each(struct idr *idp,
  615.                  int (*fn)(int id, void *p, void *data), void *data)
  616. {
  617.         int n, id, max, error = 0;
  618.         struct idr_layer *p;
  619.         struct idr_layer *pa[MAX_LEVEL];
  620.         struct idr_layer **paa = &pa[0];
  621.  
  622.         n = idp->layers * IDR_BITS;
  623.         p = rcu_dereference(idp->top);
  624.         max = 1 << n;
  625.  
  626.         id = 0;
  627.         while (id < max) {
  628.                 while (n > 0 && p) {
  629.                         n -= IDR_BITS;
  630.                         *paa++ = p;
  631.                         p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
  632.                 }
  633.  
  634.                 if (p) {
  635.                         error = fn(id, (void *)p, data);
  636.                         if (error)
  637.                                 break;
  638.                 }
  639.  
  640.                 id += 1 << n;
  641.                 while (n < fls(id)) {
  642.                         n += IDR_BITS;
  643.                         p = *--paa;
  644.                 }
  645.         }
  646.  
  647.         return error;
  648. }
  649. EXPORT_SYMBOL(idr_for_each);
  650.  
  651. /**
  652.  * idr_get_next - look up the next object at or after a given id
  653.  * @idp: idr handle
  654.  * @nextidp: pointer to the lookup key; updated to the id actually found
  655.  *
  656.  * Returns a pointer to the registered object with the smallest id greater
  657.  * than or equal to *@nextidp, or %NULL if there is no such object.
  658.  */
  659.  
  660. void *idr_get_next(struct idr *idp, int *nextidp)
  661. {
  662.         struct idr_layer *p, *pa[MAX_LEVEL];
  663.         struct idr_layer **paa = &pa[0];
  664.         int id = *nextidp;
  665.         int n, max;
  666.  
  667.         /* find first ent */
  668.         n = idp->layers * IDR_BITS;
  669.         max = 1 << n;
  670.         p = rcu_dereference(idp->top);
  671.         if (!p)
  672.                 return NULL;
  673.  
  674.         while (id < max) {
  675.                 while (n > 0 && p) {
  676.                         n -= IDR_BITS;
  677.                         *paa++ = p;
  678.                         p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
  679.                 }
  680.  
  681.                 if (p) {
  682.                         *nextidp = id;
  683.                         return p;
  684.                 }
  685.  
  686.                 id += 1 << n;
  687.                 while (n < fls(id)) {
  688.                         n += IDR_BITS;
  689.                         p = *--paa;
  690.                 }
  691.         }
  692.         return NULL;
  693. }
  694.  
  695.  
  696.  
  697. /**
  698.  * idr_replace - replace pointer for given id
  699.  * @idp: idr handle
  700.  * @ptr: pointer you want associated with the id
  701.  * @id: lookup key
  702.  *
  703.  * Replace the pointer registered with an id and return the old value.
  704.  * A -ENOENT return indicates that @id was not found.
  705.  * A -EINVAL return indicates that @id was not within valid constraints.
  706.  *
  707.  * The caller must serialize with writers.
  708.  */
  709. void *idr_replace(struct idr *idp, void *ptr, int id)
  710. {
  711.         int n;
  712.         struct idr_layer *p, *old_p;
  713.  
  714.         p = idp->top;
  715.         if (!p)
  716.                 return ERR_PTR(-EINVAL);
  717.  
  718.         n = (p->layer+1) * IDR_BITS;
  719.  
  720.         id &= MAX_ID_MASK;
  721.  
  722.         if (id >= (1 << n))
  723.                 return ERR_PTR(-EINVAL);
  724.  
  725.         n -= IDR_BITS;
  726.         while ((n > 0) && p) {
  727.                 p = p->ary[(id >> n) & IDR_MASK];
  728.                 n -= IDR_BITS;
  729.         }
  730.  
  731.         n = id & IDR_MASK;
  732.         if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
  733.                 return ERR_PTR(-ENOENT);
  734.  
  735.         old_p = p->ary[n];
  736.         rcu_assign_pointer(p->ary[n], ptr);
  737.  
  738.         return old_p;
  739. }
  740. EXPORT_SYMBOL(idr_replace);
  741.  
  742.  
  743. #endif
  744.  
  745.  
  746. void idr_init_cache(void)
  747. {
  748.     //idr_layer_cache = kmem_cache_create("idr_layer_cache",
  749.     //           sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
  750. }
  751.  
  752. /**
  753.  * idr_init - initialize idr handle
  754.  * @idp:        idr handle
  755.  *
  756.  * This function is used to set up the handle (@idp) that you will pass
  757.  * to the rest of the functions.
  758.  */
  759. void idr_init(struct idr *idp)
  760. {
  761.         memset(idp, 0, sizeof(struct idr));
  762.  //  spin_lock_init(&idp->lock);
  763. }
  764.  
  765. #if 0
  766.  
  767. /*
  768.  * IDA - IDR based ID allocator
  769.  *
  770.  * this is id allocator without id -> pointer translation.  Memory
  771.  * usage is much lower than full blown idr because each id only
  772.  * occupies a bit.  ida uses a custom leaf node which contains
  773.  * IDA_BITMAP_BITS slots.
  774.  *
  775.  * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
  776.  */
  777.  
  778. static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
  779. {
  780.         unsigned long flags;
  781.  
  782.         if (!ida->free_bitmap) {
  783.                 spin_lock_irqsave(&ida->idr.lock, flags);
  784.                 if (!ida->free_bitmap) {
  785.                         ida->free_bitmap = bitmap;
  786.                         bitmap = NULL;
  787.                 }
  788.                 spin_unlock_irqrestore(&ida->idr.lock, flags);
  789.         }
  790.  
  791.         kfree(bitmap);
  792. }
  793.  
  794. /**
  795.  * ida_pre_get - reserve resources for ida allocation
  796.  * @ida:        ida handle
  797.  * @gfp_mask:   memory allocation flag
  798.  *
  799.  * This function should be called prior to locking and calling the
  800.  * following function.  It preallocates enough memory to satisfy the
  801.  * worst possible allocation.
  802.  *
  803.  * If the system is REALLY out of memory this function returns 0,
  804.  * otherwise 1.
  805.  */
  806. int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
  807. {
  808.         /* allocate idr_layers */
  809.         if (!idr_pre_get(&ida->idr, gfp_mask))
  810.                 return 0;
  811.  
  812.         /* allocate free_bitmap */
  813.         if (!ida->free_bitmap) {
  814.                 struct ida_bitmap *bitmap;
  815.  
  816.                 bitmap = kzalloc(sizeof(struct ida_bitmap), gfp_mask);
  817.                 if (!bitmap)
  818.                         return 0;
  819.  
  820.                 free_bitmap(ida, bitmap);
  821.         }
  822.  
  823.         return 1;
  824. }
  825. EXPORT_SYMBOL(ida_pre_get);
  826.  
  827. /**
  828.  * ida_get_new_above - allocate new ID above or equal to a start id
  829.  * @ida:        ida handle
  830.  * @starting_id: id to start search at
  831.  * @p_id:       pointer to the allocated handle
  832.  *
  833.  * Allocate new ID above or equal to @starting_id.  It should be called with
  834.  * any required locks.
  835.  *
  836.  * If memory is required, it will return -EAGAIN, you should unlock
  837.  * and go back to the ida_pre_get() call.  If the ida is full, it will
  838.  * return -ENOSPC.
  839.  *
  840.  * @p_id returns a value in the range @starting_id ... 0x7fffffff.
  841.  */
  842. int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
  843. {
  844.         struct idr_layer *pa[MAX_LEVEL];
  845.         struct ida_bitmap *bitmap;
  846.         unsigned long flags;
  847.         int idr_id = starting_id / IDA_BITMAP_BITS;
  848.         int offset = starting_id % IDA_BITMAP_BITS;
  849.         int t, id;
  850.  
  851.  restart:
  852.         /* get vacant slot */
  853.         t = idr_get_empty_slot(&ida->idr, idr_id, pa);
  854.         if (t < 0)
  855.                 return _idr_rc_to_errno(t);
  856.  
  857.         if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
  858.                 return -ENOSPC;
  859.  
  860.         if (t != idr_id)
  861.                 offset = 0;
  862.         idr_id = t;
  863.  
  864.         /* if bitmap isn't there, create a new one */
  865.         bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
  866.         if (!bitmap) {
  867.                 spin_lock_irqsave(&ida->idr.lock, flags);
  868.                 bitmap = ida->free_bitmap;
  869.                 ida->free_bitmap = NULL;
  870.                 spin_unlock_irqrestore(&ida->idr.lock, flags);
  871.  
  872.                 if (!bitmap)
  873.                         return -EAGAIN;
  874.  
  875.                 memset(bitmap, 0, sizeof(struct ida_bitmap));
  876.                 rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
  877.                                 (void *)bitmap);
  878.                 pa[0]->count++;
  879.         }
  880.  
  881.         /* lookup for empty slot */
  882.         t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
  883.         if (t == IDA_BITMAP_BITS) {
  884.                 /* no empty slot after offset, continue to the next chunk */
  885.                 idr_id++;
  886.                 offset = 0;
  887.                 goto restart;
  888.         }
  889.  
  890.         id = idr_id * IDA_BITMAP_BITS + t;
  891.         if (id >= MAX_ID_BIT)
  892.                 return -ENOSPC;
  893.  
  894.         __set_bit(t, bitmap->bitmap);
  895.         if (++bitmap->nr_busy == IDA_BITMAP_BITS)
  896.                 idr_mark_full(pa, idr_id);
  897.  
  898.         *p_id = id;
  899.  
  900.         /* Each leaf node can handle nearly a thousand slots and the
  901.  * whole idea of ida is to have a small memory footprint.
  902.          * Throw away extra resources one by one after each successful
  903.          * allocation.
  904.          */
  905.         if (ida->idr.id_free_cnt || ida->free_bitmap) {
  906.                 struct idr_layer *p = get_from_free_list(&ida->idr);
  907.                 if (p)
  908.                         kmem_cache_free(idr_layer_cache, p);
  909.         }
  910.  
  911.         return 0;
  912. }
  913. EXPORT_SYMBOL(ida_get_new_above);
  914.  
  915. /**
  916.  * ida_get_new - allocate new ID
  917.  * @ida:        idr handle
  918.  * @p_id:       pointer to the allocated handle
  919.  *
  920.  * Allocate new ID.  It should be called with any required locks.
  921.  *
  922.  * If memory is required, it will return -EAGAIN, you should unlock
  923.  * and go back to the idr_pre_get() call.  If the idr is full, it will
  924.  * return -ENOSPC.
  925.  *
  926.  * @id returns a value in the range 0 ... 0x7fffffff.
  927.  */
  928. int ida_get_new(struct ida *ida, int *p_id)
  929. {
  930.         return ida_get_new_above(ida, 0, p_id);
  931. }
  932. EXPORT_SYMBOL(ida_get_new);
  933.  
  934. /**
  935.  * ida_remove - remove the given ID
  936.  * @ida:        ida handle
  937.  * @id:         ID to free
  938.  */
  939. void ida_remove(struct ida *ida, int id)
  940. {
  941.         struct idr_layer *p = ida->idr.top;
  942.         int shift = (ida->idr.layers - 1) * IDR_BITS;
  943.         int idr_id = id / IDA_BITMAP_BITS;
  944.         int offset = id % IDA_BITMAP_BITS;
  945.         int n;
  946.         struct ida_bitmap *bitmap;
  947.  
  948.         /* clear full bits while looking up the leaf idr_layer */
  949.         while ((shift > 0) && p) {
  950.                 n = (idr_id >> shift) & IDR_MASK;
  951.                 __clear_bit(n, &p->bitmap);
  952.                 p = p->ary[n];
  953.                 shift -= IDR_BITS;
  954.         }
  955.  
  956.         if (p == NULL)
  957.                 goto err;
  958.  
  959.         n = idr_id & IDR_MASK;
  960.         __clear_bit(n, &p->bitmap);
  961.  
  962.         bitmap = (void *)p->ary[n];
  963.         if (!test_bit(offset, bitmap->bitmap))
  964.                 goto err;
  965.  
  966.         /* update bitmap and remove it if empty */
  967.         __clear_bit(offset, bitmap->bitmap);
  968.         if (--bitmap->nr_busy == 0) {
  969.                 __set_bit(n, &p->bitmap);       /* to please idr_remove() */
  970.                 idr_remove(&ida->idr, idr_id);
  971.                 free_bitmap(ida, bitmap);
  972.         }
  973.  
  974.         return;
  975.  
  976.  err:
  977.         printk(KERN_WARNING
  978.                "ida_remove called for id=%d which is not allocated.\n", id);
  979. }
  980. EXPORT_SYMBOL(ida_remove);
  981.  
  982. /**
  983.  * ida_destroy - release all cached layers within an ida tree
  984.  * @ida:        ida handle
  985.  */
  986. void ida_destroy(struct ida *ida)
  987. {
  988.         idr_destroy(&ida->idr);
  989.         kfree(ida->free_bitmap);
  990. }
  991. EXPORT_SYMBOL(ida_destroy);
  992.  
  993. /**
  994.  * ida_init - initialize ida handle
  995.  * @ida:        ida handle
  996.  *
  997.  * This function is used to set up the handle (@ida) that you will pass
  998.  * to the rest of the functions.
  999.  */
  1000. void ida_init(struct ida *ida)
  1001. {
  1002.         memset(ida, 0, sizeof(struct ida));
  1003.         idr_init(&ida->idr);
  1004.  
  1005. }
  1006. EXPORT_SYMBOL(ida_init);
  1007.  
  1008.  
  1009. #endif
  1010.