
/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *      Copyright (C) 2002 by Concurrent Computer Corporation
 *      Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix-tree-like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate a pointer, or
 * whatever, with that id; we treat it as a (void *).  You can pass this
 * id to a user for him to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep IDR_FREE_MAX layers) in a local pool,
 * so we don't need to go to the memory "store" during an id allocation,
 * and so you don't need to be too concerned about locking and conflicts
 * with the slab allocator.
 */

#include <linux/kernel.h>

#include <linux/idr.h>
//#include <stdlib.h>
#include "drm.h"
#include "drmP.h"
#include "drm_crtc.h"

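#if 0
/*
 * Round-trip sketch of the id <-> pointer service described in the
 * header comment: bind a pointer to a freshly allocated id, hand the
 * id out, translate it back later, then release it.  GFP_KERNEL and
 * the error handling are assumptions made for illustration only; the
 * full preallocation/retry protocol is sketched after
 * idr_get_new_above().
 */
static struct idr example_idr;          /* hypothetical idr instance */

static int example_round_trip(void *obj)
{
        int id;

        idr_init(&example_idr);

        if (!idr_pre_get(&example_idr, GFP_KERNEL))
                return -ENOMEM;
        if (idr_get_new(&example_idr, obj, &id))
                return -1;

        /* Anyone holding 'id' can now get 'obj' back... */
        if (idr_find(&example_idr, id) != obj)
                return -1;

        /* ...until the id is released. */
        idr_remove(&example_idr, id);
        return 0;
}
#endif
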
/*
 * Find the first set bit in a bitmap of 'size' bits.  Returns 'size'
 * if no bit is set.
 */
unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
{
    const unsigned long *p = addr;
    unsigned long result = 0;
    unsigned long tmp;

    while (size & ~(BITS_PER_LONG-1)) {
        if ((tmp = *(p++)))
            goto found;
        result += BITS_PER_LONG;
        size -= BITS_PER_LONG;
    }
    if (!size)
        return result;

    tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
    if (tmp == 0UL)     /* Are any bits set? */
        return result + size;   /* Nope. */
found:
    return result + __ffs(tmp);
}

/*
 * Find the first set bit at an index >= 'offset' (32-bit words only:
 * note the >> 5, & 31 and bsfl below).  Falls back to find_first_bit()
 * for the remaining full words.
 */
int find_next_bit(const unsigned long *addr, int size, int offset)
{
    const unsigned long *p = addr + (offset >> 5);
    int set = 0, bit = offset & 31, res;

    if (bit)
    {
        /*
         * Look for nonzero in the first 32 bits:
         */
        __asm__("bsfl %1,%0\n\t"
                "jne 1f\n\t"
                "movl $32, %0\n"
                "1:"
                : "=r" (set)
                : "r" (*p >> bit));
        if (set < (32 - bit))
                return set + offset;
        set = 32 - bit;
        p++;
    }
    /*
     * No set bit yet, search remaining full words for a bit
     */
    res = find_first_bit (p, size - 32 * (p - addr));
    return (offset + set + res);
}

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

/*
 * Local stand-ins for the kernel RCU helpers: plain accesses with no
 * memory barriers and no read-side tracking in this port.
 */
#define rcu_dereference(p)     ({ \
                                typeof(p) _________p1 = ACCESS_ONCE(p); \
                                (_________p1); \
                                })

#define rcu_assign_pointer(p, v) \
        ({ \
                if (!__builtin_constant_p(v) || \
                    ((v) != NULL)) \
                (p) = (v); \
        })

//static struct kmem_cache *idr_layer_cache;

static struct idr_layer *get_from_free_list(struct idr *idp)
{
        struct idr_layer *p;
        unsigned long flags;

//   spin_lock_irqsave(&idp->lock, flags);
        if ((p = idp->id_free)) {
                idp->id_free = p->ary[0];
                idp->id_free_cnt--;
                p->ary[0] = NULL;
        }
//   spin_unlock_irqrestore(&idp->lock, flags);
        return(p);
}

static void idr_layer_rcu_free(struct rcu_head *head)
{
        struct idr_layer *layer;

        layer = container_of(head, struct idr_layer, rcu_head);
        kfree(layer);
}

static inline void free_layer(struct idr_layer *p)
{
        kfree(p);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
        p->ary[0] = idp->id_free;
        idp->id_free = p;
        idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
        unsigned long flags;

        /*
         * Depends on the return element being zeroed.
         */
//   spin_lock_irqsave(&idp->lock, flags);
        __move_to_free_list(idp, p);
//   spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
        struct idr_layer *p = pa[0];
        int l = 0;

        __set_bit(id & IDR_MASK, &p->bitmap);
        /*
         * If this layer is full mark the bit in the layer above to
         * show that this part of the radix tree is full.  This may
         * complete the layer above and require walking up the radix
         * tree.
         */
        while (p->bitmap == IDR_FULL) {
                if (!(p = pa[++l]))
                        break;
                id = id >> IDR_BITS;
                __set_bit((id & IDR_MASK), &p->bitmap);
        }
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp:        idr handle
 * @gfp_mask:   memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * idr_get_new* functions. It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, u32_t gfp_mask)
{
   while (idp->id_free_cnt < IDR_FREE_MAX) {
       struct idr_layer *new;
       new = kzalloc(sizeof(struct idr_layer), gfp_mask);
       if (new == NULL)
           return (0);
       move_to_free_list(idp, new);
   }
   return 1;
}
EXPORT_SYMBOL(idr_pre_get);

static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
        int n, m, sh;
        struct idr_layer *p, *new;
        int l, id, oid;
        unsigned long bm;

        id = *starting_id;
 restart:
        p = idp->top;
        l = idp->layers;
        pa[l--] = NULL;
        while (1) {
                /*
                 * We run around this while until we reach the leaf node...
                 */
                n = (id >> (IDR_BITS*l)) & IDR_MASK;
                bm = ~p->bitmap;
                m = find_next_bit(&bm, IDR_SIZE, n);
                if (m == IDR_SIZE) {
                        /* no space available go back to previous layer. */
                        l++;
                        oid = id;
                        id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

                        /* if already at the top layer, we need to grow */
                        if (!(p = pa[l])) {
                                *starting_id = id;
                                return IDR_NEED_TO_GROW;
                        }

                        /* If we need to go up one layer, continue the
                         * loop; otherwise, restart from the top.
                         */
                        sh = IDR_BITS * (l + 1);
                        if (oid >> sh == id >> sh)
                                continue;
                        else
                                goto restart;
                }
                if (m != n) {
                        sh = IDR_BITS*l;
                        id = ((id >> sh) ^ n ^ m) << sh;
                }
                if ((id >= MAX_ID_BIT) || (id < 0))
                        return IDR_NOMORE_SPACE;
                if (l == 0)
                        break;
                /*
                 * Create the layer below if it is missing.
                 */
                if (!p->ary[m]) {
                        new = get_from_free_list(idp);
                        if (!new)
                                return -1;
                        new->layer = l-1;
                        rcu_assign_pointer(p->ary[m], new);
                        p->count++;
                }
                pa[l--] = p;
                p = p->ary[m];
        }

        pa[l] = p;
        return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
                              struct idr_layer **pa)
{
        struct idr_layer *p, *new;
        int layers, v, id;
        unsigned long flags;

        id = starting_id;
build_up:
        p = idp->top;
        layers = idp->layers;
        if (unlikely(!p)) {
                if (!(p = get_from_free_list(idp)))
                        return -1;
                p->layer = 0;
                layers = 1;
        }
        /*
         * Add a new layer to the top of the tree if the requested
         * id is larger than the currently allocated space.
         */
        while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
                layers++;
                if (!p->count) {
                        /* special case: if the tree is currently empty,
                         * then we grow the tree by moving the top node
                         * upwards.
                         */
                        p->layer++;
                        continue;
                }
                if (!(new = get_from_free_list(idp))) {
                        /*
                         * The allocation failed.  If we built part of
                         * the structure tear it down.
                         */
//           spin_lock_irqsave(&idp->lock, flags);
                        for (new = p; p && p != idp->top; new = p) {
                                p = p->ary[0];
                                new->ary[0] = NULL;
                                new->bitmap = new->count = 0;
                                __move_to_free_list(idp, new);
                        }
//           spin_unlock_irqrestore(&idp->lock, flags);
                        return -1;
                }
                new->ary[0] = p;
                new->count = 1;
                new->layer = layers-1;
                if (p->bitmap == IDR_FULL)
                        __set_bit(0, &new->bitmap);
                p = new;
        }
        rcu_assign_pointer(idp->top, p);
        idp->layers = layers;
        v = sub_alloc(idp, &id, pa);
        if (v == IDR_NEED_TO_GROW)
                goto build_up;
        return(v);
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
        struct idr_layer *pa[MAX_LEVEL];
        int id;

        id = idr_get_empty_slot(idp, starting_id, pa);
        if (id >= 0) {
                /*
                 * Successfully found an empty slot.  Install the user
                 * pointer and mark the slot full.
                 */
                rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
                                (struct idr_layer *)ptr);
                pa[0]->count++;
                idr_mark_full(pa, id);
        }

        return id;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN; you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
        int rv;

        rv = idr_get_new_above_int(idp, ptr, starting_id);
        /*
         * This is a cheap hack until the IDR code can be fixed to
         * return proper error values.
         */
        if (rv < 0) {
                dbgprintf("fail\n");
                return _idr_rc_to_errno(rv);
        }
        *id = rv;
        return 0;
}
EXPORT_SYMBOL(idr_get_new_above);
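
#if 0
/*
 * Sketch of the allocation protocol described above: preallocate with
 * idr_pre_get(), take whatever lock the caller uses, try the
 * allocation, and loop on -EAGAIN.  The lock placement and GFP_KERNEL
 * are assumptions made for illustration; they are not defined by this
 * file.
 */
static int example_alloc_id(struct idr *idp, void *obj, int first_id)
{
        int id, ret;

        do {
                if (!idr_pre_get(idp, GFP_KERNEL))
                        return -ENOMEM;
                /* lock(...);  -- serialize against other writers here */
                ret = idr_get_new_above(idp, obj, first_id, &id);
                /* unlock(...); */
        } while (ret == -EAGAIN);

        return ret ? ret : id;  /* -ENOSPC if the id space is exhausted */
}
#endif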

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN; you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
        int rv;

        rv = idr_get_new_above_int(idp, ptr, 0);
        /*
         * This is a cheap hack until the IDR code can be fixed to
         * return proper error values.
         */
        if (rv < 0)
                return _idr_rc_to_errno(rv);
        *id = rv;
        return 0;
}
EXPORT_SYMBOL(idr_get_new);

static void idr_remove_warning(int id)
{
        printk(KERN_WARNING
                "idr_remove called for id=%d which is not allocated.\n", id);
//   dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
        struct idr_layer *p = idp->top;
        struct idr_layer **pa[MAX_LEVEL];
        struct idr_layer ***paa = &pa[0];
        struct idr_layer *to_free;
        int n;

        *paa = NULL;
        *++paa = &idp->top;

        while ((shift > 0) && p) {
                n = (id >> shift) & IDR_MASK;
                __clear_bit(n, &p->bitmap);
                *++paa = &p->ary[n];
                p = p->ary[n];
                shift -= IDR_BITS;
        }
        n = id & IDR_MASK;
        if (likely(p != NULL && test_bit(n, &p->bitmap))) {
                __clear_bit(n, &p->bitmap);
                rcu_assign_pointer(p->ary[n], NULL);
                to_free = NULL;
                while (*paa && !--((**paa)->count)) {
                        if (to_free)
                                free_layer(to_free);
                        to_free = **paa;
                        **paa-- = NULL;
                }
                if (!*paa)
                        idp->layers = 0;
                if (to_free)
                        free_layer(to_free);
        } else
                idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
        struct idr_layer *p;
        struct idr_layer *to_free;

        /* Mask off upper bits we don't use for the search. */
        id &= MAX_ID_MASK;

        sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
        if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
            idp->top->ary[0]) {
                /*
                 * Single child at leftmost slot: we can shrink the tree.
                 * This level is not needed anymore since when layers are
                 * inserted, they are inserted at the top of the existing
                 * tree.
                 */
                to_free = idp->top;
                p = idp->top->ary[0];
                rcu_assign_pointer(idp->top, p);
                --idp->layers;
                to_free->bitmap = to_free->count = 0;
                free_layer(to_free);
        }
        while (idp->id_free_cnt >= IDR_FREE_MAX) {
                p = get_from_free_list(idp);
                /*
                 * Note: we don't call the rcu callback here, since the only
                 * layers that fall into the freelist are those that have been
                 * preallocated.
                 */
                kfree(p);
        }
        return;
}
EXPORT_SYMBOL(idr_remove);


/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idp_layers, but this
 * function will remove all id mappings and leave all idp_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
        int n, id, max;
        struct idr_layer *p;
        struct idr_layer *pa[MAX_LEVEL];
        struct idr_layer **paa = &pa[0];

        n = idp->layers * IDR_BITS;
        p = idp->top;
        rcu_assign_pointer(idp->top, NULL);
        max = 1 << n;

        id = 0;
        while (id < max) {
                while (n > IDR_BITS && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = p->ary[(id >> n) & IDR_MASK];
                }

                id += 1 << n;
                while (n < fls(id)) {
                        if (p)
                                free_layer(p);
                        n += IDR_BITS;
                        p = *--paa;
                }
        }
        idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
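
#if 0
/*
 * Sketch of the clean-up sequence described in the idr_remove_all()
 * comment: free the stored objects, drop all ids, then release the
 * cached layers.  idr_for_each() is compiled out in this port, so the
 * callback below is purely illustrative; free_one() stands in for
 * whatever destructor the stored objects actually need.
 */
static int free_one(int id, void *p, void *data)
{
        kfree(p);               /* assumed destructor for stored objects */
        return 0;               /* non-zero would abort the iteration */
}

static void example_teardown(struct idr *idp)
{
        idr_for_each(idp, free_one, NULL);  /* free every stored object */
        idr_remove_all(idp);                /* drop all id mappings */
        idr_destroy(idp);                   /* release cached idr_layers */
}
#endif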

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
        while (idp->id_free_cnt) {
                struct idr_layer *p = get_from_free_list(idp);
                kfree(p);
        }
}
EXPORT_SYMBOL(idr_destroy);


/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
void *idr_find(struct idr *idp, int id)
{
        int n;
        struct idr_layer *p;

        p = rcu_dereference(idp->top);
        if (!p)
                return NULL;
        n = (p->layer+1) * IDR_BITS;

        /* Mask off upper bits we don't use for the search. */
        id &= MAX_ID_MASK;

        if (id >= (1 << n))
                return NULL;
        BUG_ON(n == 0);

        while (n > 0 && p) {
                n -= IDR_BITS;
                BUG_ON(n != p->layer*IDR_BITS);
                p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
        }
        return((void *)p);
}
EXPORT_SYMBOL(idr_find);
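
#if 0
/*
 * Sketch of a lookup as described in the idr_find() comment: readers
 * may run under rcu_read_lock() as long as the stored objects outlive
 * the read-side critical section.  rcu_read_lock()/rcu_read_unlock()
 * are the stock kernel primitives and are assumed (or stubbed out) in
 * this port; a NULL return simply means the id is not allocated.
 */
static void *example_lookup(struct idr *idp, int id)
{
        void *obj;

        rcu_read_lock();
        obj = idr_find(idp, id);
        rcu_read_unlock();

        return obj;     /* NULL if id is not valid */
}
#endif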

#if 0
/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time. If it returns anything other
 * than 0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
                 int (*fn)(int id, void *p, void *data), void *data)
{
        int n, id, max, error = 0;
        struct idr_layer *p;
        struct idr_layer *pa[MAX_LEVEL];
        struct idr_layer **paa = &pa[0];

        n = idp->layers * IDR_BITS;
        p = rcu_dereference(idp->top);
        max = 1 << n;

        id = 0;
        while (id < max) {
                while (n > 0 && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
                }

                if (p) {
                        error = fn(id, (void *)p, data);
                        if (error)
                                break;
                }

                id += 1 << n;
                while (n < fls(id)) {
                        n += IDR_BITS;
                        p = *--paa;
                }
        }

        return error;
}
EXPORT_SYMBOL(idr_for_each);

/**
 * idr_get_next - look up the next object at or above the given id
 * @idp: idr handle
 * @nextidp: pointer to the lookup key
 *
 * Returns a pointer to the registered object with the smallest id
 * greater than or equal to *@nextidp, and updates *@nextidp to that id.
 */
void *idr_get_next(struct idr *idp, int *nextidp)
{
        struct idr_layer *p, *pa[MAX_LEVEL];
        struct idr_layer **paa = &pa[0];
        int id = *nextidp;
        int n, max;

        /* find first ent */
        n = idp->layers * IDR_BITS;
        max = 1 << n;
        p = rcu_dereference(idp->top);
        if (!p)
                return NULL;

        while (id < max) {
                while (n > 0 && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
                }

                if (p) {
                        *nextidp = id;
                        return p;
                }

                id += 1 << n;
                while (n < fls(id)) {
                        n += IDR_BITS;
                        p = *--paa;
                }
        }
        return NULL;
}


/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A -ENOENT return indicates that @id was not found.
 * A -EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
        int n;
        struct idr_layer *p, *old_p;

        p = idp->top;
        if (!p)
                return ERR_PTR(-EINVAL);

        n = (p->layer+1) * IDR_BITS;

        id &= MAX_ID_MASK;

        if (id >= (1 << n))
                return ERR_PTR(-EINVAL);

        n -= IDR_BITS;
        while ((n > 0) && p) {
                p = p->ary[(id >> n) & IDR_MASK];
                n -= IDR_BITS;
        }

        n = id & IDR_MASK;
        if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
                return ERR_PTR(-ENOENT);

        old_p = p->ary[n];
        rcu_assign_pointer(p->ary[n], ptr);

        return old_p;
}
EXPORT_SYMBOL(idr_replace);
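
/*
 * Sketch of idr_replace() error handling as described in its comment:
 * the old pointer comes back on success, an ERR_PTR()-encoded errno on
 * failure.  IS_ERR()/PTR_ERR() are the usual kernel helpers and are
 * assumed to be available in this port.
 */
static int example_swap(struct idr *idp, int id, void *new_obj)
{
        void *old = idr_replace(idp, new_obj, id);

        if (IS_ERR(old))
                return PTR_ERR(old);    /* -ENOENT or -EINVAL */
        /* 'old' is the previously registered pointer; caller owns it now. */
        return 0;
}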

#endif


void idr_init_cache(void)
{
    //idr_layer_cache = kmem_cache_create("idr_layer_cache",
    //           sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp:        idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
        memset(idp, 0, sizeof(struct idr));
 //  spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);

#if 0

/*
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
        unsigned long flags;

        if (!ida->free_bitmap) {
                spin_lock_irqsave(&ida->idr.lock, flags);
                if (!ida->free_bitmap) {
                        ida->free_bitmap = bitmap;
                        bitmap = NULL;
                }
                spin_unlock_irqrestore(&ida->idr.lock, flags);
        }

        kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida:        ida handle
 * @gfp_mask:   memory allocation flag
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
        /* allocate idr_layers */
        if (!idr_pre_get(&ida->idr, gfp_mask))
                return 0;

        /* allocate free_bitmap */
        if (!ida->free_bitmap) {
                struct ida_bitmap *bitmap;

                bitmap = kzalloc(sizeof(struct ida_bitmap), gfp_mask);
                if (!bitmap)
                        return 0;

                free_bitmap(ida, bitmap);
        }

        return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida:        ida handle
 * @starting_id: id to start search at
 * @p_id:       pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return -EAGAIN; you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... 0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
        struct idr_layer *pa[MAX_LEVEL];
        struct ida_bitmap *bitmap;
        unsigned long flags;
        int idr_id = starting_id / IDA_BITMAP_BITS;
        int offset = starting_id % IDA_BITMAP_BITS;
        int t, id;

 restart:
        /* get vacant slot */
        t = idr_get_empty_slot(&ida->idr, idr_id, pa);
        if (t < 0)
                return _idr_rc_to_errno(t);

        if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
                return -ENOSPC;

        if (t != idr_id)
                offset = 0;
        idr_id = t;

        /* if bitmap isn't there, create a new one */
        bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
        if (!bitmap) {
                spin_lock_irqsave(&ida->idr.lock, flags);
                bitmap = ida->free_bitmap;
                ida->free_bitmap = NULL;
                spin_unlock_irqrestore(&ida->idr.lock, flags);

                if (!bitmap)
                        return -EAGAIN;

                memset(bitmap, 0, sizeof(struct ida_bitmap));
                rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
                                (void *)bitmap);
                pa[0]->count++;
        }

        /* lookup for empty slot */
        t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
        if (t == IDA_BITMAP_BITS) {
                /* no empty slot after offset, continue to the next chunk */
                idr_id++;
                offset = 0;
                goto restart;
        }

        id = idr_id * IDA_BITMAP_BITS + t;
        if (id >= MAX_ID_BIT)
                return -ENOSPC;

        __set_bit(t, bitmap->bitmap);
        if (++bitmap->nr_busy == IDA_BITMAP_BITS)
                idr_mark_full(pa, idr_id);

        *p_id = id;

        /* Each leaf node can handle nearly a thousand slots and the
         * whole idea of ida is to have a small memory footprint.
         * Throw away extra resources one by one after each successful
         * allocation.
         */
        if (ida->idr.id_free_cnt || ida->free_bitmap) {
                struct idr_layer *p = get_from_free_list(&ida->idr);
                if (p)
                        kmem_cache_free(idr_layer_cache, p);
        }

        return 0;
}
EXPORT_SYMBOL(ida_get_new_above);

/**
 * ida_get_new - allocate new ID
 * @ida:        ida handle
 * @p_id:       pointer to the allocated handle
 *
 * Allocate new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return -EAGAIN; you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range 0 ... 0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
        return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);
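
/*
 * Sketch of the ida allocation pattern from the comments above: the
 * same pre-get/allocate/retry protocol as idr, but only an integer ID
 * comes back.  GFP_KERNEL and the surrounding locking are assumptions
 * made for the sake of the example.
 */
static int example_get_ida_id(struct ida *ida)
{
        int id, ret;

        do {
                if (!ida_pre_get(ida, GFP_KERNEL))
                        return -ENOMEM;
                ret = ida_get_new(ida, &id);
        } while (ret == -EAGAIN);

        if (ret)
                return ret;             /* -ENOSPC */

        /* ... use id ... */
        ida_remove(ida, id);            /* give it back when done */
        return 0;
}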

/**
 * ida_remove - remove the given ID
 * @ida:        ida handle
 * @id:         ID to free
 */
void ida_remove(struct ida *ida, int id)
{
        struct idr_layer *p = ida->idr.top;
        int shift = (ida->idr.layers - 1) * IDR_BITS;
        int idr_id = id / IDA_BITMAP_BITS;
        int offset = id % IDA_BITMAP_BITS;
        int n;
        struct ida_bitmap *bitmap;

        /* clear full bits while looking up the leaf idr_layer */
        while ((shift > 0) && p) {
                n = (idr_id >> shift) & IDR_MASK;
                __clear_bit(n, &p->bitmap);
                p = p->ary[n];
                shift -= IDR_BITS;
        }

        if (p == NULL)
                goto err;

        n = idr_id & IDR_MASK;
        __clear_bit(n, &p->bitmap);

        bitmap = (void *)p->ary[n];
        if (!test_bit(offset, bitmap->bitmap))
                goto err;

        /* update bitmap and remove it if empty */
        __clear_bit(offset, bitmap->bitmap);
        if (--bitmap->nr_busy == 0) {
                __set_bit(n, &p->bitmap);       /* to please idr_remove() */
                idr_remove(&ida->idr, idr_id);
                free_bitmap(ida, bitmap);
        }

        return;

 err:
        printk(KERN_WARNING
               "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida:        ida handle
 */
void ida_destroy(struct ida *ida)
{
        idr_destroy(&ida->idr);
        kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_init - initialize ida handle
 * @ida:        ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
        memset(ida, 0, sizeof(struct ida));
        idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);


#endif