/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *      Copyright (C) 2002 by Concurrent Computer Corporation
 *      Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate a pointer (or
 * whatever, we treat it as a (void *)) with that id.  You can pass this
 * id to a user for them to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep IDR_FREE_MAX layers in a local pool)
 * so we don't need to go to the memory "store" during an id allocate;
 * this way you don't need to be too concerned about locking and
 * conflicts with the slab allocator.
 */

#include <linux/kernel.h>

#include <linux/idr.h>
//#include <stdlib.h>
#include "drm.h"
#include "drmP.h"
#include "drm_crtc.h"

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

#define rcu_dereference(p)     ({ \
                                typeof(p) _________p1 = ACCESS_ONCE(p); \
                                (_________p1); \
                                })

/*
 * The kernel's rcu_assign_pointer() issues a conditional smp_wmb() and
 * then stores unconditionally.  This port dropped the barrier but left
 * the condition guarding the store itself, so constant-NULL assignments
 * (used to clear slots in sub_remove()) were silently skipped.  No
 * barriers are needed here, so the macro reduces to a plain store.
 */
#define rcu_assign_pointer(p, v)        ((p) = (v))

//static struct kmem_cache *idr_layer_cache;

static struct idr_layer *get_from_free_list(struct idr *idp)
{
        struct idr_layer *p;
        unsigned long flags;

//   spin_lock_irqsave(&idp->lock, flags);
        if ((p = idp->id_free)) {
                idp->id_free = p->ary[0];
                idp->id_free_cnt--;
                p->ary[0] = NULL;
        }
//   spin_unlock_irqrestore(&idp->lock, flags);
        return(p);
}


static void idr_layer_rcu_free(struct rcu_head *head)
{
        struct idr_layer *layer;

        layer = container_of(head, struct idr_layer, rcu_head);
        kfree(layer);
}

/*
 * Without RCU grace periods in this port, layers are freed immediately
 * rather than via call_rcu(&p->rcu_head, idr_layer_rcu_free).
 */
static inline void free_layer(struct idr_layer *p)
{
        kfree(p);
}


/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
        p->ary[0] = idp->id_free;
        idp->id_free = p;
        idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
        unsigned long flags;

        /*
         * Depends on the return element being zeroed.
         */
//   spin_lock_irqsave(&idp->lock, flags);
        __move_to_free_list(idp, p);
//   spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
        struct idr_layer *p = pa[0];
        int l = 0;

        __set_bit(id & IDR_MASK, &p->bitmap);
        /*
         * If this layer is full mark the bit in the layer above to
         * show that this part of the radix tree is full.  This may
         * complete the layer above and require walking up the radix
         * tree.
         */
        while (p->bitmap == IDR_FULL) {
                if (!(p = pa[++l]))
                        break;
                id = id >> IDR_BITS;
                __set_bit((id & IDR_MASK), &p->bitmap);
        }
}


/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp:        idr handle
 * @gfp_mask:   memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * idr_get_new* functions.  It preallocates enough memory to satisfy
 * the worst possible allocation.  (See the usage sketch after
 * idr_get_new_above() below.)
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, u32_t gfp_mask)
{
        while (idp->id_free_cnt < IDR_FREE_MAX) {
                struct idr_layer *new;
                new = kzalloc(sizeof(struct idr_layer), gfp_mask);
                if (new == NULL)
                        return (0);
                move_to_free_list(idp, new);
        }
        return 1;
}
EXPORT_SYMBOL(idr_pre_get);


static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
        int n, m, sh;
        struct idr_layer *p, *new;
        int l, id, oid;
        unsigned long bm;

        id = *starting_id;
 restart:
        p = idp->top;
        l = idp->layers;
        pa[l--] = NULL;
        while (1) {
                /*
                 * We run around this while until we reach the leaf node...
                 */
                n = (id >> (IDR_BITS*l)) & IDR_MASK;
                bm = ~p->bitmap;
                m = find_next_bit(&bm, IDR_SIZE, n);
                if (m == IDR_SIZE) {
                        /* no space available go back to previous layer. */
                        l++;
                        oid = id;
                        id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

                        /* if already at the top layer, we need to grow */
                        if (!(p = pa[l])) {
                                *starting_id = id;
                                return IDR_NEED_TO_GROW;
                        }

                        /* If we need to go up one layer, continue the
                         * loop; otherwise, restart from the top.
                         */
                        sh = IDR_BITS * (l + 1);
                        if (oid >> sh == id >> sh)
                                continue;
                        else
                                goto restart;
                }
                if (m != n) {
                        sh = IDR_BITS*l;
                        id = ((id >> sh) ^ n ^ m) << sh;
                }
                if ((id >= MAX_ID_BIT) || (id < 0))
                        return IDR_NOMORE_SPACE;
                if (l == 0)
                        break;
                /*
                 * Create the layer below if it is missing.
                 */
                if (!p->ary[m]) {
                        new = get_from_free_list(idp);
                        if (!new)
                                return -1;
                        new->layer = l-1;
                        rcu_assign_pointer(p->ary[m], new);
                        p->count++;
                }
                pa[l--] = p;
                p = p->ary[m];
        }

        pa[l] = p;
        return id;
}


static int idr_get_empty_slot(struct idr *idp, int starting_id,
                              struct idr_layer **pa)
{
        struct idr_layer *p, *new;
        int layers, v, id;
        unsigned long flags;

        id = starting_id;
build_up:
        p = idp->top;
        layers = idp->layers;
        if (unlikely(!p)) {
                if (!(p = get_from_free_list(idp)))
                        return -1;
                p->layer = 0;
                layers = 1;
        }
        /*
         * Add a new layer to the top of the tree if the requested
         * id is larger than the currently allocated space.
         */
        while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
                layers++;
                if (!p->count) {
                        /* special case: if the tree is currently empty,
                         * then we grow the tree by moving the top node
                         * upwards.
                         */
                        p->layer++;
                        continue;
                }
                if (!(new = get_from_free_list(idp))) {
                        /*
                         * The allocation failed.  If we built part of
                         * the structure tear it down.
                         */
//           spin_lock_irqsave(&idp->lock, flags);
                        for (new = p; p && p != idp->top; new = p) {
                                p = p->ary[0];
                                new->ary[0] = NULL;
                                new->bitmap = new->count = 0;
                                __move_to_free_list(idp, new);
                        }
//           spin_unlock_irqrestore(&idp->lock, flags);
                        return -1;
                }
                new->ary[0] = p;
                new->count = 1;
                new->layer = layers-1;
                if (p->bitmap == IDR_FULL)
                        __set_bit(0, &new->bitmap);
                p = new;
        }
        rcu_assign_pointer(idp->top, p);
        idp->layers = layers;
        v = sub_alloc(idp, &id, pa);
        if (v == IDR_NEED_TO_GROW)
                goto build_up;
        return(v);
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
        struct idr_layer *pa[MAX_LEVEL];
        int id;

        id = idr_get_empty_slot(idp, starting_id, pa);
        if (id >= 0) {
                /*
                 * Successfully found an empty slot.  Install the user
                 * pointer and mark the slot full.
                 */
                rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
                                (struct idr_layer *)ptr);
                pa[0]->count++;
                idr_mark_full(pa, id);
        }

        return id;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
        int rv;

        rv = idr_get_new_above_int(idp, ptr, starting_id);
        /*
         * This is a cheap hack until the IDR code can be fixed to
         * return proper error values.
         */
        if (rv < 0) {
                dbgprintf("fail\n");
                return _idr_rc_to_errno(rv);
        }
        *id = rv;
        return 0;
}
EXPORT_SYMBOL(idr_get_new_above);
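
/*
 * A minimal allocation sketch of the idr_pre_get()/idr_get_new_above()
 * idiom described above.  'my_idr', 'my_ptr' and the GFP_KERNEL flag
 * are assumptions for illustration, not part of this file:
 *
 *      int id, ret;
 *
 *      do {
 *              if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *                      return -ENOMEM;
 *              ret = idr_get_new_above(&my_idr, my_ptr, 1, &id);
 *      } while (ret == -EAGAIN);
 *      if (ret)
 *              return ret;     (-ENOSPC: id space exhausted)
 */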

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
        int rv;

        rv = idr_get_new_above_int(idp, ptr, 0);
        /*
         * This is a cheap hack until the IDR code can be fixed to
         * return proper error values.
         */
        if (rv < 0)
                return _idr_rc_to_errno(rv);
        *id = rv;
        return 0;
}
EXPORT_SYMBOL(idr_get_new);

static void idr_remove_warning(int id)
{
        printk(KERN_WARNING
                "idr_remove called for id=%d which is not allocated.\n", id);
//   dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
        struct idr_layer *p = idp->top;
        struct idr_layer **pa[MAX_LEVEL];
        struct idr_layer ***paa = &pa[0];
        struct idr_layer *to_free;
        int n;

        *paa = NULL;
        *++paa = &idp->top;

        while ((shift > 0) && p) {
                n = (id >> shift) & IDR_MASK;
                __clear_bit(n, &p->bitmap);
                *++paa = &p->ary[n];
                p = p->ary[n];
                shift -= IDR_BITS;
        }
        n = id & IDR_MASK;
        if (likely(p != NULL && test_bit(n, &p->bitmap))) {
                __clear_bit(n, &p->bitmap);
                rcu_assign_pointer(p->ary[n], NULL);
                to_free = NULL;
                while (*paa && ! --((**paa)->count)) {
                        if (to_free)
                                free_layer(to_free);
                        to_free = **paa;
                        **paa-- = NULL;
                }
                if (!*paa)
                        idp->layers = 0;
                if (to_free)
                        free_layer(to_free);
        } else
                idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
        struct idr_layer *p;
        struct idr_layer *to_free;

        /* Mask off upper bits we don't use for the search. */
        id &= MAX_ID_MASK;

        sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
        if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
            idp->top->ary[0]) {
                /*
                 * Single child at leftmost slot: we can shrink the tree.
                 * This level is not needed anymore since when layers are
                 * inserted, they are inserted at the top of the existing
                 * tree.
                 */
                to_free = idp->top;
                p = idp->top->ary[0];
                rcu_assign_pointer(idp->top, p);
                --idp->layers;
                to_free->bitmap = to_free->count = 0;
                free_layer(to_free);
        }
        while (idp->id_free_cnt >= IDR_FREE_MAX) {
                p = get_from_free_list(idp);
                /*
                 * Note: we don't call the rcu callback here, since the only
                 * layers that fall into the freelist are those that have been
                 * preallocated.
                 */
                kfree(p);
        }
        return;
}
EXPORT_SYMBOL(idr_remove);


/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idp_layers, but this
 * function will remove all id mappings and leave all idp_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.  (See the sketch below.)
 */
void idr_remove_all(struct idr *idp)
{
        int n, id, max;
        struct idr_layer *p;
        struct idr_layer *pa[MAX_LEVEL];
        struct idr_layer **paa = &pa[0];

        n = idp->layers * IDR_BITS;
        p = idp->top;
        rcu_assign_pointer(idp->top, NULL);
        max = 1 << n;

        id = 0;
        while (id < max) {
                while (n > IDR_BITS && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = p->ary[(id >> n) & IDR_MASK];
                }

                id += 1 << n;
                while (n < fls(id)) {
                        if (p)
                                free_layer(p);
                        n += IDR_BITS;
                        p = *--paa;
                }
        }
        idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
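
/*
 * Clean-up sketch for the sequence described above.  'free_fn' and
 * 'my_idr' are hypothetical, and note that idr_for_each() is compiled
 * out (#if 0) in this port:
 *
 *      static int free_fn(int id, void *p, void *data)
 *      {
 *              kfree(p);
 *              return 0;
 *      }
 *      ...
 *      idr_for_each(&my_idr, free_fn, NULL);
 *      idr_remove_all(&my_idr);
 *      idr_destroy(&my_idr);
 */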

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
        while (idp->id_free_cnt) {
                struct idr_layer *p = get_from_free_list(idp);
                kfree(p);
        }
}
EXPORT_SYMBOL(idr_destroy);


/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), provided that the
 * leaf pointers' lifetimes are correctly managed.
 */
void *idr_find(struct idr *idp, int id)
{
        int n;
        struct idr_layer *p;

        p = rcu_dereference(idp->top);
        if (!p)
                return NULL;
        n = (p->layer+1) * IDR_BITS;

        /* Mask off upper bits we don't use for the search. */
        id &= MAX_ID_MASK;

        if (id >= (1 << n))
                return NULL;
        BUG_ON(n == 0);

        while (n > 0 && p) {
                n -= IDR_BITS;
                BUG_ON(n != p->layer*IDR_BITS);
                p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
        }
        return((void *)p);
}
EXPORT_SYMBOL(idr_find);
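
/*
 * Lookup sketch: translate an id handed back by a user into the stored
 * pointer.  'my_idr' and 'struct my_obj' are hypothetical:
 *
 *      struct my_obj *obj = idr_find(&my_idr, id);
 *
 *      if (!obj)
 *              return -EINVAL;         (stale or never-allocated id)
 */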

#if 0
/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time. If it returns anything other
 * than 0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
                 int (*fn)(int id, void *p, void *data), void *data)
{
        int n, id, max, error = 0;
        struct idr_layer *p;
        struct idr_layer *pa[MAX_LEVEL];
        struct idr_layer **paa = &pa[0];

        n = idp->layers * IDR_BITS;
        p = rcu_dereference(idp->top);
        max = 1 << n;

        id = 0;
        while (id < max) {
                while (n > 0 && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
                }

                if (p) {
                        error = fn(id, (void *)p, data);
                        if (error)
                                break;
                }

                id += 1 << n;
                while (n < fls(id)) {
                        n += IDR_BITS;
                        p = *--paa;
                }
        }

        return error;
}
EXPORT_SYMBOL(idr_for_each);

/**
 * idr_get_next - look up the next object above or equal to the given id
 * @idp: idr handle
 * @nextidp: pointer to the lookup key
 *
 * Returns a pointer to the registered object with the smallest id
 * greater than or equal to *@nextidp; *@nextidp is updated to that id.
 */
void *idr_get_next(struct idr *idp, int *nextidp)
{
        struct idr_layer *p, *pa[MAX_LEVEL];
        struct idr_layer **paa = &pa[0];
        int id = *nextidp;
        int n, max;

        /* find first ent */
        n = idp->layers * IDR_BITS;
        max = 1 << n;
        p = rcu_dereference(idp->top);
        if (!p)
                return NULL;

        while (id < max) {
                while (n > 0 && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
                }

                if (p) {
                        *nextidp = id;
                        return p;
                }

                id += 1 << n;
                while (n < fls(id)) {
                        n += IDR_BITS;
                        p = *--paa;
                }
        }
        return NULL;
}


/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A -ENOENT return indicates that @id was not found.
 * A -EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
        int n;
        struct idr_layer *p, *old_p;

        p = idp->top;
        if (!p)
                return ERR_PTR(-EINVAL);

        n = (p->layer+1) * IDR_BITS;

        id &= MAX_ID_MASK;

        if (id >= (1 << n))
                return ERR_PTR(-EINVAL);

        n -= IDR_BITS;
        while ((n > 0) && p) {
                p = p->ary[(id >> n) & IDR_MASK];
                n -= IDR_BITS;
        }

        n = id & IDR_MASK;
        if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
                return ERR_PTR(-ENOENT);

        old_p = p->ary[n];
        rcu_assign_pointer(p->ary[n], ptr);

        return old_p;
}
EXPORT_SYMBOL(idr_replace);
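
/*
 * Replace sketch: swap the pointer stored at an existing id and check
 * for errors.  'my_idr' and 'new_ptr' are hypothetical, and note that
 * idr_replace() is compiled out (#if 0) in this port:
 *
 *      void *old = idr_replace(&my_idr, new_ptr, id);
 *
 *      if (IS_ERR(old))
 *              return PTR_ERR(old);
 */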


#endif


void idr_init_cache(void)
{
    //idr_layer_cache = kmem_cache_create("idr_layer_cache",
    //           sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp:        idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
        memset(idp, 0, sizeof(struct idr));
 //  spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);
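
/*
 * Initialization sketch ('my_idr' is hypothetical); an idr must be
 * initialized before any other call:
 *
 *      static struct idr my_idr;
 *
 *      idr_init(&my_idr);
 */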

#if 0

/*
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
        unsigned long flags;

        if (!ida->free_bitmap) {
                spin_lock_irqsave(&ida->idr.lock, flags);
                if (!ida->free_bitmap) {
                        ida->free_bitmap = bitmap;
                        bitmap = NULL;
                }
                spin_unlock_irqrestore(&ida->idr.lock, flags);
        }

        kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida:        ida handle
 * @gfp_mask:   memory allocation flag
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
        /* allocate idr_layers */
        if (!idr_pre_get(&ida->idr, gfp_mask))
                return 0;

        /* allocate free_bitmap */
        if (!ida->free_bitmap) {
                struct ida_bitmap *bitmap;

                bitmap = kzalloc(sizeof(struct ida_bitmap), gfp_mask);
                if (!bitmap)
                        return 0;

                free_bitmap(ida, bitmap);
        }

        return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida:        ida handle
 * @starting_id: id to start search at
 * @p_id:       pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... 0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
        struct idr_layer *pa[MAX_LEVEL];
        struct ida_bitmap *bitmap;
        unsigned long flags;
        int idr_id = starting_id / IDA_BITMAP_BITS;
        int offset = starting_id % IDA_BITMAP_BITS;
        int t, id;

 restart:
        /* get vacant slot */
        t = idr_get_empty_slot(&ida->idr, idr_id, pa);
        if (t < 0)
                return _idr_rc_to_errno(t);

        if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
                return -ENOSPC;

        if (t != idr_id)
                offset = 0;
        idr_id = t;

        /* if bitmap isn't there, create a new one */
        bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
        if (!bitmap) {
                spin_lock_irqsave(&ida->idr.lock, flags);
                bitmap = ida->free_bitmap;
                ida->free_bitmap = NULL;
                spin_unlock_irqrestore(&ida->idr.lock, flags);

                if (!bitmap)
                        return -EAGAIN;

                memset(bitmap, 0, sizeof(struct ida_bitmap));
                rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
                                (void *)bitmap);
                pa[0]->count++;
        }

        /* lookup for empty slot */
        t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
        if (t == IDA_BITMAP_BITS) {
                /* no empty slot after offset, continue to the next chunk */
                idr_id++;
                offset = 0;
                goto restart;
        }

        id = idr_id * IDA_BITMAP_BITS + t;
        if (id >= MAX_ID_BIT)
                return -ENOSPC;

        __set_bit(t, bitmap->bitmap);
        if (++bitmap->nr_busy == IDA_BITMAP_BITS)
                idr_mark_full(pa, idr_id);

        *p_id = id;

        /* Each leaf node can handle nearly a thousand slots and the
         * whole idea of ida is to have a small memory footprint.
         * Throw away extra resources one by one after each successful
         * allocation.
         */
        if (ida->idr.id_free_cnt || ida->free_bitmap) {
                struct idr_layer *p = get_from_free_list(&ida->idr);
                if (p)
                        kmem_cache_free(idr_layer_cache, p);
        }

        return 0;
}
EXPORT_SYMBOL(ida_get_new_above);
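
/*
 * A minimal ida sketch mirroring the idr idiom.  'my_ida' is
 * hypothetical, GFP_KERNEL is assumed available, and this whole IDA
 * block is compiled out (#if 0) in this port:
 *
 *      int id, ret;
 *
 *      do {
 *              if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *                      return -ENOMEM;
 *              ret = ida_get_new(&my_ida, &id);
 *      } while (ret == -EAGAIN);
 *      if (ret)
 *              return ret;
 *      ...
 *      ida_remove(&my_ida, id);
 */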

/**
 * ida_get_new - allocate new ID
 * @ida:        ida handle
 * @p_id:       pointer to the allocated handle
 *
 * Allocate new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range 0 ... 0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
        return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);

/**
 * ida_remove - remove the given ID
 * @ida:        ida handle
 * @id:         ID to free
 */
void ida_remove(struct ida *ida, int id)
{
        struct idr_layer *p = ida->idr.top;
        int shift = (ida->idr.layers - 1) * IDR_BITS;
        int idr_id = id / IDA_BITMAP_BITS;
        int offset = id % IDA_BITMAP_BITS;
        int n;
        struct ida_bitmap *bitmap;

        /* clear full bits while looking up the leaf idr_layer */
        while ((shift > 0) && p) {
                n = (idr_id >> shift) & IDR_MASK;
                __clear_bit(n, &p->bitmap);
                p = p->ary[n];
                shift -= IDR_BITS;
        }

        if (p == NULL)
                goto err;

        n = idr_id & IDR_MASK;
        __clear_bit(n, &p->bitmap);

        bitmap = (void *)p->ary[n];
        if (!test_bit(offset, bitmap->bitmap))
                goto err;

        /* update bitmap and remove it if empty */
        __clear_bit(offset, bitmap->bitmap);
        if (--bitmap->nr_busy == 0) {
                __set_bit(n, &p->bitmap);       /* to please idr_remove() */
                idr_remove(&ida->idr, idr_id);
                free_bitmap(ida, bitmap);
        }

        return;

 err:
        printk(KERN_WARNING
               "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida:        ida handle
 */
void ida_destroy(struct ida *ida)
{
        idr_destroy(&ida->idr);
        kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_init - initialize ida handle
 * @ida:        ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
        memset(ida, 0, sizeof(struct ida));
        idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);


#endif
  972.