KolibriOS SVN: include/linux/idr.h (Rev 6102)

/*
 * include/linux/idr.h
 *
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *      Copyright (C) 2002 by Concurrent Computer Corporation
 *      Distributed under the GNU GPL license version 2.
 *
 * Small id to pointer translation service avoiding fixed sized
 * tables.
 */

#ifndef __IDR_H__
#define __IDR_H__

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/rcupdate.h>

/*
 * We want shallower trees and thus more bits covered at each layer.  8
 * bits gives us a large enough first layer for most use cases and a
 * maximum tree depth of 4.  Each idr_layer is slightly larger than 2k
 * on 64bit and 1k on 32bit.
 */
#define IDR_BITS 8
#define IDR_SIZE (1 << IDR_BITS)
#define IDR_MASK ((1 << IDR_BITS)-1)

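/*
 * For illustration: with IDR_BITS = 8, each idr_layer holds
 * IDR_SIZE = 256 slots, so a tree of depth 4 addresses 256^4 = 2^32
 * slots, enough for any non-negative int id.  The low bits of an id
 * select the slot within a layer and the remaining bits are consumed
 * by the layers above:
 *
 *      int slot = id & IDR_MASK;       // index within a layer
 *      int rest = id >> IDR_BITS;      // decoded by the upper layers
 */
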
struct idr_layer {
        int                     prefix; /* the ID prefix of this idr_layer */
        int                     layer;  /* distance from leaf */
        struct idr_layer __rcu  *ary[1<<IDR_BITS];
        int                     count;  /* When zero, we can release it */
        union {
                /* A zero bit means "space here" */
                DECLARE_BITMAP(bitmap, IDR_SIZE);
                struct rcu_head         rcu_head;
        };
};

struct idr {
        struct idr_layer __rcu  *hint;  /* the last layer allocated from */
        struct idr_layer __rcu  *top;
        int                     layers; /* only valid w/o concurrent changes */
        int                     cur;    /* current pos for cyclic allocation */
        spinlock_t              lock;
        int                     id_free_cnt;
        struct idr_layer        *id_free;
};

#define IDR_INIT(name)                                                  \
{                                                                       \
        .lock                   = __SPIN_LOCK_UNLOCKED(name.lock),      \
}
#define DEFINE_IDR(name)        struct idr name = IDR_INIT(name)

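/*
 * Usage sketch (hypothetical names): an idr can be defined statically
 * with DEFINE_IDR() or initialized at run time with idr_init(),
 * declared further below.
 *
 *      static DEFINE_IDR(my_idr);      // static definition
 *
 *      struct idr dyn_idr;
 *      idr_init(&dyn_idr);             // run-time initialization
 */
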
/**
 * DOC: idr sync
 * idr synchronization (stolen from radix-tree.h)
 *
 * idr_find() can be called locklessly, using RCU. The caller must
 * ensure calls to this function are made within rcu_read_lock() regions.
 * Other readers (lock-free or otherwise) and modifications may be running
 * concurrently.
 *
 * It is still required that the caller manage the synchronization and
 * lifetimes of the items. So if RCU lock-free lookups are used, typically
 * this would mean that the items have their own locks, or are amenable to
 * lock-free access; and that the items are freed by RCU (or only freed after
 * having been deleted from the idr tree *and* a synchronize_rcu() grace
 * period).
 */

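/*
 * Removal-side sketch of the rule above (hypothetical names, assuming
 * a caller-owned my_lock guards modifications): the item may only be
 * freed once no lockless reader can still see it.
 *
 *      spin_lock(&my_lock);
 *      idr_remove(&my_idr, id);        // unpublish the item
 *      spin_unlock(&my_lock);
 *      synchronize_rcu();              // wait out all RCU readers
 *      kfree(item);                    // now safe to free
 */
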
/*
 * This is what we export.
 */

void *idr_find_slowpath(struct idr *idp, int id);
void idr_preload(gfp_t gfp_mask);
int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask);
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask);
int idr_for_each(struct idr *idp,
                 int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *idp, int *nextid);
void *idr_replace(struct idr *idp, void *ptr, int id);
void idr_remove(struct idr *idp, int id);
void idr_destroy(struct idr *idp);
void idr_init(struct idr *idp);
bool idr_is_empty(struct idr *idp);

/**
 * idr_preload_end - end preload section started with idr_preload()
 *
 * Each idr_preload() should be matched with an invocation of this
 * function.  See idr_preload() for details.
 */
static inline void idr_preload_end(void)
{
        /* preempt_enable() is stubbed out in this port */
//      preempt_enable();
}

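/*
 * Allocation sketch using the preload pattern (hypothetical lock and
 * names): idr_preload() pre-fills the backing store so that the
 * idr_alloc() call itself can run in a non-sleeping (e.g. spinlocked)
 * section with GFP_NOWAIT.
 *
 *      idr_preload(GFP_KERNEL);
 *      spin_lock(&my_lock);
 *      id = idr_alloc(&my_idr, ptr, 0, 0, GFP_NOWAIT);
 *      spin_unlock(&my_lock);
 *      idr_preload_end();
 *      if (id < 0)
 *              return id;              // -ENOMEM or -ENOSPC
 */
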
/**
 * idr_find - return pointer for given id
 * @idr: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_alloc().
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
static inline void *idr_find(struct idr *idr, int id)
{
        struct idr_layer *hint = rcu_dereference_raw(idr->hint);

        if (hint && (id & ~IDR_MASK) == hint->prefix)
                return rcu_dereference_raw(hint->ary[id & IDR_MASK]);

        return idr_find_slowpath(idr, id);
}

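/*
 * Lookup sketch (hypothetical names): a lockless reader brackets
 * idr_find() with rcu_read_lock()/rcu_read_unlock() and must not use
 * the result outside the read-side critical section unless it holds
 * its own reference on the item.
 *
 *      rcu_read_lock();
 *      item = idr_find(&my_idr, id);
 *      if (item)
 *              use_item(item);         // hypothetical helper
 *      rcu_read_unlock();
 */
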
/**
 * idr_for_each_entry - iterate over an idr's elements of a given type
 * @idp:     idr handle
 * @entry:   the type * to use as cursor
 * @id:      id entry's key
 *
 * @entry and @id do not need to be initialized before the loop, and
 * after normal termination @entry is left with the value NULL.  This
 * is convenient for a "not found" value.
 */
#define idr_for_each_entry(idp, entry, id)                      \
        for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id)

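/*
 * Iteration sketch (hypothetical names): @entry and @id are declared
 * by the caller and filled in by the loop.
 *
 *      struct my_obj *obj;
 *      int id;
 *
 *      idr_for_each_entry(&my_idr, obj, id)
 *              pr_info("id %d -> %p\n", id, obj);
 */
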
/*
 * IDA - IDR based id allocator, use when translation from id to
 * pointer isn't necessary.
 *
 * IDA_BITMAP_LONGS is calculated to be one less to accommodate
 * ida_bitmap->nr_busy so that the whole struct fits in 128 bytes:
 * on 64bit that is 15 bitmap longs (120 bytes) plus the 8-byte
 * nr_busy field.
 */
#define IDA_CHUNK_SIZE          128     /* 128 bytes per chunk */
#define IDA_BITMAP_LONGS        (IDA_CHUNK_SIZE / sizeof(long) - 1)
#define IDA_BITMAP_BITS         (IDA_BITMAP_LONGS * sizeof(long) * 8)

struct ida_bitmap {
        long                    nr_busy;
        unsigned long           bitmap[IDA_BITMAP_LONGS];
};

struct ida {
        struct idr              idr;
        struct ida_bitmap       *free_bitmap;
};

#define IDA_INIT(name)          { .idr = IDR_INIT((name).idr), .free_bitmap = NULL, }
#define DEFINE_IDA(name)        struct ida name = IDA_INIT(name)

int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
void ida_remove(struct ida *ida, int id);
void ida_destroy(struct ida *ida);
void ida_init(struct ida *ida);

int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
                   gfp_t gfp_mask);
void ida_simple_remove(struct ida *ida, unsigned int id);

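/*
 * IDA usage sketch (hypothetical names): ida_simple_get() hides the
 * pre_get/retry dance and returns the new id or a negative errno; an
 * @end of 0 means no upper limit.
 *
 *      static DEFINE_IDA(my_ida);
 *
 *      id = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);
 *      if (id < 0)
 *              return id;
 *      ...
 *      ida_simple_remove(&my_ida, id);
 */
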
/**
 * ida_get_new - allocate new ID
 * @ida:        ida handle
 * @p_id:       pointer to the allocated handle
 *
 * Simple wrapper around ida_get_new_above() w/ @starting_id of zero.
 */
static inline int ida_get_new(struct ida *ida, int *p_id)
{
        return ida_get_new_above(ida, 0, p_id);
}

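/*
 * Lower-level IDA sketch (hypothetical names): ida_get_new() can fail
 * with -EAGAIN when the preallocated buffer is exhausted, in which
 * case the caller refills it with ida_pre_get() and retries.
 *
 *      int id, err;
 * again:
 *      if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *              return -ENOMEM;
 *      err = ida_get_new(&my_ida, &id);
 *      if (err == -EAGAIN)
 *              goto again;
 */
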
void __init idr_init_cache(void);

#endif /* __IDR_H__ */