/*
 * include/linux/idr.h
 *
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *      Copyright (C) 2002 by Concurrent Computer Corporation
 *      Distributed under the GNU GPL license version 2.
 *
 * Small id to pointer translation service avoiding fixed sized
 * tables.
 */

#ifndef __IDR_H__
#define __IDR_H__

#include <syscall.h>
#include <linux/types.h>
#include <errno-base.h>
#include <linux/bitops.h>
//#include <linux/init.h>
//#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/bitmap.h>
#include <linux/bug.h>

/*
 * We want shallower trees and thus more bits covered at each layer.  8
 * bits gives us a large enough first layer for most use cases and a
 * maximum tree depth of 4.  Each idr_layer is slightly larger than 2k
 * on 64bit and 1k on 32bit.
 */
#define IDR_BITS 8
#define IDR_SIZE (1 << IDR_BITS)
#define IDR_MASK ((1 << IDR_BITS)-1)
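
/*
 * A quick check of the arithmetic above (not part of the original
 * header): each layer resolves IDR_BITS = 8 bits of the id, so four
 * layers cover 4 * 8 = 32 bits, enough for the 31-bit positive int id
 * space.  IDR_SIZE is 1 << 8 = 256 slots per layer, IDR_MASK is 0xff.
 */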

struct idr_layer {
        int                     prefix; /* the ID prefix of this idr_layer */
        int                     layer;  /* distance from leaf */
        struct idr_layer __rcu  *ary[1<<IDR_BITS];
        int                     count;  /* When zero, we can release it */
        union {
                /* A zero bit means "space here" */
                DECLARE_BITMAP(bitmap, IDR_SIZE);
                struct rcu_head         rcu_head;
        };
};

struct idr {
        struct idr_layer __rcu  *hint;  /* the last layer allocated from */
        struct idr_layer __rcu  *top;
        int                     layers; /* only valid w/o concurrent changes */
        int                     cur;    /* current pos for cyclic allocation */
        spinlock_t              lock;
        int                     id_free_cnt;
        struct idr_layer        *id_free;
};

#define IDR_INIT(name)                                          \
{                                                               \
        .lock           = __SPIN_LOCK_UNLOCKED(name.lock),     \
}
#define DEFINE_IDR(name)        struct idr name = IDR_INIT(name)

/**
 * DOC: idr sync
 * idr synchronization (stolen from radix-tree.h)
 *
 * idr_find() can be called locklessly, using RCU. The caller must
 * ensure calls to this function are made within rcu_read_lock() regions.
 * Other readers (lock-free or otherwise) and modifications may be running
 * concurrently.
 *
 * It is still required that the caller manage the synchronization and
 * lifetimes of the items. So if RCU lock-free lookups are used, typically
 * this would mean that the items have their own locks, or are amenable to
 * lock-free access; and that the items are freed by RCU (or only freed after
 * having been deleted from the idr tree *and* a synchronize_rcu() grace
 * period).
 */
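
/*
 * Example (a minimal sketch, not part of the original header): a lockless
 * lookup under RCU.  `struct foo` and the refcount helper foo_tryget()
 * are hypothetical; the objects are assumed to be freed only after an RCU
 * grace period (e.g. via kfree_rcu()).
 *
 *      struct foo *foo_lookup(struct idr *idr, int id)
 *      {
 *              struct foo *foo;
 *
 *              rcu_read_lock();
 *              foo = idr_find(idr, id);        // may return NULL
 *              if (foo && !foo_tryget(foo))    // hypothetical refcount grab
 *                      foo = NULL;
 *              rcu_read_unlock();
 *              return foo;
 *      }
 */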

/*
 * This is what we export.
 */

void *idr_find_slowpath(struct idr *idp, int id);
void idr_preload(gfp_t gfp_mask);
int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask);
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask);
int idr_for_each(struct idr *idp,
                 int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *idp, int *nextid);
void *idr_replace(struct idr *idp, void *ptr, int id);
void idr_remove(struct idr *idp, int id);
void idr_destroy(struct idr *idp);
void idr_init(struct idr *idp);
bool idr_is_empty(struct idr *idp);

/**
 * idr_preload_end - end preload section started with idr_preload()
 *
 * Each idr_preload() should be matched with an invocation of this
 * function.  See idr_preload() for details.
 */
static inline void idr_preload_end(void)
{
//      preempt_enable();       /* stubbed out in this port */
}
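
/*
 * Example (a sketch, not from the original source): the usual pairing of
 * idr_preload() with idr_alloc() under a caller-held spinlock.  `my_lock`,
 * `my_idr` and `ptr` are placeholders; end == 0 means "no upper limit".
 *
 *      int id;
 *
 *      idr_preload(GFP_KERNEL);
 *      spin_lock(&my_lock);
 *      id = idr_alloc(&my_idr, ptr, 0, 0, GFP_NOWAIT);
 *      spin_unlock(&my_lock);
 *      idr_preload_end();
 *      if (id < 0)
 *              return id;      // -ENOMEM or -ENOSPC
 */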

/**
 * idr_find - return pointer for given id
 * @idr: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
static inline void *idr_find(struct idr *idr, int id)
{
        struct idr_layer *hint = rcu_dereference_raw(idr->hint);

        if (hint && (id & ~IDR_MASK) == hint->prefix)
                return rcu_dereference_raw(hint->ary[id & IDR_MASK]);

        return idr_find_slowpath(idr, id);
}

/**
 * idr_for_each_entry - iterate over an idr's elements of a given type
 * @idp:     idr handle
 * @entry:   the type * to use as cursor
 * @id:      id entry's key
 *
 * @entry and @id do not need to be initialized before the loop, and
 * after normal termination @entry is left with the value NULL.  This
 * is convenient for a "not found" value.
 */
#define idr_for_each_entry(idp, entry, id)                      \
        for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id)
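
/*
 * Example (illustrative only, not from the original header): walking every
 * registered pointer.  `my_idr`, `struct foo` and foo_dump() are
 * hypothetical.
 *
 *      struct foo *foo;
 *      int id;
 *
 *      idr_for_each_entry(&my_idr, foo, id)
 *              foo_dump(id, foo);
 *      // on normal termination, foo == NULL here
 */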

/*
 * IDA - IDR based id allocator, use when translation from id to
 * pointer isn't necessary.
 *
 * IDA_BITMAP_LONGS is calculated to be one less to accommodate
 * ida_bitmap->nr_busy so that the whole struct fits in 128 bytes.
 */
#define IDA_CHUNK_SIZE          128     /* 128 bytes per chunk */
#define IDA_BITMAP_LONGS        (IDA_CHUNK_SIZE / sizeof(long) - 1)
#define IDA_BITMAP_BITS         (IDA_BITMAP_LONGS * sizeof(long) * 8)
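
/*
 * Worked out (not in the original header): on a 64-bit build,
 * sizeof(long) == 8, so IDA_BITMAP_LONGS = 128/8 - 1 = 15 and
 * IDA_BITMAP_BITS = 15 * 64 = 960.  On 32-bit: 128/4 - 1 = 31 longs,
 * i.e. 992 bits.  The long given up goes to ida_bitmap->nr_busy,
 * keeping struct ida_bitmap at exactly IDA_CHUNK_SIZE (128) bytes.
 */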

struct ida_bitmap {
        long                    nr_busy;
        unsigned long           bitmap[IDA_BITMAP_LONGS];
};

struct ida {
        struct idr              idr;
        struct ida_bitmap       *free_bitmap;
};

#define IDA_INIT(name)          { .idr = IDR_INIT((name).idr), .free_bitmap = NULL, }
#define DEFINE_IDA(name)        struct ida name = IDA_INIT(name)

int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
void ida_remove(struct ida *ida, int id);
void ida_destroy(struct ida *ida);
void ida_init(struct ida *ida);

int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
                   gfp_t gfp_mask);
void ida_simple_remove(struct ida *ida, unsigned int id);
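
/*
 * Example (a sketch, not part of the original header): the simple API
 * allocates and frees an id in one call each.  `my_ida` is a placeholder;
 * end == 0 is taken to mean "no upper limit".
 *
 *      int id = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);
 *      if (id < 0)
 *              return id;
 *      ...
 *      ida_simple_remove(&my_ida, id);
 */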

/**
 * ida_get_new - allocate new ID
 * @ida:        idr handle
 * @p_id:       pointer to the allocated handle
 *
 * Simple wrapper around ida_get_new_above() w/ @starting_id of zero.
 */
static inline int ida_get_new(struct ida *ida, int *p_id)
{
        return ida_get_new_above(ida, 0, p_id);
}
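
/*
 * Example (illustrative sketch): the legacy two-step allocation loop.
 * ida_pre_get() has to be retried because another thread may consume the
 * preallocated memory between the two calls.  `my_ida` is a placeholder.
 *
 *      int id, err;
 *
 *      do {
 *              if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *                      return -ENOMEM;
 *              err = ida_get_new(&my_ida, &id);
 *      } while (err == -EAGAIN);
 *      if (err)
 *              return err;
 */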

void __init idr_init_cache(void);

#endif /* __IDR_H__ */