/*
 * include/linux/idr.h
 *
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *      Copyright (C) 2002 by Concurrent Computer Corporation
 *      Distributed under the GNU GPL license version 2.
 *
 * Small id to pointer translation service avoiding fixed sized
 * tables.
 */

#ifndef __IDR_H__
#define __IDR_H__

#include <syscall.h>
#include <linux/types.h>
#include <linux/bitops.h>
//#include <linux/init.h>
#include <linux/rcupdate.h>

/*
 * We want shallower trees and thus more bits covered at each layer.  8
 * bits gives us a large enough first layer for most use cases and a
 * maximum tree depth of 4.  Each idr_layer is slightly larger than 2k
 * on 64bit and 1k on 32bit.
 */
#define IDR_BITS 8
#define IDR_SIZE (1 << IDR_BITS)
#define IDR_MASK ((1 << IDR_BITS)-1)

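/*
 * Illustrative sketch (not part of the original header): with
 * IDR_BITS == 8 an id is consumed eight bits per level, so the index
 * into a layer's ->ary[] at distance @layer from the leaves is:
 *
 *      idx = (id >> (layer * IDR_BITS)) & IDR_MASK;
 *
 * A positive 31-bit id thus needs at most four layers
 * (4 * 8 = 32 >= 31 bits), which is the depth limit mentioned above.
 */
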
struct idr_layer {
        int                     prefix; /* the ID prefix of this idr_layer */
        int                     layer;  /* distance from leaf */
        struct idr_layer __rcu  *ary[1<<IDR_BITS];
        int                     count;  /* When zero, we can release it */
        union {
                /* A zero bit means "space here" */
                DECLARE_BITMAP(bitmap, IDR_SIZE);
                struct rcu_head         rcu_head;
        };
};

struct idr {
        struct idr_layer __rcu  *hint;  /* the last layer allocated from */
        struct idr_layer __rcu  *top;
        int                     layers; /* only valid w/o concurrent changes */
        int                     cur;    /* current pos for cyclic allocation */
        spinlock_t              lock;
        int                     id_free_cnt;
        struct idr_layer        *id_free;
};

#define IDR_INIT(name)                                                  \
{                                                                       \
        .lock                   = __SPIN_LOCK_UNLOCKED(name.lock),      \
}
#define DEFINE_IDR(name)        struct idr name = IDR_INIT(name)

/**
 * DOC: idr sync
 * idr synchronization (stolen from radix-tree.h)
 *
 * idr_find() can be called locklessly, using RCU. The caller must
 * ensure calls to this function are made within rcu_read_lock() regions.
 * Other readers (lock-free or otherwise) and modifications may be running
 * concurrently.
 *
 * It is still required that the caller manage the synchronization and
 * lifetimes of the items. So if RCU lock-free lookups are used, typically
 * this would mean that the items have their own locks, or are amenable to
 * lock-free access; and that the items are freed by RCU (or only freed after
 * having been deleted from the idr tree *and* a synchronize_rcu() grace
 * period).
 */
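
/*
 * Example of a lockless lookup (a sketch; @obj_idr and struct obj are
 * caller-defined, with struct obj freed via RCU):
 *
 *      struct obj *o;
 *
 *      rcu_read_lock();
 *      o = idr_find(&obj_idr, id);
 *      if (o)
 *              use(o);         // valid only inside the read-side section
 *      rcu_read_unlock();
 */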

/*
 * This is what we export.
 */

void *idr_find_slowpath(struct idr *idp, int id);
void idr_preload(gfp_t gfp_mask);
int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask);
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask);
int idr_for_each(struct idr *idp,
                 int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *idp, int *nextid);
void *idr_replace(struct idr *idp, void *ptr, int id);
void idr_remove(struct idr *idp, int id);
void idr_destroy(struct idr *idp);
void idr_init(struct idr *idp);
bool idr_is_empty(struct idr *idp);

/**
 * idr_preload_end - end preload section started with idr_preload()
 *
 * Each idr_preload() should be matched with an invocation of this
 * function.  See idr_preload() for details.
 */
static inline void idr_preload_end(void)
{
        /* preemption control is not used in this port */
//      preempt_enable();
}

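/*
 * Typical allocation pattern (a sketch; @obj_idr, @obj_lock and @obj
 * are caller-defined):
 *
 *      idr_preload(GFP_KERNEL);
 *      spin_lock(&obj_lock);
 *
 *      id = idr_alloc(&obj_idr, obj, 0, 0, GFP_NOWAIT);
 *      // id >= 0 on success, -ENOMEM or -ENOSPC on failure
 *
 *      spin_unlock(&obj_lock);
 *      idr_preload_end();
 */
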
/**
 * idr_find - return pointer for given id
 * @idr: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or that %NULL was associated
 * with it.
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
static inline void *idr_find(struct idr *idr, int id)
{
        struct idr_layer *hint = rcu_dereference_raw(idr->hint);

        if (hint && (id & ~IDR_MASK) == hint->prefix)
                return rcu_dereference_raw(hint->ary[id & IDR_MASK]);

        return idr_find_slowpath(idr, id);
}

/**
 * idr_for_each_entry - iterate over an idr's elements of a given type
 * @idp:     idr handle
 * @entry:   the type * to use as cursor
 * @id:      id entry's key
 *
 * @entry and @id do not need to be initialized before the loop, and
 * after normal termination @entry is left with the value NULL.  This
 * is convenient for a "not found" value.
 */
#define idr_for_each_entry(idp, entry, id)                      \
        for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id)

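/*
 * Example (a sketch; @obj_idr, struct obj and process() are
 * caller-defined):
 *
 *      struct obj *o;
 *      int id;
 *
 *      idr_for_each_entry(&obj_idr, o, id)
 *              process(o);     // visits ids in ascending order
 */
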
/*
 * IDA - IDR based id allocator, use when translation from id to
 * pointer isn't necessary.
 *
 * IDA_BITMAP_LONGS is calculated to be one less to accommodate
 * ida_bitmap->nr_busy so that the whole struct fits in 128 bytes.
 */
#define IDA_CHUNK_SIZE          128     /* 128 bytes per chunk */
#define IDA_BITMAP_LONGS        (IDA_CHUNK_SIZE / sizeof(long) - 1)
#define IDA_BITMAP_BITS         (IDA_BITMAP_LONGS * sizeof(long) * 8)

struct ida_bitmap {
        long                    nr_busy;
        unsigned long           bitmap[IDA_BITMAP_LONGS];
};

struct ida {
        struct idr              idr;
        struct ida_bitmap       *free_bitmap;
};

#define IDA_INIT(name)          { .idr = IDR_INIT((name).idr), .free_bitmap = NULL, }
#define DEFINE_IDA(name)        struct ida name = IDA_INIT(name)

int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
void ida_remove(struct ida *ida, int id);
void ida_destroy(struct ida *ida);
void ida_init(struct ida *ida);

int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
                   gfp_t gfp_mask);
void ida_simple_remove(struct ida *ida, unsigned int id);

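/*
 * Example (a sketch; @inst_ida is a caller-defined ida; an @end of 0
 * means no upper bound):
 *
 *      int id = ida_simple_get(&inst_ida, 0, 0, GFP_KERNEL);
 *      if (id < 0)
 *              return id;      // -ENOMEM or -ENOSPC
 *      ...
 *      ida_simple_remove(&inst_ida, id);
 */
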
/**
 * ida_get_new - allocate new ID
 * @ida:        idr handle
 * @p_id:       pointer to the allocated handle
 *
 * Simple wrapper around ida_get_new_above() w/ @starting_id of zero.
 */
static inline int ida_get_new(struct ida *ida, int *p_id)
{
        return ida_get_new_above(ida, 0, p_id);
}
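
/*
 * Classic two-step allocation (a sketch; @inst_ida and @id are
 * caller-defined), retrying when the pre-allocated memory was consumed
 * by a racing allocator:
 *
 * again:
 *      if (!ida_pre_get(&inst_ida, GFP_KERNEL))
 *              return -ENOMEM;
 *      ret = ida_get_new(&inst_ida, &id);
 *      if (ret == -EAGAIN)
 *              goto again;
 */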

void __init idr_init_cache(void);

#endif /* __IDR_H__ */