/*
 * include/linux/idr.h
 *
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Small id to pointer translation service avoiding fixed sized
 * tables.
 */

#ifndef __IDR_H__
#define __IDR_H__

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/init.h>
//#include <linux/rcupdate.h>
#include <linux/spinlock.h>

/*
 * We want shallower trees and thus more bits covered at each layer.  8
 * bits gives us large enough first layer for most use cases and maximum
 * tree depth of 4.  Each idr_layer is slightly larger than 2k on 64bit and
 * 1k on 32bit.
 */
#define IDR_BITS 8
#define IDR_SIZE (1 << IDR_BITS)
#define IDR_MASK ((1 << IDR_BITS)-1)
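
/*
 * A quick sanity check on the numbers above: each layer resolves
 * IDR_BITS = 8 bits of the id, so 4 layers cover 4 * 8 = 32 bits,
 * enough for the 31 usable bits of a positive int id.  The first
 * layer alone already distinguishes IDR_SIZE = 256 ids.
 */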

struct idr_layer {
	int			prefix;	/* the ID prefix of this idr_layer */
	int			layer;	/* distance from leaf */
	struct idr_layer __rcu	*ary[1<<IDR_BITS];
	int			count;	/* When zero, we can release it */
	union {
		/* A zero bit means "space here" */
		DECLARE_BITMAP(bitmap, IDR_SIZE);
		struct rcu_head		rcu_head;
	};
};

struct idr {
	struct idr_layer __rcu	*hint;	/* the last layer allocated from */
	struct idr_layer __rcu	*top;
	int			layers;	/* only valid w/o concurrent changes */
	int			cur;	/* current pos for cyclic allocation */
	spinlock_t		lock;
	int			id_free_cnt;
	struct idr_layer	*id_free;
};

#define IDR_INIT(name)						\
{								\
	.lock = __SPIN_LOCK_UNLOCKED(name.lock),		\
}
#define DEFINE_IDR(name)	struct idr name = IDR_INIT(name)

/**
 * DOC: idr sync
 * idr synchronization (stolen from radix-tree.h)
 *
 * idr_find() is able to be called locklessly, using RCU. The caller must
 * ensure calls to this function are made within rcu_read_lock() regions.
 * Other readers (lock-free or otherwise) and modifications may be running
 * concurrently.
 *
 * It is still required that the caller manage the synchronization and
 * lifetimes of the items. So if RCU lock-free lookups are used, typically
 * this would mean that the items have their own locks, or are amenable to
 * lock-free access; and that the items are freed by RCU (or only freed after
 * having been deleted from the idr tree *and* a synchronize_rcu() grace
 * period).
 */
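
/*
 * A minimal sketch of the rules above; the names my_idr, my_lock and
 * obj are made up for illustration only:
 *
 *	// lookup side: lockless, but within an RCU read-side section
 *	rcu_read_lock();
 *	obj = idr_find(&my_idr, id);
 *	if (obj)
 *		use(obj);	// obj stays valid while under rcu_read_lock()
 *	rcu_read_unlock();
 *
 *	// removal side: unlink under the writer lock, free after a grace period
 *	spin_lock(&my_lock);
 *	obj = idr_find(&my_idr, id);
 *	idr_remove(&my_idr, id);
 *	spin_unlock(&my_lock);
 *	synchronize_rcu();
 *	kfree(obj);
 */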

/*
 * This is what we export.
 */

void *idr_find_slowpath(struct idr *idp, int id);
void idr_preload(gfp_t gfp_mask);
int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask);
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask);
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *idp, int *nextid);
void *idr_replace(struct idr *idp, void *ptr, int id);
void idr_remove(struct idr *idp, int id);
void idr_destroy(struct idr *idp);
void idr_init(struct idr *idp);
bool idr_is_empty(struct idr *idp);

/**
 * idr_preload_end - end preload section started with idr_preload()
 *
 * Each idr_preload() should be matched with an invocation of this
 * function. See idr_preload() for details.
 */
static inline void idr_preload_end(void)
{
//	preempt_enable();
}
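
/*
 * The intended pairing of idr_preload()/idr_preload_end() with
 * idr_alloc(), sketched with a made-up my_idr/my_lock for illustration
 * (in this port the preemption tweak above is stubbed out, but the
 * calling convention is the same):
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(&my_lock);
 *
 *	// GFP_NOWAIT: rely on the preloaded buffer; end == 0 means no upper limit
 *	id = idr_alloc(&my_idr, ptr, 1, 0, GFP_NOWAIT);
 *
 *	spin_unlock(&my_lock);
 *	idr_preload_end();
 *	if (id < 0)
 *		return id;	// -ENOMEM or -ENOSPC
 */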

/**
 * idr_find - return pointer for given id
 * @idr: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
static inline void *idr_find(struct idr *idr, int id)
{
	struct idr_layer *hint = rcu_dereference_raw(idr->hint);

	if (hint && (id & ~IDR_MASK) == hint->prefix)
		return rcu_dereference_raw(hint->ary[id & IDR_MASK]);

	return idr_find_slowpath(idr, id);
}

/**
 * idr_for_each_entry - iterate over an idr's elements of a given type
 * @idp:	idr handle
 * @entry:	the type * to use as cursor
 * @id:		id entry's key
 *
 * @entry and @id do not need to be initialized before the loop, and
 * after normal termination @entry is left with the value NULL.  This
 * is convenient for a "not found" value.
 */
#define idr_for_each_entry(idp, entry, id)			\
	for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id)
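
/*
 * Typical use of idr_for_each_entry(), with a made-up my_idr and
 * struct my_obj, for illustration only:
 *
 *	struct my_obj *obj;
 *	int id;
 *
 *	idr_for_each_entry(&my_idr, obj, id) {
 *		// obj is non-NULL here; id is the key it was allocated under
 *		printk("id %d -> %p\n", id, obj);
 *	}
 *	// after normal termination obj is NULL
 */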

/*
 * IDA - IDR based id allocator, use when translation from id to
 * pointer isn't necessary.
 *
 * IDA_BITMAP_LONGS is calculated to be one less to accommodate
 * ida_bitmap->nr_busy so that the whole struct fits in 128 bytes.
 */
#define IDA_CHUNK_SIZE		128	/* 128 bytes per chunk */
#define IDA_BITMAP_LONGS	(IDA_CHUNK_SIZE / sizeof(long) - 1)
#define IDA_BITMAP_BITS		(IDA_BITMAP_LONGS * sizeof(long) * 8)

struct ida_bitmap {
	long			nr_busy;
	unsigned long		bitmap[IDA_BITMAP_LONGS];
};

struct ida {
	struct idr		idr;
	struct ida_bitmap	*free_bitmap;
};

#define IDA_INIT(name)		{ .idr = IDR_INIT((name).idr), .free_bitmap = NULL, }
#define DEFINE_IDA(name)	struct ida name = IDA_INIT(name)

int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
void ida_remove(struct ida *ida, int id);
void ida_destroy(struct ida *ida);
void ida_init(struct ida *ida);

int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
		   gfp_t gfp_mask);
void ida_simple_remove(struct ida *ida, unsigned int id);

/**
 * ida_get_new - allocate new ID
 * @ida:	idr handle
 * @p_id:	pointer to the allocated handle
 *
 * Simple wrapper around ida_get_new_above() w/ @starting_id of zero.
 */
static inline int ida_get_new(struct ida *ida, int *p_id)
{
	return ida_get_new_above(ida, 0, p_id);
}
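
/*
 * The older non-"simple" IDA interface needs the caller to drive the
 * preallocate-then-retry loop.  A sketch of that convention with a
 * made-up my_ida; ida_get_new() returns -EAGAIN when it runs out of
 * preallocated memory:
 *
 *	int id, err;
 * again:
 *	if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *		return -ENOMEM;
 *	err = ida_get_new(&my_ida, &id);
 *	if (err == -EAGAIN)
 *		goto again;
 *	if (err)
 *		return err;
 */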

void __init idr_init_cache(void);

#endif /* __IDR_H__ */