Details | Last modification | View Log | RSS feed
Rev | Author | Line No. | Line |
---|---|---|---|
5362 | serge | 1 | /* |
2 | * Copyright (c) 2007 Intel Corporation. All Rights Reserved. |
||
3 | * |
||
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
||
5 | * copy of this software and associated documentation files (the |
||
6 | * "Software"), to deal in the Software without restriction, including |
||
7 | * without limitation the rights to use, copy, modify, merge, publish, |
||
8 | * distribute, sub license, and/or sell copies of the Software, and to |
||
9 | * permit persons to whom the Software is furnished to do so, subject to |
||
10 | * the following conditions: |
||
11 | * |
||
12 | * The above copyright notice and this permission notice (including the |
||
13 | * next paragraph) shall be included in all copies or substantial portions |
||
14 | * of the Software. |
||
15 | * |
||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS |
||
17 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
||
18 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. |
||
19 | * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR |
||
20 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, |
||
21 | * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE |
||
22 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
||
23 | */ |
||
24 | |||
25 | #include |
||
26 | #include |
||
27 | #include "object_heap.h" |
||
28 | |||
29 | #define ASSERT assert |
||
30 | |||
31 | #define LAST_FREE -1 |
||
32 | #define ALLOCATED -2 |
||
33 | |||
34 | /* |
||
35 | * Expands the heap |
||
36 | * Return 0 on success, -1 on error |
||
37 | */ |
||
38 | static int |
||
39 | object_heap_expand(object_heap_p heap) |
||
40 | { |
||
41 | int i; |
||
42 | void *new_heap_index; |
||
43 | int next_free; |
||
44 | int new_heap_size = heap->heap_size + heap->heap_increment; |
||
45 | int bucket_index = new_heap_size / heap->heap_increment - 1; |
||
46 | |||
47 | if (bucket_index >= heap->num_buckets) { |
||
48 | int new_num_buckets = heap->num_buckets + 8; |
||
49 | void **new_bucket; |
||
50 | |||
51 | new_bucket = realloc(heap->bucket, new_num_buckets * sizeof(void *)); |
||
52 | if (NULL == new_bucket) { |
||
53 | return -1; |
||
54 | } |
||
55 | |||
56 | heap->num_buckets = new_num_buckets; |
||
57 | heap->bucket = new_bucket; |
||
58 | } |
||
59 | |||
60 | new_heap_index = (void *) malloc(heap->heap_increment * heap->object_size); |
||
61 | if (NULL == new_heap_index) { |
||
62 | return -1; /* Out of memory */ |
||
63 | } |
||
64 | |||
65 | heap->bucket[bucket_index] = new_heap_index; |
||
66 | next_free = heap->next_free; |
||
67 | for (i = new_heap_size; i-- > heap->heap_size;) { |
||
68 | object_base_p obj = (object_base_p)(new_heap_index + (i - heap->heap_size) * heap->object_size); |
||
69 | obj->id = i + heap->id_offset; |
||
70 | obj->next_free = next_free; |
||
71 | next_free = i; |
||
72 | } |
||
73 | heap->next_free = next_free; |
||
74 | heap->heap_size = new_heap_size; |
||
75 | return 0; /* Success */ |
||
76 | } |
||
77 | |||
78 | /* |
||
79 | * Return 0 on success, -1 on error |
||
80 | */ |
||
81 | int |
||
82 | object_heap_init(object_heap_p heap, int object_size, int id_offset) |
||
83 | { |
||
84 | pthread_mutex_init(&heap->mutex, NULL); |
||
85 | heap->object_size = object_size; |
||
86 | heap->id_offset = id_offset & OBJECT_HEAP_OFFSET_MASK; |
||
87 | heap->heap_size = 0; |
||
88 | heap->heap_increment = 16; |
||
89 | heap->next_free = LAST_FREE; |
||
90 | heap->num_buckets = 0; |
||
91 | heap->bucket = NULL; |
||
92 | return object_heap_expand(heap); |
||
93 | } |
||
94 | |||
95 | /* |
||
96 | * Allocates an object |
||
97 | * Returns the object ID on success, returns -1 on error |
||
98 | */ |
||
99 | static int |
||
100 | object_heap_allocate_unlocked(object_heap_p heap) |
||
101 | { |
||
102 | object_base_p obj; |
||
103 | int bucket_index, obj_index; |
||
104 | |||
105 | if (LAST_FREE == heap->next_free) { |
||
106 | if (-1 == object_heap_expand(heap)) { |
||
107 | return -1; /* Out of memory */ |
||
108 | } |
||
109 | } |
||
110 | ASSERT(heap->next_free >= 0); |
||
111 | |||
112 | bucket_index = heap->next_free / heap->heap_increment; |
||
113 | obj_index = heap->next_free % heap->heap_increment; |
||
114 | |||
115 | obj = (object_base_p)(heap->bucket[bucket_index] + obj_index * heap->object_size); |
||
116 | heap->next_free = obj->next_free; |
||
117 | obj->next_free = ALLOCATED; |
||
118 | return obj->id; |
||
119 | } |
||
120 | |||
121 | int |
||
122 | object_heap_allocate(object_heap_p heap) |
||
123 | { |
||
124 | int ret; |
||
125 | |||
126 | pthread_mutex_lock(&heap->mutex); |
||
127 | ret = object_heap_allocate_unlocked(heap); |
||
128 | pthread_mutex_unlock(&heap->mutex); |
||
129 | return ret; |
||
130 | } |
||
131 | |||
132 | /* |
||
133 | * Lookup an object by object ID |
||
134 | * Returns a pointer to the object on success, returns NULL on error |
||
135 | */ |
||
136 | static object_base_p |
||
137 | object_heap_lookup_unlocked(object_heap_p heap, int id) |
||
138 | { |
||
139 | object_base_p obj; |
||
140 | int bucket_index, obj_index; |
||
141 | |||
142 | if ((id < heap->id_offset) || (id > (heap->heap_size + heap->id_offset))) { |
||
143 | return NULL; |
||
144 | } |
||
145 | id &= OBJECT_HEAP_ID_MASK; |
||
146 | bucket_index = id / heap->heap_increment; |
||
147 | obj_index = id % heap->heap_increment; |
||
148 | obj = (object_base_p)(heap->bucket[bucket_index] + obj_index * heap->object_size); |
||
149 | |||
150 | /* Check if the object has in fact been allocated */ |
||
151 | if (obj->next_free != ALLOCATED) { |
||
152 | return NULL; |
||
153 | } |
||
154 | return obj; |
||
155 | } |
||
156 | |||
157 | object_base_p |
||
158 | object_heap_lookup(object_heap_p heap, int id) |
||
159 | { |
||
160 | object_base_p obj; |
||
161 | |||
162 | pthread_mutex_lock(&heap->mutex); |
||
163 | obj = object_heap_lookup_unlocked(heap, id); |
||
164 | pthread_mutex_unlock(&heap->mutex); |
||
165 | return obj; |
||
166 | } |
||
167 | |||
/*
 * Iterate over all objects in the heap.
 * Returns a pointer to the first object on the heap, returns NULL if heap is empty.
 */
object_base_p
object_heap_first(object_heap_p heap, object_heap_iterator *iter)
{
    /* Reset the iterator to "before slot 0", then reuse the locked
     * next() to locate the first allocated object. */
    *iter = -1;
    return object_heap_next(heap, iter);
}
||
178 | |||
179 | /* |
||
180 | * Iterate over all objects in the heap. |
||
181 | * Returns a pointer to the next object on the heap, returns NULL if heap is empty. |
||
182 | */ |
||
183 | static object_base_p |
||
184 | object_heap_next_unlocked(object_heap_p heap, object_heap_iterator *iter) |
||
185 | { |
||
186 | object_base_p obj; |
||
187 | int bucket_index, obj_index; |
||
188 | int i = *iter + 1; |
||
189 | |||
190 | while (i < heap->heap_size) { |
||
191 | bucket_index = i / heap->heap_increment; |
||
192 | obj_index = i % heap->heap_increment; |
||
193 | |||
194 | obj = (object_base_p)(heap->bucket[bucket_index] + obj_index * heap->object_size); |
||
195 | if (obj->next_free == ALLOCATED) { |
||
196 | *iter = i; |
||
197 | return obj; |
||
198 | } |
||
199 | i++; |
||
200 | } |
||
201 | *iter = i; |
||
202 | return NULL; |
||
203 | } |
||
204 | |||
205 | object_base_p |
||
206 | object_heap_next(object_heap_p heap, object_heap_iterator *iter) |
||
207 | { |
||
208 | object_base_p obj; |
||
209 | |||
210 | pthread_mutex_lock(&heap->mutex); |
||
211 | obj = object_heap_next_unlocked(heap, iter); |
||
212 | pthread_mutex_unlock(&heap->mutex); |
||
213 | return obj; |
||
214 | } |
||
215 | |||
/*
 * Frees an object
 * Caller must hold heap->mutex. The object is pushed back onto the head
 * of the free list; its storage is not released until heap destruction.
 */
static void
object_heap_free_unlocked(object_heap_p heap, object_base_p obj)
{
    /* Check if the object has in fact been allocated */
    ASSERT(obj->next_free == ALLOCATED);

    /* Link at the free-list head; the stored index is the object's id
     * with the heap's id_offset masked off. */
    obj->next_free = heap->next_free;
    heap->next_free = obj->id & OBJECT_HEAP_ID_MASK;
}
||
228 | |||
229 | void |
||
230 | object_heap_free(object_heap_p heap, object_base_p obj) |
||
231 | { |
||
232 | if (!obj) |
||
233 | return; |
||
234 | pthread_mutex_lock(&heap->mutex); |
||
235 | object_heap_free_unlocked(heap, obj); |
||
236 | pthread_mutex_unlock(&heap->mutex); |
||
237 | } |
||
238 | |||
239 | /* |
||
240 | * Destroys a heap, the heap must be empty. |
||
241 | */ |
||
242 | void |
||
243 | object_heap_destroy(object_heap_p heap) |
||
244 | { |
||
245 | object_base_p obj; |
||
246 | int bucket_index, obj_index, i; |
||
247 | |||
248 | /* Check if heap is empty */ |
||
249 | for (i = 0; i < heap->heap_size; i++) { |
||
250 | /* Check if object is not still allocated */ |
||
251 | bucket_index = i / heap->heap_increment; |
||
252 | obj_index = i % heap->heap_increment; |
||
253 | obj = (object_base_p)(heap->bucket[bucket_index] + obj_index * heap->object_size); |
||
254 | ASSERT(obj->next_free != ALLOCATED); |
||
255 | } |
||
256 | |||
257 | for (i = 0; i < heap->heap_size / heap->heap_increment; i++) { |
||
258 | free(heap->bucket[i]); |
||
259 | } |
||
260 | |||
261 | pthread_mutex_destroy(&heap->mutex); |
||
262 | |||
263 | free(heap->bucket); |
||
264 | heap->bucket = NULL; |
||
265 | heap->heap_size = 0; |
||
266 | heap->next_free = LAST_FREE; |
||
267 | }>>>> |