Subversion Repositories Kolibri OS

Rev

Details | Last modification | View Log | RSS feed

Rev Author Line No. Line
3770 Serge 1
/**************************************************************************
2
 *
3
 * Copyright 2008 VMware, Inc.
4
 * All Rights Reserved.
5
 *
6
 * Permission is hereby granted, free of charge, to any person obtaining a
7
 * copy of this software and associated documentation files (the
8
 * "Software"), to deal in the Software without restriction, including
9
 * without limitation the rights to use, copy, modify, merge, publish,
10
 * distribute, sub license, and/or sell copies of the Software, and to
11
 * permit persons to whom the Software is furnished to do so, subject to
12
 * the following conditions:
13
 *
14
 * The above copyright notice and this permission notice (including the
15
 * next paragraph) shall be included in all copies or substantial portions
16
 * of the Software.
17
 *
18
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
 *
26
 **************************************************************************/
27
 
28
/**
29
 * @file
30
 * Improved cache implementation.
31
 *
32
 * Fixed size array with linear probing on collision and LRU eviction
33
 * on full.
34
 *
35
 * @author Jose Fonseca <jfonseca@vmware.com>
36
 */
37
 
38
 
39
#include "pipe/p_compiler.h"
40
#include "util/u_debug.h"
41
 
42
#include "util/u_math.h"
43
#include "util/u_memory.h"
44
#include "util/u_cache.h"
45
#include "util/u_simple_list.h"
46
 
47
 
48
/**
 * A single hash-table slot; doubly linked into the cache's LRU list
 * while FILLED.
 */
struct util_cache_entry
{
   /* EMPTY: never occupied; FILLED: holds a live (key, value) pair;
    * DELETED: tombstone kept so linear probing can continue past it. */
   enum { EMPTY = 0, FILLED, DELETED } state;
   uint32_t hash;   /* cached hash of 'key'; avoids re-hashing during probes */

   /* LRU list links; the list head is the util_cache::lru sentinel. */
   struct util_cache_entry *next;
   struct util_cache_entry *prev;

   void *key;
   void *value;

#ifdef DEBUG
   unsigned count;  /* how many times this slot has been (re)filled */
#endif
};
63
 
64
 
65
/**
 * Fixed-size hash table with linear probing on collision and LRU
 * eviction when full.
 */
struct util_cache
{
   /** Hash function */
   uint32_t (*hash)(const void *key);

   /** Compare two keys; returns 0 when they are equal */
   int (*compare)(const void *key1, const void *key2);

   /** Destroy a (key, value) pair; may be NULL */
   void (*destroy)(void *key, void *value);

   /* Number of slots in 'entries': the size requested at creation time
    * multiplied by CACHE_DEFAULT_ALPHA (see util_cache_create). */
   uint32_t size;

   struct util_cache_entry *entries;

   unsigned count;               /* number of FILLED entries */
   struct util_cache_entry lru;  /* sentinel of the LRU doubly-linked list */
};
83
 
84
static void
85
ensure_sanity(const struct util_cache *cache);
86
 
87
#define CACHE_DEFAULT_ALPHA 2
88
 
89
struct util_cache *
90
util_cache_create(uint32_t (*hash)(const void *key),
91
                  int (*compare)(const void *key1, const void *key2),
92
                  void (*destroy)(void *key, void *value),
93
                  uint32_t size)
94
{
95
   struct util_cache *cache;
96
 
97
   cache = CALLOC_STRUCT(util_cache);
98
   if(!cache)
99
      return NULL;
100
 
101
   cache->hash = hash;
102
   cache->compare = compare;
103
   cache->destroy = destroy;
104
 
105
   make_empty_list(&cache->lru);
106
 
107
   size *= CACHE_DEFAULT_ALPHA;
108
   cache->size = size;
109
 
110
   cache->entries = CALLOC(size, sizeof(struct util_cache_entry));
111
   if(!cache->entries) {
112
      FREE(cache);
113
      return NULL;
114
   }
115
 
116
   ensure_sanity(cache);
117
   return cache;
118
}
119
 
120
 
121
static struct util_cache_entry *
122
util_cache_entry_get(struct util_cache *cache,
123
                     uint32_t hash,
124
                     const void *key)
125
{
126
   struct util_cache_entry *first_unfilled = NULL;
127
   uint32_t index = hash % cache->size;
128
   uint32_t probe;
129
 
130
   /* Probe until we find either a matching FILLED entry or an EMPTY
131
    * slot (which has never been occupied).
132
    *
133
    * Deleted or non-matching slots are not indicative of completion
134
    * as a previous linear probe for the same key could have continued
135
    * past this point.
136
    */
137
   for (probe = 0; probe < cache->size; probe++) {
138
      uint32_t i = (index + probe) % cache->size;
139
      struct util_cache_entry *current = &cache->entries[i];
140
 
141
      if (current->state == FILLED) {
142
         if (current->hash == hash &&
143
             cache->compare(key, current->key) == 0)
144
            return current;
145
      }
146
      else {
147
         if (!first_unfilled)
148
            first_unfilled = current;
149
 
150
         if (current->state == EMPTY)
151
            return first_unfilled;
152
      }
153
   }
154
 
155
   return NULL;
156
}
157
 
158
static INLINE void
159
util_cache_entry_destroy(struct util_cache *cache,
160
                         struct util_cache_entry *entry)
161
{
162
   void *key = entry->key;
163
   void *value = entry->value;
164
 
165
   entry->key = NULL;
166
   entry->value = NULL;
167
 
168
   if (entry->state == FILLED) {
169
      remove_from_list(entry);
170
      cache->count--;
171
 
172
      if(cache->destroy)
173
         cache->destroy(key, value);
174
 
175
      entry->state = DELETED;
176
   }
177
}
178
 
179
 
180
/**
 * Insert or replace a (key, value) pair, evicting the least-recently
 * used entry when the cache is at capacity.
 *
 * Ownership of 'key' and 'value' passes to the cache; they are released
 * through the destroy callback on eviction, replacement, clear or
 * destroy.
 */
void
util_cache_set(struct util_cache *cache,
               void *key,
               void *value)
{
   struct util_cache_entry *entry;
   uint32_t hash;

   assert(cache);
   if (!cache)
      return;
   hash = cache->hash(key);
   entry = util_cache_entry_get(cache, hash, key);
   if (!entry)
      entry = cache->lru.prev;   /* table fully probed: reuse the LRU tail */

   /* At capacity (load factor 1/CACHE_DEFAULT_ALPHA): evict the
    * least-recently-used entry before inserting. */
   if (cache->count >= cache->size / CACHE_DEFAULT_ALPHA)
      util_cache_entry_destroy(cache, cache->lru.prev);

   /* Release any pair already stored in the target slot.  Harmless if
    * the eviction above already destroyed this same entry (it is then
    * DELETED and destruction is a no-op). */
   util_cache_entry_destroy(cache, entry);

#ifdef DEBUG
   ++entry->count;
#endif

   /* Refill the slot and promote it to most-recently-used. */
   entry->key = key;
   entry->hash = hash;
   entry->value = value;
   entry->state = FILLED;
   insert_at_head(&cache->lru, entry);
   cache->count++;

   ensure_sanity(cache);
}
214
 
215
 
216
void *
217
util_cache_get(struct util_cache *cache,
218
               const void *key)
219
{
220
   struct util_cache_entry *entry;
221
   uint32_t hash;
222
 
223
   assert(cache);
224
   if (!cache)
225
      return NULL;
226
   hash = cache->hash(key);
227
   entry = util_cache_entry_get(cache, hash, key);
228
   if (!entry)
229
      return NULL;
230
 
231
   if (entry->state == FILLED)
232
      move_to_head(&cache->lru, entry);
233
 
234
   return entry->value;
235
}
236
 
237
 
238
void
239
util_cache_clear(struct util_cache *cache)
240
{
241
   uint32_t i;
242
 
243
   assert(cache);
244
   if (!cache)
245
      return;
246
 
247
   for(i = 0; i < cache->size; ++i) {
248
      util_cache_entry_destroy(cache, &cache->entries[i]);
249
      cache->entries[i].state = EMPTY;
250
   }
251
 
252
   assert(cache->count == 0);
253
   assert(is_empty_list(&cache->lru));
254
   ensure_sanity(cache);
255
}
256
 
257
 
258
/**
 * Destroy the cache: release all stored pairs via the destroy callback,
 * then free the entry array and the cache itself.
 */
void
util_cache_destroy(struct util_cache *cache)
{
   assert(cache);
   if (!cache)
      return;

#ifdef DEBUG
   /* Statistical check of the hash distribution across slots.
    * NOTE(review): cache->count holds the number of currently FILLED
    * entries, which util_cache_set keeps at or below
    * size/CACHE_DEFAULT_ALPHA — so this branch looks unreachable unless
    * 'count' was once meant to track total insertions; confirm intent. */
   if(cache->count >= 20*cache->size) {
      /* Normal approximation of the Poisson distribution */
      double mean = (double)cache->count/(double)cache->size;
      double stddev = sqrt(mean);
      unsigned i;
      for(i = 0; i < cache->size; ++i) {
         double z = fabs(cache->entries[i].count - mean)/stddev;
         /* This assert should not fail 99.9999998027% of the times, unless
          * the hash function is a poor one */
         assert(z <= 6.0);
      }
   }
#endif

   util_cache_clear(cache);

   FREE(cache->entries);
   FREE(cache);
}
285
 
286
 
287
void
288
util_cache_remove(struct util_cache *cache,
289
                  const void *key)
290
{
291
   struct util_cache_entry *entry;
292
   uint32_t hash;
293
 
294
   assert(cache);
295
   if (!cache)
296
      return;
297
 
298
   hash = cache->hash(key);
299
 
300
   entry = util_cache_entry_get(cache, hash, key);
301
   if (!entry)
302
      return;
303
 
304
   if (entry->state == FILLED)
305
      util_cache_entry_destroy(cache, entry);
306
 
307
   ensure_sanity(cache);
308
}
309
 
310
 
311
/**
 * Debug-only consistency check: validates every slot's state, that the
 * FILLED count matches cache->count, that stored hashes are current, and
 * that the LRU list links exactly cache->count nodes back to its
 * sentinel.  Compiles to nothing in release builds.
 */
static void
ensure_sanity(const struct util_cache *cache)
{
#ifdef DEBUG
   unsigned idx, filled = 0;

   assert(cache);

   for (idx = 0; idx < cache->size; idx++) {
      struct util_cache_entry *e = &cache->entries[idx];

      assert(e);
      assert(e->state == EMPTY ||
             e->state == FILLED ||
             e->state == DELETED);
      if (e->state == FILLED) {
         filled++;
         /* The cached hash must match what the hash function yields now. */
         assert(e->hash == cache->hash(e->key));
      }
   }

   assert(filled == cache->count);
   assert(cache->size >= filled);

   if (cache->count == 0) {
      assert(is_empty_list(&cache->lru));
   }
   else {
      struct util_cache_entry *node = cache->lru.next;

      assert(node);
      assert(!is_empty_list(&cache->lru));

      /* Walking cache->count links from the head must land back on the
       * sentinel. */
      for (idx = 0; idx < cache->count; idx++)
         node = node->next;

      assert(node == &cache->lru);
   }
#endif

   (void)cache;
}