Subversion Repositories: Kolibri OS, Rev 1066

#include 
#include 
#include 
#include 
#include 

/* Caches used by the allocator itself: slab_cache holds slab_t
   descriptors, slab_cache_cache holds slab_cache_t structures. */
static slab_cache_t *slab_cache;
static slab_cache_t slab_cache_cache;

/* Defined below, used by slab_space_alloc() and slab_cache_create() */
static slab_t * slab_create();
static slab_cache_t * slab_cache_alloc();

/**
 * Allocate frames for slab space and initialize
 *
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
    void *data;
    slab_t *slab;
    size_t fsize;
    unsigned int i;
    u32_t p;

    data = (void*)PA2KA(frame_alloc(1 << cache->order));
    if (!data) {
        return NULL;
    }
    slab = (slab_t*)slab_create();
    if (!slab) {
        frame_free(KA2PA(data));
        return NULL;
    }

    /* Fill in slab structures */
    for (i = 0; i < ((u32_t) 1 << cache->order); i++)
        frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab);

    slab->available = cache->objects;
    slab->nextavail = (void*)data;
    slab->cache = cache;

    /* Chain the free objects: the first word of each free object
       holds the address of the next one */
    for (i = 0, p = (u32_t)slab->nextavail; i < cache->objects; i++)
    {
        *(addr_t *)p = p+cache->size;
        p = p+cache->size;
    };
    atomic_inc(&cache->allocated_slabs);
    return slab;
}
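
/*
 * Free-list layout after slab_space_alloc() (illustrative, for a cache
 * with size = 32):
 *
 *   data+0 -> data+32 -> data+64 -> ... -> last object
 *
 * slab->nextavail points at data+0; allocation pops from the head of
 * this list and deallocation pushes back onto it.
 */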

/**
 * Take new object from slab or create new if needed
 *
 * @return Object address or null
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
  slab_t *slab;
  void *obj;

//  spinlock_lock(&cache->slablock);

  if (list_empty(&cache->partial_slabs)) {
    slab = slab_space_alloc(cache, flags);
    if (!slab)
    {
      spinlock_unlock(&cache->slablock);
      return NULL;
    }
  } else {
    slab = list_get_instance(cache->partial_slabs.next, slab_t, link);
    list_remove(&slab->link);
  }

  /* Pop the first free object off the slab's list */
  obj = slab->nextavail;
  slab->nextavail = *(void**)obj;
  slab->available--;

  if (!slab->available)
    list_prepend(&slab->link, &cache->full_slabs);
  else
    list_prepend(&slab->link, &cache->partial_slabs);

//  spinlock_unlock(&cache->slablock);

//  if (cache->constructor && cache->constructor(obj, flags)) {
    /* Bad, bad, construction failed */
//    slab_obj_destroy(cache, obj, slab);
//    return NULL;
//  }
  return obj;
}

/** Map object to its slab via the frame's parent pointer */
static slab_t * obj2slab(void *obj)
{
  return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)));
}

/** Allocate new object from cache - if no flags given, always returns
    memory */
void* __fastcall slab_alloc(slab_cache_t *cache, int flags)
{
   eflags_t efl;
   void *result = NULL;

   efl = safe_cli();

// if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
//   result = magazine_obj_get(cache);
// }
//  if (!result)
    result = slab_obj_create(cache, flags);

   safe_sti(efl);

//  if (result)
//    atomic_inc(&cache->allocated_objs);

   return result;
}


/* Slab cache functions */

/** Return number of objects that fit in certain cache size */
static unsigned int comp_objects(slab_cache_t *cache)
{
  if (cache->flags & SLAB_CACHE_SLINSIDE)
    return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
  else
    return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return wasted space in slab */
static unsigned int badness(slab_cache_t *cache)
{
  unsigned int objects;
  unsigned int ssize;
  size_t val;

  objects = comp_objects(cache);
  ssize = PAGE_SIZE << cache->order;
  if (cache->flags & SLAB_CACHE_SLINSIDE)
    ssize -= sizeof(slab_t);
  val = ssize - objects * cache->size;
  return val;
}
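/*
 * Worked example (PAGE_SIZE = 4096 and a 64-byte slab_t are assumed
 * here for illustration): an order-0 cache with size = 32 and the
 * slab_t kept outside the slab gives 4096/32 = 128 objects and a
 * badness of 4096 - 128*32 = 0. With SLAB_CACHE_SLINSIDE the usable
 * space shrinks to 4096 - 64 = 4032, so comp_objects() returns
 * 4032/32 = 126 and badness() returns 4032 - 126*32 = 0.
 */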
 


/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
       size_t size,
       size_t align,
       int (*constructor)(void *obj, int kmflag),
       int (*destructor)(void *obj),
       int flags)
{
    int pages;
 // ipl_t ipl;

//  cache->name = name;

//  if (align < sizeof(unative_t))
//    align = sizeof(unative_t);
//  size = ALIGN_UP(size, align);

    cache->size = size;

//  cache->constructor = constructor;
//  cache->destructor = destructor;
    cache->flags = flags;

    list_initialize(&cache->full_slabs);
    list_initialize(&cache->partial_slabs);
    list_initialize(&cache->magazines);
//  spinlock_initialize(&cache->slablock, "slab_lock");
//  spinlock_initialize(&cache->maglock, "slab_maglock");
//  if (! (cache->flags & SLAB_CACHE_NOMAGAZINE))
//    make_magcache(cache);

    /* Compute slab sizes, object counts in slabs etc. */
    pages = SIZE2FRAMES(cache->size);
  /* We need the 2^order >= pages */
    if (pages <= 1)
        cache->order = 0;
    else
        cache->order = fnzb(pages-1)+1;

    while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
        cache->order += 1;
    }
    cache->objects = comp_objects(cache);

//  ipl = interrupts_disable();
//  spinlock_lock(&slab_cache_lock);

//  spinlock_unlock(&slab_cache_lock);
//  interrupts_restore(ipl);
}
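
/*
 * Example of the order computation (PAGE_SIZE = 4096 assumed): for
 * size = 5000, SIZE2FRAMES() yields pages = 2, so the initial order is
 * fnzb(1) + 1 = 1 and a slab spans 8192 bytes holding a single object.
 * The badness() loop can then grow the order further: at order 2 a
 * slab holds 16384/5000 = 3 objects, cutting the waste from 3192 to
 * 1384 bytes per slab.
 */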

/** Create slab cache */
slab_cache_t * slab_cache_create(
				 size_t size,
				 size_t align,
				 int (*constructor)(void *obj, int kmflag),
				 int (*destructor)(void *obj),
				 int flags)
{
	slab_cache_t *cache;

	cache = (slab_cache_t*)slab_cache_alloc();

	_slab_cache_create(cache, size, align, constructor, destructor, flags);

	return cache;
}
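
/*
 * Note on bootstrapping: slab_cache_create() takes its own descriptor
 * from slab_cache_cache (the statically allocated cache of slab_cache_t
 * structures, set up in the init function at the end of this file), so
 * creating a new cache never recurses into slab_cache_create().
 */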

/**
 * Deallocate space associated with slab
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
	frame_free(KA2PA(slab->start));
	if (! (cache->flags & SLAB_CACHE_SLINSIDE))
		slab_free(slab_cache, slab);

	atomic_dec(&cache->allocated_slabs);

	return 1 << cache->order;
}

/**
 * Return object to slab and call a destructor
 *
 * @param slab If the caller knows directly slab of the object, otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
				slab_t *slab)
{
	int freed = 0;

	if (!slab)
		slab = obj2slab(obj);

//  if (cache->destructor)
//    freed = cache->destructor(obj);

//	spinlock_lock(&cache->slablock);
//	ASSERT(slab->available < cache->objects);

	/* Push the object back onto the slab's free list */
	*((void**)obj) = slab->nextavail;
	slab->nextavail = obj;
	slab->available++;

	/* Move it to correct list */
	if (slab->available == cache->objects) {
		/* Free associated memory */
		list_remove(&slab->link);
//		spinlock_unlock(&cache->slablock);

		return slab_space_free(cache, slab);

	} else if (slab->available == 1) {
		/* It was in full, move to partial */
		list_remove(&slab->link);
		list_prepend(&slab->link, &cache->partial_slabs);
	}
//	spinlock_unlock(&cache->slablock);
	return freed;
}
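
/*
 * State transitions for a slab as objects come and go (illustrative
 * summary of the logic above):
 *
 *   available == 0           -> slab sits on cache->full_slabs
 *   0 < available < objects  -> slab sits on cache->partial_slabs
 *   available == objects     -> slab is handed back to the frame
 *                               allocator by slab_space_free()
 */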


/** Return object to cache; use slab if known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
   eflags_t efl;

   efl = safe_cli();

// if ((cache->flags & SLAB_CACHE_NOMAGAZINE)
//      || magazine_obj_put(cache, obj)) {

      slab_obj_destroy(cache, obj, slab);

// }
  safe_sti(efl);
  atomic_dec(&cache->allocated_objs);
}

void __fastcall slab_free(slab_cache_t *cache, void *obj)
{
	_slab_free(cache, obj, NULL);
}

/* Allocate a slab_t descriptor from the internal slab_cache */
static slab_t * slab_create()
{
    slab_t *slab;
    void *obj;
    u32_t p;

//  spinlock_lock(&cache->slablock);

  if (list_empty(&slab_cache->partial_slabs)) {
//    spinlock_unlock(&cache->slablock);
//    slab = slab_create();

    void *data = (void*)PA2KA(frame_alloc(1));
    unsigned int i;

    if (!data) {
      return NULL;
    }

    /* Place the slab descriptor at the end of the frame */
    slab = (slab_t*)((u32_t)data + PAGE_SIZE - sizeof(slab_t));

    /* Fill in slab structures */
    frame_set_parent(ADDR2PFN(KA2PA(data)), slab);

    slab->available = slab_cache->objects;
    slab->nextavail = (void*)data;
    slab->cache = slab_cache;

    for (i = 0, p = (u32_t)slab->nextavail; i < slab_cache->objects; i++)
    {
      *(int *)p = p+slab_cache->size;
      p = p+slab_cache->size;
    };

//    spinlock_lock(&cache->slablock);
  } else {
    slab = list_get_instance(slab_cache->partial_slabs.next, slab_t, link);
    list_remove(&slab->link);
  }
  obj = slab->nextavail;
  slab->nextavail = *((void**)obj);
  slab->available--;

  if (!slab->available)
    list_prepend(&slab->link, &slab_cache->full_slabs);
  else
    list_prepend(&slab->link, &slab_cache->partial_slabs);

//  spinlock_unlock(&cache->slablock);

  return (slab_t*)obj;
}

/* Allocate a slab_cache_t from the static cache-of-caches */
static slab_cache_t * slab_cache_alloc()
{
    slab_t *slab;
    void *obj;
    u32_t *p;

    if (list_empty(&slab_cache_cache.partial_slabs))
    {
//    spinlock_unlock(&cache->slablock);
//    slab = slab_create();

        void *data = (void*)PA2KA(frame_alloc(1));
        unsigned int i;

        if (!data) {
            return NULL;
        }

        /* Place the slab descriptor at the end of the frame */
        slab = (slab_t*)((u32_t)data + PAGE_SIZE - sizeof(slab_t));

        /* Fill in slab structures */
        frame_set_parent(ADDR2PFN(KA2PA(data)), slab);

        slab->available = slab_cache_cache.objects;
        slab->nextavail = (void*)data;
        slab->cache = &slab_cache_cache;

        for (i = 0, p = (u32_t*)data; i < slab_cache_cache.objects; i++)
        {
            *p = (u32_t)p+slab_cache_cache.size;
            p = (u32_t*)((u32_t)p+slab_cache_cache.size);
        };

//    spinlock_lock(&cache->slablock);
    }
    else {
        slab = list_get_instance(slab_cache_cache.partial_slabs.next, slab_t, link);
        list_remove(&slab->link);
    }
    obj = slab->nextavail;
    slab->nextavail = *((void**)obj);
    slab->available--;

    if (!slab->available)
        list_prepend(&slab->link, &slab_cache_cache.full_slabs);
    else
        list_prepend(&slab->link, &slab_cache_cache.partial_slabs);

//  spinlock_unlock(&cache->slablock);

    return (slab_cache_t*)obj;
}

/* Bootstrap the slab allocator */
void slab_cache_init(void)
{
    DBG("%s\n", __FUNCTION__);

    _slab_cache_create(&slab_cache_cache, sizeof(slab_cache_t),
                     sizeof(void *), NULL, NULL,
                     SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);

    /* Initialize external slab cache */
    slab_cache = slab_cache_create(sizeof(slab_t),
					      0, NULL, NULL, SLAB_CACHE_MAGDEFERRED);
};
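
/*
 * Usage sketch (hypothetical caller, not part of this file): once
 * slab_cache_init() has run, a subsystem can create its own cache and
 * allocate fixed-size objects from it. The names my_obj_t, my_cache
 * and my_setup() are illustrative only.
 *
 *   static slab_cache_t *my_cache;
 *
 *   void my_setup(void)
 *   {
 *       my_cache = slab_cache_create(sizeof(my_obj_t), 0,
 *                                    NULL, NULL, 0);
 *       my_obj_t *obj = (my_obj_t*)slab_alloc(my_cache, 0);
 *       if (obj)
 *           slab_free(my_cache, obj);
 *   }
 */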