Subversion Repositories: KolibriOS


Rev 886 → Rev 1066
Line 31... Line 31...
   size_t fsize;
   unsigned int i;
   u32_t p;
 
-  data = (void*)PA2KA(core_alloc(cache->order));
+    DBG("%s order %d\n", __FUNCTION__, cache->order);
+
+    data = (void*)PA2KA(frame_alloc(1 << cache->order));
   if (!data) {
     return NULL;
   }
   slab = (slab_t*)slab_create();
   if (!slab) {
-    core_free(KA2PA(data));
+        frame_free(KA2PA(data));
       return NULL;
   }
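
The functional change in this hunk is the allocation call: the old core_alloc() took a power-of-two order, while the new frame_alloc() takes a frame count, hence the 1 << cache->order argument (and core_free() becomes frame_free() on the error path). A minimal sketch of how the two relate; both prototypes are assumptions for illustration, not the repository's actual headers:

    #include <stddef.h>

    /* Assumed prototypes, mirroring how the calls appear in the hunk. */
    void *core_alloc(unsigned int order);  /* old API: allocates 2^order frames */
    void *frame_alloc(size_t frames);      /* new API: allocates 'frames' frames */

    /* The mechanical translation applied throughout this revision:
     * core_alloc(order) becomes frame_alloc(1 << order). */
    static inline void *core_alloc_compat(unsigned int order)
    {
        return frame_alloc((size_t)1u << order);
    }
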
Line 44... Line 46...
 
   /* Fill in slab structures */
Line 72... Line 74...
 
   spinlock_lock(&cache->slablock);
 
   if (list_empty(&cache->partial_slabs)) {
-    /* Allow recursion and reclaiming
-     * - this should work, as the slab control structures
-     *   are small and do not need to allocate with anything
-     *   other than frame_alloc when they are allocating,
-     *   that's why we should get recursion at most 1-level deep
-     */
     slab = slab_space_alloc(cache, flags);
     if (!slab)
     {
       spinlock_unlock(&cache->slablock);
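
The comment removed above recorded a real invariant of the design. For reference, the call chain it described, reconstructed from the surrounding code (the control-cache name below is hypothetical):

    /*
     * slab_alloc(cache, flags)                ordinary allocation
     *   -> slab_space_alloc(cache, flags)     partial list empty: grow cache
     *        -> frame_alloc(...)              raw frames, no slab involved
     *        -> slab_create()                 slab_t from the control cache
     *             -> slab_space_alloc(slab_t_cache, ...)  recurses at most once,
     *                  -> frame_alloc(...)                then bottoms out here
     */
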
Line 209... Line 205...
 
   /* Minimum slab order */
   pages = SIZE2FRAMES(cache->size);
   /* We need the 2^order >= pages */
-  if (pages == 1)
+  if (pages <= 1)
     cache->order = 0;
   else
     cache->order = fnzb(pages-1)+1;
 
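
The pages == 1 → pages <= 1 change also covers SIZE2FRAMES() returning 0: with unsigned arithmetic, fnzb(pages - 1) would otherwise see an underflowed value. A standalone sketch of the computation; this fnzb() (index of the most significant set bit) is a portable stand-in for illustration, not the kernel's implementation:

    /* Portable stand-in for fnzb(): index of the most significant set bit.
     * The kernel version is presumably an inline bit-scan instruction. */
    static inline unsigned int fnzb(unsigned int v)
    {
        unsigned int i = 0;
        while (v >>= 1)
            i++;
        return i;
    }

    /* We need 2^order >= pages; '<= 1' also guards pages == 0, where the
     * unsigned 'pages - 1' would underflow. */
    static unsigned int slab_order(unsigned int pages)
    {
        if (pages <= 1)
            return 0;
        return fnzb(pages - 1) + 1;
    }

    /* Example: pages = 3 gives fnzb(2) = 1, so order = 2 (4 frames). */
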
Line 239... Line 235...
 				 int flags)
 {
 	slab_cache_t *cache;
 
+    DBG("%s\n", __FUNCTION__);
+
 	cache = (slab_cache_t*)slab_cache_alloc();
   _slab_cache_create(cache, size, align, constructor, destructor, flags);
 	return cache;
 }
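
For context, a hypothetical call site for the constructor above; the size, alignment, and flag values are illustrative only, and slab_alloc() is the assumed allocation entry point (its definition is not part of this diff):

    /* A cache of 64-byte objects, pointer-aligned, no ctor/dtor, no flags. */
    slab_cache_t *my_cache = slab_cache_create(64, sizeof(void *),
                                               NULL, NULL, 0);

    void *obj = slab_alloc(my_cache, 0);
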
Line 335... Line 333...
   slab_t *slab;
   void *obj;
   u32_t p;
 
+    DBG("%s\n", __FUNCTION__);
+
 //  spinlock_lock(&cache->slablock);
 
   if (list_empty(&slab_cache->partial_slabs)) {
-    /* Allow recursion and reclaiming
-     * - this should work, as the slab control structures
-     *   are small and do not need to allocate with anything
-     *   other than frame_alloc when they are allocating,
-     *   that's why we should get recursion at most 1-level deep
-     */
 //    spinlock_unlock(&cache->slablock);
 //    slab = slab_create();
 
     void *data;
     unsigned int i;
 
-    data = (void*)PA2KA(core_alloc(0));
+    data = (void*)PA2KA(alloc_page());
     if (!data) {
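
By the same mapping as in the first hunk, core_alloc(0) was an order-0, i.e. single-frame, allocation, so it is replaced here by a dedicated single-page call. A plausible equivalence, with the same assumed prototypes as above:

    /* alloc_page() as the single-frame special case of frame_alloc(). */
    static inline void *alloc_page_equiv(void)
    {
        return frame_alloc(1);
    }
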
Line 398... Line 392...
   slab_t *slab;
   void *obj;
   u32_t *p;
 
-  if (list_empty(&slab_cache_cache.partial_slabs)) {
-    /* Allow recursion and reclaiming
-     * - this should work, as the slab control structures
-     *   are small and do not need to allocate with anything
-     *   other than frame_alloc when they are allocating,
-     *   that's why we should get recursion at most 1-level deep
-     */
+    DBG("%s\n", __FUNCTION__);
+
+    if (list_empty(&slab_cache_cache.partial_slabs))
+    {
 //    spinlock_unlock(&cache->slablock);
 //    slab = slab_create();
 
     void *data;
     unsigned int i;
 
-    data = (void*)(PA2KA(core_alloc(0)));
+        data = (void*)(PA2KA(alloc_page()));
     if (!data) {
       return NULL;
Line 435... Line 426...
435
 
426
 
Line 436... Line 427...
436
 
427
 
437
    atomic_inc(&slab_cache_cache.allocated_slabs);
428
    atomic_inc(&slab_cache_cache.allocated_slabs);
-
 
429
//    spinlock_lock(&cache->slablock);
438
//    spinlock_lock(&cache->slablock);
430
    }
439
  } else {
431
    else {
440
    slab = list_get_instance(slab_cache_cache.partial_slabs.next, slab_t, link);
432
    slab = list_get_instance(slab_cache_cache.partial_slabs.next, slab_t, link);
441
    list_remove(&slab->link);
433
    list_remove(&slab->link);
442
  }
434
  }
443
  obj = slab->nextavail;
435
  obj = slab->nextavail;
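
Only the brace style changes in this hunk, but list_get_instance() deserves a note: it is the classic container_of idiom, recovering the enclosing slab_t from a pointer to its embedded list link. A plausible definition, consistent with the call above (an assumption; the kernel ships its own):

    #include <stddef.h>

    #define list_get_instance(link, type, member) \
        ((type *)(((char *)(link)) - offsetof(type, member)))

    /* In the hunk: partial_slabs.next points at slab->link, so subtracting
     * offsetof(slab_t, link) yields the slab_t that contains it. */
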
Line 455... Line 447...
 }
 
 void slab_cache_init(void)
 {
+    DBG("%s\n", __FUNCTION__);
 
   _slab_cache_create(&slab_cache_cache, sizeof(slab_cache_t),
                      sizeof(void *), NULL, NULL,
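
The call above is the allocator's bootstrap step: the cache that hands out slab_cache_t descriptors cannot allocate its own descriptor, so slab_cache_cache must be a statically allocated slab_cache_t that _slab_cache_create() initializes in place, roughly:

    /* Assumed to live elsewhere in this file: the statically placed
     * cache-of-caches, initialized in place by slab_cache_init(). */
    static slab_cache_t slab_cache_cache;
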