Subversion Repositories Kolibri OS

Rev

Rev 1066 | Show entire file | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 1066 Rev 2971
Line 25... Line 25...
25
 *
25
 *
26
 */
26
 */
27
/**
 * Allocate backing frames for a new slab and thread its free-object list.
 *
 * Grabs 2^cache->order physical frames, obtains a slab descriptor via
 * slab_create(), points every frame back at the owning slab, and links
 * all objects into a singly linked free list stored in-place (the first
 * word of each free object holds the address of the next one).
 *
 * @param cache  cache the slab will belong to
 * @param flags  allocation flags — currently unused by this function
 * @return new slab descriptor, or NULL if either the frame allocation
 *         or the descriptor allocation fails (frames are released on
 *         the latter failure path).
 */
static slab_t *slab_space_alloc(slab_cache_t *cache, int flags)
{
    void *data;
    slab_t *slab;
    unsigned int i;
    u32_t p;

    DBG("%s order %d\n", __FUNCTION__, cache->order);

    data = (void*)PA2KA(frame_alloc(1 << cache->order));
    /* NOTE(review): if PA2KA() maps physical address 0 to a nonzero
     * kernel address, this NULL test can never fire — confirm the
     * failure convention of frame_alloc(). */
    if (!data) {
        return NULL;
    }

    slab = (slab_t*)slab_create();
    if (!slab) {
        /* Descriptor allocation failed: give the frames back. */
        frame_free(KA2PA(data));
        return NULL;
    }

    /* Record this slab as the parent of each of its 2^order frames. */
    for (i = 0; i < ((u32_t) 1 << cache->order); i++)
        frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab);

    slab->start = data;
    slab->available = cache->objects;
    slab->nextavail = (void*)data;
    slab->cache = cache;

    /* Build the intrusive free list: each free object's first word
     * points at the next object, cache->size bytes further on. */
    for (i = 0, p = (u32_t)slab->start; i < cache->objects; i++)
    {
        *(addr_t *)p = p + cache->size;
        p = p + cache->size;
    }

    atomic_inc(&cache->allocated_slabs);
    return slab;
}
Line 176... Line 176...
176
       int (*constructor)(void *obj, int kmflag),
176
       int (*constructor)(void *obj, int kmflag),
177
       int (*destructor)(void *obj),
177
       int (*destructor)(void *obj),
178
       int flags)
178
       int flags)
179
{
179
{
180
  int pages;
180
    int pages;
181
 // ipl_t ipl;
181
 // ipl_t ipl;
182
 
182
 
Line 183... Line 183...
183
//  memsetb((uintptr_t)cache, sizeof(*cache), 0);
183
//  memsetb((uintptr_t)cache, sizeof(*cache), 0);
184
//  cache->name = name;
184
//  cache->name = name;
Line 185... Line 185...
185
 
185
 
186
//if (align < sizeof(unative_t))
186
//if (align < sizeof(unative_t))
187
//    align = sizeof(unative_t);
187
//    align = sizeof(unative_t);
Line 188... Line 188...
188
//  size = ALIGN_UP(size, align);
188
//  size = ALIGN_UP(size, align);
Line 189... Line 189...
189
 
189
 
190
  cache->size = size;
190
    cache->size = size;
191
 
191
 
Line 192... Line 192...
192
//  cache->constructor = constructor;
192
//  cache->constructor = constructor;
193
//  cache->destructor = destructor;
193
//  cache->destructor = destructor;
194
  cache->flags = flags;
194
    cache->flags = flags;
195
 
195
 
196
  list_initialize(&cache->full_slabs);
196
    list_initialize(&cache->full_slabs);
197
  list_initialize(&cache->partial_slabs);
197
    list_initialize(&cache->partial_slabs);
198
  list_initialize(&cache->magazines);
198
    list_initialize(&cache->magazines);
Line 199... Line 199...
199
//  spinlock_initialize(&cache->slablock, "slab_lock");
199
//  spinlock_initialize(&cache->slablock, "slab_lock");
Line 200... Line 200...
200
//  spinlock_initialize(&cache->maglock, "slab_maglock");
200
//  spinlock_initialize(&cache->maglock, "slab_maglock");
201
//  if (! (cache->flags & SLAB_CACHE_NOMAGAZINE))
201
//  if (! (cache->flags & SLAB_CACHE_NOMAGAZINE))
202
//    make_magcache(cache);
202
//    make_magcache(cache);
203
 
203
 
204
  /* Compute slab sizes, object counts in slabs etc. */
204
  /* Compute slab sizes, object counts in slabs etc. */
205
 
205
 
206
  /* Minimum slab order */
206
  /* Minimum slab order */
Line 207... Line 207...
207
  pages = SIZE2FRAMES(cache->size);
207
    pages = SIZE2FRAMES(cache->size);
208
  /* We need the 2^order >= pages */
208
  /* We need the 2^order >= pages */
209
  if (pages <= 1)
209
    if (pages <= 1)
210
    cache->order = 0;
210
        cache->order = 0;
Line 211... Line 211...
211
  else
211
    else
212
    cache->order = fnzb(pages-1)+1;
212
        cache->order = fnzb(pages-1)+1;
213
 
213
 
Line 214... Line 214...
214
  while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
214
    while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
Line 215... Line 215...
215
    cache->order += 1;
215
        cache->order += 1;
216
  }
216
    }
217
  cache->objects = comp_objects(cache);
217
    cache->objects = comp_objects(cache);
Line 238... Line 238...
238
 
238
 
Line 239... Line 239...
239
    DBG("%s\n", __FUNCTION__);
239
    DBG("%s\n", __FUNCTION__);
Line 240... Line 240...
240
 
240
 
-
 
241
	cache = (slab_cache_t*)slab_cache_alloc();
241
	cache = (slab_cache_t*)slab_cache_alloc();
242
 
-
 
243
    _slab_cache_create(cache, size, align, constructor, destructor, flags);
242
  _slab_cache_create(cache, size, align, constructor, destructor, flags);
244
 
243
	return cache;
245
	return cache;
Line 244... Line 246...
244
}
246
}
245
 
247
 
Line 329... Line 331...
329
}
331
}
330
 
332
 
Line 331... Line 333...
331
static slab_t *slab_create()
333
static slab_t *slab_create()
332
{
334
{
333
  slab_t *slab;
335
    slab_t *slab;
334
  void *obj;
336
    void *obj;
335
  u32_t p;
337
    u32_t p;
Line 336... Line 338...
336
 
338
 
Line 337... Line 339...
337
    DBG("%s\n", __FUNCTION__);
339
    DBG("%s\n", __FUNCTION__);
Line 388... Line 390...
388
}
390
}
389
 
391
 
Line 390... Line 392...
390
static slab_cache_t * slab_cache_alloc()
392
static slab_cache_t * slab_cache_alloc()
391
{
393
{
392
  slab_t *slab;
394
    slab_t *slab;
393
  void *obj;
395
    void *obj;
394
  u32_t *p;
396
    u32_t *p;
Line 395... Line 397...
395
 
397
 
Line 396... Line 398...
396
    DBG("%s\n", __FUNCTION__);
398
    DBG("%s\n", __FUNCTION__);
397
 
399
 
398
    if (list_empty(&slab_cache_cache.partial_slabs))
400
    if (list_empty(&slab_cache_cache.partial_slabs))
399
    {
401
    {
Line 400... Line 402...
400
//    spinlock_unlock(&cache->slablock);
402
//    spinlock_unlock(&cache->slablock);
401
//    slab = slab_create();
403
//    slab = slab_create();
Line 402... Line 404...
402
 
404
 
403
    void *data;
405
        void *data;
404
    unsigned int i;
406
        unsigned int i;
405
 
407
 
Line 406... Line 408...
406
        data = (void*)(PA2KA(alloc_page()));
408
        data = (void*)(PA2KA(alloc_page()));
Line 407... Line 409...
407
    if (!data) {
409
        if (!data) {
408
      return NULL;
410
            return NULL;
Line 409... Line 411...
409
    }
411
        }
410
 
412
 
411
    slab = (slab_t*)((u32_t)data + PAGE_SIZE - sizeof(slab_t));
413
        slab = (slab_t*)((u32_t)data + PAGE_SIZE - sizeof(slab_t));
412
 
414
 
Line 413... Line 415...
413
    /* Fill in slab structures */
415
    /* Fill in slab structures */
414
    frame_set_parent(ADDR2PFN(KA2PA(data)), slab);
416
        frame_set_parent(ADDR2PFN(KA2PA(data)), slab);
415
 
417
 
416
    slab->start = data;
418
        slab->start = data;
417
    slab->available = slab_cache_cache.objects;
419
        slab->available = slab_cache_cache.objects;
Line 418... Line 420...
418
    slab->nextavail = (void*)data;
420
        slab->nextavail = (void*)data;
419
    slab->cache = &slab_cache_cache;
421
        slab->cache = &slab_cache_cache;
420
 
422
 
421
    for (i = 0,p = (u32_t*)slab->start;i < slab_cache_cache.objects; i++)
423
        for (i = 0,p = (u32_t*)slab->start;i < slab_cache_cache.objects; i++)
422
    {
424
        {
423
      *p = (u32_t)p+slab_cache_cache.size;
425
            *p = (u32_t)p+slab_cache_cache.size;
424
      p = (u32_t*)((u32_t)p+slab_cache_cache.size);
426
            p = (u32_t*)((u32_t)p+slab_cache_cache.size);
425
    };
427
        };
426
 
428
 
427
 
429
 
428
    atomic_inc(&slab_cache_cache.allocated_slabs);
430
        atomic_inc(&slab_cache_cache.allocated_slabs);
429
//    spinlock_lock(&cache->slablock);
431
//    spinlock_lock(&cache->slablock);
430
    }
432
    }
431
    else {
433
    else {
432
    slab = list_get_instance(slab_cache_cache.partial_slabs.next, slab_t, link);
434
        slab = list_get_instance(slab_cache_cache.partial_slabs.next, slab_t, link);
Line 433... Line 435...
433
    list_remove(&slab->link);
435
        list_remove(&slab->link);
Line 434... Line 436...
434
  }
436
    }
435
  obj = slab->nextavail;
437
    obj = slab->nextavail;
Line 436... Line 438...
436
  slab->nextavail = *((void**)obj);
438
    slab->nextavail = *((void**)obj);
437
  slab->available--;
439
    slab->available--;
438
 
440
 
Line 439... Line 441...
439
  if (!slab->available)
441
    if (!slab->available)
440
    list_prepend(&slab->link, &slab_cache_cache.full_slabs);
442
        list_prepend(&slab->link, &slab_cache_cache.full_slabs);
441
  else
443
    else
Line 442... Line 444...
442
    list_prepend(&slab->link, &slab_cache_cache.partial_slabs);
444
        list_prepend(&slab->link, &slab_cache_cache.partial_slabs);
443
 
445
 
444
//  spinlock_unlock(&cache->slablock);
446
//  spinlock_unlock(&cache->slablock);
445
 
447