Subversion Repositories Kolibri OS

Rev 1066 → Rev 2971

#include 
#include 
#include 
#include 
#include 
#include 

extern zone_t z_core;

static LIST_INITIALIZE(slab_cache_list);

/* Cache of external slab_t descriptors */
static slab_cache_t *slab_cache;

/* Statically allocated bootstrap cache of slab_cache_t descriptors */
static slab_cache_t slab_cache_cache;

static slab_t *slab_create(void);

static slab_cache_t * slab_cache_alloc(void);

/**
 * Allocate frames for slab space and initialize the slab's
 * embedded free list.
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
    void *data;
    slab_t *slab;
    unsigned int i;
    u32_t p;

    DBG("%s order %d\n", __FUNCTION__, cache->order);

    data = (void*)PA2KA(frame_alloc(1 << cache->order));
    if (!data) {
        return NULL;
    }
    slab = (slab_t*)slab_create();
    if (!slab) {
        frame_free(KA2PA(data));
        return NULL;
    }

    /* Point every frame of the slab back at its slab_t,
       so obj2slab() can recover the slab from an object address */
    for (i = 0; i < ((u32_t) 1 << cache->order); i++)
        frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab);

    slab->start = data;
    slab->available = cache->objects;
    slab->nextavail = (void*)data;
    slab->cache = cache;

    /* Thread a free list through the objects: the first word of
       each free object holds the address of the next one */
    for (i = 0, p = (u32_t)slab->start; i < cache->objects; i++)
    {
        *(addr_t *)p = p + cache->size;
        p = p + cache->size;
    }
    atomic_inc(&cache->allocated_slabs);
    return slab;
}

/**
 * Take a new object from a slab, creating a new slab if needed.
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
    slab_t *slab;
    void *obj;

    spinlock_lock(&cache->slablock);

    if (list_empty(&cache->partial_slabs)) {
        slab = slab_space_alloc(cache, flags);
        if (!slab)
        {
            spinlock_unlock(&cache->slablock);
            return NULL;
        }
    } else {
        slab = list_get_instance(cache->partial_slabs.next, slab_t, link);
        list_remove(&slab->link);
    }

    /* Pop the first object off the slab's free list */
    obj = slab->nextavail;
    slab->nextavail = *(void**)obj;
    slab->available--;

    if (!slab->available)
        list_prepend(&slab->link, &cache->full_slabs);
    else
        list_prepend(&slab->link, &cache->partial_slabs);

    spinlock_unlock(&cache->slablock);

//  if (cache->constructor && cache->constructor(obj, flags)) {
        /* Bad, bad, construction failed */
//    slab_obj_destroy(cache, obj, slab);
//    return NULL;
//  }
    return obj;
}

/** Map an object back to its slab structure via the frame
    parent pointer set at slab creation */
static slab_t * obj2slab(void *obj)
{
    return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)));
}
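
/* The reverse mapping above works only because every frame of a
 * slab was registered with frame_set_parent() when the slab was
 * created. A minimal invariant-check sketch (hypothetical debug
 * helper, not part of the original file):
 */
#ifdef SLAB_DEBUG_SKETCH
static int slab_parent_ok(slab_t *slab)
{
    unsigned int i;

    for (i = 0; i < ((u32_t)1 << slab->cache->order); i++)
        if ((slab_t*)frame_get_parent(ADDR2PFN(KA2PA(slab->start)) + i) != slab)
            return 0;
    return 1;
}
#endif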

/** Allocate a new object from a cache; if no flags are given,
    this always returns memory */
void* __fastcall slab_alloc(slab_cache_t *cache, int flags)
{
    eflags_t efl;
    void *result = NULL;

    /* Disable interrupts to avoid deadlocks with interrupt handlers */
    efl = safe_cli();

//  if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
//    result = magazine_obj_get(cache);
//  }
//  if (!result)
    result = slab_obj_create(cache, flags);

    safe_sti(efl);

//  if (result)
//    atomic_inc(&cache->allocated_objs);

    return result;
}

/**************************************/
/* Slab cache functions */

/** Return the number of objects that fit in a slab of the
    cache's current order */
static unsigned int comp_objects(slab_cache_t *cache)
{
    /* With SLAB_CACHE_SLINSIDE the slab_t descriptor lives inside
       the slab itself, so its size must be subtracted first */
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
    else
        return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return the wasted space in a slab */
static unsigned int badness(slab_cache_t *cache)
{
    unsigned int objects;
    unsigned int ssize;
    size_t val;

    objects = comp_objects(cache);
    ssize = PAGE_SIZE << cache->order;
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        ssize -= sizeof(slab_t);
    val = ssize - objects * cache->size;
    return val;
}
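
/* Worked example (illustrative numbers, not from the original):
 * with PAGE_SIZE = 4096, an external-descriptor cache of
 * size = 52 at order 0 gives
 *     comp_objects = 4096 / 52      = 78 objects
 *     badness      = 4096 - 78 * 52 = 40 bytes wasted,
 * and _slab_cache_create() below keeps raising the order until
 * badness(cache) <= SLAB_MAX_BADNESS(cache).
 */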

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
                   size_t size,
                   size_t align,
                   int (*constructor)(void *obj, int kmflag),
                   int (*destructor)(void *obj),
                   int flags)
{
    int pages;
//  ipl_t ipl;

//  memsetb((uintptr_t)cache, sizeof(*cache), 0);
//  cache->name = name;

//  if (align < sizeof(unative_t))
//    align = sizeof(unative_t);
//  size = ALIGN_UP(size, align);

    cache->size = size;

//  cache->constructor = constructor;
//  cache->destructor = destructor;
    cache->flags = flags;

    list_initialize(&cache->full_slabs);
    list_initialize(&cache->partial_slabs);
    list_initialize(&cache->magazines);
//  spinlock_initialize(&cache->slablock, "slab_lock");
//  spinlock_initialize(&cache->maglock, "slab_maglock");
//  if (! (cache->flags & SLAB_CACHE_NOMAGAZINE))
//    make_magcache(cache);

    /* Compute slab sizes, object counts in slabs etc. */

    /* Minimum slab order */
    pages = SIZE2FRAMES(cache->size);
    /* We need 2^order >= pages */
    if (pages <= 1)
        cache->order = 0;
    else
        cache->order = fnzb(pages - 1) + 1;

    /* Grow the slab until the wasted space is acceptable */
    while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
        cache->order += 1;
    }
    cache->objects = comp_objects(cache);

    /* Add cache to cache list */
//  ipl = interrupts_disable();
//  spinlock_lock(&slab_cache_lock);

    list_append(&cache->link, &slab_cache_list);

//  spinlock_unlock(&slab_cache_lock);
//  interrupts_restore(ipl);
}

/** Create a slab cache */
slab_cache_t * slab_cache_create(
                 size_t size,
                 size_t align,
                 int (*constructor)(void *obj, int kmflag),
                 int (*destructor)(void *obj),
                 int flags)
{
    slab_cache_t *cache;

    DBG("%s\n", __FUNCTION__);

    cache = (slab_cache_t*)slab_cache_alloc();

    _slab_cache_create(cache, size, align, constructor, destructor, flags);

    return cache;
}

/**
 * Deallocate the space associated with a slab
 *
 * @return Number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
    frame_free(KA2PA(slab->start));
    /* An external slab_t descriptor must be returned to its cache */
    if (! (cache->flags & SLAB_CACHE_SLINSIDE))
        slab_free(slab_cache, slab);

//  atomic_dec(&cache->allocated_slabs);

    return 1 << cache->order;
}

/**
 * Return an object to its slab and call the destructor
 *
 * @param slab The object's slab, if the caller knows it directly;
 *             otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
                                slab_t *slab)
{
    int freed = 0;

    if (!slab)
        slab = obj2slab(obj);

//  ASSERT(slab->cache == cache);

//  if (cache->destructor)
//    freed = cache->destructor(obj);

//  spinlock_lock(&cache->slablock);
//  ASSERT(slab->available < cache->objects);

    /* Push the object back onto the slab's free list */
    *(void**)obj = slab->nextavail;
    slab->nextavail = obj;
    slab->available++;

    /* Move it to the correct list */
    if (slab->available == cache->objects) {
        /* Free associated memory */
        list_remove(&slab->link);
//      spinlock_unlock(&cache->slablock);

        return freed + slab_space_free(cache, slab);

    } else if (slab->available == 1) {
        /* It was in full, move to partial */
        list_remove(&slab->link);
        list_prepend(&slab->link, &cache->partial_slabs);
    }
//  spinlock_unlock(&cache->slablock);
    return freed;
}

/** Return an object to its cache, using the slab if known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
    eflags_t efl;

    efl = safe_cli();

//  if ((cache->flags & SLAB_CACHE_NOMAGAZINE) \
//      || magazine_obj_put(cache, obj)) {

        slab_obj_destroy(cache, obj, slab);

//  }
    safe_sti(efl);
    atomic_dec(&cache->allocated_objs);
}

/** Return a slab object to its cache */
void __fastcall slab_free(slab_cache_t *cache, void *obj)
{
    _slab_free(cache, obj, NULL);
}

/* Allocate a slab_t descriptor from the dedicated slab_cache;
   the bootstrap path builds a fresh single-page slab with the
   descriptor stored at the end of the page */
static slab_t *slab_create(void)
{
    slab_t *slab;
    void *obj;
    u32_t p;

    DBG("%s\n", __FUNCTION__);

//  spinlock_lock(&cache->slablock);

    if (list_empty(&slab_cache->partial_slabs)) {
//      spinlock_unlock(&cache->slablock);
//      slab = slab_create();

        void *data;
        unsigned int i;

        data = (void*)PA2KA(alloc_page());
        if (!data) {
            return NULL;
        }

        slab = (slab_t*)((u32_t)data + PAGE_SIZE - sizeof(slab_t));

        /* Fill in slab structures */
        frame_set_parent(ADDR2PFN(KA2PA(data)), slab);

        slab->start = data;
        slab->available = slab_cache->objects;
        slab->nextavail = (void*)data;
        slab->cache = slab_cache;

        /* Thread the free list through the new slab's objects */
        for (i = 0, p = (u32_t)slab->start; i < slab_cache->objects; i++)
        {
            *(addr_t *)p = p + slab_cache->size;
            p = p + slab_cache->size;
        }

        atomic_inc(&slab_cache->allocated_slabs);
//      spinlock_lock(&cache->slablock);
    } else {
        slab = list_get_instance(slab_cache->partial_slabs.next, slab_t, link);
        list_remove(&slab->link);
    }
    obj = slab->nextavail;
    slab->nextavail = *((void**)obj);
    slab->available--;

    if (!slab->available)
        list_prepend(&slab->link, &slab_cache->full_slabs);
    else
        list_prepend(&slab->link, &slab_cache->partial_slabs);

//  spinlock_unlock(&cache->slablock);

    return (slab_t*)obj;
}

/* Allocate a slab_cache_t descriptor from the statically
   initialized slab_cache_cache */
static slab_cache_t * slab_cache_alloc(void)
{
    slab_t *slab;
    void *obj;
    u32_t *p;

    DBG("%s\n", __FUNCTION__);

    if (list_empty(&slab_cache_cache.partial_slabs))
    {
//      spinlock_unlock(&cache->slablock);
//      slab = slab_create();

        void *data;
        unsigned int i;

        data = (void*)(PA2KA(alloc_page()));
        if (!data) {
            return NULL;
        }

        slab = (slab_t*)((u32_t)data + PAGE_SIZE - sizeof(slab_t));

        /* Fill in slab structures */
        frame_set_parent(ADDR2PFN(KA2PA(data)), slab);

        slab->start = data;
        slab->available = slab_cache_cache.objects;
        slab->nextavail = (void*)data;
        slab->cache = &slab_cache_cache;

        /* Thread the free list through the new slab's objects */
        for (i = 0, p = (u32_t*)slab->start; i < slab_cache_cache.objects; i++)
        {
            *p = (u32_t)p + slab_cache_cache.size;
            p = (u32_t*)((u32_t)p + slab_cache_cache.size);
        }

        atomic_inc(&slab_cache_cache.allocated_slabs);
//      spinlock_lock(&cache->slablock);
    }
    else {
        slab = list_get_instance(slab_cache_cache.partial_slabs.next, slab_t, link);
        list_remove(&slab->link);
    }
    obj = slab->nextavail;
    slab->nextavail = *((void**)obj);
    slab->available--;

    if (!slab->available)
        list_prepend(&slab->link, &slab_cache_cache.full_slabs);
    else
        list_prepend(&slab->link, &slab_cache_cache.partial_slabs);

//  spinlock_unlock(&cache->slablock);

    return (slab_cache_t*)obj;
}

void slab_cache_init(void)
{
    DBG("%s\n", __FUNCTION__);

    /* Bootstrap: the cache of slab_cache_t descriptors keeps its
       slab_t inside the slab itself (SLINSIDE) */
    _slab_cache_create(&slab_cache_cache, sizeof(slab_cache_t),
                       sizeof(void *), NULL, NULL,
                       SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);

    /* Initialize the external slab_t cache */
    slab_cache = slab_cache_create(sizeof(slab_t),
                                   0, NULL, NULL, SLAB_CACHE_MAGDEFERRED);
}
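
/* Usage sketch (hypothetical, not part of the original file):
 * a subsystem creates a cache for its own object type once, then
 * allocates and frees objects from it. The constructor/destructor
 * hooks are accepted by slab_cache_create() but currently unused
 * (commented out above), so NULL is passed here.
 */
#ifdef SLAB_USAGE_SKETCH
typedef struct {
    u32_t  id;
    void  *payload;
} my_object_t;                       /* hypothetical object type */

static slab_cache_t *my_cache;

void my_subsystem_init(void)
{
    my_cache = slab_cache_create(sizeof(my_object_t), 0,
                                 NULL, NULL, 0);
}

void my_subsystem_work(void)
{
    my_object_t *obj = (my_object_t*)slab_alloc(my_cache, 0);
    if (!obj)
        return;
    obj->id = 1;
    slab_free(my_cache, obj);        /* back to the slab's free list */
}
#endif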