KolibriOS SVN: kernel slab allocator, Rev 859 -> Rev 886
#include 
#include 
#include 
#include 
#include 
#include 

extern zone_t z_core;

static LIST_INITIALIZE(slab_cache_list);

static slab_cache_t *slab_cache;

static slab_cache_t slab_cache_cache;

static slab_t *slab_create();

static slab_cache_t *slab_cache_alloc();

void __fastcall slab_free(slab_cache_t *cache, void *obj);
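
/*
 * The slab_t control structure is defined in one of the headers above;
 * this file only relies on the fields used below. A minimal sketch of the
 * assumed layout (field names taken from the code, types and ordering are
 * illustrative only):
 *
 *   typedef struct slab {
 *       link_t        link;       // chains the slab into full_slabs / partial_slabs
 *       void         *start;      // first byte of the slab's frame block
 *       slab_cache_t *cache;      // owning cache
 *       void         *nextavail;  // head of the in-place free-object list
 *       count_t       available;  // number of free objects left in this slab
 *   } slab_t;
 */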
 
/**
 * Allocate frames for slab space and initialize
 *
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
  void *data;
  slab_t *slab;
  size_t fsize;
  unsigned int i;
  u32_t p;

  data = (void*)PA2KA(core_alloc(cache->order));
  if (!data) {
    return NULL;
  }
  slab = (slab_t*)slab_create();
  if (!slab) {
    core_free(KA2PA(data));
    return NULL;
  }

  /* Fill in slab structures */
  for (i = 0; i < ((u32_t) 1 << cache->order); i++)
    frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab);

  slab->start = data;
  slab->available = cache->objects;
  slab->nextavail = (void*)data;
  slab->cache = cache;

  for (i = 0, p = (u32_t)slab->start; i < cache->objects; i++)
  {
    *(addr_t *)p = p + cache->size;
    p = p + cache->size;
  }
  atomic_inc(&cache->allocated_slabs);
  return slab;
}
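
/*
 * The loop above threads a free list through the objects themselves: the
 * first word of each free object holds the address of the next one, and
 * slab->nextavail points at the head. slab_obj_create() pops objects off
 * this list and slab_obj_destroy() pushes them back; slab->available is
 * what bounds the walk, so the dangling link stored in the last object is
 * never followed.
 */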

/**
 * Take a new object from a slab, or create a new slab if needed
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
  slab_t *slab;
  void *obj;

  spinlock_lock(&cache->slablock);

  if (list_empty(&cache->partial_slabs)) {
    /* Allow recursion and reclaiming
     * - this should work, as the slab control structures
     *   are small and do not need to allocate with anything
     *   other than frame_alloc when they are allocating,
     *   that's why we should get recursion at most 1-level deep
     */
    slab = slab_space_alloc(cache, flags);
    if (!slab)
    {
      spinlock_unlock(&cache->slablock);
      return NULL;
    }
  } else {
    slab = list_get_instance(cache->partial_slabs.next, slab_t, link);
    list_remove(&slab->link);
  }

  obj = slab->nextavail;
  slab->nextavail = *(void**)obj;
  slab->available--;

  if (!slab->available)
    list_prepend(&slab->link, &cache->full_slabs);
  else
    list_prepend(&slab->link, &cache->partial_slabs);

  spinlock_unlock(&cache->slablock);

//  if (cache->constructor && cache->constructor(obj, flags)) {
//    /* Bad, bad, construction failed */
//    slab_obj_destroy(cache, obj, slab);
//    return NULL;
//  }
  return obj;
}

/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
  return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)));
}

/** Allocate new object from cache - if no flags given, always returns
    memory */
void* __fastcall slab_alloc(slab_cache_t *cache, int flags)
{
  eflags_t efl;
  void *result = NULL;

  /* Disable interrupts to avoid deadlocks with interrupt handlers */
  efl = safe_cli();

//  if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
//    result = magazine_obj_get(cache);
//  }
//  if (!result)
    result = slab_obj_create(cache, flags);

  safe_sti(efl);

//  if (result)
//    atomic_inc(&cache->allocated_objs);

  return result;
}


/**************************************/
/* Slab cache functions */

/** Return number of objects that fit in certain cache size */
static unsigned int comp_objects(slab_cache_t *cache)
{
  if (cache->flags & SLAB_CACHE_SLINSIDE)
    return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
  else
    return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return wasted space in slab */
static unsigned int badness(slab_cache_t *cache)
{
  unsigned int objects;
  unsigned int ssize;
  size_t val;

  objects = comp_objects(cache);
  ssize = PAGE_SIZE << cache->order;
  if (cache->flags & SLAB_CACHE_SLINSIDE)
    ssize -= sizeof(slab_t);
  val = ssize - objects * cache->size;
  return val;
}
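
/*
 * Worked example (illustrative numbers only): assume PAGE_SIZE == 4096,
 * sizeof(slab_t) == 32, and an SLINSIDE cache with size == 96 and
 * order == 0. Then comp_objects() = (4096 - 32) / 96 = 42 objects per
 * slab, and badness() = (4096 - 32) - 42 * 96 = 4064 - 4032 = 32 wasted
 * bytes.
 */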


/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
       size_t size,
       size_t align,
       int (*constructor)(void *obj, int kmflag),
       int (*destructor)(void *obj),
       int flags)
{
  int pages;
//  ipl_t ipl;

//  memsetb((uintptr_t)cache, sizeof(*cache), 0);
//  cache->name = name;

//  if (align < sizeof(unative_t))
//    align = sizeof(unative_t);
//  size = ALIGN_UP(size, align);

  cache->size = size;

//  cache->constructor = constructor;
//  cache->destructor = destructor;
  cache->flags = flags;

  list_initialize(&cache->full_slabs);
  list_initialize(&cache->partial_slabs);
  list_initialize(&cache->magazines);
//  spinlock_initialize(&cache->slablock, "slab_lock");
//  spinlock_initialize(&cache->maglock, "slab_maglock");
//  if (! (cache->flags & SLAB_CACHE_NOMAGAZINE))
//    make_magcache(cache);

  /* Compute slab sizes, object counts in slabs etc. */

  /* Minimum slab order */
  pages = SIZE2FRAMES(cache->size);
  /* We need 2^order >= pages */
  if (pages == 1)
    cache->order = 0;
  else
    cache->order = fnzb(pages-1)+1;

  while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
    cache->order += 1;
  }
  cache->objects = comp_objects(cache);

  /* Add cache to cache list */
//  ipl = interrupts_disable();
//  spinlock_lock(&slab_cache_lock);

  list_append(&cache->link, &slab_cache_list);

//  spinlock_unlock(&slab_cache_lock);
//  interrupts_restore(ipl);
}
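
/*
 * Example of the order computation above (illustrative, PAGE_SIZE == 4096
 * assumed, fnzb() taken here as "index of the highest set bit"): for an
 * object size of 6000 bytes, SIZE2FRAMES(6000) == 2, so
 * order = fnzb(2-1) + 1 = 0 + 1 = 1 and each slab spans 2^1 = 2 pages;
 * the badness() loop may then push the order higher if too much of the
 * slab would otherwise be wasted.
 */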

/** Create slab cache */
slab_cache_t * slab_cache_create(
                                 size_t size,
                                 size_t align,
                                 int (*constructor)(void *obj, int kmflag),
                                 int (*destructor)(void *obj),
                                 int flags)
{
  slab_cache_t *cache;

  cache = (slab_cache_t*)slab_cache_alloc();
  _slab_cache_create(cache, size, align, constructor, destructor, flags);
  return cache;
}
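
/*
 * Note: the slab_cache_t descriptor for a new cache is itself allocated
 * from slab_cache_cache via slab_cache_alloc(), so cache descriptors are
 * managed by the same slab machinery they describe.
 */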

/**
 * Deallocate space associated with slab
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
  frame_free(KA2PA(slab->start));
  if (! (cache->flags & SLAB_CACHE_SLINSIDE))
    slab_free(slab_cache, slab);

//  atomic_dec(&cache->allocated_slabs);

  return 1 << cache->order;
}

/**
 * Return object to slab and call a destructor
 *
 * @param slab The object's slab if the caller knows it directly, otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
                                slab_t *slab)
{
  int freed = 0;

  if (!slab)
    slab = obj2slab(obj);

//  ASSERT(slab->cache == cache);

//  if (cache->destructor)
//    freed = cache->destructor(obj);

//  spinlock_lock(&cache->slablock);
//  ASSERT(slab->available < cache->objects);

  *(void**)obj = slab->nextavail;
  slab->nextavail = obj;
  slab->available++;

  /* Move it to correct list */
  if (slab->available == cache->objects) {
    /* Free associated memory */
    list_remove(&slab->link);
//    spinlock_unlock(&cache->slablock);

    return freed + slab_space_free(cache, slab);

  } else if (slab->available == 1) {
    /* It was in full, move to partial */
    list_remove(&slab->link);
    list_prepend(&slab->link, &cache->partial_slabs);
  }
//  spinlock_unlock(&cache->slablock);
  return freed;
}
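
/*
 * State transitions handled above: a slab that becomes entirely free is
 * unlinked and its frames are returned to the core allocator; a slab that
 * was full and now has exactly one free object moves from full_slabs back
 * to partial_slabs; otherwise it simply stays on partial_slabs.
 */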


/** Return object to cache, use slab if known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
  eflags_t efl;

  efl = safe_cli();

//  if ((cache->flags & SLAB_CACHE_NOMAGAZINE)
//      || magazine_obj_put(cache, obj)) {

    slab_obj_destroy(cache, obj, slab);

//  }
  safe_sti(efl);
  atomic_dec(&cache->allocated_objs);
}

/** Return slab object to cache */
void __fastcall slab_free(slab_cache_t *cache, void *obj)
{
  _slab_free(cache, obj, NULL);
}

static slab_t *slab_create()
{
  slab_t *slab;
  void *obj;
  u32_t p;

//  spinlock_lock(&cache->slablock);

  if (list_empty(&slab_cache->partial_slabs)) {
    /* Allow recursion and reclaiming
     * - this should work, as the slab control structures
     *   are small and do not need to allocate with anything
     *   other than frame_alloc when they are allocating,
     *   that's why we should get recursion at most 1-level deep
     */
//    spinlock_unlock(&cache->slablock);
//    slab = slab_create();

    void *data;
    unsigned int i;

    data = (void*)PA2KA(core_alloc(0));
    if (!data) {
      return NULL;
    }

    slab = (slab_t*)((u32_t)data + PAGE_SIZE - sizeof(slab_t));

    /* Fill in slab structures */
    frame_set_parent(ADDR2PFN(KA2PA(data)), slab);

    slab->start = data;
    slab->available = slab_cache->objects;
    slab->nextavail = (void*)data;
    slab->cache = slab_cache;

    for (i = 0, p = (u32_t)slab->start; i < slab_cache->objects; i++)
    {
      *(int *)p = p + slab_cache->size;
      p = p + slab_cache->size;
    }

    atomic_inc(&slab_cache->allocated_slabs);
//    spinlock_lock(&cache->slablock);
  } else {
    slab = list_get_instance(slab_cache->partial_slabs.next, slab_t, link);
    list_remove(&slab->link);
  }
  obj = slab->nextavail;
  slab->nextavail = *((void**)obj);
  slab->available--;

  if (!slab->available)
    list_prepend(&slab->link, &slab_cache->full_slabs);
  else
    list_prepend(&slab->link, &slab_cache->partial_slabs);

//  spinlock_unlock(&cache->slablock);

  return (slab_t*)obj;
}
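
/*
 * Layout note for slab_create() above and slab_cache_alloc() below: both
 * embed the slab_t control structure in the last sizeof(slab_t) bytes of
 * the single page they allocate (data + PAGE_SIZE - sizeof(slab_t)), while
 * the object free list is threaded through the page from its start.
 */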

static slab_cache_t * slab_cache_alloc()
{
  slab_t *slab;
  void *obj;
  u32_t *p;

  if (list_empty(&slab_cache_cache.partial_slabs)) {
    /* Allow recursion and reclaiming
     * - this should work, as the slab control structures
     *   are small and do not need to allocate with anything
     *   other than frame_alloc when they are allocating,
     *   that's why we should get recursion at most 1-level deep
     */
//    spinlock_unlock(&cache->slablock);
//    slab = slab_create();

    void *data;
    unsigned int i;

    data = (void*)PA2KA(core_alloc(0));
    if (!data) {
      return NULL;
    }

    slab = (slab_t*)((u32_t)data + PAGE_SIZE - sizeof(slab_t));

    /* Fill in slab structures */
    frame_set_parent(ADDR2PFN(KA2PA(data)), slab);

    slab->start = data;
    slab->available = slab_cache_cache.objects;
    slab->nextavail = (void*)data;
    slab->cache = &slab_cache_cache;

    for (i = 0, p = (u32_t*)slab->start; i < slab_cache_cache.objects; i++)
    {
      *p = (u32_t)p + slab_cache_cache.size;
      p = (u32_t*)((u32_t)p + slab_cache_cache.size);
    }

    atomic_inc(&slab_cache_cache.allocated_slabs);
//    spinlock_lock(&cache->slablock);
  } else {
    slab = list_get_instance(slab_cache_cache.partial_slabs.next, slab_t, link);
    list_remove(&slab->link);
  }
  obj = slab->nextavail;
  slab->nextavail = *((void**)obj);
  slab->available--;

  if (!slab->available)
    list_prepend(&slab->link, &slab_cache_cache.full_slabs);
  else
    list_prepend(&slab->link, &slab_cache_cache.partial_slabs);

//  spinlock_unlock(&cache->slablock);

  return (slab_cache_t*)obj;
}

void slab_cache_init(void)
{
  _slab_cache_create(&slab_cache_cache, sizeof(slab_cache_t),
                     sizeof(void *), NULL, NULL,
                     SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);

  /* Initialize external slab cache */
  slab_cache = slab_cache_create(sizeof(slab_t),
                                 0, NULL, NULL, SLAB_CACHE_MAGDEFERRED);
}
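
/*
 * Illustrative usage sketch (not part of this file; request_t and
 * net_init() are hypothetical names): a subsystem would typically create
 * one cache per fixed-size object type and then allocate and free from it.
 *
 *   static slab_cache_t *req_cache;
 *
 *   void net_init(void)
 *   {
 *       req_cache = slab_cache_create(sizeof(request_t), 0, NULL, NULL, 0);
 *   }
 *
 *   request_t *req = (request_t*)slab_alloc(req_cache, 0);
 *   ... use req ...
 *   slab_free(req_cache, req);
 */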