Subversion Repositories Kolibri OS

 
/* The include targets were stripped by the page export; the header names
 * below are assumptions based on the identifiers used in this file. */
#include <types.h>
#include <core.h>
#include <spinlock.h>
#include <link.h>
#include <slab.h>
 
/* The static declarations that originally stood here were lost in the
 * listing. The two globals below are required by the rest of the file
 * (they are set up in the init function at the bottom and used throughout);
 * the two forward declarations refer to the helpers defined near the end of
 * the file, whose names were also lost and are assumed here. */
static slab_cache_t *slab_cache;
static slab_cache_t slab_cache_cache;

static void* slab_create();
static slab_cache_t* slab_cache_alloc();

/**
 * Allocate frames for slab space and initialize
 *
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
  void *data;
  slab_t *slab;
  size_t fsize;
  unsigned int i;
  u32_t p;

  /* The allocation call was lost in the listing; core_alloc() is assumed
   * here as the counterpart of the core_free() used on the error path. */
  data = (void*)PA2KA(core_alloc(cache->order));
  if (!data) {
    return NULL;
  }
  slab = (slab_t*)slab_create();
  if (!slab) {
    core_free(KA2PA(data));
    return NULL;
  }

  /* Record the owning slab for every frame backing this slab space */
  for (i = 0; i < ((u32_t) 1 << cache->order); i++)
    frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab);

  slab->start = data;
  slab->available = cache->objects;
  slab->nextavail = (void*)data;
  slab->cache = cache;

  /* Chain all objects into the initial free list: the first word of each
   * free object stores the address of the next one. */
  for (i = 0, p = (u32_t)data; i < cache->objects; i++)
  {
    *(addr_t *)p = p+cache->size;
    p = p+cache->size;
  };
  atomic_inc(&cache->allocated_slabs);
  return slab;
}
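/* Illustration (not part of the original file; PAGE_SIZE of 4096, order 0 and
 * cache->size of 64 are assumed values): the loop above writes 64 free-list
 * links inside the single page:
 *
 *   p = data        : *(addr_t*)p == data + 64
 *   p = data + 64   : *(addr_t*)p == data + 128
 *   ...
 *   p = data + 4032 : *(addr_t*)p == data + 4096  (never followed, because
 *                      slab->available reaches zero first)
 *
 * slab_obj_create() below pops objects from this list via
 * slab->nextavail = *(void**)obj. */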
/**
 * Take new object from slab or create new if needed
 *
 * @return Object address or null
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
  slab_t *slab;
  void *obj;

  spinlock_lock(&cache->slablock);

  if (list_empty(&cache->partial_slabs)) {
    /* Allow recursion and reclaiming
     * - this should work, as the slab control structures
     *   are small and do not need to allocate with anything
     *   other than frame_alloc when they are allocating,
     *   that's why we should get recursion at most 1-level deep
     */
    slab = slab_space_alloc(cache, flags);
    if (!slab)
    {
      spinlock_unlock(&cache->slablock);
      return NULL;
    }
  } else {
    slab = list_get_instance(cache->partial_slabs.next, slab_t, link);
    list_remove(&slab->link);
  }

  obj = slab->nextavail;
  slab->nextavail = *(void**)obj;
  slab->available--;

  if (!slab->available)
    list_prepend(&slab->link, &cache->full_slabs);
  else
    list_prepend(&slab->link, &cache->partial_slabs);

  spinlock_unlock(&cache->slablock);

//  if (cache->constructor && cache->constructor(obj, flags)) {
    /* Bad, bad, construction failed */
//    slab_obj_destroy(cache, obj, slab);
//    return NULL;
//  }
  return obj;
}
/* Map an object address back to its slab control structure */
static slab_t * obj2slab(void *obj)
{
  return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)));
}
/* Allocate new object from cache - if no flags given, always returns
   memory */
void* __fastcall slab_alloc(slab_cache_t *cache, int flags)
{
   eflags_t efl;
   void *result = NULL;

   efl = safe_cli();

// if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
//   result = magazine_obj_get(cache);
// }
//  if (!result)
    result = slab_obj_create(cache, flags);

   safe_sti(efl);

//  if (result)
//    atomic_inc(&cache->allocated_objs);

   return result;
}
/* Slab cache functions */

/* Return number of objects that fit in certain cache size */
static unsigned int comp_objects(slab_cache_t *cache)
{
  if (cache->flags & SLAB_CACHE_SLINSIDE)
    return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
  else
    return (PAGE_SIZE << cache->order) / cache->size;
}
/* Return wasted space in slab */
static unsigned int badness(slab_cache_t *cache)
{
  unsigned int objects;
  unsigned int ssize;
  size_t val;

  objects = comp_objects(cache);
  ssize = PAGE_SIZE << cache->order;
  if (cache->flags & SLAB_CACHE_SLINSIDE)
    ssize -= sizeof(slab_t);
  val = ssize - objects * cache->size;
  return val;
}
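/* Worked example (illustrative, values assumed): with PAGE_SIZE = 4096,
 * order = 0, size = 96 and SLAB_CACHE_SLINSIDE clear,
 *   comp_objects() = 4096 / 96 = 42 objects per slab, and
 *   badness()      = 4096 - 42 * 96 = 64 wasted bytes.
 * _slab_cache_create() below grows cache->order until this waste is
 * acceptable for the slab size. */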
 
/* Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
       size_t size,
       size_t align,
       int (*constructor)(void *obj, int kmflag),
       int (*destructor)(void *obj),
       int flags)
{
  int pages;
 // ipl_t ipl;

//  cache->name = name;

//  if (align < sizeof(unative_t))
//    align = sizeof(unative_t);
//  size = ALIGN_UP(size, align);

  cache->size = size;

//  cache->constructor = constructor;
//  cache->destructor = destructor;
  cache->flags = flags;

  list_initialize(&cache->full_slabs);
  list_initialize(&cache->partial_slabs);
  list_initialize(&cache->magazines);
//  spinlock_initialize(&cache->slablock, "slab_lock");
//  spinlock_initialize(&cache->maglock, "slab_maglock");
//  if (! (cache->flags & SLAB_CACHE_NOMAGAZINE))
//    make_magcache(cache);

  /* Compute slab sizes, object counts in slabs etc. */

  /* Minimum slab order */
  pages = SIZE2FRAMES(cache->size);
  /* We need the 2^order >= pages */
  if (pages == 1)
    cache->order = 0;
  else
    cache->order = fnzb(pages-1)+1;

  /* Grow the slab until internal fragmentation is acceptable; the
   * SLAB_MAX_BADNESS bound is assumed (the original condition was lost). */
  while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
    cache->order += 1;
  }
  cache->objects = comp_objects(cache);

  /* Add cache to cache list */
//  ipl = interrupts_disable();
//  spinlock_lock(&slab_cache_lock);

//  spinlock_unlock(&slab_cache_lock);
//  interrupts_restore(ipl);
}
slab_cache_t * slab_cache_create(
				 size_t size,
				 size_t align,
				 int (*constructor)(void *obj, int kmflag),
				 int (*destructor)(void *obj),
				 int flags)
{
	slab_cache_t *cache;

  /* The allocation of the cache descriptor was lost in the listing;
   * slab_cache_alloc() (defined below, name assumed) hands out
   * slab_cache_t objects from the static slab_cache_cache. */
  cache = slab_cache_alloc();
  _slab_cache_create(cache, size, align, constructor, destructor, flags);
	return cache;
}
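/* Usage sketch (illustrative, not part of the original file; the object type
 * my_obj_t is hypothetical): a subsystem normally creates its cache once and
 * then allocates and frees objects through it:
 *
 *   slab_cache_t *my_cache = slab_cache_create(sizeof(my_obj_t), 0,
 *                                              NULL, NULL, 0);
 *   my_obj_t *o = (my_obj_t*)slab_alloc(my_cache, 0);
 *   ...
 *   slab_free(my_cache, o);
 */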
/**
 * Deallocate space associated with slab
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
  frame_free(KA2PA(slab->start));
  if (! (cache->flags & SLAB_CACHE_SLINSIDE))
    slab_free(slab_cache, slab);

  atomic_dec(&cache->allocated_slabs);

  return 1 << cache->order;
}
263
264
 
265
 * Return object to slab and call a destructor
266
 *
267
 * @param slab If the caller knows directly slab of the object, otherwise NULL
268
 *
269
 * @return Number of freed pages
270
 */
271
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
272
				slab_t *slab)
273
{
274
	int freed = 0;
275
276
 
277
		slab = obj2slab(obj);
278
279
 
280
281
 
282
//    freed = cache->destructor(obj);
283
284
 
285
//	ASSERT(slab->available < cache->objects);
286
287
 
288
	slab->nextavail = obj;
289
	slab->available++;
290
291
 
292
	if (slab->available == cache->objects) {
293
		/* Free associated memory */
294
		list_remove(&slab->link);
295
//		spinlock_unlock(&cache->slablock);
296
297
 
298
299
 
300
		/* It was in full, move to partial */
301
		list_remove(&slab->link);
302
		list_prepend(&slab->link, &cache->partial_slabs);
303
	}
304
//	spinlock_unlock(&cache->slablock);
305
	return freed;
306
}
/* Return object to cache, use slab if known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
   eflags_t efl;

   efl = safe_cli();

// if ((cache->flags & SLAB_CACHE_NOMAGAZINE)
//      || magazine_obj_put(cache, obj)) {

      slab_obj_destroy(cache, obj, slab);

// }
  safe_sti(efl);
  atomic_dec(&cache->allocated_objs);
}

void __fastcall slab_free(slab_cache_t *cache, void *obj)
{
	_slab_free(cache, obj, NULL);
}
/* Allocate a slab_t control structure directly from the dedicated slab_cache.
 * The original function header was lost in the listing; the name slab_create
 * and its signature are assumed (they match the call made by
 * slab_space_alloc above). */
static void* slab_create()
{
  slab_t *slab;
  void *obj;
  u32_t p;

  if (list_empty(&slab_cache->partial_slabs)) {
    /* Allow recursion and reclaiming
     * - this should work, as the slab control structures
     *   are small and do not need to allocate with anything
     *   other than frame_alloc when they are allocating,
     *   that's why we should get recursion at most 1-level deep
     */
//    spinlock_unlock(&cache->slablock);
//    slab = slab_create();

    void *data;
    unsigned int i;

    /* Allocation of the backing page was lost in the listing; a single
     * frame via core_alloc(0) is assumed here. */
    data = (void*)PA2KA(core_alloc(0));
    if (!data) {
      return NULL;
    }

    /* The placement of the slab_t control structure was lost in the
     * listing; keeping it at the end of the freshly allocated page is
     * assumed here. */
    slab = (slab_t*)((u32_t)data + PAGE_SIZE - sizeof(slab_t));

    frame_set_parent(ADDR2PFN(KA2PA(data)), slab);

    slab->available = slab_cache->objects;
    slab->nextavail = (void*)data;
    slab->cache = slab_cache;

    for (i = 0, p = (u32_t)data; i < slab_cache->objects; i++)
    {
      *(int *)p = p+slab_cache->size;
      p = p+slab_cache->size;
    };

//    spinlock_lock(&cache->slablock);
  } else {
    slab = list_get_instance(slab_cache->partial_slabs.next, slab_t, link);
    list_remove(&slab->link);
  }
  obj = slab->nextavail;
  slab->nextavail = *((void**)obj);
  slab->available--;

  if (!slab->available)
    list_prepend(&slab->link, &slab_cache->full_slabs);
  else
    list_prepend(&slab->link, &slab_cache->partial_slabs);

//    spinlock_unlock(&cache->slablock);

  return (void*)obj;
}
/* Allocate a slab_cache_t descriptor directly from the static
 * slab_cache_cache. The original function header was lost in the listing;
 * the name slab_cache_alloc and its signature are assumed. */
static slab_cache_t* slab_cache_alloc()
{
  slab_t *slab;
  void *obj;
  u32_t *p;

  if (list_empty(&slab_cache_cache.partial_slabs)) {
    /* Allow recursion and reclaiming
     * - this should work, as the slab control structures
     *   are small and do not need to allocate with anything
     *   other than frame_alloc when they are allocating,
     *   that's why we should get recursion at most 1-level deep
     */
//    spinlock_unlock(&cache->slablock);
//    slab = slab_create();

    void *data;
    unsigned int i;

    /* Allocation of the backing page was lost in the listing; a single
     * frame via core_alloc(0) is assumed here. */
    data = (void*)PA2KA(core_alloc(0));
    if (!data) {
      return NULL;
    }

    /* Placement of the slab_t control structure is assumed, as above. */
    slab = (slab_t*)((u32_t)data + PAGE_SIZE - sizeof(slab_t));

    frame_set_parent(ADDR2PFN(KA2PA(data)), slab);

    slab->available = slab_cache_cache.objects;
    slab->nextavail = (void*)data;
    slab->cache = &slab_cache_cache;

    for (i = 0, p = (u32_t*)data; i < slab_cache_cache.objects; i++)
    {
      *p = (u32_t)p+slab_cache_cache.size;
      p = (u32_t*)((u32_t)p+slab_cache_cache.size);
    };

//    spinlock_lock(&cache->slablock);
  } else {
    slab = list_get_instance(slab_cache_cache.partial_slabs.next, slab_t, link);
    list_remove(&slab->link);
  }
  obj = slab->nextavail;
  slab->nextavail = *((void**)obj);
  slab->available--;

  if (!slab->available)
    list_prepend(&slab->link, &slab_cache_cache.full_slabs);
  else
    list_prepend(&slab->link, &slab_cache_cache.partial_slabs);

//    spinlock_unlock(&cache->slablock);

  return (slab_cache_t*)obj;
}
/* Bootstrap the slab subsystem. The original function header was lost in the
 * listing; the name slab_cache_init is assumed. slab_cache_cache must be set
 * up by hand first, because slab_cache_create itself needs it to allocate
 * cache descriptors. */
void slab_cache_init(void)
{
  _slab_cache_create(&slab_cache_cache, sizeof(slab_cache_t),
                     sizeof(void *), NULL, NULL,
                     SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);

  /* Initialize the cache used for slab_t control structures */
  slab_cache = slab_cache_create(sizeof(slab_t),
                                 0, NULL, NULL, SLAB_CACHE_MAGDEFERRED);
};
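/* Call-path sketch (derived from the functions above, for orientation only):
 *
 *   slab_alloc(cache, flags)
 *     -> slab_obj_create(cache, flags)
 *          -> slab_space_alloc(cache, flags)   when no partial slab exists
 *               -> slab_create()               allocates the slab_t itself,
 *                                              served from the global
 *                                              slab_cache set up above
 *
 *   slab_free(cache, obj)
 *     -> _slab_free -> slab_obj_destroy -> slab_space_free   when the slab
 *                                                            becomes empty
 */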