Subversion Repositories Kolibri OS

#include 
#include 
#include 
#include 
#include 
 
slab_cache_t slab_cache_cache;
slab_cache_t *slab_cache;

/**
 * Allocate frames for slab space and initialize
 *
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
  void *data;
  slab_t *slab;
  size_t fsize;
  unsigned int i;
  u32_t p;

  data = (void*)PA2KA(frame_alloc(1 << cache->order));
  if (!data) {
    return NULL;
  }
  slab = (slab_t*)slab_create();
  if (!slab) {
    frame_free(KA2PA(data));
    return NULL;
  }
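
  /* Tag every frame of the slab with its owning slab_t so that
     obj2slab() can later map an object address back to its slab. */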
  for (i = 0; i < ((u32_t) 1 << cache->order); i++)
    frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab);

  slab->start = data;
  slab->available = cache->objects;
  slab->nextavail = (void*)data;
  slab->cache = cache;
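
  /* Thread a free list through the slab: the first word of each free
     object holds the address of the next free object. */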
  for (i = 0, p = (u32_t)slab->nextavail; i < cache->objects; i++)
  {
    *(addr_t *)p = p+cache->size;
    p = p+cache->size;
  };
  atomic_inc(&cache->allocated_slabs);
  return slab;
}

/**
 * Take new object from slab or create new if needed
 *
 * @return Object address or null
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
  slab_t *slab;
  void *obj;

  spinlock_lock(&cache->slablock);

  if (list_empty(&cache->partial_slabs)) {
    slab = slab_space_alloc(cache, flags);
    if (!slab)
    {
      spinlock_unlock(&cache->slablock);
      return NULL;
    }
  } else {
    slab = list_get_instance(cache->partial_slabs.next, slab_t, link);
    list_remove(&slab->link);
  }
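
  /* Pop the first object off the slab's embedded free list. */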
  obj = slab->nextavail;
  slab->nextavail = *(void**)obj;
  slab->available--;

  if (!slab->available)
    list_prepend(&slab->link, &cache->full_slabs);
  else
    list_prepend(&slab->link, &cache->partial_slabs);

  spinlock_unlock(&cache->slablock);

//  if (cache->constructor && cache->constructor(obj, flags)) {
    /* Bad, bad, construction failed */
//    slab_obj_destroy(cache, obj, slab);
//    return NULL;
//  }
  return obj;
}

/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
  return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)));
}

/** Allocate new object from cache - if no flags given, always returns
    memory */
void* __fastcall slab_alloc(slab_cache_t *cache, int flags)
{
  eflags_t efl;
  void *result = NULL;

  efl = safe_cli();

//  if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
//    result = magazine_obj_get(cache);
//  }
//  if (!result)
    result = slab_obj_create(cache, flags);

  safe_sti(efl);

//  if (result)
//    atomic_inc(&cache->allocated_objs);

  return result;
}
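
/* Note: the magazine (per-CPU object cache) layer is stubbed out here;
   every allocation goes straight to slab_obj_create() under cli/sti. */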

/* Slab cache functions */

/** Return number of objects that fit in cache */
static unsigned int comp_objects(slab_cache_t *cache)
{
  if (cache->flags & SLAB_CACHE_SLINSIDE)
    return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
  else
    return (PAGE_SIZE << cache->order) / cache->size;
}
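
/* For example, assuming 4 KiB pages: a cache with order 0 and object
   size 128 yields 4096/128 = 32 objects per slab, or
   (4096 - sizeof(slab_t))/128 when SLAB_CACHE_SLINSIDE keeps the
   slab_t header inside the slab itself. */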

/** Return wasted space in slab */
static unsigned int badness(slab_cache_t *cache)
{
  unsigned int objects;
  unsigned int ssize;
  size_t val;

  objects = comp_objects(cache);
  ssize = PAGE_SIZE << cache->order;
  if (cache->flags & SLAB_CACHE_SLINSIDE)
    ssize -= sizeof(slab_t);
  val = ssize - objects * cache->size;
  return val;
}

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
       size_t size,
       size_t align,
       int (*constructor)(void *obj, int kmflag),
       int (*destructor)(void *obj),
       int flags)
{
  int pages;
 // ipl_t ipl;

//  cache->name = name;

//  if (align < sizeof(unative_t))
//    align = sizeof(unative_t);
//  size = ALIGN_UP(size, align);

  cache->size = size;

//  cache->constructor = constructor;
//  cache->destructor = destructor;
  cache->flags = flags;

  list_initialize(&cache->full_slabs);
  list_initialize(&cache->partial_slabs);
  list_initialize(&cache->magazines);
//  spinlock_initialize(&cache->slablock, "slab_lock");
//  spinlock_initialize(&cache->maglock, "slab_maglock");
//  if (! (cache->flags & SLAB_CACHE_NOMAGAZINE))
//    make_magcache(cache);

  pages = SIZE2FRAMES(cache->size);
  /* We need 2^order >= pages */
  if (pages <= 1)
    cache->order = 0;
  else
    cache->order = fnzb(pages-1)+1;
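
  /* cache->order is now the smallest exponent with 2^order frames
     covering pages (fnzb() returns the index of the most significant
     set bit); the loop below grows it while badness() reports that
     too much of the slab is wasted. */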
  while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
    cache->order += 1;
  }
  cache->objects = comp_objects(cache);

//  ipl = interrupts_disable();
//  spinlock_lock(&slab_cache_lock);

//  list_append(&cache->link, &slab_cache_list);

//  spinlock_unlock(&slab_cache_lock);
//  interrupts_restore(ipl);
}

/** Create slab cache */
slab_cache_t * slab_cache_create(
         size_t size,
         size_t align,
         int (*constructor)(void *obj, int kmflag),
         int (*destructor)(void *obj),
         int flags)
{
  slab_cache_t *cache;

  cache = (slab_cache_t*)slab_cache_alloc();

  _slab_cache_create(cache, size, align, constructor, destructor, flags);
  return cache;
}

/**
 * Deallocate space associated with slab
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
  frame_free(KA2PA(slab->start));
  if (! (cache->flags & SLAB_CACHE_SLINSIDE))
    slab_free(slab_cache, slab);

  atomic_dec(&cache->allocated_slabs);

  return 1 << cache->order;
}

/**
 * Return object to slab and call a destructor
 *
 * @param slab If the caller knows directly slab of the object, otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
        slab_t *slab)
{
  int freed = 0;

  if (!slab)
    slab = obj2slab(obj);

//  if (cache->destructor)
//    freed = cache->destructor(obj);

//  spinlock_lock(&cache->slablock);
//  ASSERT(slab->available < cache->objects);

  *((void**)obj) = slab->nextavail;
  slab->nextavail = obj;
  slab->available++;
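
  /* Move the slab to the correct list: a fully free slab is released
     back to the frame allocator, a formerly full slab rejoins the
     partial list. */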
  if (slab->available == cache->objects) {
    /* Free associated memory */
    list_remove(&slab->link);
//    spinlock_unlock(&cache->slablock);

    return freed + slab_space_free(cache, slab);

  } else if (slab->available == 1) {
    /* It was in full, move to partial */
    list_remove(&slab->link);
    list_prepend(&slab->link, &cache->partial_slabs);
  }
//  spinlock_unlock(&cache->slablock);
  return freed;
}

/** Return object to cache, use slab if known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
  eflags_t efl;

  efl = safe_cli();

//  if ((cache->flags & SLAB_CACHE_NOMAGAZINE)
//      || magazine_obj_put(cache, obj)) {

    slab_obj_destroy(cache, obj, slab);

//  }
  safe_sti(efl);
  atomic_dec(&cache->allocated_objs);
}

void __fastcall slab_free(slab_cache_t *cache, void *obj)
{
  _slab_free(cache, obj, NULL);
}
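
/* slab_create() hands out slab_t headers from the global slab_cache,
   growing that cache by one page when no partial slab is available. */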
static slab_t * slab_create()
{
  slab_t *slab;
  void *obj;
  u32_t p;

  if (list_empty(&slab_cache->partial_slabs))
  {
//    spinlock_unlock(&cache->slablock);
//    slab = slab_create();

    void *data;
    unsigned int i;

    data = (void*)PA2KA(frame_alloc(1));
    if (!data) {
      return NULL;
    }

    slab = (slab_t*)((u32_t)data + PAGE_SIZE - sizeof(slab_t));

    frame_set_parent(ADDR2PFN(KA2PA(data)), slab);

    slab->available = slab_cache->objects;
    slab->nextavail = (void*)data;
    slab->cache = slab_cache;

    for (i = 0, p = (u32_t)slab->nextavail; i < slab_cache->objects; i++)
    {
      *(int *)p = p+slab_cache->size;
      p = p+slab_cache->size;
    };

//    spinlock_lock(&cache->slablock);
  } else {
    slab = list_get_instance(slab_cache->partial_slabs.next, slab_t, link);
    list_remove(&slab->link);
  }
  obj = slab->nextavail;
  slab->nextavail = *((void**)obj);
  slab->available--;

  if (!slab->available)
    list_prepend(&slab->link, &slab_cache->full_slabs);
  else
    list_prepend(&slab->link, &slab_cache->partial_slabs);

  return (slab_t*)obj;
}
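
/* Companion bootstrap: hands out slab_cache_t objects from the global
   slab_cache_cache in the same way. */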
static slab_cache_t * slab_cache_alloc()
{
  slab_t *slab;
  void *obj;
  u32_t *p;

  if (list_empty(&slab_cache_cache.partial_slabs))
  {
//    spinlock_unlock(&cache->slablock);
//    slab = slab_create();

    void *data;
    unsigned int i;

    data = (void*)PA2KA(frame_alloc(1));
    if (!data) {
      return NULL;
    }

    slab = (slab_t*)((u32_t)data + PAGE_SIZE - sizeof(slab_t));

    frame_set_parent(ADDR2PFN(KA2PA(data)), slab);

    slab->available = slab_cache_cache.objects;
    slab->nextavail = (void*)data;
    slab->cache = &slab_cache_cache;

    for (i = 0, p = (u32_t*)slab->nextavail; i < slab_cache_cache.objects; i++)
    {
      *p = (u32_t)p+slab_cache_cache.size;
      p = (u32_t*)((u32_t)p+slab_cache_cache.size);
    };

//    spinlock_lock(&cache->slablock);
  }
  else {
    slab = list_get_instance(slab_cache_cache.partial_slabs.next, slab_t, link);
    list_remove(&slab->link);
  }
  obj = slab->nextavail;
  slab->nextavail = *((void**)obj);
  slab->available--;

  if (!slab->available)
    list_prepend(&slab->link, &slab_cache_cache.full_slabs);
  else
    list_prepend(&slab->link, &slab_cache_cache.partial_slabs);

  return (slab_cache_t*)obj;
}

void slab_cache_init(void)
{
    DBG("%s\n", __FUNCTION__);

    _slab_cache_create(&slab_cache_cache, sizeof(slab_cache_t),
                     sizeof(void *), NULL, NULL,
                     SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);

    slab_cache = slab_cache_create(sizeof(slab_t),
                     0, NULL, NULL, SLAB_CACHE_MAGDEFERRED);
};
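
/* Usage sketch (illustrative only, not part of the original file):
   a hypothetical fixed-size object pool built on this allocator,
   assuming slab_cache_init() has already run during kernel startup. */
#if 0
typedef struct { u32_t id; u32_t state; } request_t;

static slab_cache_t *request_cache;

void request_pool_init(void)
{
  /* No constructor/destructor, default alignment, no magazine layer. */
  request_cache = slab_cache_create(sizeof(request_t), 0,
                                    NULL, NULL, SLAB_CACHE_NOMAGAZINE);
}

request_t* request_get(void)
{
  return (request_t*)slab_alloc(request_cache, 0);
}

void request_put(request_t *req)
{
  slab_free(request_cache, req);
}
#endif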