Subversion Repositories — KolibriOS

#include          /* header names were lost in the archived listing */
#include
#include
#include
#include


typedef struct
{
   link_t link;
   link_t adj;
   addr_t base;
   size_t size;
   void*  parent;
   u32_t  state;
}md_t;

#define   MD_FREE    1        /* this define was dropped by the viewer; the value 1 is assumed */
#define   MD_USED    2

typedef struct
{
    SPINLOCK_DECLARE(lock);   /**< this lock protects everything below */

    u32_t  availmask;
    link_t free[32];

    link_t used;
}heap_t;
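
/*
 * Each block of heap address space is tracked by one md_t (memory
 * descriptor): 'link' files the descriptor on a free[] or used list of its
 * heap, 'adj' chains descriptors that are adjacent in address space so
 * neighbours can be merged on free, 'parent' points at the large-heap
 * descriptor a small block was carved from, and 'state' is MD_FREE or
 * MD_USED.  A heap_t keeps 32 size-class free lists plus the availmask
 * bitmap that marks which of them are non-empty.
 */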

slab_cache_t *md_slab;        /* declaration reconstructed from its uses below */
slab_cache_t *phm_slab;


heap_t        lheap;          /* large heap: 4 MiB granularity */
heap_t        sheap;          /* small heap: 4 KiB granularity */

/* availmask bit helpers; the one-line signatures below are reconstructed */

static inline void _set_lmask(count_t idx)
{ asm volatile ("bts %0, _lheap"::"r"(idx):"cc"); }

static inline void _reset_lmask(count_t idx)
{ asm volatile ("btr %0, _lheap"::"r"(idx):"cc"); }

static inline void _set_smask(count_t idx)
{ asm volatile ("bts %0, _sheap"::"r"(idx):"cc"); }

static inline void _reset_smask(count_t idx)
{ asm volatile ("btr %0, _sheap"::"r"(idx):"cc"); }

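/*
 * How a request is mapped to a free list (as reconstructed from the code
 * above and below): every heap keeps 32 size-class lists in free[] plus a
 * bitmask, availmask, with one bit per non-empty list.  A request of s bytes
 * becomes the class index idx0 = (s >> shift) - 1, capped at 31, where
 * shift is 22 for the large heap (4 MiB units) and 12 for the small heap
 * (4 KiB pages).  availmask & (-1 << idx0) keeps only classes big enough
 * for the request, and _bsf() picks the lowest such class.  For example, a
 * 12 MiB request on the large heap gives idx0 = (12M >> 22) - 1 = 2, so any
 * set bit at position 2..31 can satisfy it; class 31 is the overflow list
 * and is searched linearly.
 */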
 
/* signature reconstructed; the original line was dropped by the viewer */
int __fastcall init_heap(addr_t base, size_t size)
{
   md_t *md;
   u32_t i;

   ASSERT(base != 0);
   ASSERT(size != 0);
   ASSERT((base & 0x3FFFFF) == 0);
   ASSERT((size & 0x3FFFFF) == 0);

   for(i = 0; i < 32; i++)
   {
     list_initialize(&lheap.free[i]);
     list_initialize(&sheap.free[i]);
   };

   list_initialize(&lheap.used);
   list_initialize(&sheap.used);

   /* creation of the md_t slab cache happens here; the exact call
      was lost in the listing */

   md = (md_t*)slab_alloc(md_slab,0);

   list_initialize(&md->adj);
   md->base = base;
   md->size = size;
   md->parent = NULL;
   md->state = MD_FREE;

   list_prepend(&md->link, &lheap.free[31]);

   lheap.availmask = 0x80000000;
   sheap.availmask = 0x00000000;

   return 1;
};
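
/*
 * After init_heap() the large heap holds a single free descriptor covering
 * [base, base+size) on lheap.free[31], which is why availmask is seeded
 * with only bit 31 set; the small heap starts empty and is refilled from
 * the large heap on demand (see the fallback path in the small allocator
 * below).
 */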

/* signature reconstructed; the original line was dropped by the viewer */
md_t* __fastcall find_large_md(size_t size)
{
   md_t *md = NULL;

   count_t idx0;
   u32_t mask;

   ASSERT((size & 0x3FFFFF) == 0);

   idx0 = (size>>22) - 1 < 32 ? (size>>22) - 1 : 31;
   mask = lheap.availmask & ( -1<<idx0 );

   if(mask)
   {
     if(idx0 == 31)
     {
        md_t *tmp = (md_t*)lheap.free[31].next;
        while((link_t*)tmp != &lheap.free[31])
        {
          if(tmp->size >= size)
          {
            DBG("remove large tmp %x\n", tmp);

            md = tmp;
            break;
          };
          tmp = (md_t*)tmp->link.next;
        };
     }
     else
     {
       idx0 = _bsf(mask);

       ASSERT( !list_empty(&lheap.free[idx0]));

       md = (md_t*)lheap.free[idx0].next;
     };
   }
   else
     return NULL;

   list_remove((link_t*)md);

   if(list_empty(&lheap.free[idx0]))
     _reset_lmask(idx0);

   if(md->size > size)
   {
     count_t idx1;
     md_t *new_md = (md_t*)slab_alloc(md_slab,0);         /* FIXME check */

     list_insert(&new_md->adj, &md->adj);

     new_md->base = md->base;
     new_md->size = size;
     new_md->state = MD_USED;

     md->base+= size;
     md->size-= size;

     idx1 = (md->size>>22) - 1 < 32 ? (md->size>>22) - 1 : 31;

     list_prepend(&md->link, &lheap.free[idx1]);
     _set_lmask(idx1);

     return new_md;
   };
   md->state = MD_USED;

   return md;
}
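
/*
 * When the chosen descriptor is larger than the request, the allocator
 * splits it: a fresh md_t taken from md_slab describes the first 'size'
 * bytes and is linked into the adjacency chain, while the original
 * descriptor keeps the tail and is re-filed under the size class of what
 * is left.  The adjacency chain (the adj links) is what later allows
 * neighbouring blocks to be merged again when they are freed.
 */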

/* signature reconstructed; the original line was dropped by the viewer */
md_t* __fastcall find_small_md(size_t size)
{
    eflags_t efl;

    md_t *md = NULL;

    count_t idx0;
    u32_t mask;

    ASSERT((size & 0xFFF) == 0);

    efl = safe_cli();

    idx0 = (size>>12) - 1 < 32 ? (size>>12) - 1 : 31;
    mask = sheap.availmask & ( -1<<idx0 );

    if(mask)
    {
        if(idx0 == 31)
        {
            ASSERT( !list_empty(&sheap.free[31]));

            md_t *tmp = (md_t*)sheap.free[31].next;

            while((link_t*)tmp != &sheap.free[31])
            {
                if(tmp->size >= size)
                {
                    md = tmp;
                    break;
                };
                tmp = (md_t*)tmp->link.next;
            };
        }
        else
        {
            idx0 = _bsf(mask);

            ASSERT( !list_empty(&sheap.free[idx0]));

            md = (md_t*)sheap.free[idx0].next;
        }
    };

    if(md)
    {
        DBG("remove md %x\n", md);

        ASSERT(md->state == MD_FREE);

        list_remove((link_t*)md);
        if(list_empty(&sheap.free[idx0]))
            _reset_smask(idx0);
    }
    else
    {
        md_t *lmd;
        lmd = find_large_md((size+0x3FFFFF)&~0x3FFFFF);

        if( !lmd)
        {
            safe_sti(efl);
            return NULL;
        };

        md = (md_t*)slab_alloc(md_slab,0);

        list_initialize(&md->adj);
        md->base = lmd->base;
        md->size = lmd->size;
        md->parent  = lmd;
        md->state = MD_USED;
    };

    if(md->size > size)
    {
        count_t idx1;
        md_t *new_md = (md_t*)slab_alloc(md_slab,0);    /* FIXME check */

        list_insert(&new_md->adj, &md->adj);

        new_md->base = md->base;
        new_md->size = size;
        new_md->parent = md->parent;
        new_md->state = MD_USED;

        md->base+= size;
        md->size-= size;
        md->state = MD_FREE;

        idx1 = (md->size>>12) - 1 < 32 ? (md->size>>12) - 1 : 31;

        if( idx1 < 31)
          list_prepend(&md->link, &sheap.free[idx1]);
        else
        {
            if( list_empty(&sheap.free[31]))
                list_prepend(&md->link, &sheap.free[31]);
            else
            {
                md_t *tmp = (md_t*)sheap.free[31].next;

                while((link_t*)tmp != &sheap.free[31])
                {
                    if(md->base < tmp->base)
                        break;
                    tmp = (md_t*)tmp->link.next;
                }
                list_insert(&md->link, &tmp->link);
            };
        };

        _set_smask(idx1);

        safe_sti(efl);

        return new_md;
    };

    md->state = MD_USED;

    safe_sti(efl);

    return md;
}
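
/*
 * The small heap is refilled lazily: when no small descriptor can satisfy
 * the request, the allocator rounds the size up to a 4 MiB multiple, takes
 * a block from the large heap with find_large_md(), and wraps it in a new
 * small descriptor whose parent field remembers the large descriptor it
 * was carved from.
 */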

/* signature reconstructed from the call in the release path below */
void __fastcall free_small_md(md_t *md)
{
    eflags_t  efl ;
    md_t     *fd;
    md_t     *bk;
    count_t   idx;

    efl = safe_cli();
    spinlock_lock(&sheap.lock);

    if( !list_empty(&md->adj))
    {
        bk = (md_t*)md->adj.prev;
        fd = (md_t*)md->adj.next;

        if(fd->state == MD_FREE)
        {
            idx = (fd->size>>12) - 1 < 32 ? (fd->size>>12) - 1 : 31;

            list_remove((link_t*)fd);
            if(list_empty(&sheap.free[idx]))
                _reset_smask(idx);

            md->size+= fd->size;
            md->adj.next = fd->adj.next;
            md->adj.next->prev = (link_t*)md;
            slab_free(md_slab, fd);
        };
        if(bk->state == MD_FREE)
        {
            idx = (bk->size>>12) - 1 < 32 ? (bk->size>>12) - 1 : 31;

            list_remove((link_t*)bk);
            if(list_empty(&sheap.free[idx]))
                _reset_smask(idx);

            bk->size+= md->size;
            bk->adj.next = md->adj.next;
            bk->adj.next->prev = (link_t*)bk;
            slab_free(md_slab, md);
            md = bk;        /* continue with the merged descriptor */
        };
    };

    md->state = MD_FREE;

    idx = (md->size>>12) - 1 < 32 ? (md->size>>12) - 1 : 31;

    _set_smask(idx);

    if( idx < 31)
        list_prepend(&md->link, &sheap.free[idx]);
    else
    {
        if( list_empty(&sheap.free[31]))
            list_prepend(&md->link, &sheap.free[31]);
        else
        {
            md_t *tmp = (md_t*)sheap.free[31].next;

            while((link_t*)tmp != &sheap.free[31])
            {
                if(md->base < tmp->base)
                    break;
                tmp = (md_t*)tmp->link.next;
            }
            list_insert(&md->link, &tmp->link);
        };
    };
    spinlock_unlock(&sheap.lock);
    safe_sti(efl);
};
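
/*
 * Freeing a small block tries to merge it with its address-space
 * neighbours through the adj chain before putting it back: a free
 * successor is absorbed into md, and a free predecessor absorbs md.
 * The merged descriptor is then filed under its new size class; class 31
 * is kept sorted by base address, which is why the insertion walks the
 * list instead of simply prepending.
 */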
 
/*
   (physical-memory helpers, disabled in this revision; several lines of
    this block, including the second helper's signature, were lost in the
    archived listing)

phismem_t* __fastcall phis_alloc(count_t count)
{
   phismem_t *phm;
   count_t tmp;
   phm = (phismem_t*)slab_alloc(phm_slab, 0);

   tmp = count;
   while(tmp)
   {
      u32_t order;

      asm volatile ("btr %0, %1" :"=r"(tmp):"r"(order):"cc");

   };

   return phm;
}

{
   count_t count;
   addr_t  *pte;

   pte = &((addr_t*)page_tabs)[base>>12];

   while(count)
   {
     u32_t order;
     addr_t frame;
     count_t size;

     asm volatile ("btr %0, %1" :"=r"(count):"r"(order):"cc");

     size = (1 << order);
     while(size--)
     {
       *pte++ = frame;
       frame+= 4096;
     }
   }
};
*/


/* allocator entry point; name and signature reconstructed, since the
   original line was dropped by the viewer */
void* __fastcall mem_alloc(size_t size, u32_t flags)
{
    eflags_t efl;

    md_t *md;

    ASSERT(size != 0);

    size = (size + 4095) & ~4095;          /* round up to whole pages */

    md = find_small_md(size);

    if( md )
    {
        ASSERT(md->state == MD_USED);

        if( flags )                        /* map physical frames on request;
                                              the exact flag test was lost */
        {
            count_t tmp = size >> 12;
            addr_t  *pte = &((addr_t*)page_tabs)[md->base>>12];

            while(tmp)
            {
                u32_t  order;
                addr_t frame;
                size_t size;

                /* take the highest remaining power-of-two run of pages */
                asm volatile ("bsr %1, %0" :"=&r"(order):"r"(tmp):"cc");
                asm volatile ("btr %1, %0" :"=r"(tmp):"r"(order):"cc");

                /* the physical allocation call for 2^order pages was lost
                   in the listing; frame must receive its address | flags */

                size = (1 << order);
                while(size--)
                {
                    *pte++ = frame;
                    frame+= 4096;
                };
            };
        };

        efl = safe_cli();
        spinlock_lock(&sheap.lock);

        if( list_empty(&sheap.used) )
            list_prepend(&md->link, &sheap.used);
        else
        {
            md_t *tmp = (md_t*)sheap.used.next;

            while((link_t*)tmp != &sheap.used)
            {
                if(md->base < tmp->base)
                    break;
                tmp = (md_t*)tmp->link.next;
            }
            list_insert(&md->link, &tmp->link);
        };

        spinlock_unlock(&sheap.lock);
        safe_sti(efl);

        return (void*)md->base;
    };
    return NULL;
};

/* release entry point; name and signature reconstructed, since the
   original line was dropped by the viewer */
void __fastcall mem_free(void *mem)
{
    eflags_t efl;

    md_t *tmp;
    md_t *md = NULL;

    ASSERT( ((addr_t)mem & 0xFFF) == 0 );
    ASSERT( ! list_empty(&sheap.used));

    efl = safe_cli();

    tmp = (md_t*)sheap.used.next;

    while((link_t*)tmp != &sheap.used)
    {
        if( tmp->base == (addr_t)mem )
        {
            md = tmp;
            break;
        };
        tmp = (md_t*)tmp->link.next;
    }

    if( md )
    {
        DBG("\tmd: %x base: %x size: %x\n",md, md->base, md->size);

        count_t count = md->size >> 12;
        addr_t  *pte = &((addr_t*)page_tabs)[md->base>>12];

        while(count--)
        {
            *pte++ = 0;
            asm volatile (
                "invlpg (%0)"
                :
                :"r" (mem) );
            mem+= 4096;
        };
        list_remove((link_t*)md);
        free_small_md( md );
    }
    else
    {
        DBG("\tERROR: invalid base address: %x\n", mem);
    };

    safe_sti(efl);
};
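
/*
 * Usage sketch, assuming the reconstructed mem_alloc/mem_free entry points
 * and a mapping flag understood by mem_alloc:
 *
 *     void *buf = mem_alloc(16384, flags);   // 4 pages, mapped on demand
 *     if( buf )
 *     {
 *         // ... use buf ...
 *         mem_free(buf);                     // unmaps the pages and merges
 *                                            // the descriptor with free
 *                                            // neighbours
 *     }
 */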