Subversion Repositories Kolibri OS

Rev 928 vs Rev 1066

Rev 928:
#include
#include
#include
#include
#include

#define  MD_FREE    1
#define  MD_USED    2

typedef struct {
    u32_t  av_mapped;
    u32_t  av_unmapped;

    link_t mapped[32];
    link_t unmapped[32];

    link_t used;

    SPINLOCK_DECLARE(lock);   /**< this lock protects everything below */
}heap_t;


slab_cache_t *md_slab;
slab_cache_t *phm_slab;


heap_t        lheap;
heap_t        sheap;


static inline void _set_lavu(count_t idx)
{ asm volatile ("bts %0, _lheap+4"::"r"(idx):"cc"); }

static inline void _reset_lavu(count_t idx)
{ asm volatile ("btr %0, _lheap+4"::"r"(idx):"cc"); }

static inline void _set_savm(count_t idx)
{ asm volatile ("bts %0, _sheap"::"r"(idx):"cc"); }

static inline void _reset_savm(count_t idx)
{ asm volatile ("btr %0, _sheap"::"r"(idx):"cc"); }

static inline void _set_savu(count_t idx)
{ asm volatile ("bts %0, _sheap+4"::"r"(idx):"cc"); }

static inline void _reset_savu(count_t idx)
{ asm volatile ("btr %0, _sheap+4"::"r"(idx):"cc"); }


int __fastcall init_heap(addr_t base, size_t size)
{
    md_t *md;
    u32_t i;

    ASSERT(base != 0);
    ASSERT(size != 0);
    ASSERT((base & 0x3FFFFF) == 0);
    ASSERT((size & 0x3FFFFF) == 0);

    for (i = 0; i < 32; i++)
    {
        list_initialize(&lheap.mapped[i]);
        list_initialize(&lheap.unmapped[i]);

        list_initialize(&sheap.mapped[i]);
        list_initialize(&sheap.unmapped[i]);
    };

    list_initialize(&lheap.used);
    list_initialize(&sheap.used);

    md_slab = slab_cache_create(sizeof(md_t), 16, NULL, NULL, SLAB_CACHE_MAGDEFERRED);

    md = (md_t*)slab_alloc(md_slab, 0);

    list_initialize(&md->adj);
    md->base = base;
    md->size = size;
    md->parent = NULL;
    md->state = MD_FREE;

    list_prepend(&md->link, &lheap.unmapped[31]);
    lheap.av_mapped    = 0x00000000;
    lheap.av_unmapped  = 0x80000000;
    sheap.av_mapped    = 0x00000000;
    sheap.av_unmapped  = 0x00000000;

    return 1;
};

md_t* __fastcall find_large_md(size_t size)
{
    md_t *md = NULL;

    count_t idx0;
    u32_t mask;

    ASSERT((size & 0x3FFFFF) == 0);

    idx0 = (size>>22) - 1 < 32 ? (size>>22) - 1 : 31;
    mask = lheap.av_unmapped & ( -1<<idx0 );

    if(mask)
    {
        if(idx0 == 31)
        {
            md_t *tmp = (md_t*)lheap.unmapped[31].next;
            while(&tmp->link != &lheap.unmapped[31])
            {
                if(tmp->size >= size)
                {
                    DBG("remove large tmp %x\n", tmp);

                    md = tmp;
                    break;
                };
                tmp = (md_t*)tmp->link.next;
            };
        }
        else
        {
            idx0 = _bsf(mask);

            ASSERT( !list_empty(&lheap.unmapped[idx0]));

            md = (md_t*)lheap.unmapped[idx0].next;
        };
    }
    else
        return NULL;

    ASSERT(md->state == MD_FREE);

    list_remove((link_t*)md);
    if(list_empty(&lheap.unmapped[idx0]))
        _reset_lavu(idx0);

    if(md->size > size)
    {
        count_t idx1;
        md_t *new_md = (md_t*)slab_alloc(md_slab, 0);        /* FIXME check */

        link_initialize(&new_md->link);
        list_insert(&new_md->adj, &md->adj);

        new_md->base   = md->base;
        new_md->size   = size;
        new_md->parent = NULL;
        new_md->state  = MD_USED;

        md->base += size;
        md->size -= size;

        idx1 = (md->size>>22) - 1 < 32 ? (md->size>>22) - 1 : 31;

        list_prepend(&md->link, &lheap.unmapped[idx1]);
        _set_lavu(idx1);

        return new_md;
    };
    md->state = MD_USED;

    return md;
}
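
The bin lookup in find_large_md packs "which free lists are non-empty" into a single 32-bit availability word, masks off the bins below idx0 with -1<<idx0, and bit-scans for the lowest surviving bin. A standalone sketch of that math, assuming the 4 MiB granularity implied by the size>>22 shift, with GCC's __builtin_ctz as a stand-in for the kernel's _bsf (illustration only, not KolibriOS types):

#include <stdio.h>

/* Bin i holds free blocks of (i + 1) * 4 MiB; bin 31 holds everything larger. */
static int first_fit_bin(unsigned av, unsigned size_mb)
{
    /* size_mb is assumed to be a multiple of 4, as the kernel asserts */
    unsigned idx0 = size_mb / 4 - 1 < 32 ? size_mb / 4 - 1 : 31;
    unsigned mask = av & (~0u << idx0);     /* non-empty bins >= idx0 */

    if (mask == 0)
        return -1;                          /* no bin large enough */
    return (int)__builtin_ctz(mask);        /* lowest set bit, like _bsf */
}

int main(void)
{
    unsigned av = (1u << 3) | (1u << 7);    /* bins 3 and 7 non-empty */
    printf("%d\n", first_fit_bin(av, 16));  /* idx0 = 3 -> bin 3 */
    printf("%d\n", first_fit_bin(av, 24));  /* idx0 = 5 -> bin 7 */
    return 0;
}

(find_large_md special-cases idx0 == 31 with a linear walk, since bin 31 mixes many block sizes.)
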
md_t* __fastcall find_unmapped_md(size_t size)
{
    eflags_t efl;

    md_t *md = NULL;

    count_t idx0;
    u32_t mask;

    ASSERT((size & 0xFFF) == 0);

    efl = safe_cli();

    idx0 = (size>>12) - 1 < 32 ? (size>>12) - 1 : 31;
    mask = sheap.av_unmapped & ( -1<<idx0 );

    DBG("smask %x size %x idx0 %x mask %x\n", sheap.av_unmapped, size, idx0, mask);

    if(mask)
    {
        if(idx0 == 31)
        {
            ASSERT( !list_empty(&sheap.unmapped[31]));

            md_t *tmp = (md_t*)sheap.unmapped[31].next;
            while( &tmp->link != &sheap.unmapped[31])
            {
                if(tmp->size >= size)
                {
                    md = tmp;
                    break;
                };
                tmp = (md_t*)tmp->link.next;
            };
        }
        else
        {
            idx0 = _bsf(mask);

            ASSERT( !list_empty(&sheap.unmapped[idx0]));

            md = (md_t*)sheap.unmapped[idx0].next;
        }
    };

    if(md)
    {
        DBG("remove md %x\n", md);

        ASSERT(md->state == MD_FREE);
        ASSERT(md->parent != NULL);

        list_remove((link_t*)md);
        if(list_empty(&sheap.unmapped[idx0]))
            _reset_savu(idx0);
    }
    else
    {
        md_t *lmd;
        lmd = find_large_md((size+0x3FFFFF)&~0x3FFFFF);

        DBG("get large md %x\n", lmd);

        if( !lmd)
        {
            safe_sti(efl);
            return NULL;
        };

        ASSERT(lmd->size != 0);
        ASSERT(lmd->base != 0);
        ASSERT((lmd->base & 0x3FFFFF) == 0);
        ASSERT(lmd->parent == NULL);

        md = (md_t*)slab_alloc(md_slab, 0);   /* FIXME check */

        link_initialize(&md->link);
        list_initialize(&md->adj);
        md->base = lmd->base;
        md->size = lmd->size;
        md->parent  = lmd;
        md->state = MD_USED;
    };

    if(md->size > size)
    {
        count_t idx1;
        md_t *new_md = (md_t*)slab_alloc(md_slab, 0);   /* FIXME check */

        link_initialize(&new_md->link);
        list_insert(&new_md->adj, &md->adj);

        new_md->base = md->base;
        new_md->size = size;
        new_md->parent = md->parent;
        new_md->state = MD_USED;

        md->base += size;
        md->size -= size;
        md->state = MD_FREE;

        idx1 = (md->size>>12) - 1 < 32 ? (md->size>>12) - 1 : 31;

        DBG("insert md %x, base %x size %x idx %x\n", md, md->base, md->size, idx1);

        if( idx1 < 31)
            list_prepend(&md->link, &sheap.unmapped[idx1]);
        else
        {
            if( list_empty(&sheap.unmapped[31]))
                list_prepend(&md->link, &sheap.unmapped[31]);
            else
            {
                md_t *tmp = (md_t*)sheap.unmapped[31].next;

                while( &tmp->link != &sheap.unmapped[31])
                {
                    if(md->base < tmp->base)
                        break;
                    tmp = (md_t*)tmp->link.next;
                }
                list_insert(&md->link, &tmp->link);
            };
        };

        _set_savu(idx1);

        safe_sti(efl);

        return new_md;
    };

    md->state = MD_USED;

    safe_sti(efl);

    return md;
}
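
When the descriptor picked above is larger than the request, find_unmapped_md splits it: a new descriptor takes the head of the range and is returned, while the original shrinks to the tail and is re-binned. A minimal sketch of that front-split and the adjacency invariant it preserves, with plain structs standing in for md_t and the slab cache:

#include <assert.h>
#include <stdio.h>

typedef struct { unsigned base, size; } region_t;

/* Carve 'size' bytes off the front of 'free_r' into 'used_r'. */
static void split_front(region_t *free_r, region_t *used_r, unsigned size)
{
    used_r->base = free_r->base;
    used_r->size = size;

    free_r->base += size;      /* the tail stays free ...            */
    free_r->size -= size;      /* ... and shrinks by the carved part */

    /* adjacency invariant relied on later by coalescing */
    assert(used_r->base + used_r->size == free_r->base);
}

int main(void)
{
    region_t free_r = { 0x10000, 0x8000 }, used_r;
    split_front(&free_r, &used_r, 0x3000);
    printf("used %x+%x, free %x+%x\n",
           used_r.base, used_r.size, free_r.base, free_r.size);
    return 0;   /* used 10000+3000, free 13000+5000 */
}
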
md_t* __fastcall find_mapped_md(size_t size)
{
    eflags_t efl;

    md_t *md = NULL;

    count_t idx0;
    u32_t mask;

    ASSERT((size & 0xFFF) == 0);

    efl = safe_cli();

    idx0 = (size>>12) - 1 < 32 ? (size>>12) - 1 : 31;
    mask = sheap.av_mapped & ( -1<<idx0 );

    DBG("small av_mapped %x size %x idx0 %x mask %x\n", sheap.av_mapped, size,
         idx0, mask);

    if(mask)
    {
        if(idx0 == 31)
        {
            ASSERT( !list_empty(&sheap.mapped[31]));

            md_t *tmp = (md_t*)sheap.mapped[31].next;
            while( &tmp->link != &sheap.mapped[31])
            {
                if(tmp->size >= size)
                {
                    md = tmp;
                    break;
                };
                tmp = (md_t*)tmp->link.next;
            };
        }
        else
        {
            idx0 = _bsf(mask);

            ASSERT( !list_empty(&sheap.mapped[idx0]));

            md = (md_t*)sheap.mapped[idx0].next;
        }
    };

    if(md)
    {
        DBG("remove md %x\n", md);

        ASSERT(md->state == MD_FREE);

        list_remove((link_t*)md);
        if(list_empty(&sheap.mapped[idx0]))
            _reset_savm(idx0);
    }
    else
    {
        md_t    *lmd;
        addr_t  frame;
        addr_t  *pte;
        int i;

        lmd = find_large_md((size+0x3FFFFF)&~0x3FFFFF);

        DBG("get large md %x\n", lmd);

        if( !lmd)
        {
            safe_sti(efl);
            return NULL;
        };

        ASSERT(lmd->size != 0);
        ASSERT(lmd->base != 0);
        ASSERT((lmd->base & 0x3FFFFF) == 0);
        ASSERT(lmd->parent == NULL);

        frame = core_alloc(10);                        /* FIXME check */

        lmd->parent = (void*)frame;

        pte = &((addr_t*)page_tabs)[lmd->base>>12];    /* FIXME remove */

        for(i = 0; i < 1024; i++)
        {
           *pte++ = frame;
           frame += 4096;
        }

        md = (md_t*)slab_alloc(md_slab, 0);            /* FIXME check */

        link_initialize(&md->link);
        list_initialize(&md->adj);
        md->base = lmd->base;
        md->size = lmd->size;
        md->parent  = lmd;
        md->state = MD_USED;
    };

    if(md->size > size)
    {
        count_t idx1;
        md_t *new_md = (md_t*)slab_alloc(md_slab, 0);   /* FIXME check */

        link_initialize(&new_md->link);
        list_insert(&new_md->adj, &md->adj);

        new_md->base = md->base;
        new_md->size = size;
        new_md->parent = md->parent;

        md->base += size;
        md->size -= size;
        md->state = MD_FREE;

        idx1 = (md->size>>12) - 1 < 32 ? (md->size>>12) - 1 : 31;

        DBG("insert md %x, base %x size %x idx %x\n", md, md->base, md->size, idx1);

        if( idx1 < 31)
            list_prepend(&md->link, &sheap.mapped[idx1]);
        else
        {
            if( list_empty(&sheap.mapped[31]))
                list_prepend(&md->link, &sheap.mapped[31]);
            else
            {
                md_t *tmp = (md_t*)sheap.mapped[31].next;

                while( &tmp->link != &sheap.mapped[31])
                {
                    if(md->base < tmp->base)
                        break;
                    tmp = (md_t*)tmp->link.next;
                }
                list_insert(&md->link, &tmp->link);
            };
        };

        _set_savm(idx1);

        md = new_md;
    };

    md->state = MD_USED;

    safe_sti(efl);

    return md;
}

void __fastcall free_unmapped_md(md_t *md)
{
    eflags_t  efl;
    md_t     *fd;
    md_t     *bk;
    count_t   idx;

    ASSERT(md->parent != NULL);

    efl = safe_cli();
    spinlock_lock(&sheap.lock);

    if( !list_empty(&md->adj))
    {
        bk = (md_t*)md->adj.prev;
        fd = (md_t*)md->adj.next;

        if(fd->state == MD_FREE)
        {
            idx = (fd->size>>12) - 1 < 32 ? (fd->size>>12) - 1 : 31;

            list_remove((link_t*)fd);
            if(list_empty(&sheap.unmapped[idx]))
                _reset_savu(idx);

            md->size += fd->size;
            md->adj.next = fd->adj.next;
            md->adj.next->prev = (link_t*)md;
            slab_free(md_slab, fd);
        };
        if(bk->state == MD_FREE)
        {
            idx = (bk->size>>12) - 1 < 32 ? (bk->size>>12) - 1 : 31;

            list_remove((link_t*)bk);
            if(list_empty(&sheap.unmapped[idx]))
                _reset_savu(idx);

            bk->size += md->size;
            bk->adj.next = md->adj.next;
            bk->adj.next->prev = (link_t*)bk;
            slab_free(md_slab, md);
            md = fd;
        };
    };

    md->state = MD_FREE;

    idx = (md->size>>12) - 1 < 32 ? (md->size>>12) - 1 : 31;

    _set_savu(idx);

    if( idx < 31)
        list_prepend(&md->link, &sheap.unmapped[idx]);
    else
    {
        if( list_empty(&sheap.unmapped[31]))
            list_prepend(&md->link, &sheap.unmapped[31]);
        else
        {
            md_t *tmp = (md_t*)sheap.unmapped[31].next;

            while( &tmp->link != &sheap.unmapped[31])
            {
                if(md->base < tmp->base)
                    break;
                tmp = (md_t*)tmp->link.next;
            }
            list_insert(&md->link, &tmp->link);
        };
    };
    spinlock_unlock(&sheap.lock);
    safe_sti(efl);
};
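
free_unmapped_md reverses the split: a descriptor being freed absorbs any adjacent neighbor that is already MD_FREE before it is re-binned. The forward-merge rule, under the same plain-struct assumptions as the split sketch above:

#include <stdio.h>

typedef struct { unsigned base, size; int free; } region_t;

/* Merge 'next' into 'md' when they are adjacent and 'next' is free. */
static void merge_next(region_t *md, region_t *next)
{
    if (next->free && md->base + md->size == next->base)
        md->size += next->size;   /* 'next' would then be unlinked and freed */
}

int main(void)
{
    region_t md   = { 0x13000, 0x5000, 1 };
    region_t next = { 0x18000, 0x2000, 1 };
    merge_next(&md, &next);
    printf("merged: %x+%x\n", md.base, md.size);  /* 13000+7000 */
    return 0;
}
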
void __fastcall free_mapped_md(md_t *md)
{
    eflags_t  efl;
    md_t     *fd;
    md_t     *bk;
    count_t   idx;

    ASSERT(md->parent != NULL);
    ASSERT( ((md_t*)(md->parent))->parent != NULL);

    efl = safe_cli();
    spinlock_lock(&sheap.lock);

    if( !list_empty(&md->adj))
    {
        bk = (md_t*)md->adj.prev;
        fd = (md_t*)md->adj.next;

        if(fd->state == MD_FREE)
        {
            idx = (fd->size>>12) - 1 < 32 ? (fd->size>>12) - 1 : 31;

            list_remove((link_t*)fd);
            if(list_empty(&sheap.mapped[idx]))
                _reset_savm(idx);

            md->size += fd->size;
            md->adj.next = fd->adj.next;
            md->adj.next->prev = (link_t*)md;
            slab_free(md_slab, fd);
        };
        if(bk->state == MD_FREE)
        {
            idx = (bk->size>>12) - 1 < 32 ? (bk->size>>12) - 1 : 31;

            list_remove((link_t*)bk);
            if(list_empty(&sheap.mapped[idx]))
                _reset_savm(idx);

            bk->size += md->size;
            bk->adj.next = md->adj.next;
            bk->adj.next->prev = (link_t*)bk;
            slab_free(md_slab, md);
            md = fd;
        };
    };

    md->state = MD_FREE;

    idx = (md->size>>12) - 1 < 32 ? (md->size>>12) - 1 : 31;

    _set_savm(idx);

    if( idx < 31)
        list_prepend(&md->link, &sheap.mapped[idx]);
    else
    {
        if( list_empty(&sheap.mapped[31]))
            list_prepend(&md->link, &sheap.mapped[31]);
        else
        {
            md_t *tmp = (md_t*)sheap.mapped[31].next;

            while( &tmp->link != &sheap.mapped[31])
            {
                if(md->base < tmp->base)
                    break;
                tmp = (md_t*)tmp->link.next;
            }
            list_insert(&md->link, &tmp->link);
        };
    };
    spinlock_unlock(&sheap.lock);
    safe_sti(efl);
};


md_t* __fastcall md_alloc(size_t size, u32_t flags)
{
    eflags_t efl;

    md_t *md;

    size = (size+4095)&~4095;

    if( flags & PG_MAP )
    {
        md = find_mapped_md(size);

        if( !md )
            return NULL;

        ASSERT(md->state == MD_USED);
        ASSERT(md->parent != NULL);

        md_t *lmd = (md_t*)md->parent;

        ASSERT( lmd != NULL);
        ASSERT( lmd->parent != NULL);

        addr_t  frame  = (md->base - lmd->base + (addr_t)lmd->parent)|
                         (flags & 0xFFF);
        DBG("frame %x\n", frame);
        ASSERT(frame != 0);

        count_t  tmp = size >> 12;
        addr_t  *pte = &((addr_t*)page_tabs)[md->base>>12];

        while(tmp--)
        {
            *pte++ = frame;
            frame += 4096;
        };
    }
    else

Rev 1066:

#include
#include
#include
#include

#define PG_DEMAND   0x400

#define HF_WIDTH    16
#define HF_SIZE     (1 << HF_WIDTH)

#define BUDDY_SYSTEM_INNER_BLOCK  0xff

static zone_t z_heap;

static link_t  shared_mmap;

#define heap_index( frame ) \
      (index_t)( (frame) - z_heap.frames)

#define heap_index_abs( frame ) \
      (index_t)( (frame) - z_heap.frames)

static __inline void frame_initialize(frame_t *frame)
{
    frame->refcount = 1;
    frame->buddy_order = 0;
}

#define buddy_get_order( block) \
    ((frame_t*)(block))->buddy_order

#define buddy_set_order( block, order) \
    ((frame_t*)(block))->buddy_order = (order)

#define buddy_mark_busy( block ) \
    ((frame_t*)(block))->refcount = 1

static __inline link_t * buddy_bisect(link_t *block)
{
    frame_t *frame_l, *frame_r;

    frame_l = (frame_t*)block;
    frame_r = (frame_l + (1 << (frame_l->buddy_order - 1)));

    return &frame_r->buddy_link;
}

static __inline link_t *buddy_coalesce(link_t *block_1, link_t *block_2)
{
    frame_t *frame1, *frame2;

    frame1 = (frame_t*)block_1;
    frame2 = (frame_t*)block_2;

    return frame1 < frame2 ? block_1 : block_2;
}

#define IS_BUDDY_LEFT_BLOCK_ABS(frame)  \
    (((heap_index_abs((frame)) >> (frame)->buddy_order) & 0x1) == 0)

#define IS_BUDDY_RIGHT_BLOCK_ABS(frame) \
    (((heap_index_abs((frame)) >> (frame)->buddy_order) & 0x1) == 1)

static link_t *find_buddy(link_t *block)
{
    frame_t *frame;
    index_t index;
    u32_t is_left, is_right;

    frame = (frame_t*)block;
 //   ASSERT(IS_BUDDY_ORDER_OK(frame_index_abs(zone, frame), frame->buddy_order));

    is_left = IS_BUDDY_LEFT_BLOCK_ABS( frame);
    is_right = IS_BUDDY_RIGHT_BLOCK_ABS( frame);

 //   ASSERT(is_left ^ is_right);

    if (is_left) {
        index = (heap_index(frame)) + (1 << frame->buddy_order);
    }
    else {    /* if (is_right) */
        index = (heap_index(frame)) - (1 << frame->buddy_order);
    };

    if ( index < z_heap.count)
    {
        if (z_heap.frames[index].buddy_order == frame->buddy_order &&
            z_heap.frames[index].refcount == 0) {
            return &z_heap.frames[index].buddy_link;
        }
    }

    return NULL;
}
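
find_buddy above encodes the standard buddy rule: bit 'order' of a block's frame index says whether the block is the left or right half of its parent, so the buddy lies at index plus or minus 2^order, which is the same as XOR-ing the index with 1 << order. A standalone check of that identity (plain C, not kernel types):

#include <stdio.h>

static unsigned buddy_index(unsigned index, unsigned order)
{
    if (((index >> order) & 1) == 0)      /* left half: buddy is above  */
        return index + (1u << order);
    else                                  /* right half: buddy is below */
        return index - (1u << order);
}

int main(void)
{
    printf("%u\n", buddy_index(8, 2));                      /* 12 */
    printf("%u\n", buddy_index(12, 2));                     /* 8  */
    printf("%d\n", buddy_index(8, 2) == (8u ^ (1u << 2)));  /* 1: same as XOR */
    return 0;
}
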
static void buddy_system_free(link_t *block)
{
    link_t *buddy, *hlp;
    u32_t i;

    /*
     * Determine block's order.
     */
    i = buddy_get_order(block);

    ASSERT(i <= z_heap.max_order);

    if (i != z_heap.max_order)
    {
        /*
         * See if there is any buddy in the list of order i.
         */
        buddy = find_buddy( block );
        if (buddy)
        {
            ASSERT(buddy_get_order(buddy) == i);
            /*
             * Remove buddy from the list of order i.
             */
            list_remove(buddy);

            /*
             * Invalidate order of both block and buddy.
             */
            buddy_set_order(block, BUDDY_SYSTEM_INNER_BLOCK);
            buddy_set_order(buddy, BUDDY_SYSTEM_INNER_BLOCK);

            /*
             * Coalesce block and buddy into one block.
             */
            hlp = buddy_coalesce( block, buddy );

            /*
             * Set order of the coalesced block to i + 1.
             */
            buddy_set_order(hlp, i + 1);

            /*
             * Recursively add the coalesced block to the list of order i + 1.
             */
            buddy_system_free( hlp );
            return;
        }
    }
    /*
     * Insert block into the list of order i.
     */
    list_append(block, &z_heap.order[i]);
}

static link_t* buddy_system_alloc( u32_t i)
{
    link_t *res, *hlp;

    ASSERT(i <= z_heap.max_order);

    /*
     * If the list of order i is not empty,
     * the request can be immediately satisfied.
     */
    if (!list_empty(&z_heap.order[i])) {
        res = z_heap.order[i].next;
        list_remove(res);
        buddy_mark_busy(res);
        return res;
    }
    /*
     * If order i is already the maximal order,
     * the request cannot be satisfied.
     */
    if (i == z_heap.max_order)
        return NULL;

    /*
     * Try to recursively satisfy the request from higher order lists.
     */
    hlp = buddy_system_alloc( i + 1 );

    /*
     * The request could not be satisfied
     * from higher order lists.
     */
    if (!hlp)
        return NULL;

    res = hlp;

    /*
     * Bisect the block and set order of both of its parts to i.
     */
    hlp = buddy_bisect( res );

    buddy_set_order(res, i);
    buddy_set_order(hlp, i);

    /*
     * Return the other half to buddy system. Mark the first part
     * full, so that it won't coalesce again.
     */
    buddy_mark_busy(res);
    buddy_system_free( hlp );

    return res;
}

int __fastcall init_heap(addr_t start, size_t size)
{
    count_t i;
    count_t count;

    count = size >> HF_WIDTH;

    ASSERT( start != 0);
    ASSERT( count != 0);

    spinlock_initialize(&z_heap.lock);

    z_heap.base = start >> HF_WIDTH;
    z_heap.count = count;
    z_heap.free_count = count;
    z_heap.busy_count = 0;

    z_heap.max_order = fnzb(count);

    DBG("create heap zone: base %x count %x\n", start, count);

    ASSERT(z_heap.max_order < BUDDY_SYSTEM_INNER_BLOCK);

    for (i = 0; i <= z_heap.max_order; i++)
        list_initialize(&z_heap.order[i]);

    DBG("count %d frame_t %d page_size %d\n",
        count, sizeof(frame_t), PAGE_SIZE);

    z_heap.frames = (frame_t *)PA2KA(frame_alloc( (count*sizeof(frame_t)) >> PAGE_WIDTH ));

    if( z_heap.frames == 0 )
        return 0;

    for (i = 0; i < count; i++) {
        z_heap.frames[i].buddy_order = 0;
        z_heap.frames[i].parent = NULL;
        z_heap.frames[i].refcount = 1;
    }

    for (i = 0; i < count; i++)
    {
        z_heap.frames[i].refcount = 0;
        buddy_system_free(&z_heap.frames[i].buddy_link);
    }

    list_initialize(&shared_mmap);

    return 1;
}
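
init_heap sizes max_order with fnzb(count), and mem_alloc below turns a 64 KiB-frame count into a buddy order with fnzb(order - 1) + 1, a ceiling log2. A sketch of that arithmetic, assuming fnzb returns the index of the most significant set bit; GCC's __builtin_clz is used as a stand-in, with an explicit guard for a count of 1:

#include <stdio.h>

static unsigned fnzb(unsigned x)          /* floor(log2(x)), x != 0 */
{
    return 31u - (unsigned)__builtin_clz(x);
}

static unsigned alloc_order(unsigned frames)  /* frames >= 1 */
{
    return frames > 1 ? fnzb(frames - 1) + 1 : 0;   /* ceil(log2) */
}

int main(void)
{
    printf("%u %u %u %u\n",
           alloc_order(1),   /* 0: a single 64 KiB frame  */
           alloc_order(2),   /* 1 */
           alloc_order(5),   /* 3: rounded up to 8 frames */
           alloc_order(8));  /* 3 */
    return 0;
}
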
addr_t __fastcall mem_alloc(size_t size, u32_t flags)
{
    eflags_t  efl;
    addr_t    heap = 0;

    count_t   order;
    frame_t  *frame;
    index_t   v;
    int i;
    mmap_t   *map;
    count_t   pages;

 //   __asm__ __volatile__ ("xchgw %bx, %bx");

    size = (size + 4095) & ~4095;

    pages = size >> PAGE_WIDTH;

//    map = (mmap_t*)malloc( sizeof(mmap_t) +
//                           sizeof(addr_t) * pages);

    map = (mmap_t*)PA2KA(frame_alloc( (sizeof(mmap_t) +
                           sizeof(addr_t) * pages) >> PAGE_WIDTH));

    map->size = size;

    if ( map )
    {
        order = size >> HF_WIDTH;

        if( order )
            order = fnzb(order - 1) + 1;

        efl = safe_cli();

        spinlock_lock(&z_heap.lock);

        frame = (frame_t*)buddy_system_alloc(order);

        ASSERT( frame );

        if( frame )
        {
            addr_t  page = 0;
            addr_t  mem;

            z_heap.free_count -= (1 << order);
            z_heap.busy_count += (1 << order);

            /* get frame address */

            v = z_heap.base + (index_t)(frame - z_heap.frames);

            heap = v << HF_WIDTH;

            map->base = heap;

            for(i = 0; i < (1 << order); i++)
                frame[i].parent = map;

            spinlock_unlock(&z_heap.lock);

            safe_sti(efl);

            addr_t  *pte = &((addr_t*)page_tabs)[heap >> PAGE_WIDTH];
            addr_t  *mpte = &map->pte[0];

#if 0
            if( flags & PG_MAP )
                page = PG_DEMAND | (flags & 0xFFF);

            mem = heap;
            while(pages--)
            {
                *pte++  = 0; //page;
                *mpte++ = page;

                asm volatile ( "invlpg (%0)" ::"r" (mem) );
                mem +=  4096;
            };
#else
            mem = heap;
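
The frame-to-address step in mem_alloc is pure arithmetic: the frame's index in z_heap.frames plus the zone base, shifted left by HF_WIDTH. A standalone sketch of that conversion with made-up numbers (the 2 GiB zone base and the 256-frame array are assumptions for illustration, not kernel values):

#include <stdio.h>

#define HF_WIDTH 16   /* one heap frame = 64 KiB */

typedef struct { int dummy; } frame_t;

int main(void)
{
    frame_t frames[256];                      /* stand-in for z_heap.frames */
    unsigned base = 0x80000000u >> HF_WIDTH;  /* zone starts at 2 GiB       */

    frame_t *frame = &frames[5];       /* as returned by buddy_system_alloc */
    unsigned v    = base + (unsigned)(frame - frames);
    unsigned heap = v << HF_WIDTH;

    printf("frame 5 -> %#x\n", heap);  /* 0x80050000 */
    return 0;
}
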