#include <spinlock.h>
#include <link.h>
#include <mm.h>
#include <slab.h>

#define PG_DEMAND        0x400

#define MD_FREE          1
#define MD_USED          2

#define HF_WIDTH         16
#define HF_SIZE          (1 << HF_WIDTH)

#define BUDDY_SYSTEM_INNER_BLOCK  0xff

typedef struct
{
    u32_t  av_mapped;
    u32_t  av_unmapped;

    link_t mapped[32];
    link_t unmapped[32];

    link_t used;

    SPINLOCK_DECLARE(lock);   /**< this lock protects everything below */
} heap_t;

slab_cache_t *md_slab;
slab_cache_t *phm_slab;

heap_t        lheap;
heap_t        sheap;

static zone_t z_heap;

static link_t shared_mmap;

#define heap_index( frame ) \
    (index_t)( (frame) - z_heap.frames)

#define heap_index_abs( frame ) \
    (index_t)( (frame) - z_heap.frames)

static inline void _set_lavu(count_t idx)
{ asm volatile ("bts %0, _lheap+4"::"r"(idx):"cc"); }

static inline void _reset_lavu(count_t idx)
{ asm volatile ("btr %0, _lheap+4"::"r"(idx):"cc"); }

static inline void _set_savm(count_t idx)
{ asm volatile ("bts %0, _sheap"::"r"(idx):"cc"); }

static inline void _reset_savm(count_t idx)
{ asm volatile ("btr %0, _sheap"::"r"(idx):"cc"); }

static inline void _set_savu(count_t idx)
{ asm volatile ("bts %0, _sheap+4"::"r"(idx):"cc"); }

static inline void _reset_savu(count_t idx)
{ asm volatile ("btr %0, _sheap+4"::"r"(idx):"cc"); }

static __inline void frame_initialize(frame_t *frame)
{
    frame->refcount = 1;
    frame->buddy_order = 0;
}

#define buddy_get_order( block ) \
    ((frame_t*)(block))->buddy_order

#define buddy_set_order( block, order ) \
    ((frame_t*)(block))->buddy_order = (order)

#define buddy_mark_busy( block ) \
    ((frame_t*)(block))->refcount = 1

static __inline link_t * buddy_bisect(link_t *block)
{
    frame_t *frame_l, *frame_r;

    frame_l = (frame_t*)block;
    frame_r = (frame_l + (1 << (frame_l->buddy_order - 1)));

    return &frame_r->buddy_link;
}
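/*
 * A minimal sketch of how the availability bitmaps above are used
 * (this mirrors the lookup code in find_unmapped_md below): bit n of
 * sheap.av_unmapped tracks whether sheap.unmapped[n] is non-empty, so
 * a first-fit search is a single bit scan instead of walking 32 lists.
 * _bsf() is assumed to return the index of the lowest set bit.
 */
#if 0
    count_t idx  = (size >> 12) - 1 < 32 ? (size >> 12) - 1 : 31;
    u32_t   mask = sheap.av_unmapped & ( -1 << idx );

    if( mask )                /* some list of index >= idx is non-empty  */
        idx = _bsf(mask);     /* smallest adequate non-empty size class */
#endif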
static __inline link_t *buddy_coalesce(link_t *block_1, link_t *block_2)
{
    frame_t *frame1, *frame2;

    frame1 = (frame_t*)block_1;
    frame2 = (frame_t*)block_2;

    /* the coalesced block starts at the lower of the two addresses */
    return frame1 < frame2 ? block_1 : block_2;
}

int __fastcall init_heap(addr_t base, size_t size)
{
    md_t *md;
    u32_t i;

    ASSERT(base != 0);
    ASSERT(size != 0);
    ASSERT((base & 0x3FFFFF) == 0);
    ASSERT((size & 0x3FFFFF) == 0);

    for (i = 0; i < 32; i++)
    {
        list_initialize(&lheap.mapped[i]);
        list_initialize(&lheap.unmapped[i]);

        list_initialize(&sheap.mapped[i]);
        list_initialize(&sheap.unmapped[i]);
    };

    list_initialize(&lheap.used);
    list_initialize(&sheap.used);

    md_slab = slab_cache_create(sizeof(md_t), 16, NULL, NULL, SLAB_CACHE_MAGDEFERRED);

    md = (md_t*)slab_alloc(md_slab, 0);

    list_initialize(&md->adj);
    md->base = base;
    md->size = size;
    md->parent = NULL;
    md->state = MD_FREE;

    list_prepend(&md->link, &lheap.unmapped[31]);
    lheap.av_mapped   = 0x00000000;
    lheap.av_unmapped = 0x80000000;
    sheap.av_mapped   = 0x00000000;
    sheap.av_unmapped = 0x00000000;

    return 1;
};
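/*
 * Usage sketch (hypothetical addresses): the whole heap is registered
 * as one free large md in the top size class, which is why the ASSERTs
 * above require a 4 MiB-aligned base and size.
 */
#if 0
    init_heap(0x00800000, 0x01000000);   /* 16 MiB heap at 8 MiB */
#endif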
#define IS_BUDDY_LEFT_BLOCK_ABS(frame)  \
    (((heap_index_abs((frame)) >> (frame)->buddy_order) & 0x1) == 0)

#define IS_BUDDY_RIGHT_BLOCK_ABS(frame) \
    (((heap_index_abs((frame)) >> (frame)->buddy_order) & 0x1) == 1)

static link_t *find_buddy(link_t *block)
{
    frame_t *frame;
    index_t index;
    u32_t is_left, is_right;

    frame = (frame_t*)block;
    // ASSERT(IS_BUDDY_ORDER_OK(frame_index_abs(zone, frame), frame->buddy_order));

    is_left = IS_BUDDY_LEFT_BLOCK_ABS( frame);
    is_right = IS_BUDDY_RIGHT_BLOCK_ABS( frame);

    // ASSERT(is_left ^ is_right);

    if (is_left) {
        index = (heap_index(frame)) + (1 << frame->buddy_order);
    }
    else {  /* if (is_right) */
        index = (heap_index(frame)) - (1 << frame->buddy_order);
    };

    if ( index < z_heap.count)
    {
        if (z_heap.frames[index].buddy_order == frame->buddy_order &&
            z_heap.frames[index].refcount == 0) {
            return &z_heap.frames[index].buddy_link;
        }
    }

    return NULL;
}

md_t* __fastcall find_large_md(size_t size)
{
    md_t *md = NULL;

    count_t idx0;
    u32_t mask;

    ASSERT((size & 0x3FFFFF) == 0);

    idx0 = (size>>22) - 1 < 32 ? (size>>22) - 1 : 31;
    mask = lheap.av_unmapped & ( -1<<idx0 );

    if(mask)
    {
        if(idx0 == 31)
        {
            md_t *tmp = (md_t*)lheap.unmapped[31].next;
            while(&tmp->link != &lheap.unmapped[31])
            {
                if(tmp->size >= size)
                {
                    DBG("remove large tmp %x\n", tmp);

                    md = tmp;
                    break;
                };
                tmp = (md_t*)tmp->link.next;
            };
        }
        else
        {
            idx0 = _bsf(mask);

            ASSERT( !list_empty(&lheap.unmapped[idx0]));

            md = (md_t*)lheap.unmapped[idx0].next;
        };
    }
    else
        return NULL;

    if( md == NULL )
        return NULL;

    ASSERT(md->state == MD_FREE);

    list_remove((link_t*)md);
    if(list_empty(&lheap.unmapped[idx0]))
        _reset_lavu(idx0);

    if(md->size > size)
    {
        count_t idx1;
        md_t *new_md = (md_t*)slab_alloc(md_slab,0);   /* FIXME check */

        link_initialize(&new_md->link);
        list_insert(&new_md->adj, &md->adj);

        new_md->base = md->base;
        new_md->size = size;
        new_md->parent = NULL;
        new_md->state = MD_USED;

        md->base+= size;
        md->size-= size;

        idx1 = (md->size>>22) - 1 < 32 ? (md->size>>22) - 1 : 31;

        list_prepend(&md->link, &lheap.unmapped[idx1]);
        _set_lavu(idx1);

        return new_md;
    };
    md->state = MD_USED;

    return md;
}
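/*
 * Example of the size-class indexing above (illustrative): large mds
 * are kept in 4 MiB granularity, so an 8 MiB request computes
 * idx0 = (0x800000 >> 22) - 1 = 1 and the bit scan starts at
 * lheap.unmapped[1]; anything of 128 MiB or more lands in the
 * overflow class 31, which is searched linearly.
 */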
static void buddy_system_free(link_t *block)
{
    link_t *buddy, *hlp;
    u32_t i;

    /*
     * Determine block's order.
     */
    i = buddy_get_order(block);

    ASSERT(i <= z_heap.max_order);

    if (i != z_heap.max_order)
    {
        /*
         * See if there is any buddy in the list of order i.
         */
        buddy = find_buddy( block );
        if (buddy)
        {
            ASSERT(buddy_get_order(buddy) == i);
            /*
             * Remove buddy from the list of order i.
             */
            list_remove(buddy);

            /*
             * Invalidate order of both block and buddy.
             */
            buddy_set_order(block, BUDDY_SYSTEM_INNER_BLOCK);
            buddy_set_order(buddy, BUDDY_SYSTEM_INNER_BLOCK);

            /*
             * Coalesce block and buddy into one block.
             */
            hlp = buddy_coalesce( block, buddy );

            /*
             * Set order of the coalesced block to i + 1.
             */
            buddy_set_order(hlp, i + 1);

            /*
             * Recursively add the coalesced block to the list of order i + 1.
             */
            buddy_system_free( hlp );
            return;
        }
    }
    /*
     * Insert block into the list of order i.
     */
    list_append(block, &z_heap.order[i]);
}
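/*
 * Worked example of the buddy arithmetic used by find_buddy() above:
 * for a block of order k at index i, adding (1 << k) to a left block
 * or subtracting it from a right block is the same as toggling bit k,
 * i.e. the buddy always lives at index i ^ (1 << k).
 */
#if 0
    index_t i     = heap_index(frame);
    index_t buddy = i ^ (1 << frame->buddy_order);
#endif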
md_t* __fastcall find_unmapped_md(size_t size)
{
    eflags_t efl;

    md_t *md = NULL;

    count_t idx0;
    u32_t mask;

    ASSERT((size & 0xFFF) == 0);

    efl = safe_cli();

    idx0 = (size>>12) - 1 < 32 ? (size>>12) - 1 : 31;
    mask = sheap.av_unmapped & ( -1<<idx0 );

    DBG("smask %x size %x idx0 %x mask %x\n", sheap.av_unmapped, size, idx0, mask);

    if(mask)
    {
        if(idx0 == 31)
        {
            ASSERT( !list_empty(&sheap.unmapped[31]));

            md_t *tmp = (md_t*)sheap.unmapped[31].next;
            while( &tmp->link != &sheap.unmapped[31])
            {
                if(tmp->size >= size)
                {
                    md = tmp;
                    break;
                };
                tmp = (md_t*)tmp->link.next;
            };
        }
        else
        {
            idx0 = _bsf(mask);

            ASSERT( !list_empty(&sheap.unmapped[idx0]));

            md = (md_t*)sheap.unmapped[idx0].next;
        }
    };

    if(md)
    {
        DBG("remove md %x\n", md);

        ASSERT(md->state == MD_FREE);
        ASSERT(md->parent != NULL);

        list_remove((link_t*)md);
        if(list_empty(&sheap.unmapped[idx0]))
            _reset_savu(idx0);
    }
    else
    {
        md_t *lmd;
        lmd = find_large_md((size+0x3FFFFF)&~0x3FFFFF);

        DBG("get large md %x\n", lmd);

        if( !lmd)
        {
            safe_sti(efl);
            return NULL;
        };

        ASSERT(lmd->size != 0);
        ASSERT(lmd->base != 0);
        ASSERT((lmd->base & 0x3FFFFF) == 0);
        ASSERT(lmd->parent == NULL);

        md = (md_t*)slab_alloc(md_slab,0);    /* FIXME check */

        link_initialize(&md->link);
        list_initialize(&md->adj);
        md->base = lmd->base;
        md->size = lmd->size;
        md->parent = lmd;
        md->state = MD_USED;
    };

    if(md->size > size)
    {
        count_t idx1;
        md_t *new_md = (md_t*)slab_alloc(md_slab,0);  /* FIXME check */

        link_initialize(&new_md->link);
        list_insert(&new_md->adj, &md->adj);

        new_md->base = md->base;
        new_md->size = size;
        new_md->parent = md->parent;
        new_md->state = MD_USED;

        md->base+= size;
        md->size-= size;
        md->state = MD_FREE;

        idx1 = (md->size>>12) - 1 < 32 ? (md->size>>12) - 1 : 31;

        DBG("insert md %x, base %x size %x idx %x\n", md, md->base, md->size, idx1);

        if( idx1 < 31)
            list_prepend(&md->link, &sheap.unmapped[idx1]);
        else
        {
            if( list_empty(&sheap.unmapped[31]))
                list_prepend(&md->link, &sheap.unmapped[31]);
            else
            {
                md_t *tmp = (md_t*)sheap.unmapped[31].next;

                while( &tmp->link != &sheap.unmapped[31])
                {
                    if(md->base < tmp->base)
                        break;
                    tmp = (md_t*)tmp->link.next;
                }
                list_insert(&md->link, &tmp->link);
            };
        };

        _set_savu(idx1);

        safe_sti(efl);

        return new_md;
    };

    md->state = MD_USED;

    safe_sti(efl);

    return md;
}
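/*
 * Example of the fallback rounding above (illustrative): when the
 * small heap is exhausted, the request is rounded up to the 4 MiB
 * large-heap granularity before calling find_large_md(), e.g. a
 * 0x5000-byte request grows to 0x400000 and the unused tail stays in
 * the small heap as a free md.
 */
#if 0
    size_t large = (size + 0x3FFFFF) & ~0x3FFFFF;
#endif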
static link_t* buddy_system_alloc( u32_t i)
{
    link_t *res, *hlp;

    ASSERT(i <= z_heap.max_order);

    /*
     * If the list of order i is not empty,
     * the request can be immediately satisfied.
     */
    if (!list_empty(&z_heap.order[i])) {
        res = z_heap.order[i].next;
        list_remove(res);
        buddy_mark_busy(res);
        return res;
    }
    /*
     * If order i is already the maximal order,
     * the request cannot be satisfied.
     */
    if (i == z_heap.max_order)
        return NULL;

    /*
     * Try to recursively satisfy the request from higher order lists.
     */
    hlp = buddy_system_alloc( i + 1 );

    /*
     * The request could not be satisfied
     * from higher order lists.
     */
    if (!hlp)
        return NULL;

    res = hlp;

    /*
     * Bisect the block and set order of both of its parts to i.
     */
    hlp = buddy_bisect( res );

    buddy_set_order(res, i);
    buddy_set_order(hlp, i);

    /*
     * Return the other half to buddy system. Mark the first part
     * full, so that it won't coalesce again.
     */
    buddy_mark_busy(res);
    buddy_system_free( hlp );

    return res;
}
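/*
 * Allocation sketch (assumes the zone is initialized): requesting
 * order 0 from an empty order-0 list recursively takes an order-1
 * block, keeps its first half and frees the second half back into the
 * order-0 list, so one split serves two order-0 requests.
 */
#if 0
    link_t  *blk   = buddy_system_alloc(0);
    frame_t *frame = (frame_t*)blk;   /* first frame of the block */
#endif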
int __fastcall init_heap(addr_t start, size_t size)
{
    count_t i;
    count_t count;

    count = size >> HF_WIDTH;

    ASSERT( start != 0);
    ASSERT( count != 0);

    spinlock_initialize(&z_heap.lock);

    z_heap.base = start >> HF_WIDTH;
    z_heap.count = count;
    z_heap.free_count = count;
    z_heap.busy_count = 0;

    z_heap.max_order = fnzb(count);

    DBG("create heap zone: base %x count %x\n", start, count);

    ASSERT(z_heap.max_order < BUDDY_SYSTEM_INNER_BLOCK);

    for (i = 0; i <= z_heap.max_order; i++)
        list_initialize(&z_heap.order[i]);

    DBG("count %d frame_t %d page_size %d\n",
        count, sizeof(frame_t), PAGE_SIZE);

    z_heap.frames = (frame_t *)PA2KA(frame_alloc( (count*sizeof(frame_t)) >> PAGE_WIDTH ));

    if( z_heap.frames == 0 )
        return 0;

    for (i = 0; i < count; i++) {
        z_heap.frames[i].buddy_order = 0;
        z_heap.frames[i].parent = NULL;
        z_heap.frames[i].refcount = 1;
    }

    for (i = 0; i < count; i++)
    {
        z_heap.frames[i].refcount = 0;
        buddy_system_free(&z_heap.frames[i].buddy_link);
    }

    list_initialize(&shared_mmap);

    return 1;
}
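/*
 * Usage sketch (hypothetical numbers, and assuming fnzb() returns the
 * index of the most significant set bit): with HF_WIDTH == 16 the zone
 * granularity is 64 KiB, so a 32 MiB region gives count == 512 heap
 * frames and max_order == fnzb(512) == 9.
 */
#if 0
    if( !init_heap(0x01000000, 0x02000000) )   /* 32 MiB zone at 16 MiB */
        DBG("init_heap failed\n");
#endif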
if(md) |
addr_t __fastcall mem_alloc(size_t size, u32_t flags) |
{ |
DBG("remove md %x\n", md); |
eflags_t efl; |
addr_t heap = 0; |
|
ASSERT(md->state==MD_FREE); |
|
list_remove((link_t*)md); |
if(list_empty(&sheap.mapped[idx0])) |
_reset_savm(idx0); |
} |
else |
{ |
md_t *lmd; |
addr_t frame; |
addr_t *pte; |
count_t order; |
frame_t *frame; |
index_t v; |
int i; |
mmap_t *map; |
count_t pages; |
|
lmd = find_large_md((size+0x3FFFFF)&~0x3FFFFF); |
// __asm__ __volatile__ ("xchgw %bx, %bx"); |
|
DBG("get large md %x\n", lmd); |
size = (size + 4095) & ~4095; |
|
if( !lmd) |
{ |
safe_sti(efl); |
return NULL; |
}; |
pages = size >> PAGE_WIDTH; |
|
ASSERT(lmd->size != 0); |
ASSERT(lmd->base != 0); |
ASSERT((lmd->base & 0x3FFFFF) == 0); |
ASSERT(lmd->parent == NULL); |
// map = (mmap_t*)malloc( sizeof(mmap_t) + |
// sizeof(addr_t) * pages); |
|
frame = core_alloc(10); /* FIXME check */ |
map = (mmap_t*)PA2KA(frame_alloc( (sizeof(mmap_t) + |
sizeof(addr_t) * pages) >> PAGE_WIDTH)); |
|
lmd->parent = (void*)frame; |
map->size = size; |
|
pte = &((addr_t*)page_tabs)[lmd->base>>12]; /* FIXME remove */ |
|
for(i = 0; i<1024; i++) |
if ( map ) |
{ |
*pte++ = frame; |
frame+= 4096; |
} |
order = size >> HF_WIDTH; |
|
md = (md_t*)slab_alloc(md_slab,0); /* FIXME check */ |
if( order ) |
order = fnzb(order - 1) + 1; |
|
link_initialize(&md->link); |
list_initialize(&md->adj); |
md->base = lmd->base; |
md->size = lmd->size; |
md->parent = lmd; |
md->state = MD_USED; |
}; |
efl = safe_cli(); |
|
if(md->size > size) |
{ |
count_t idx1; |
md_t *new_md = (md_t*)slab_alloc(md_slab,0); /* FIXME check */ |
spinlock_lock(&z_heap.lock); |
|
link_initialize(&new_md->link); |
list_insert(&new_md->adj, &md->adj); |
frame = (frame_t*)buddy_system_alloc(order); |
|
new_md->base = md->base; |
new_md->size = size; |
new_md->parent = md->parent; |
ASSERT( frame ); |
|
md->base+= size; |
md->size-= size; |
md->state = MD_FREE; |
if( frame ) |
{ |
addr_t page = 0; |
addr_t mem; |
|
idx1 = (md->size>>12) - 1 < 32 ? (md->size>>12) - 1 : 31; |
z_heap.free_count -= (1 << order); |
z_heap.busy_count += (1 << order); |
|
DBG("insert md %x, base %x size %x idx %x\n", md,md->base, md->size,idx1); |
/* get frame address */ |
|
if( idx1 < 31) |
list_prepend(&md->link, &sheap.mapped[idx1]); |
else |
{ |
if( list_empty(&sheap.mapped[31])) |
list_prepend(&md->link, &sheap.mapped[31]); |
else |
{ |
md_t *tmp = (md_t*)sheap.mapped[31].next; |
v = z_heap.base + (index_t)(frame - z_heap.frames); |
|
while( &tmp->link != &sheap.mapped[31]) |
{ |
if(md->base < tmp->base) |
break; |
tmp = (md_t*)tmp->link.next; |
} |
list_insert(&md->link, &tmp->link); |
}; |
}; |
heap = v << HF_WIDTH; |
|
_set_savm(idx1); |
map->base = heap; |
|
md = new_md; |
}; |
for(i = 0; i < (1 << order); i++) |
frame[i].parent = map; |
|
md->state = MD_USED; |
spinlock_unlock(&z_heap.lock); |
|
safe_sti(efl); |
|
return md; |
} |
|
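/*
 * Note on the fallback above (my reading of the code): core_alloc(10)
 * apparently returns a physically contiguous order-10 block, i.e.
 * 1024 pages == 4 MiB, which is why the loop fills exactly 1024 page
 * table entries for the fresh large block.
 */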
addr_t __fastcall mem_alloc(size_t size, u32_t flags)
{
    eflags_t  efl;
    addr_t    heap = 0;

    count_t   order;
    frame_t  *frame;
    index_t   v;
    int       i;
    mmap_t   *map;
    count_t   pages;

//    __asm__ __volatile__ ("xchgw %bx, %bx");

    size = (size + 4095) & ~4095;

    pages = size >> PAGE_WIDTH;

//    map = (mmap_t*)malloc( sizeof(mmap_t) +
//                           sizeof(addr_t) * pages);

    map = (mmap_t*)PA2KA(frame_alloc( (sizeof(mmap_t) +
                         sizeof(addr_t) * pages) >> PAGE_WIDTH));

    if ( map )
    {
        map->size = size;

        order = size >> HF_WIDTH;

        if( order )
            order = fnzb(order - 1) + 1;

        efl = safe_cli();

        spinlock_lock(&z_heap.lock);

        frame = (frame_t*)buddy_system_alloc(order);

        ASSERT( frame );

        if( frame )
        {
            addr_t page = 0;
            addr_t mem;

            z_heap.free_count -= (1 << order);
            z_heap.busy_count += (1 << order);

            /* get frame address */

            v = z_heap.base + (index_t)(frame - z_heap.frames);

            heap = v << HF_WIDTH;

            map->base = heap;

            for(i = 0; i < (1 << order); i++)
                frame[i].parent = map;

            spinlock_unlock(&z_heap.lock);

            safe_sti(efl);

            addr_t *pte  = &((addr_t*)page_tabs)[heap >> PAGE_WIDTH];
            addr_t *mpte = &map->pte[0];

#if 0
            if( flags & PG_MAP )
                page = PG_DEMAND | (flags & 0xFFF);

            mem = heap;
            while(pages--)
            {
                *pte++ = 0;   //page;
                *mpte++ = page;

                asm volatile ( "invlpg (%0)" ::"r" (mem) );
                mem+= 4096;
            };
#else
            mem = heap;

            while(pages--)
            {
                if( flags & PG_MAP )
                    page = alloc_page();

                page |= flags & 0xFFF;

                /* PTEs stay clear here; the real mapping is installed
                 * lazily by heap_fault() from map->pte[] */
                *pte++ = 0;
                *mpte++ = page;

                asm volatile ( "invlpg (%0)" ::"r" (mem) );
                mem+= 4096;
            };
#endif

            DBG("%s %x size %d order %d\n", __FUNCTION__, heap, size, order);

            return heap;
        }

        spinlock_unlock(&z_heap.lock);
        safe_sti(efl);

        frame_free( KA2PA(map) );
    };

    return 0;
}
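/*
 * Usage sketch: the zone allocator hands out HF_SIZE-aligned regions
 * and records the backing pages in the region's mmap_t; with PG_MAP
 * the pages are committed here and wired in by heap_fault() on first
 * access. The flag bits below are illustrative x86 PTE attributes.
 */
#if 0
    addr_t p = mem_alloc(16*4096, PG_MAP | 3);   /* present | writable */
    if( p )
        mem_free(p);
#endif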
void __fastcall free_unmapped_md(md_t *md)
{
    eflags_t  efl;
    md_t     *fd;
    md_t     *bk;
    count_t   idx;

    ASSERT(md->parent != NULL);

    efl = safe_cli();
    spinlock_lock(&sheap.lock);

    if( !list_empty(&md->adj))
    {
        bk = (md_t*)md->adj.prev;
        fd = (md_t*)md->adj.next;

        if(fd->state == MD_FREE)
        {
            idx = (fd->size>>12) - 1 < 32 ? (fd->size>>12) - 1 : 31;

            list_remove((link_t*)fd);
            if(list_empty(&sheap.unmapped[idx]))
                _reset_savu(idx);

            md->size+= fd->size;
            md->adj.next = fd->adj.next;
            md->adj.next->prev = (link_t*)md;
            slab_free(md_slab, fd);
        };
        if(bk->state == MD_FREE)
        {
            idx = (bk->size>>12) - 1 < 32 ? (bk->size>>12) - 1 : 31;

            list_remove((link_t*)bk);
            if(list_empty(&sheap.unmapped[idx]))
                _reset_savu(idx);

            bk->size+= md->size;
            bk->adj.next = md->adj.next;
            bk->adj.next->prev = (link_t*)bk;
            slab_free(md_slab, md);
            md = bk;
        };
    };

    md->state = MD_FREE;

    idx = (md->size>>12) - 1 < 32 ? (md->size>>12) - 1 : 31;

    _set_savu(idx);

    if( idx < 31)
        list_prepend(&md->link, &sheap.unmapped[idx]);
    else
    {
        if( list_empty(&sheap.unmapped[31]))
            list_prepend(&md->link, &sheap.unmapped[31]);
        else
        {
            md_t *tmp = (md_t*)sheap.unmapped[31].next;

            while( &tmp->link != &sheap.unmapped[31])
            {
                if(md->base < tmp->base)
                    break;
                tmp = (md_t*)tmp->link.next;
            }
            list_insert(&md->link, &tmp->link);
        };
    };
    spinlock_unlock(&sheap.lock);
    safe_sti(efl);
};

void __fastcall free_mapped_md(md_t *md)
{
    eflags_t  efl;
    md_t     *fd;
    md_t     *bk;
    count_t   idx;

    ASSERT(md->parent != NULL);
    ASSERT( ((md_t*)(md->parent))->parent != NULL);

    efl = safe_cli();
    spinlock_lock(&sheap.lock);

    if( !list_empty(&md->adj))
    {
        bk = (md_t*)md->adj.prev;
        fd = (md_t*)md->adj.next;

        if(fd->state == MD_FREE)
        {
            idx = (fd->size>>12) - 1 < 32 ? (fd->size>>12) - 1 : 31;

            list_remove((link_t*)fd);
            if(list_empty(&sheap.mapped[idx]))
                _reset_savm(idx);

            md->size+= fd->size;
            md->adj.next = fd->adj.next;
            md->adj.next->prev = (link_t*)md;
            slab_free(md_slab, fd);
        };
        if(bk->state == MD_FREE)
        {
            idx = (bk->size>>12) - 1 < 32 ? (bk->size>>12) - 1 : 31;

            list_remove((link_t*)bk);
            if(list_empty(&sheap.mapped[idx]))
                _reset_savm(idx);

            bk->size+= md->size;
            bk->adj.next = md->adj.next;
            bk->adj.next->prev = (link_t*)bk;
            slab_free(md_slab, md);
            md = bk;
        };
    };

    md->state = MD_FREE;

    idx = (md->size>>12) - 1 < 32 ? (md->size>>12) - 1 : 31;

    _set_savm(idx);

    if( idx < 31)
        list_prepend(&md->link, &sheap.mapped[idx]);
    else
    {
        if( list_empty(&sheap.mapped[31]))
            list_prepend(&md->link, &sheap.mapped[31]);
        else
        {
            md_t *tmp = (md_t*)sheap.mapped[31].next;

            while( &tmp->link != &sheap.mapped[31])
            {
                if(md->base < tmp->base)
                    break;
                tmp = (md_t*)tmp->link.next;
            }
            list_insert(&md->link, &tmp->link);
        };
    };
    spinlock_unlock(&sheap.lock);
    safe_sti(efl);
};
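/*
 * Note on the two free routines above (descriptive): mds are kept in
 * address order on the md->adj chain, so a freed md can merge with at
 * most two neighbours, the previous (bk) and the next (fd) descriptor;
 * after a backward merge the surviving descriptor is bk.
 */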
void __fastcall mem_free(addr_t addr)
{
    eflags_t  efl;
    frame_t  *frame;
    count_t   idx;

    idx = (addr >> HF_WIDTH);

    if( (idx < z_heap.base) ||
        (idx >= z_heap.base+z_heap.count)) {
        DBG("invalid address %x\n", addr);
        return;
    }

    frame = &z_heap.frames[idx-z_heap.base];

    u32_t order = frame->buddy_order;

    DBG("%s %x order %d\n", __FUNCTION__, addr, order);

    ASSERT(frame->refcount);

    efl = safe_cli();
    spinlock_lock(&z_heap.lock);

    if (!--frame->refcount)
    {
        mmap_t *map;
        count_t i;

        map = frame->parent;

        for(i = 0; i < (1 << order); i++)
            frame[i].parent = NULL;

        buddy_system_free(&frame->buddy_link);

        /* Update zone information. */
        z_heap.free_count += (1 << order);
        z_heap.busy_count -= (1 << order);

        spinlock_unlock(&z_heap.lock);
        safe_sti(efl);

        for( i = 0; i < (map->size >> PAGE_WIDTH); i++)
            frame_free(map->pte[i]);

        frame_free( KA2PA(map) );
    }
    else
    {
        spinlock_unlock(&z_heap.lock);
        safe_sti(efl);
    };
};
md_t* __fastcall md_alloc(size_t size, u32_t flags)
{
    md_t *md;

    size = (size+4095)&~4095;

    if( flags & PG_MAP )
    {
        md = find_mapped_md(size);

        if( !md )
            return NULL;

        ASSERT(md->state == MD_USED);
        ASSERT(md->parent != NULL);

        md_t *lmd = (md_t*)md->parent;

        ASSERT( lmd != NULL);
        ASSERT( lmd->parent != NULL);

        addr_t frame = (md->base - lmd->base + (addr_t)lmd->parent)|
                       (flags & 0xFFF);
        DBG("frame %x\n", frame);
        ASSERT(frame != 0);

        count_t  tmp = size >> 12;
        addr_t  *pte = &((addr_t*)page_tabs)[md->base>>12];

        while(tmp--)
        {
            *pte++ = frame;
            frame+= 4096;
        };
    }
    else
    {
        md = find_unmapped_md(size);
        if( !md )
            return NULL;

        ASSERT(md->parent != NULL);
        ASSERT(md->state == MD_USED);
    }

    return md;
};

void __fastcall md_free(md_t *md)
{
    if( md )
    {
        md_t *lmd;

        DBG("free md: %x base: %x size: %x\n", md, md->base, md->size);

        ASSERT(md->state == MD_USED);

        list_remove((link_t*)md);

        lmd = (md_t*)md->parent;

        ASSERT(lmd != 0);

        if(lmd->parent != 0)
        {
            addr_t mem = md->base;
            addr_t *pte = &((addr_t*)page_tabs)[md->base>>12];
            count_t tmp = md->size >> 12;

            while(tmp--)
            {
                *pte++ = 0;
                asm volatile ( "invlpg (%0)" ::"r" (mem) );
                mem+= 4096;
            };
            free_mapped_md( md );
        }
        else
            free_unmapped_md( md );
    };

    return;
};
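/*
 * Descriptor lifecycle sketch (illustrative): md_alloc() carves a used
 * md out of the mapped or unmapped small heap and, for PG_MAP requests,
 * writes the PTEs; md_free() clears the mapping again and lets the md
 * merge back with its free neighbours.
 */
#if 0
    md_t *md = md_alloc(8192, PG_MAP | 3);
    if( md )
        md_free(md);
#endif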
void __fastcall heap_fault(addr_t faddr, u32_t code)
{
    index_t   idx;
    frame_t  *frame;
    mmap_t   *map;

    idx = faddr >> HF_WIDTH;

    frame = &z_heap.frames[idx-z_heap.base];

    map = frame->parent;

    ASSERT( faddr >= map->base);

    if( faddr < map->base + map->size)
    {
        addr_t page;

        idx = (faddr - map->base) >> PAGE_WIDTH;

        page = map->pte[idx];

        if( page != 0)
        {
#if 0
            if( page & PG_DEMAND)
            {
                page &= ~PG_DEMAND;
                page = alloc_page() | (page & 0xFFF);

                map->pte[idx] = page;
            };
#endif
            ((addr_t*)page_tabs)[faddr >> PAGE_WIDTH] = page;
        };
    };
};

void * __fastcall mem_alloc(size_t size, u32_t flags)
{
    eflags_t efl;

    md_t *md;

    DBG("\nmem_alloc: %x bytes\n", size);

    ASSERT(size != 0);

    md = md_alloc(size, flags);

    if( !md )
        return NULL;

    efl = safe_cli();
    spinlock_lock(&sheap.lock);

    if( list_empty(&sheap.used) )
        list_prepend(&md->link, &sheap.used);
    else
    {
        md_t *tmp = (md_t*)sheap.used.next;

        while( &tmp->link != &sheap.used)
        {
            if(md->base < tmp->base)
                break;
            tmp = (md_t*)tmp->link.next;
        }
        list_insert(&md->link, &tmp->link);
    };

    spinlock_unlock(&sheap.lock);
    safe_sti(efl);

    DBG("allocate: %x size %x\n\n", md->base, size);
    return (void*)md->base;
};
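/*
 * Usage sketch for the descriptor-based front end (illustrative):
 * allocations are page-granular and tracked on sheap.used, so the
 * matching mem_free() below finds the md by its base address.
 */
#if 0
    void *p = mem_alloc(8192, PG_MAP | 3);
    if( p )
        mem_free(p);
#endif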
void __fastcall mem_free(void *mem)
{
    eflags_t efl;

    md_t *tmp;
    md_t *md = NULL;

    DBG("mem_free: %x\n", mem);

    ASSERT( mem != 0 );
    ASSERT( ((addr_t)mem & 0xFFF) == 0 );
    ASSERT( ! list_empty(&sheap.used));

    efl = safe_cli();

    tmp = (md_t*)sheap.used.next;

    while( &tmp->link != &sheap.used)
    {
        if( tmp->base == (addr_t)mem )
        {
            md = tmp;
            break;
        };
        tmp = (md_t*)tmp->link.next;
    }

    if( md )
    {
        md_free( md );
    }
    else
        DBG("\tERROR: invalid base address: %x\n", mem);

    safe_sti(efl);
};

//#include "mmap.inc"