6,6 → 6,8 |
#include <mm.h> |
#include <slab.h> |
|
#define page_tabs 0xDF800000 |
|
typedef struct |
{ |
link_t link; |
20,12 → 22,15 |
#define MD_USED 2 |
|
typedef struct { |
SPINLOCK_DECLARE(lock); /**< this lock protects everything below */ |
u32_t av_mapped; |
u32_t av_unmapped; |
|
u32_t availmask; |
link_t free[32]; |
link_t mapped[32]; |
link_t unmapped[32]; |
|
link_t used; |
|
SPINLOCK_DECLARE(lock); /**< this lock protects everything below */ |
}heap_t; |
|
|
37,20 → 42,25 |
heap_t sheap; |
|
|
/* Atomic-style bit helpers for the large heap's availability masks.
 * They address the heap_t fields by raw symbol offset: "_lheap" is the
 * word at offset 0 and "_lheap+4" the word at offset 4 — presumably
 * av_mapped and av_unmapped respectively, assuming SPINLOCK_DECLARE
 * expands to nothing in this configuration; TODO confirm field layout.
 * NOTE(review): the asm writes memory but declares only a "cc" clobber,
 * so the compiler is not told lheap changed — verify no cached reads. */

/* set bit idx in _lheap+4 (large-heap unmapped-availability mask) */
static inline void _set_lavu(count_t idx)
{ asm volatile ("bts %0, _lheap+4"::"r"(idx):"cc"); }

/* set bit idx in _lheap+0 — old-style name; appears superseded by the
 * av_* helpers above/below (merge residue?), kept as-is */
static inline void _set_lmask(count_t idx)
{ asm volatile ("bts %0, _lheap"::"r"(idx):"cc"); }
/* clear bit idx in _lheap+4 (large-heap unmapped-availability mask) */
static inline void _reset_lavu(count_t idx)
{ asm volatile ("btr %0, _lheap+4"::"r"(idx):"cc"); }

/* clear bit idx in _lheap+0 — old-style counterpart of _set_lmask */
static inline void _reset_lmask(count_t idx)
{ asm volatile ("btr %0, _lheap"::"r"(idx):"cc"); }
|
/* Small-heap mapped-availability mask helpers.  "_sheap" addresses the
 * first mask word of sheap by symbol; used by find_mapped_md /
 * free_mapped_md whenever a mapped[] list becomes (non-)empty.
 * (The stray old-name signatures _set_smask/_reset_smask that preceded
 * these — diff residue producing two declarators before one body, a
 * syntax error — have been removed.) */

/* set bit idx in sheap's mapped-availability mask */
static inline void _set_savm(count_t idx)
{ asm volatile ("bts %0, _sheap"::"r"(idx):"cc"); }

/* clear bit idx in sheap's mapped-availability mask */
static inline void _reset_savm(count_t idx)
{ asm volatile ("btr %0, _sheap"::"r"(idx):"cc"); }
|
/* Small-heap unmapped-availability mask helpers: "_sheap+4" is the word
 * 4 bytes into sheap — presumably av_unmapped, assuming the lock occupies
 * no storage in this build; TODO confirm against heap_t layout. */

/* set bit idx in sheap's unmapped-availability mask */
static inline void _set_savu(count_t idx)
{ asm volatile ("bts %0, _sheap+4"::"r"(idx):"cc"); }

/* clear bit idx in sheap's unmapped-availability mask */
static inline void _reset_savu(count_t idx)
{ asm volatile ("btr %0, _sheap+4"::"r"(idx):"cc"); }
|
|
int __fastcall init_heap(addr_t base, size_t size) |
{ |
md_t *md; |
63,8 → 73,11 |
|
for (i = 0; i < 32; i++) |
{ |
list_initialize(&lheap.free[i]); |
list_initialize(&sheap.free[i]); |
list_initialize(&lheap.mapped[i]); |
list_initialize(&lheap.unmapped[i]); |
|
list_initialize(&sheap.mapped[i]); |
list_initialize(&sheap.unmapped[i]); |
}; |
|
list_initialize(&lheap.used); |
81,12 → 94,12 |
md->parent = NULL; |
md->state = MD_FREE; |
|
list_prepend(&md->link, &lheap.free[31]); |
lheap.availmask = 0x80000000; |
sheap.availmask = 0x00000000; |
list_prepend(&md->link, &lheap.unmapped[31]); |
lheap.av_mapped = 0x00000000; |
lheap.av_unmapped = 0x80000000; |
sheap.av_mapped = 0x00000000; |
sheap.av_unmapped = 0x00000000; |
|
// phm_slab = slab_cache_create(sizeof(phismem_t), 32,NULL,NULL,SLAB_CACHE_MAGDEFERRED); |
|
return 1; |
}; |
|
100,14 → 113,14 |
ASSERT((size & 0x3FFFFF) == 0); |
|
idx0 = (size>>22) - 1 < 32 ? (size>>22) - 1 : 31; |
mask = lheap.availmask & ( -1<<idx0 ); |
mask = lheap.av_unmapped & ( -1<<idx0 ); |
|
if(mask) |
{ |
if(idx0 == 31) |
{ |
md_t *tmp = (md_t*)lheap.free[31].next; |
while((link_t*)tmp != &lheap.free[31]) |
md_t *tmp = (md_t*)lheap.unmapped[31].next; |
while((link_t*)tmp != &lheap.unmapped[31]) |
{ |
if(tmp->size >= size) |
{ |
123,9 → 136,9 |
{ |
idx0 = _bsf(mask); |
|
ASSERT( !list_empty(&lheap.free[idx0])) |
ASSERT( !list_empty(&lheap.unmapped[idx0])) |
|
md = (md_t*)lheap.free[idx0].next; |
md = (md_t*)lheap.unmapped[idx0].next; |
}; |
} |
else |
134,8 → 147,8 |
ASSERT(md->state == MD_FREE); |
|
list_remove((link_t*)md); |
if(list_empty(&lheap.free[idx0])) |
_reset_lmask(idx0); |
if(list_empty(&lheap.unmapped[idx0])) |
_reset_lavu(idx0); |
|
if(md->size > size) |
{ |
147,6 → 160,7 |
|
new_md->base = md->base; |
new_md->size = size; |
new_md->parent = NULL; |
new_md->state = MD_USED; |
|
md->base+= size; |
154,8 → 168,8 |
|
idx1 = (md->size>>22) - 1 < 32 ? (md->size>>22) - 1 : 31; |
|
list_prepend(&md->link, &lheap.free[idx1]); |
_set_lmask(idx1); |
list_prepend(&md->link, &lheap.unmapped[idx1]); |
_set_lavu(idx1); |
|
return new_md; |
}; |
164,7 → 178,7 |
return md; |
} |
|
md_t* __fastcall find_small_md(size_t size) |
md_t* __fastcall find_unmapped_md(size_t size) |
{ |
eflags_t efl; |
|
178,18 → 192,18 |
efl = safe_cli(); |
|
idx0 = (size>>12) - 1 < 32 ? (size>>12) - 1 : 31; |
mask = sheap.availmask & ( -1<<idx0 ); |
mask = sheap.av_unmapped & ( -1<<idx0 ); |
|
DBG("smask %x size %x idx0 %x mask %x\n",sheap.availmask, size, idx0, mask); |
DBG("smask %x size %x idx0 %x mask %x\n",sheap.av_unmapped, size, idx0, mask); |
|
if(mask) |
{ |
if(idx0 == 31) |
{ |
ASSERT( !list_empty(&sheap.free[31])); |
ASSERT( !list_empty(&sheap.unmapped[31])); |
|
md_t *tmp = (md_t*)sheap.free[31].next; |
while((link_t*)tmp != &sheap.free[31]) |
md_t *tmp = (md_t*)sheap.unmapped[31].next; |
while((link_t*)tmp != &sheap.unmapped[31]) |
{ |
if(tmp->size >= size) |
{ |
203,9 → 217,9 |
{ |
idx0 = _bsf(mask); |
|
ASSERT( !list_empty(&sheap.free[idx0])); |
ASSERT( !list_empty(&sheap.unmapped[idx0])); |
|
md = (md_t*)sheap.free[idx0].next; |
md = (md_t*)sheap.unmapped[idx0].next; |
} |
}; |
|
214,10 → 228,11 |
DBG("remove md %x\n", md); |
|
ASSERT(md->state==MD_FREE); |
ASSERT(md->parent != NULL); |
|
list_remove((link_t*)md); |
if(list_empty(&sheap.free[idx0])) |
_reset_smask(idx0); |
if(list_empty(&sheap.unmapped[idx0])) |
_reset_savu(idx0); |
} |
else |
{ |
232,6 → 247,11 |
return NULL; |
}; |
|
ASSERT(lmd->size != 0); |
ASSERT(lmd->base != 0); |
ASSERT((lmd->base & 0x3FFFFF) == 0); |
ASSERT(lmd->parent == NULL); |
|
md = (md_t*)slab_alloc(md_slab,0); /* FIXME check */ |
|
link_initialize(&md->link); |
264,16 → 284,16 |
DBG("insert md %x, base %x size %x idx %x\n", md,md->base, md->size,idx1); |
|
if( idx1 < 31) |
list_prepend(&md->link, &sheap.free[idx1]); |
list_prepend(&md->link, &sheap.unmapped[idx1]); |
else |
{ |
if( list_empty(&sheap.free[31])) |
list_prepend(&md->link, &sheap.free[31]); |
if( list_empty(&sheap.unmapped[31])) |
list_prepend(&md->link, &sheap.unmapped[31]); |
else |
{ |
md_t *tmp = (md_t*)sheap.free[31].next; |
md_t *tmp = (md_t*)sheap.unmapped[31].next; |
|
while((link_t*)tmp != &sheap.free[31]) |
while((link_t*)tmp != &sheap.unmapped[31]) |
{ |
if(md->base < tmp->base) |
break; |
283,7 → 303,7 |
}; |
}; |
|
_set_smask(idx1); |
_set_savu(idx1); |
|
safe_sti(efl); |
|
297,13 → 317,167 |
return md; |
} |
|
void __fastcall free_small_md(md_t *md) |
md_t* __fastcall find_mapped_md(size_t size) |
{ |
eflags_t efl ; |
|
md_t *md = NULL; |
|
count_t idx0; |
u32_t mask; |
|
ASSERT((size & 0xFFF) == 0); |
|
efl = safe_cli(); |
|
idx0 = (size>>12) - 1 < 32 ? (size>>12) - 1 : 31; |
mask = sheap.av_mapped & ( -1<<idx0 ); |
|
DBG("small av_mapped %x size %x idx0 %x mask %x\n",sheap.av_mapped, size, |
idx0, mask); |
|
if(mask) |
{ |
if(idx0 == 31) |
{ |
ASSERT( !list_empty(&sheap.mapped[31])); |
|
md_t *tmp = (md_t*)sheap.mapped[31].next; |
while((link_t*)tmp != &sheap.mapped[31]) |
{ |
if(tmp->size >= size) |
{ |
md = tmp; |
break; |
}; |
tmp = (md_t*)tmp->link.next; |
}; |
} |
else |
{ |
idx0 = _bsf(mask); |
|
ASSERT( !list_empty(&sheap.mapped[idx0])); |
|
md = (md_t*)sheap.mapped[idx0].next; |
} |
}; |
|
if(md) |
{ |
DBG("remove md %x\n", md); |
|
ASSERT(md->state==MD_FREE); |
|
list_remove((link_t*)md); |
if(list_empty(&sheap.mapped[idx0])) |
_reset_savm(idx0); |
} |
else |
{ |
md_t *lmd; |
addr_t frame; |
addr_t *pte; |
int i; |
|
lmd = find_large_md((size+0x3FFFFF)&~0x3FFFFF); |
|
DBG("get large md %x\n", lmd); |
|
if( !lmd) |
{ |
safe_sti(efl); |
return NULL; |
}; |
|
ASSERT(lmd->size != 0); |
ASSERT(lmd->base != 0); |
ASSERT((lmd->base & 0x3FFFFF) == 0); |
ASSERT(lmd->parent == NULL); |
|
frame = core_alloc(10); /* FIXME check */ |
|
lmd->parent = (void*)frame; |
|
pte = &((addr_t*)page_tabs)[lmd->base>>12]; /* FIXME remove */ |
|
for(i = 0; i<1024; i++) |
{ |
*pte++ = frame; |
frame+= 4096; |
} |
|
md = (md_t*)slab_alloc(md_slab,0); /* FIXME check */ |
|
link_initialize(&md->link); |
list_initialize(&md->adj); |
md->base = lmd->base; |
md->size = lmd->size; |
md->parent = lmd; |
md->state = MD_USED; |
}; |
|
if(md->size > size) |
{ |
count_t idx1; |
md_t *new_md = (md_t*)slab_alloc(md_slab,0); /* FIXME check */ |
|
link_initialize(&new_md->link); |
list_insert(&new_md->adj, &md->adj); |
|
new_md->base = md->base; |
new_md->size = size; |
new_md->parent = md->parent; |
|
md->base+= size; |
md->size-= size; |
md->state = MD_FREE; |
|
idx1 = (md->size>>12) - 1 < 32 ? (md->size>>12) - 1 : 31; |
|
DBG("insert md %x, base %x size %x idx %x\n", md,md->base, md->size,idx1); |
|
if( idx1 < 31) |
list_prepend(&md->link, &sheap.mapped[idx1]); |
else |
{ |
if( list_empty(&sheap.mapped[31])) |
list_prepend(&md->link, &sheap.mapped[31]); |
else |
{ |
md_t *tmp = (md_t*)sheap.mapped[31].next; |
|
while((link_t*)tmp != &sheap.mapped[31]) |
{ |
if(md->base < tmp->base) |
break; |
tmp = (md_t*)tmp->link.next; |
} |
list_insert(&md->link, &tmp->link); |
}; |
}; |
|
_set_savm(idx1); |
|
md = new_md; |
}; |
|
md->state = MD_USED; |
|
safe_sti(efl); |
|
return md; |
} |
|
void __fastcall free_unmapped_md(md_t *md) |
{ |
eflags_t efl ; |
md_t *fd; |
md_t *bk; |
count_t idx; |
|
ASSERT(md->parent != NULL); |
|
efl = safe_cli(); |
spinlock_lock(&sheap.lock); |
|
317,8 → 491,8 |
idx = (fd->size>>12) - 1 < 32 ? (fd->size>>12) - 1 : 31; |
|
list_remove((link_t*)fd); |
if(list_empty(&sheap.free[idx])) |
_reset_smask(idx); |
if(list_empty(&sheap.unmapped[idx])) |
_reset_savu(idx); |
|
md->size+= fd->size; |
md->adj.next = fd->adj.next; |
330,8 → 504,8 |
idx = (bk->size>>12) - 1 < 32 ? (bk->size>>12) - 1 : 31; |
|
list_remove((link_t*)bk); |
if(list_empty(&sheap.free[idx])) |
_reset_smask(idx); |
if(list_empty(&sheap.unmapped[idx])) |
_reset_savu(idx); |
|
bk->size+= md->size; |
bk->adj.next = md->adj.next; |
345,19 → 519,19 |
|
idx = (md->size>>12) - 1 < 32 ? (md->size>>12) - 1 : 31; |
|
_set_smask(idx); |
_set_savu(idx); |
|
if( idx < 31) |
list_prepend(&md->link, &sheap.free[idx]); |
list_prepend(&md->link, &sheap.unmapped[idx]); |
else |
{ |
if( list_empty(&sheap.free[31])) |
list_prepend(&md->link, &sheap.free[31]); |
if( list_empty(&sheap.unmapped[31])) |
list_prepend(&md->link, &sheap.unmapped[31]); |
else |
{ |
md_t *tmp = (md_t*)sheap.free[31].next; |
md_t *tmp = (md_t*)sheap.unmapped[31].next; |
|
while((link_t*)tmp != &sheap.free[31]) |
while((link_t*)tmp != &sheap.unmapped[31]) |
{ |
if(md->base < tmp->base) |
break; |
371,60 → 545,83 |
|
}; |
|
/** Return a mapped (physically backed) descriptor to the free lists.
 *
 *  Coalesces @md with its address-adjacent neighbours (adj list) when
 *  they are MD_FREE, releasing the absorbed descriptors to md_slab, then
 *  inserts the result into the sheap.mapped[] list for its size class
 *  (list 31 is kept sorted by base) and sets the availability bit.
 *
 *  Physical pages are NOT released here; md->parent (the large md) and
 *  its parent (the phys base) must be valid — both asserted.
 *  Serialized by sheap.lock with interrupts disabled.
 *
 *  (This body is the reconstructed new side of a mangled diff hunk: the
 *  removed, commented-out phis_alloc/map_phm code and a duplicate
 *  page_tabs #define that were interleaved here have been dropped.
 *  Bug fix: after merging md into its predecessor bk, the surviving
 *  descriptor is bk — the old "md = fd;" re-used fd, which may already
 *  have been slab_free'd in the branch above.)
 */
void __fastcall free_mapped_md(md_t *md)
{
    eflags_t efl ;
    md_t *fd;
    md_t *bk;
    count_t idx;

    ASSERT(md->parent != NULL);
    ASSERT( ((md_t*)(md->parent))->parent != NULL);

    efl = safe_cli();
    spinlock_lock(&sheap.lock);

    if( !list_empty(&md->adj))
    {
        bk = (md_t*)md->adj.prev;       /* neighbour just below md */
        fd = (md_t*)md->adj.next;       /* neighbour just above md */

        if(fd->state == MD_FREE)
        {
            /* absorb the forward neighbour into md */
            idx = (fd->size>>12) - 1 < 32 ? (fd->size>>12) - 1 : 31;

            list_remove((link_t*)fd);
            if(list_empty(&sheap.mapped[idx]))
                _reset_savm(idx);

            md->size+= fd->size;
            md->adj.next = fd->adj.next;
            md->adj.next->prev = (link_t*)md;
            slab_free(md_slab, fd);
        };
        if(bk->state == MD_FREE)
        {
            /* absorb md into the backward neighbour */
            idx = (bk->size>>12) - 1 < 32 ? (bk->size>>12) - 1 : 31;

            list_remove((link_t*)bk);
            if(list_empty(&sheap.mapped[idx]))
                _reset_savm(idx);

            bk->size+= md->size;
            bk->adj.next = md->adj.next;
            bk->adj.next->prev = (link_t*)bk;
            slab_free(md_slab, md);
            md = bk;                    /* bk now describes the merged region */
        };
    };

    md->state = MD_FREE;

    idx = (md->size>>12) - 1 < 32 ? (md->size>>12) - 1 : 31;

    _set_savm(idx);

    if( idx < 31)
        list_prepend(&md->link, &sheap.mapped[idx]);
    else
    {
        /* keep list 31 sorted by base address */
        if( list_empty(&sheap.mapped[31]))
            list_prepend(&md->link, &sheap.mapped[31]);
        else
        {
            md_t *tmp = (md_t*)sheap.mapped[31].next;

            while((link_t*)tmp != &sheap.mapped[31])
            {
                if(md->base < tmp->base)
                    break;
                tmp = (md_t*)tmp->link.next;
            }
            list_insert(&md->link, &tmp->link);
        };
    };
    spinlock_unlock(&sheap.lock);
    safe_sti(efl);
};
|
|
void * __fastcall mem_alloc(size_t size, u32_t flags) |
{ |
eflags_t efl; |
437,37 → 634,42 |
|
size = (size+4095)&~4095; |
|
md = find_small_md(size); |
|
if( md ) |
{ |
ASSERT(md->state == MD_USED); |
|
if( flags & PG_MAP ) |
{ |
count_t tmp = size >> 12; |
addr_t *pte = &((addr_t*)page_tabs)[md->base>>12]; |
md = find_mapped_md(size); |
|
while(tmp) |
{ |
u32_t order; |
addr_t frame; |
size_t size; |
if( !md ) |
return NULL; |
|
asm volatile ("bsr %1, %0":"=&r"(order):"r"(tmp):"cc"); |
asm volatile ("btr %1, %0" :"=r"(tmp):"r"(order):"cc"); |
md_t *lmd = (md_t*)md->parent; |
|
frame = core_alloc(order) | flags; /* FIXME check */ |
ASSERT( lmd != NULL); |
ASSERT( lmd->parent != NULL); |
|
size = (1 << order); |
while(size--) |
addr_t frame = (md->base - lmd->base + (addr_t)lmd->parent)| |
(flags & 0xFFF); |
DBG("frame %x\n", frame); |
ASSERT(frame != 0); |
|
count_t tmp = size >> 12; |
addr_t *pte = &((addr_t*)page_tabs)[md->base>>12]; |
|
while(tmp--) |
{ |
*pte++ = frame; |
frame+= 4096; |
}; |
}; |
}; |
} |
else |
md = find_unmapped_md(size); |
|
if( !md ) |
return NULL; |
|
ASSERT(md->parent != NULL); |
ASSERT(md->state == MD_USED); |
|
|
efl = safe_cli(); |
spinlock_lock(&sheap.lock); |
|
492,8 → 694,6 |
DBG("allocate: %x size %x\n\n",md->base, size); |
return (void*)md->base; |
}; |
return NULL; |
}; |
|
void __fastcall mem_free(void *mem) |
{ |
524,10 → 724,20 |
|
if( md ) |
{ |
md_t *lmd; |
|
DBG("\tmd: %x base: %x size: %x\n",md, md->base, md->size); |
|
ASSERT(md->state == MD_USED); |
|
list_remove((link_t*)md); |
|
lmd = (md_t*)md->parent; |
|
ASSERT(lmd != 0); |
|
if(lmd->parent != 0) |
{ |
count_t tmp = md->size >> 12; |
addr_t *pte = &((addr_t*)page_tabs)[md->base>>12]; |
|
536,17 → 746,17 |
*pte++ = 0; |
asm volatile ( |
"invlpg (%0)" |
: |
:"r" (mem) ); |
::"r" (mem) ); |
mem+= 4096; |
}; |
list_remove((link_t*)md); |
free_small_md( md ); |
|
free_mapped_md( md ); |
} |
else |
{ |
free_unmapped_md( md ); |
} |
else |
DBG("\tERROR: invalid base address: %x\n", mem); |
}; |
|
safe_sti(efl); |
}; |