/kernel/branches/kolibri_pe/const.inc |
---|
191,9 → 191,11 |
HEAP_MIN_SIZE equ 0x01000000 |
page_tabs equ 0xDDC00000 |
app_page_tabs equ 0xDDC00000 |
page_tabs equ 0xDD800000 |
app_page_tabs equ 0xDD800000 |
shared_tabs equ 0xDDC00000 |
heap_tabs equ (page_tabs+ (HEAP_BASE shr 10)) |
kernel_tabs equ (page_tabs+ (OS_BASE shr 10)) |
master_tab equ (page_tabs+ (page_tabs shr 10)) |
/kernel/branches/kolibri_pe/core/buddy.inc |
---|
0,0 → 1,368 |
/* Order value marking a frame that is the interior (non-head) part of a
   coalesced buddy block; 0xff can never be a real order. */
#define BUDDY_SYSTEM_INNER_BLOCK 0xff
/* Index of a frame within the global core zone's frames[] array. */
#define frame_index( frame ) \
(index_t)( (frame) - z_core.frames)
/* Reset a frame to its initial "allocated, order 0" state.
 * FIX: wrapped in do{}while(0) so the macro expands to ONE statement.
 * The old two-statement form, used as an unbraced loop body in
 * zone_create(), left only the refcount assignment inside the loop and
 * executed "buddy_order = 0" once afterwards — writing one element past
 * the frame array. */
#define frame_initialize( frame ) \
do { \
(frame)->refcount = 1; \
(frame)->buddy_order = 0; \
} while (0)
/* Read the buddy order stored in a block's frame header. */
#define buddy_get_order( block) \
((frame_t*)(block))->buddy_order
/* Store a block's buddy order. */
#define buddy_set_order( block, order) \
((frame_t*)(block))->buddy_order = (order)
/* Mark a block busy (refcount 1) so it cannot be coalesced. */
#define buddy_mark_busy( block ) \
((frame_t*)(block))->refcount = 1
/* A frame is the left (lower) half of its buddy pair when bit 'order'
   of its zone index is clear. */
#define IS_BUDDY_LEFT_BLOCK(frame) \
(((frame_index((frame)) >> (frame)->buddy_order) & 0x1) == 0)
/* A frame is the right (upper) half when that bit is set. */
#define IS_BUDDY_RIGHT_BLOCK(frame) \
(((frame_index((frame)) >> (frame)->buddy_order) & 0x1) == 1)
/* Mark a block free (refcount 0) so find_buddy() may coalesce it. */
#define buddy_mark_available( block ) \
((frame_t*)(block))->refcount = 0
/* Split a block in half and return the link of its upper (right) buddy:
 * the frame 2^(order-1) entries above the block's head frame. */
static __inline link_t * buddy_bisect(link_t *block)
{
frame_t *head = (frame_t*)block;
frame_t *upper = head + (1 << (head->buddy_order - 1));
return &upper->buddy_link;
}
/* Merge two buddies into one block: the coalesced block is headed by
 * whichever frame lies lower in memory. */
static __inline link_t *buddy_coalesce(link_t *block_1, link_t *block_2)
{
if ((frame_t*)block_1 < (frame_t*)block_2)
return block_1;
return block_2;
}
/* Find the free buddy of 'block' inside z_core.
 * Returns the buddy's link when it lies inside the zone, has the same
 * order and refcount 0 (i.e. may be coalesced); otherwise NULL. */
static link_t *find_buddy(link_t *block)
{
frame_t *frame;
index_t index;
u32_t is_left, is_right;
frame = (frame_t*)block;
// ASSERT(IS_BUDDY_ORDER_OK(frame_index_abs(zone, frame),frame->buddy_order));
is_left = IS_BUDDY_LEFT_BLOCK( frame);
is_right = IS_BUDDY_RIGHT_BLOCK( frame);
// ASSERT(is_left ^ is_right);
/* The buddy of a left block lies 2^order frames above it,
   of a right block the same distance below it. */
if (is_left) {
index = (frame_index(frame)) + (1 << frame->buddy_order);
} else { /* if (is_right) */
index = (frame_index(frame)) - (1 << frame->buddy_order);
}
/* NOTE(review): for a right block at index 0 the subtraction would wrap;
   index_t is presumably unsigned, so the range check below rejects it. */
if ( index < z_core.count)
{
if (z_core.frames[index].buddy_order == frame->buddy_order &&
z_core.frames[index].refcount == 0) {
return &z_core.frames[index].buddy_link;
}
}
return NULL;
}
/* Walk downward from 'child' through the frame array and return the link
 * of the first frame whose recorded order differs from 'order'.
 * Called with order == BUDDY_SYSTEM_INNER_BLOCK this finds the head of
 * the (possibly larger) free block that contains 'child'.
 * Returns NULL when every frame down to index 0 matches 'order'. */
static link_t *buddy_find_block(link_t *child, u32_t order)
{
frame_t *frame;
index_t index;
frame = (frame_t*)child;
index = frame_index(frame);
do {
if (z_core.frames[index].buddy_order != order)
return &z_core.frames[index].buddy_link;
} while(index-- > 0);
return NULL;
}
/* Return 'block' to the free lists of z_core, recursively coalescing it
 * with its free buddy into ever larger blocks until no buddy is free or
 * the maximum order is reached. Caller must hold z_core.lock. */
static void buddy_system_free(link_t *block)
{
link_t *buddy, *hlp;
u32_t i;
/*
* Determine block's order.
*/
i = buddy_get_order(block);
// ASSERT(i <= z->max_order);
if (i != z_core.max_order)
{
/*
* See if there is any buddy in the list of order i.
*/
buddy = find_buddy( block );
if (buddy)
{
// ASSERT(buddy_get_order(z, buddy) == i);
/*
* Remove buddy from the list of order i.
*/
list_remove(buddy);
/*
* Invalidate order of both block and buddy.
*/
buddy_set_order(block, BUDDY_SYSTEM_INNER_BLOCK);
buddy_set_order(buddy, BUDDY_SYSTEM_INNER_BLOCK);
/*
* Coalesce block and buddy into one block.
*/
hlp = buddy_coalesce( block, buddy );
/*
* Set order of the coalesced block to i + 1.
*/
buddy_set_order(hlp, i + 1);
/*
* Recursively add the coalesced block to the list of order i + 1.
*/
buddy_system_free( hlp );
return;
}
}
/*
* Insert block into the list of order i.
*/
list_append(block, &z_core.order[i]);
}
/* Allocate a 2^i-frame block from z_core, splitting a higher-order block
 * when order i is empty. Returns the block's link marked busy, or NULL
 * when no block of order i or higher is free.
 * Caller must hold z_core.lock; zone counters are NOT updated here. */
static link_t* buddy_alloc( u32_t i)
{
link_t *res, *hlp;
ASSERT(i <= z_core.max_order);
/*
* If the list of order i is not empty,
* the request can be immediatelly satisfied.
*/
if (!list_empty(&z_core.order[i])) {
res = z_core.order[i].next;
list_remove(res);
buddy_mark_busy(res);
return res;
}
/*
* If order i is already the maximal order,
* the request cannot be satisfied.
*/
if (i == z_core.max_order)
return NULL;
/*
* Try to recursively satisfy the request from higher order lists.
*/
hlp = buddy_alloc( i + 1 );
/*
* The request could not be satisfied
* from higher order lists.
*/
if (!hlp)
return NULL;
res = hlp;
/*
* Bisect the block and set order of both of its parts to i.
*/
hlp = buddy_bisect( res );
buddy_set_order(res, i);
buddy_set_order(hlp, i);
/*
* Return the other half to buddy system. Mark the first part
* full, so that it won't coalesce again.
*/
buddy_mark_busy(res);
buddy_system_free( hlp );
return res;
}
/* Carve the specific frame 'block' out of the buddy system: locate the
 * free block containing it, then repeatedly bisect, keeping the half
 * that still holds 'block' and freeing the other half, until an order-0
 * frame remains. Returns that frame's link, marked busy.
 * Caller must hold z_core.lock. */
static link_t* buddy_alloc_block(link_t *block)
{
link_t *left,*right, *tmp;
u32_t order;
left = buddy_find_block(block, BUDDY_SYSTEM_INNER_BLOCK);
ASSERT(left);
list_remove(left);
while (1)
{
if ( !buddy_get_order(left))
{
buddy_mark_busy(left);
return left;
}
order = buddy_get_order(left);
right = buddy_bisect(left);
buddy_set_order(left, order-1);
buddy_set_order(right, order-1);
/* Re-locate the half that contains 'block'; swap so 'left' is it. */
tmp = buddy_find_block( block, BUDDY_SYSTEM_INNER_BLOCK);
if (tmp == right) {
right = left;
left = tmp;
}
ASSERT(tmp == left);
/* Busy 'left' while freeing 'right' so the two halves cannot
   re-coalesce, then make it available again for the next split. */
buddy_mark_busy(left);
buddy_system_free(right);
buddy_mark_available(left);
}
}
/* Initialise zone 'z' covering 'count' frames starting at pfn 'start':
 * set up the per-order free lists and allocate the frame array via the
 * boot allocator. Every frame starts busy (refcount 1, order 0);
 * zone_release() later donates frames to the buddy system.
 * NOTE(review): balloc() result is not checked — boot-time policy,
 * presumably; confirm balloc cannot fail here. */
static void zone_create(zone_t *z, pfn_t start, count_t count)
{
unsigned int i;
spinlock_initialize(&z->lock);
z->base = start;
z->count = count;
z->free_count = count;
z->busy_count = 0;
z->max_order = fnzb(count);
ASSERT(z->max_order < BUDDY_SYSTEM_INNER_BLOCK);
for (i = 0; i <= z->max_order; i++)
list_initialize(&z->order[i]);
z->frames = (frame_t *)balloc(count*sizeof(frame_t));
/* FIX: frame_initialize is a multi-statement macro — brace the loop
   body so both assignments stay inside the loop (the unbraced form
   ran "buddy_order = 0" once with i == count, off the end). */
for (i = 0; i < count; i++) {
frame_initialize(&z->frames[i]);
}
DBG("create zone: base %x count %x order %d\n",
start, count, z->max_order);
}
/* Permanently reserve one frame of 'zone': pull it out of the buddy
 * system with buddy_alloc_block(). A frame that is already referenced
 * is left untouched. The net effect removes exactly one frame, so
 * free_count drops by one. */
static void zone_mark_unavailable(zone_t *zone, index_t frame_idx)
{
frame_t *frame;
link_t *link;
ASSERT(frame_idx < zone->count);
frame = &zone->frames[frame_idx];
if (frame->refcount)
return;
link = buddy_alloc_block( &frame->buddy_link);
ASSERT(link);
zone->free_count--;
}
/* Mark every frame of the pfn range [base, base+count) that falls inside
 * zone 'z' as unavailable. The range is clipped to the zone; a range
 * that does not intersect the zone is ignored. */
static void zone_reserve(zone_t *z, pfn_t base, count_t count)
{
pfn_t i; /* was int: avoid signed/unsigned comparison against pfn_t */
pfn_t top = base + count;
/* use the precomputed 'top' instead of recomputing base+count */
if( (top < z->base)||(base > z->base+z->count))
return;
if(base < z->base)
base = z->base;
if(top > z->base+z->count)
top = z->base+z->count;
DBG("zone reserve base %x top %x\n", base, top);
for (i = base; i < top; i++)
zone_mark_unavailable(z, i - z->base);
}
/* Donate every frame of the pfn range [base, base+count) that falls
 * inside zone 'z' to the buddy system (boot-time: makes RAM usable).
 * The range is clipped to the zone; a disjoint range is ignored. */
static void zone_release(zone_t *z, pfn_t base, count_t count)
{
pfn_t i; /* was int: avoid signed/unsigned comparison against pfn_t */
pfn_t top = base+count;
/* use the precomputed 'top' instead of recomputing base+count */
if( (top < z->base)||(base > z->base+z->count))
return;
if(base < z->base)
base = z->base;
if(top > z->base+z->count)
top = z->base+z->count;
DBG("zone release base %x top %x\n", base, top);
for (i = base; i < top; i++) {
z->frames[i-z->base].refcount = 0;
buddy_system_free(&z->frames[i-z->base].buddy_link);
}
}
/* Return the frame descriptor at zone-relative index 'frame_idx'. */
static inline frame_t * zone_get_frame(zone_t *zone, index_t frame_idx)
{
ASSERT(frame_idx < zone->count);
return &zone->frames[frame_idx];
}
/* Attach opaque 'data' as the parent of the frame for physical page
 * 'pfn', under the core-zone lock.
 * NOTE(review): pfn is rebased by z_core.base here — verify all callers
 * of the frame parent API pass absolute pfns consistently. */
void __fastcall frame_set_parent(pfn_t pfn, void *data)
{
spinlock_lock(&z_core.lock);
zone_get_frame(&z_core, pfn-z_core.base)->parent = data;
spinlock_unlock(&z_core.lock);
}
/* Return the parent pointer previously attached to the frame for
 * physical page 'pfn', under the core-zone lock.
 * FIX: rebase pfn by z_core.base to match frame_set_parent(); the old
 * code omitted the rebase and only worked because the core zone is
 * created with base 0. */
void* __fastcall frame_get_parent(pfn_t pfn)
{
void *res;
spinlock_lock(&z_core.lock);
res = zone_get_frame(&z_core, pfn-z_core.base)->parent;
spinlock_unlock(&z_core.lock);
return res;
}
/kernel/branches/kolibri_pe/core/dll.c |
---|
173,32 → 173,32 |
PIMAGE_NT_HEADERS32 nt; |
drv_entry_t *drv_entry; |
md_t *md; |
addr_t *img_base ; |
srv_t *srv; |
md = load_image(path); |
img_base = load_image(path); |
if( ! md ) |
if( ! img_base ) |
return 0; |
if( link_image( md->base ) ) |
if( link_image( img_base ) ) |
{ |
dos = (PIMAGE_DOS_HEADER)md->base; |
dos = (PIMAGE_DOS_HEADER)img_base; |
nt = MakePtr( PIMAGE_NT_HEADERS32, dos, dos->e_lfanew); |
drv_entry = MakePtr(drv_entry_t*, md->base, |
drv_entry = MakePtr(drv_entry_t*, img_base, |
nt->OptionalHeader.AddressOfEntryPoint); |
srv = drv_entry(1); |
if(srv != NULL) |
srv->entry = nt->OptionalHeader.AddressOfEntryPoint + md->base; |
srv->entry = nt->OptionalHeader.AddressOfEntryPoint + img_base; |
return srv; |
} |
else |
{ |
md_free( md ); |
mem_free( img_base ); |
return NULL; |
} |
} |
277,7 → 277,7 |
( raw[1] == 0x30305445) ) ) |
{ |
DBG("leagacy Kolibri application"); |
DBG("leagacy Kolibri application\n"); |
int tmp = mnt_exec(raw, raw_size, path, cmdline, flags); |
DBG(" pid %x\n",tmp); |
return tmp; |
311,7 → 311,7 |
return -30; |
} |
ex_stack_page = core_alloc(0); /* 2^0 = 1 page */ |
ex_stack_page = alloc_page(); /* 2^0 = 1 page */ |
if( ! ex_stack_page ) |
{ |
mem_free(raw); |
327,7 → 327,7 |
if( !ex_pg_dir ) |
{ |
core_free(ex_stack_page); |
frame_free(ex_stack_page); |
mem_free(raw); |
return -30; /* FIXME */ |
}; |
/kernel/branches/kolibri_pe/core/frame.c |
---|
0,0 → 1,442 |
#include <types.h> |
#include <core.h> |
#include <spinlock.h> |
#include <link.h> |
#include <mm.h> |
extern u32_t pg_balloc; |
extern u32_t mem_amount; |
void __fastcall *balloc(size_t size); |
static zone_t z_core; |
#include "buddy.inc" |
/* A page slab: one 512-page buddy block whose first page holds this
 * header and whose remaining 511 pages are handed out one at a time by
 * alloc_page(). 'list' embeds a free list of page indices. */
typedef struct
{
link_t link;
SPINLOCK_DECLARE(lock);
u32_t state;   /* 1 = on the partial list, 0 = full (see alloc_page) */
void *parent;  /* owning pcache_t */
count_t avail; /* free pages remaining in this slab */
addr_t base;   /* physical address of the first usable page */
index_t next;  /* index of the first free page (head of 'list') */
int list[512]; /* list[i] = index of the free page after page i */
}pslab_t;
/* Cache of page slabs, split into full and partially-free lists. */
typedef struct
{
SPINLOCK_DECLARE(lock);
count_t partial_count;
link_t full_slabs; /**< List of full slabs */
link_t partial_slabs; /**< List of partial slabs */
}pcache_t;
/* Global single-page allocator state. */
static pcache_t page_cache;
static pslab_t *create_page_slab();
void init_mm() |
{ |
int i; |
u32_t base; |
u32_t size; |
count_t pages; |
size_t conf_size; |
size_t core_size; |
pslab_t *slab; |
pages = mem_amount >> PAGE_WIDTH; |
DBG("last page = %x total pages = %x\n",mem_amount, pages); |
conf_size = pages*sizeof(frame_t); |
DBG("conf_size = %x free mem start =%x\n",conf_size, pg_balloc); |
zone_create(&z_core, 0, pages); |
zone_release(&z_core, 0, pages); |
zone_reserve(&z_core, 0, pg_balloc >> PAGE_WIDTH); |
list_initialize(&page_cache.full_slabs); |
list_initialize(&page_cache.partial_slabs); |
slab = create_page_slab(); |
ASSERT(slab); |
slab->parent = &page_cache; |
page_cache.partial_count++; |
list_prepend(&slab->link, &page_cache.partial_slabs); |
}; |
/** Return wasted space in slab */
/* 'order' is the buddy order of a slab (2^order pages), 'size' the
 * object size; the result is the number of slab bytes not covered by
 * whole objects. */
static unsigned int badness(index_t order, size_t size)
{
unsigned int objects;
unsigned int ssize;
ssize = PAGE_SIZE << order;
objects = (PAGE_SIZE << order) / size;
return ssize - objects * size;
}
/* Maximum tolerated waste: one quarter of the slab. */
#define SLAB_MAX_BADNESS(order) (((size_t) PAGE_SIZE << (order)) >> 2)
/* Allocate a 512-page (order 9) block from the core zone and format it
 * as a page slab: page 0 holds the pslab_t header (accessed through its
 * kernel-virtual alias), pages 1..511 form the embedded free list.
 * Returns the slab, or NULL when no order-9 block is free.
 * NOTE(review): slab->state is not set here — callers must initialize
 * it before the slab becomes visible to frame_free(). */
static pslab_t *create_page_slab()
{
pslab_t *slab;
link_t *tmp;
spinlock_lock(&z_core.lock);
tmp = buddy_alloc(9);
if( tmp != 0 )
{
frame_t *frame;
int i;
addr_t v;
/* Update zone information. */
z_core.free_count -= 512;
z_core.busy_count += 512;
spinlock_unlock(&z_core.lock);
/* Frame will be actually a first frame of the block. */
frame = (frame_t*)tmp;
frame->parent = 0;
v = (z_core.base + (index_t)(frame - z_core.frames)) << PAGE_WIDTH;
slab = (pslab_t*)PA2KA(v);
/* frames 1..511 point back at the slab so frame_free() can find it */
for(i = 1; i < 512; i++)
frame[i].parent = slab;
slab->base = v + PAGE_SIZE;
slab->avail = 511;
slab->next = 0;
/* free list: page i is followed by page i+1 */
for(i = 0; i < 511; i++)
slab->list[i] = i + 1;
}
else
{
spinlock_unlock(&z_core.lock);
slab = NULL;
};
DBG("create page slab at %x\n", slab);
return slab;
}
/* Return a page slab's 512-page block to the buddy system once its head
 * frame's refcount drops to zero, updating the zone counters.
 * NOTE(review): 'order' is read before taking the lock — presumably safe
 * because the caller owns the slab; confirm. */
static void destroy_page_slab(pslab_t *slab)
{
u32_t order;
count_t idx;
frame_t *frame;
idx = (KA2PA(slab) >> PAGE_WIDTH)-z_core.base;
frame = &z_core.frames[idx];
/* remember frame order */
order = frame->buddy_order;
ASSERT(frame->refcount);
if (!--frame->refcount)
{
spinlock_lock(&z_core.lock);
buddy_system_free(&frame->buddy_link);
/* Update zone information. */
z_core.free_count += (1 << order);
z_core.busy_count -= (1 << order);
spinlock_unlock(&z_core.lock);
}
}
#if 0 |
fslab_t *create_slab(index_t order, size_t size) |
{ |
fslab_t *slab; |
slab = (fslab_t*)PA2KA(frame_alloc(0)); |
if( slab ) |
{ |
link_t *tmp; |
spinlock_lock(&z_core.lock); |
tmp = buddy_alloc(order); |
ASSERT(tmp); |
if( tmp ) |
{ |
frame_t *frame; |
count_t objects; |
count_t i; |
addr_t v; |
/* Update zone information. */ |
z_core.free_count -= (1 << order); |
z_core.busy_count += (1 << order); |
spinlock_unlock(&z_heap.lock); |
/* Frame will be actually a first frame of the block. */ |
frame = (frame_t*)tmp; |
for(i = 0; i < (1U<<order); i++) |
frame[i].parent = slab; |
/* get frame address */ |
v = z_core.base + (index_t)(frame - z_core.frames); |
slab->base = (v << PAGE_WIDTH); |
slab->avail = (PAGE_SIZE << order) / size; |
slab->next = 0; |
objects = (PAGE_SIZE << order) / size; |
for(i = 0; i < objects; i++) |
slab->list[i] = i + 1; |
} |
else |
{ |
spinlock_unlock(&z_core.lock); |
frame_free(KA2PA(slab)); |
slab = NULL; |
}; |
}; |
return slab; |
} |
static void destroy_slab(fslab_t *slab) |
{ |
u32_t order; |
count_t idx; |
frame_t *frame; |
idx = (slab->base >> PAGE_WIDTH)-z_core.base; |
frame = &z_core.frames[idx]; |
/* remember frame order */ |
order = frame->buddy_order; |
ASSERT(frame->refcount); |
if (!--frame->refcount) |
{ |
spinlock_lock(&z_core.lock); |
buddy_system_free(&frame->buddy_link); |
/* Update zone information. */ |
z_core.free_count += (1 << order); |
z_core.busy_count -= (1 << order); |
spinlock_unlock(&z_core.lock); |
} |
// slab_free(fslab, slab); |
}; |
#endif |
addr_t alloc_page(void) |
{ |
eflags_t efl; |
pslab_t *slab; |
addr_t frame; |
efl = safe_cli(); |
spinlock_lock(&page_cache.lock); |
if (list_empty(&page_cache.partial_slabs)) |
{ |
slab = create_page_slab(); |
if (!slab) |
{ |
spinlock_unlock(&page_cache.lock); |
safe_sti(efl); |
return 0; |
} |
slab->parent = &page_cache; |
slab->state = 1; |
page_cache.partial_count++; |
list_prepend(&slab->link, &page_cache.partial_slabs); |
} |
else |
slab = (pslab_t*)page_cache.partial_slabs.next; |
frame = slab->base + (slab->next << PAGE_WIDTH); |
slab->next = slab->list[slab->next]; |
slab->avail--; |
if( slab->avail == 0 ) |
{ |
slab->state = 0; |
list_remove(&slab->link); |
list_prepend(&slab->link, &page_cache.full_slabs); |
page_cache.partial_count--; |
DBG("%s insert empty page slab\n"); |
}; |
spinlock_unlock(&page_cache.lock); |
// DBG("alloc_page: %x remain %d\n", frame, slab->avail); |
safe_sti(efl); |
return frame; |
} |
/* Allocate 'count' contiguous physical pages.
 * count <= 1 is served from the page-slab cache; larger requests take a
 * power-of-two block straight from the buddy system (rounded up).
 * Returns the physical address of the first page, or 0 on failure. */
addr_t __fastcall frame_alloc(count_t count)
{
addr_t frame;
if ( count > 1)
{
eflags_t efl;
index_t order;
frame_t *tmp;
count_t i;
/* smallest power of two >= count */
order = fnzb(count-1)+1;
efl = safe_cli();
spinlock_lock(&z_core.lock);
tmp = (frame_t*)buddy_alloc( order );
if( !tmp )
{
/* FIX: fail gracefully instead of dereferencing NULL when no
   block of this order is free (the old ASSERT may compile out) */
spinlock_unlock(&z_core.lock);
safe_sti(efl);
return 0;
}
z_core.free_count -= (1 << order);
z_core.busy_count += (1 << order);
for(i = 0; i < (1 << order); i++)
tmp[i].parent = NULL;
spinlock_unlock(&z_core.lock);
safe_sti(efl);
frame = (z_core.base +
(index_t)(tmp - z_core.frames)) << PAGE_WIDTH;
DBG("%s %x order %d remain %d\n", __FUNCTION__,
frame, order, z_core.free_count);
}
else
frame = alloc_page();
return frame;
}
/* Free the physical page(s) at 'addr'. A frame with a parent slab goes
 * back onto that slab's embedded free list; a parentless frame is a
 * buddy-system block and is returned whole. Returns the number of pages
 * freed, or 0 for an address outside the core zone. */
size_t __fastcall frame_free(addr_t addr)
{
eflags_t efl;
index_t idx;
frame_t *frame;
size_t frame_size;
idx = addr >> PAGE_WIDTH;
if( (idx < z_core.base) ||
(idx >= z_core.base+z_core.count)) {
DBG("%s: invalid address %x\n", __FUNCTION__, addr);
return 0;
}
efl = safe_cli();
frame = &z_core.frames[idx-z_core.base];
if( frame->parent != NULL )
{
pslab_t *slab;
slab = frame->parent;
spinlock_lock(&page_cache.lock);
/* push the page back onto the slab's free list */
idx = (addr - slab->base) >> PAGE_WIDTH;
ASSERT(idx < 512);
slab->list[idx] = slab->next;
slab->next = idx;
slab->avail++;
if( (slab->state == 0 ) &&
(slab->avail >= 4))
{
slab->state = 1;
/* NOTE(review): the slab is flagged partial but the list moves
   below stay commented out, so a refilled slab never returns to
   the partial list — looks like a leak of reusable slabs. */
// list_remove(&slab->link);
// list_prepend(&slab->link, &page_cache.partial_slabs);
// page_cache.partial_count++;
DBG("%s: insert partial page slab\n", __FUNCTION__);
}
spinlock_unlock(&page_cache.lock);
frame_size = 1;
}
else
{
count_t order;
order = frame->buddy_order;
DBG("%s %x order %d\n", __FUNCTION__, addr, order);
ASSERT(frame->refcount);
spinlock_lock(&z_core.lock);
if (!--frame->refcount)
{
buddy_system_free(&frame->buddy_link);
/* Update zone information. */
z_core.free_count += (1 << order);
z_core.busy_count -= (1 << order);
}
spinlock_unlock(&z_core.lock);
frame_size = 1 << order;
};
safe_sti(efl);
return frame_size;
}
count_t get_free_mem() |
{ |
return z_core.free_count; |
} |
/kernel/branches/kolibri_pe/core/heap.c |
---|
4,775 → 4,481 |
#include <spinlock.h> |
#include <link.h> |
#include <mm.h> |
#include <slab.h> |
#define PG_DEMAND 0x400 |
#define MD_FREE 1 |
#define MD_USED 2 |
#define HF_WIDTH 16 |
#define HF_SIZE (1 << HF_WIDTH) |
typedef struct { |
u32_t av_mapped; |
u32_t av_unmapped; |
#define BUDDY_SYSTEM_INNER_BLOCK 0xff |
link_t mapped[32]; |
link_t unmapped[32]; |
static zone_t z_heap; |
link_t used; |
static link_t shared_mmap; |
SPINLOCK_DECLARE(lock); /**< this lock protects everything below */ |
}heap_t; |
#define heap_index( frame ) \ |
(index_t)( (frame) - z_heap.frames) |
slab_cache_t *md_slab; |
slab_cache_t *phm_slab; |
#define heap_index_abs( frame ) \ |
(index_t)( (frame) - z_heap.frames) |
heap_t lheap; |
heap_t sheap; |
static __inline void frame_initialize(frame_t *frame) |
{ |
frame->refcount = 1; |
frame->buddy_order = 0; |
} |
#define buddy_get_order( block) \ |
((frame_t*)(block))->buddy_order |
static inline void _set_lavu(count_t idx) |
{ asm volatile ("bts %0, _lheap+4"::"r"(idx):"cc"); } |
static inline void _reset_lavu(count_t idx) |
{ asm volatile ("btr %0, _lheap+4"::"r"(idx):"cc"); } |
#define buddy_set_order( block, order) \ |
((frame_t*)(block))->buddy_order = (order) |
static inline void _set_savm(count_t idx) |
{ asm volatile ("bts %0, _sheap"::"r"(idx):"cc"); } |
#define buddy_mark_busy( block ) \ |
((frame_t*)(block))->refcount = 1 |
static inline void _reset_savm(count_t idx) |
{ asm volatile ("btr %0, _sheap"::"r"(idx):"cc"); } |
static inline void _set_savu(count_t idx) |
{ asm volatile ("bts %0, _sheap+4"::"r"(idx):"cc"); } |
static __inline link_t * buddy_bisect(link_t *block) |
{ |
frame_t *frame_l, *frame_r; |
static inline void _reset_savu(count_t idx) |
{ asm volatile ("btr %0, _sheap+4"::"r"(idx):"cc"); } |
frame_l = (frame_t*)block; |
frame_r = (frame_l + (1 << (frame_l->buddy_order - 1))); |
return &frame_r->buddy_link; |
} |
int __fastcall init_heap(addr_t base, size_t size) |
static __inline link_t *buddy_coalesce(link_t *block_1, link_t *block_2) |
{ |
md_t *md; |
u32_t i; |
frame_t *frame1, *frame2; |
ASSERT(base != 0); |
ASSERT(size != 0) |
ASSERT((base & 0x3FFFFF) == 0); |
ASSERT((size & 0x3FFFFF) == 0); |
frame1 = (frame_t*)block_1; |
frame2 = (frame_t*)block_2; |
for (i = 0; i < 32; i++) |
{ |
list_initialize(&lheap.mapped[i]); |
list_initialize(&lheap.unmapped[i]); |
return frame1 < frame2 ? block_1 : block_2; |
} |
list_initialize(&sheap.mapped[i]); |
list_initialize(&sheap.unmapped[i]); |
}; |
list_initialize(&lheap.used); |
list_initialize(&sheap.used); |
#define IS_BUDDY_LEFT_BLOCK_ABS(frame) \ |
(((heap_index_abs((frame)) >> (frame)->buddy_order) & 0x1) == 0) |
md_slab = slab_cache_create(sizeof(md_t), 16,NULL,NULL,SLAB_CACHE_MAGDEFERRED); |
#define IS_BUDDY_RIGHT_BLOCK_ABS(frame) \ |
(((heap_index_abs((frame)) >> (frame)->buddy_order) & 0x1) == 1) |
md = (md_t*)slab_alloc(md_slab,0); |
list_initialize(&md->adj); |
md->base = base; |
md->size = size; |
md->parent = NULL; |
md->state = MD_FREE; |
static link_t *find_buddy(link_t *block) |
{ |
frame_t *frame; |
index_t index; |
u32_t is_left, is_right; |
list_prepend(&md->link, &lheap.unmapped[31]); |
lheap.av_mapped = 0x00000000; |
lheap.av_unmapped = 0x80000000; |
sheap.av_mapped = 0x00000000; |
sheap.av_unmapped = 0x00000000; |
frame = (frame_t*)block; |
// ASSERT(IS_BUDDY_ORDER_OK(frame_index_abs(zone, frame),frame->buddy_order)); |
return 1; |
}; |
is_left = IS_BUDDY_LEFT_BLOCK_ABS( frame); |
is_right = IS_BUDDY_RIGHT_BLOCK_ABS( frame); |
md_t* __fastcall find_large_md(size_t size) |
{ |
md_t *md = NULL; |
// ASSERT(is_left ^ is_right); |
count_t idx0; |
u32_t mask; |
if (is_left) { |
index = (heap_index(frame)) + (1 << frame->buddy_order); |
} |
else { /* if (is_right) */ |
index = (heap_index(frame)) - (1 << frame->buddy_order); |
}; |
ASSERT((size & 0x3FFFFF) == 0); |
idx0 = (size>>22) - 1 < 32 ? (size>>22) - 1 : 31; |
mask = lheap.av_unmapped & ( -1<<idx0 ); |
if(mask) |
if ( index < z_heap.count) |
{ |
if(idx0 == 31) |
{ |
md_t *tmp = (md_t*)lheap.unmapped[31].next; |
while(&tmp->link != &lheap.unmapped[31]) |
{ |
if(tmp->size >= size) |
{ |
DBG("remove large tmp %x\n", tmp); |
md = tmp; |
break; |
}; |
}; |
tmp = (md_t*)tmp->link.next; |
if (z_heap.frames[index].buddy_order == frame->buddy_order && |
z_heap.frames[index].refcount == 0) { |
return &z_heap.frames[index].buddy_link; |
} |
else |
{ |
idx0 = _bsf(mask); |
} |
ASSERT( !list_empty(&lheap.unmapped[idx0])) |
md = (md_t*)lheap.unmapped[idx0].next; |
}; |
return NULL; |
} |
else |
return NULL; |
ASSERT(md->state == MD_FREE); |
list_remove((link_t*)md); |
if(list_empty(&lheap.unmapped[idx0])) |
_reset_lavu(idx0); |
if(md->size > size) |
static void buddy_system_free(link_t *block) |
{ |
count_t idx1; |
md_t *new_md = (md_t*)slab_alloc(md_slab,0); /* FIXME check */ |
link_t *buddy, *hlp; |
u32_t i; |
link_initialize(&new_md->link); |
list_insert(&new_md->adj, &md->adj); |
/* |
* Determine block's order. |
*/ |
i = buddy_get_order(block); |
new_md->base = md->base; |
new_md->size = size; |
new_md->parent = NULL; |
new_md->state = MD_USED; |
ASSERT(i <= z_heap.max_order); |
md->base+= size; |
md->size-= size; |
idx1 = (md->size>>22) - 1 < 32 ? (md->size>>22) - 1 : 31; |
list_prepend(&md->link, &lheap.unmapped[idx1]); |
_set_lavu(idx1); |
return new_md; |
}; |
md->state = MD_USED; |
return md; |
} |
md_t* __fastcall find_unmapped_md(size_t size) |
if (i != z_heap.max_order) |
{ |
eflags_t efl; |
/* |
* See if there is any buddy in the list of order i. |
*/ |
buddy = find_buddy( block ); |
if (buddy) |
{ |
md_t *md = NULL; |
ASSERT(buddy_get_order(buddy) == i); |
/* |
* Remove buddy from the list of order i. |
*/ |
list_remove(buddy); |
count_t idx0; |
u32_t mask; |
/* |
* Invalidate order of both block and buddy. |
*/ |
buddy_set_order(block, BUDDY_SYSTEM_INNER_BLOCK); |
buddy_set_order(buddy, BUDDY_SYSTEM_INNER_BLOCK); |
ASSERT((size & 0xFFF) == 0); |
/* |
* Coalesce block and buddy into one block. |
*/ |
hlp = buddy_coalesce( block, buddy ); |
efl = safe_cli(); |
/* |
* Set order of the coalesced block to i + 1. |
*/ |
buddy_set_order(hlp, i + 1); |
idx0 = (size>>12) - 1 < 32 ? (size>>12) - 1 : 31; |
mask = sheap.av_unmapped & ( -1<<idx0 ); |
DBG("smask %x size %x idx0 %x mask %x\n",sheap.av_unmapped, size, idx0, mask); |
if(mask) |
{ |
if(idx0 == 31) |
{ |
ASSERT( !list_empty(&sheap.unmapped[31])); |
md_t *tmp = (md_t*)sheap.unmapped[31].next; |
while( &tmp->link != &sheap.unmapped[31]) |
{ |
if(tmp->size >= size) |
{ |
md = tmp; |
break; |
}; |
tmp = (md_t*)tmp->link.next; |
}; |
/* |
* Recursively add the coalesced block to the list of order i + 1. |
*/ |
buddy_system_free( hlp ); |
return; |
} |
else |
{ |
idx0 = _bsf(mask); |
} |
/* |
* Insert block into the list of order i. |
*/ |
list_append(block, &z_heap.order[i]); |
} |
ASSERT( !list_empty(&sheap.unmapped[idx0])); |
md = (md_t*)sheap.unmapped[idx0].next; |
} |
}; |
if(md) |
static link_t* buddy_system_alloc( u32_t i) |
{ |
DBG("remove md %x\n", md); |
link_t *res, *hlp; |
ASSERT(md->state==MD_FREE); |
ASSERT(md->parent != NULL); |
ASSERT(i <= z_heap.max_order); |
list_remove((link_t*)md); |
if(list_empty(&sheap.unmapped[idx0])) |
_reset_savu(idx0); |
/* |
* If the list of order i is not empty, |
* the request can be immediatelly satisfied. |
*/ |
if (!list_empty(&z_heap.order[i])) { |
res = z_heap.order[i].next; |
list_remove(res); |
buddy_mark_busy(res); |
return res; |
} |
else |
{ |
md_t *lmd; |
lmd = find_large_md((size+0x3FFFFF)&~0x3FFFFF); |
/* |
* If order i is already the maximal order, |
* the request cannot be satisfied. |
*/ |
if (i == z_heap.max_order) |
return NULL; |
DBG("get large md %x\n", lmd); |
/* |
* Try to recursively satisfy the request from higher order lists. |
*/ |
hlp = buddy_system_alloc( i + 1 ); |
if( !lmd) |
{ |
safe_sti(efl); |
/* |
* The request could not be satisfied |
* from higher order lists. |
*/ |
if (!hlp) |
return NULL; |
}; |
ASSERT(lmd->size != 0); |
ASSERT(lmd->base != 0); |
ASSERT((lmd->base & 0x3FFFFF) == 0); |
ASSERT(lmd->parent == NULL); |
res = hlp; |
md = (md_t*)slab_alloc(md_slab,0); /* FIXME check */ |
/* |
* Bisect the block and set order of both of its parts to i. |
*/ |
hlp = buddy_bisect( res ); |
link_initialize(&md->link); |
list_initialize(&md->adj); |
md->base = lmd->base; |
md->size = lmd->size; |
md->parent = lmd; |
md->state = MD_USED; |
}; |
buddy_set_order(res, i); |
buddy_set_order(hlp, i); |
if(md->size > size) |
{ |
count_t idx1; |
md_t *new_md = (md_t*)slab_alloc(md_slab,0); /* FIXME check */ |
/* |
* Return the other half to buddy system. Mark the first part |
* full, so that it won't coalesce again. |
*/ |
buddy_mark_busy(res); |
buddy_system_free( hlp ); |
link_initialize(&new_md->link); |
list_insert(&new_md->adj, &md->adj); |
return res; |
} |
new_md->base = md->base; |
new_md->size = size; |
new_md->parent = md->parent; |
new_md->state = MD_USED; |
md->base+= size; |
md->size-= size; |
md->state = MD_FREE; |
idx1 = (md->size>>12) - 1 < 32 ? (md->size>>12) - 1 : 31; |
DBG("insert md %x, base %x size %x idx %x\n", md,md->base, md->size,idx1); |
if( idx1 < 31) |
list_prepend(&md->link, &sheap.unmapped[idx1]); |
else |
int __fastcall init_heap(addr_t start, size_t size) |
{ |
if( list_empty(&sheap.unmapped[31])) |
list_prepend(&md->link, &sheap.unmapped[31]); |
else |
{ |
md_t *tmp = (md_t*)sheap.unmapped[31].next; |
count_t i; |
count_t count; |
while( &tmp->link != &sheap.unmapped[31]) |
{ |
if(md->base < tmp->base) |
break; |
tmp = (md_t*)tmp->link.next; |
} |
list_insert(&md->link, &tmp->link); |
}; |
}; |
count = size >> HF_WIDTH; |
_set_savu(idx1); |
ASSERT( start != 0); |
ASSERT( count != 0); |
safe_sti(efl); |
spinlock_initialize(&z_heap.lock); |
return new_md; |
}; |
z_heap.base = start >> HF_WIDTH; |
z_heap.count = count; |
z_heap.free_count = count; |
z_heap.busy_count = 0; |
md->state = MD_USED; |
z_heap.max_order = fnzb(count); |
safe_sti(efl); |
DBG("create heap zone: base %x count %x\n", start, count); |
return md; |
} |
ASSERT(z_heap.max_order < BUDDY_SYSTEM_INNER_BLOCK); |
md_t* __fastcall find_mapped_md(size_t size) |
{ |
eflags_t efl; |
for (i = 0; i <= z_heap.max_order; i++) |
list_initialize(&z_heap.order[i]); |
md_t *md = NULL; |
count_t idx0; |
u32_t mask; |
DBG("count %d frame_t %d page_size %d\n", |
count, sizeof(frame_t), PAGE_SIZE); |
ASSERT((size & 0xFFF) == 0); |
z_heap.frames = (frame_t *)PA2KA(frame_alloc( (count*sizeof(frame_t)) >> PAGE_WIDTH )); |
efl = safe_cli(); |
idx0 = (size>>12) - 1 < 32 ? (size>>12) - 1 : 31; |
mask = sheap.av_mapped & ( -1<<idx0 ); |
if( z_heap.frames == 0 ) |
return 0; |
DBG("small av_mapped %x size %x idx0 %x mask %x\n",sheap.av_mapped, size, |
idx0, mask); |
if(mask) |
{ |
if(idx0 == 31) |
{ |
ASSERT( !list_empty(&sheap.mapped[31])); |
for (i = 0; i < count; i++) { |
z_heap.frames[i].buddy_order=0; |
z_heap.frames[i].parent = NULL; |
z_heap.frames[i].refcount=1; |
} |
md_t *tmp = (md_t*)sheap.mapped[31].next; |
while( &tmp->link != &sheap.mapped[31]) |
for (i = 0; i < count; i++) |
{ |
if(tmp->size >= size) |
{ |
md = tmp; |
break; |
}; |
tmp = (md_t*)tmp->link.next; |
}; |
z_heap.frames[i].refcount = 0; |
buddy_system_free(&z_heap.frames[i].buddy_link); |
} |
else |
{ |
idx0 = _bsf(mask); |
ASSERT( !list_empty(&sheap.mapped[idx0])); |
list_initialize(&shared_mmap); |
md = (md_t*)sheap.mapped[idx0].next; |
return 1; |
} |
}; |
if(md) |
addr_t __fastcall mem_alloc(size_t size, u32_t flags) |
{ |
DBG("remove md %x\n", md); |
eflags_t efl; |
addr_t heap = 0; |
ASSERT(md->state==MD_FREE); |
list_remove((link_t*)md); |
if(list_empty(&sheap.mapped[idx0])) |
_reset_savm(idx0); |
} |
else |
{ |
md_t *lmd; |
addr_t frame; |
addr_t *pte; |
count_t order; |
frame_t *frame; |
index_t v; |
int i; |
mmap_t *map; |
count_t pages; |
lmd = find_large_md((size+0x3FFFFF)&~0x3FFFFF); |
// __asm__ __volatile__ ("xchgw %bx, %bx"); |
DBG("get large md %x\n", lmd); |
size = (size + 4095) & ~4095; |
if( !lmd) |
{ |
safe_sti(efl); |
return NULL; |
}; |
pages = size >> PAGE_WIDTH; |
ASSERT(lmd->size != 0); |
ASSERT(lmd->base != 0); |
ASSERT((lmd->base & 0x3FFFFF) == 0); |
ASSERT(lmd->parent == NULL); |
// map = (mmap_t*)malloc( sizeof(mmap_t) + |
// sizeof(addr_t) * pages); |
frame = core_alloc(10); /* FIXME check */ |
map = (mmap_t*)PA2KA(frame_alloc( (sizeof(mmap_t) + |
sizeof(addr_t) * pages) >> PAGE_WIDTH)); |
lmd->parent = (void*)frame; |
map->size = size; |
pte = &((addr_t*)page_tabs)[lmd->base>>12]; /* FIXME remove */ |
for(i = 0; i<1024; i++) |
if ( map ) |
{ |
*pte++ = frame; |
frame+= 4096; |
} |
order = size >> HF_WIDTH; |
md = (md_t*)slab_alloc(md_slab,0); /* FIXME check */ |
if( order ) |
order = fnzb(order - 1) + 1; |
link_initialize(&md->link); |
list_initialize(&md->adj); |
md->base = lmd->base; |
md->size = lmd->size; |
md->parent = lmd; |
md->state = MD_USED; |
}; |
efl = safe_cli(); |
if(md->size > size) |
{ |
count_t idx1; |
md_t *new_md = (md_t*)slab_alloc(md_slab,0); /* FIXME check */ |
spinlock_lock(&z_heap.lock); |
link_initialize(&new_md->link); |
list_insert(&new_md->adj, &md->adj); |
frame = (frame_t*)buddy_system_alloc(order); |
new_md->base = md->base; |
new_md->size = size; |
new_md->parent = md->parent; |
ASSERT( frame ); |
md->base+= size; |
md->size-= size; |
md->state = MD_FREE; |
if( frame ) |
{ |
addr_t page = 0; |
addr_t mem; |
idx1 = (md->size>>12) - 1 < 32 ? (md->size>>12) - 1 : 31; |
z_heap.free_count -= (1 << order); |
z_heap.busy_count += (1 << order); |
DBG("insert md %x, base %x size %x idx %x\n", md,md->base, md->size,idx1); |
/* get frame address */ |
if( idx1 < 31) |
list_prepend(&md->link, &sheap.mapped[idx1]); |
else |
{ |
if( list_empty(&sheap.mapped[31])) |
list_prepend(&md->link, &sheap.mapped[31]); |
else |
{ |
md_t *tmp = (md_t*)sheap.mapped[31].next; |
v = z_heap.base + (index_t)(frame - z_heap.frames); |
while( &tmp->link != &sheap.mapped[31]) |
{ |
if(md->base < tmp->base) |
break; |
tmp = (md_t*)tmp->link.next; |
} |
list_insert(&md->link, &tmp->link); |
}; |
}; |
heap = v << HF_WIDTH; |
_set_savm(idx1); |
map->base = heap; |
md = new_md; |
}; |
for(i = 0; i < (1 << order); i++) |
frame[i].parent = map; |
md->state = MD_USED; |
spinlock_unlock(&z_heap.lock); |
safe_sti(efl); |
return md; |
} |
void __fastcall free_unmapped_md(md_t *md) |
{ |
eflags_t efl ; |
md_t *fd; |
md_t *bk; |
count_t idx; |
addr_t *pte = &((addr_t*)page_tabs)[heap >> PAGE_WIDTH]; |
addr_t *mpte = &map->pte[0]; |
ASSERT(md->parent != NULL); |
#if 0 |
if( flags & PG_MAP ) |
page = PG_DEMAND | (flags & 0xFFF); |
efl = safe_cli(); |
spinlock_lock(&sheap.lock); |
if( !list_empty(&md->adj)) |
mem = heap; |
while(pages--) |
{ |
bk = (md_t*)md->adj.prev; |
fd = (md_t*)md->adj.next; |
*pte++ = 0; //page; |
*mpte++ = page; |
if(fd->state == MD_FREE) |
asm volatile ( "invlpg (%0)" ::"r" (mem) ); |
mem+= 4096; |
}; |
#else |
mem = heap; |
while(pages--) |
{ |
idx = (fd->size>>12) - 1 < 32 ? (fd->size>>12) - 1 : 31; |
if( flags & PG_MAP ) |
page = alloc_page(); |
list_remove((link_t*)fd); |
if(list_empty(&sheap.unmapped[idx])) |
_reset_savu(idx); |
page |= flags & 0xFFF; |
md->size+= fd->size; |
md->adj.next = fd->adj.next; |
md->adj.next->prev = (link_t*)md; |
slab_free(md_slab, fd); |
}; |
if(bk->state == MD_FREE) |
{ |
idx = (bk->size>>12) - 1 < 32 ? (bk->size>>12) - 1 : 31; |
*pte++ = 0; |
*mpte++ = page; |
list_remove((link_t*)bk); |
if(list_empty(&sheap.unmapped[idx])) |
_reset_savu(idx); |
bk->size+= md->size; |
bk->adj.next = md->adj.next; |
bk->adj.next->prev = (link_t*)bk; |
slab_free(md_slab, md); |
md = fd; |
asm volatile ( "invlpg (%0)" ::"r" (mem) ); |
mem+= 4096; |
}; |
}; |
#endif |
md->state = MD_FREE; |
DBG("%s %x size %d order %d\n", __FUNCTION__, heap, size, order); |
idx = (md->size>>12) - 1 < 32 ? (md->size>>12) - 1 : 31; |
return heap; |
} |
_set_savu(idx); |
spinlock_unlock(&z_heap.lock); |
if( idx < 31) |
list_prepend(&md->link, &sheap.unmapped[idx]); |
else |
{ |
if( list_empty(&sheap.unmapped[31])) |
list_prepend(&md->link, &sheap.unmapped[31]); |
else |
{ |
md_t *tmp = (md_t*)sheap.unmapped[31].next; |
while( &tmp->link != &sheap.unmapped[31]) |
{ |
if(md->base < tmp->base) |
break; |
tmp = (md_t*)tmp->link.next; |
} |
list_insert(&md->link, &tmp->link); |
}; |
}; |
spinlock_unlock(&sheap.lock); |
safe_sti(efl); |
frame_free( KA2PA(map) ); |
}; |
void __fastcall free_mapped_md(md_t *md) |
return 0; |
} |
void __fastcall mem_free(addr_t addr) |
{ |
eflags_t efl ; |
md_t *fd; |
md_t *bk; |
frame_t *frame; |
count_t idx; |
ASSERT(md->parent != NULL); |
ASSERT( ((md_t*)(md->parent))->parent != NULL); |
idx = (addr >> HF_WIDTH); |
if( (idx < z_heap.base) || |
(idx >= z_heap.base+z_heap.count)) { |
DBG("invalid address %x\n", addr); |
return; |
} |
efl = safe_cli(); |
spinlock_lock(&sheap.lock); |
if( !list_empty(&md->adj)) |
{ |
bk = (md_t*)md->adj.prev; |
fd = (md_t*)md->adj.next; |
frame = &z_heap.frames[idx-z_heap.base]; |
if(fd->state == MD_FREE) |
{ |
idx = (fd->size>>12) - 1 < 32 ? (fd->size>>12) - 1 : 31; |
u32_t order = frame->buddy_order; |
list_remove((link_t*)fd); |
if(list_empty(&sheap.mapped[idx])) |
_reset_savm(idx); |
DBG("%s %x order %d\n", __FUNCTION__, addr, order); |
md->size+= fd->size; |
md->adj.next = fd->adj.next; |
md->adj.next->prev = (link_t*)md; |
slab_free(md_slab, fd); |
}; |
if(bk->state == MD_FREE) |
{ |
idx = (bk->size>>12) - 1 < 32 ? (bk->size>>12) - 1 : 31; |
ASSERT(frame->refcount); |
list_remove((link_t*)bk); |
if(list_empty(&sheap.mapped[idx])) |
_reset_savm(idx); |
spinlock_lock(&z_heap.lock); |
bk->size+= md->size; |
bk->adj.next = md->adj.next; |
bk->adj.next->prev = (link_t*)bk; |
slab_free(md_slab, md); |
md = fd; |
}; |
}; |
if (!--frame->refcount) |
{ |
mmap_t *map; |
count_t i; |
md->state = MD_FREE; |
map = frame->parent; |
idx = (md->size>>12) - 1 < 32 ? (md->size>>12) - 1 : 31; |
for(i = 0; i < (1 << order); i++) |
frame[i].parent = NULL; |
_set_savm(idx); |
buddy_system_free(&frame->buddy_link); |
if( idx < 31) |
list_prepend(&md->link, &sheap.mapped[idx]); |
else |
{ |
if( list_empty(&sheap.mapped[31])) |
list_prepend(&md->link, &sheap.mapped[31]); |
else |
{ |
md_t *tmp = (md_t*)sheap.mapped[31].next; |
/* Update zone information. */ |
z_heap.free_count += (1 << order); |
z_heap.busy_count -= (1 << order); |
while( &tmp->link != &sheap.mapped[31]) |
{ |
if(md->base < tmp->base) |
break; |
tmp = (md_t*)tmp->link.next; |
} |
list_insert(&md->link, &tmp->link); |
}; |
}; |
spinlock_unlock(&sheap.lock); |
spinlock_unlock(&z_heap.lock); |
safe_sti(efl); |
}; |
for( i = 0; i < (map->size >> PAGE_WIDTH); i++) |
frame_free(map->pte[i]); |
md_t* __fastcall md_alloc(size_t size, u32_t flags) |
{ |
eflags_t efl; |
md_t *md; |
size = (size+4095)&~4095; |
if( flags & PG_MAP ) |
{ |
md = find_mapped_md(size); |
if( !md ) |
return NULL; |
ASSERT(md->state == MD_USED); |
ASSERT(md->parent != NULL); |
md_t *lmd = (md_t*)md->parent; |
ASSERT( lmd != NULL); |
ASSERT( lmd->parent != NULL); |
addr_t frame = (md->base - lmd->base + (addr_t)lmd->parent)| |
(flags & 0xFFF); |
DBG("frame %x\n", frame); |
ASSERT(frame != 0); |
count_t tmp = size >> 12; |
addr_t *pte = &((addr_t*)page_tabs)[md->base>>12]; |
while(tmp--) |
{ |
*pte++ = frame; |
frame+= 4096; |
}; |
frame_free( KA2PA(map) ); |
} |
else |
{ |
md = find_unmapped_md(size); |
if( !md ) |
return NULL; |
ASSERT(md->parent != NULL); |
ASSERT(md->state == MD_USED); |
} |
return md; |
spinlock_unlock(&z_heap.lock); |
safe_sti(efl); |
}; |
}; |
void __fastcall md_free(md_t *md) |
void __fastcall heap_fault(addr_t faddr, u32_t code) |
{ |
index_t idx; |
frame_t *frame; |
mmap_t *map; |
if( md ) |
{ |
md_t *lmd; |
idx = faddr >> HF_WIDTH; |
DBG("free md: %x base: %x size: %x\n",md, md->base, md->size); |
frame = &z_heap.frames[idx-z_heap.base]; |
ASSERT(md->state == MD_USED); |
map = frame->parent; |
list_remove((link_t*)md); |
ASSERT( faddr >= map->base); |
lmd = (md_t*)md->parent; |
ASSERT(lmd != 0); |
if(lmd->parent != 0) |
if( faddr < map->base + map->size) |
{ |
addr_t mem = md->base; |
addr_t *pte = &((addr_t*)page_tabs)[md->base>>12]; |
count_t tmp = md->size >> 12; |
addr_t page; |
while(tmp--) |
{ |
*pte++ = 0; |
asm volatile ( "invlpg (%0)" ::"r" (mem) ); |
mem+= 4096; |
}; |
free_mapped_md( md ); |
} |
else |
free_unmapped_md( md ); |
} |
idx = (faddr - map->base) >> PAGE_WIDTH; |
return; |
}; |
page = map->pte[idx]; |
void * __fastcall mem_alloc(size_t size, u32_t flags) |
if( page != 0) |
{ |
eflags_t efl; |
md_t *md; |
DBG("\nmem_alloc: %x bytes\n", size); |
ASSERT(size != 0); |
md = md_alloc(size, flags); |
if( !md ) |
return NULL; |
efl = safe_cli(); |
spinlock_lock(&sheap.lock); |
if( list_empty(&sheap.used) ) |
list_prepend(&md->link, &sheap.used); |
else |
#if 0 |
if( page & PG_DEMAND) |
{ |
md_t *tmp = (md_t*)sheap.used.next; |
page &= ~PG_DEMAND; |
page = alloc_page() | (page & 0xFFF); |
while( &tmp->link != &sheap.used) |
{ |
if(md->base < tmp->base) |
break; |
tmp = (md_t*)tmp->link.next; |
} |
list_insert(&md->link, &tmp->link); |
map->pte[idx] = page; |
}; |
spinlock_unlock(&sheap.lock); |
safe_sti(efl); |
DBG("allocate: %x size %x\n\n",md->base, size); |
return (void*)md->base; |
#endif |
((addr_t*)page_tabs)[faddr >> PAGE_WIDTH] = page; |
}; |
void __fastcall mem_free(void *mem) |
{ |
eflags_t efl; |
md_t *tmp; |
md_t *md = NULL; |
DBG("mem_free: %x\n",mem); |
ASSERT( mem != 0 ); |
ASSERT( ((addr_t)mem & 0xFFF) == 0 ); |
ASSERT( ! list_empty(&sheap.used)); |
efl = safe_cli(); |
tmp = (md_t*)sheap.used.next; |
while( &tmp->link != &sheap.used) |
{ |
if( tmp->base == (addr_t)mem ) |
{ |
md = tmp; |
break; |
}; |
tmp = (md_t*)tmp->link.next; |
} |
}; |
if( md ) |
{ |
md_free( md ); |
//#include "mmap.inc" |
} |
else |
DBG("\tERROR: invalid base address: %x\n", mem); |
safe_sti(efl); |
}; |
/kernel/branches/kolibri_pe/core/heap.inc |
---|
163,7 → 163,7 |
test cl, 1 |
jz @F |
call @core_free@4 |
call @frame_free@4 |
mov eax, esi |
shl eax, 12 |
invlpg [eax] |
287,7 → 287,7 |
jz .loop |
push edx |
call @core_free@4 |
call @frame_free@4 |
pop edx |
mov eax, edx |
shl eax, 12 |
/kernel/branches/kolibri_pe/core/init.asm |
---|
214,7 → 214,7 |
call @init_heap@8 |
call _init_core_dll |
call _init_threads |
; call _init_threads |
; SAVE & CLEAR 0-0xffff |
277,8 → 277,8 |
add eax, 0x00400000 |
mov [_sys_pdbr+4+(LFB_BASE shr 20)], eax |
if SHADOWFB |
mov ecx, 11 |
call @core_alloc@4 |
mov ecx, 1 shl 11 |
call @frame_alloc@4 |
or eax, PG_LARGE+PG_UW |
mov [_sys_pdbr+(SHADOWFB shr 20)], eax |
add eax, 0x00400000 |
/kernel/branches/kolibri_pe/core/malloc.inc |
---|
987,8 → 987,8 |
align 4 |
init_malloc: |
mov ecx, 6 |
call @core_alloc@4 |
mov ecx, 64 |
call @frame_alloc@4 |
add eax, OS_BASE |
mov [mst.top], eax |
/kernel/branches/kolibri_pe/core/memory.inc |
---|
212,7 → 212,7 |
mov ebx, edi |
shl ebx, 12 |
invlpg [ebx] |
call @core_free@4 |
call @frame_free@4 |
.next: |
add edi, 1 |
cmp edi, esi |
220,6 → 220,8 |
.update_size: |
mov ebx, [new_size] |
mov edx, [current_slot] |
call update_mem_size |
xor eax, eax |
422,6 → 424,8 |
align 4 |
.kernel_heap: |
; xchg bx, bx |
shr ebx, 22 |
mov edx, [master_tab + ebx*4] |
428,6 → 432,11 |
test edx, PG_MAP |
jz .check_ptab ;òàáëèöà ñòðàíèö íå ñîçäàíà |
mov ecx, [.err_addr] |
mov edx, [.err_code] |
call @heap_fault@8 |
jmp .exit |
.check_ptab: |
435,8 → 444,7 |
test edx, PG_MAP |
jnz @F |
xor ecx, ecx |
call @core_alloc@4 |
call _alloc_page |
test eax, eax |
jz .fail |
546,7 → 554,7 |
popad |
add esp, 4 |
; iretd |
iretd |
save_ring3_context ;debugger support |
1094,15 → 1102,12 |
push ebx |
xor ecx, ecx |
mov edx, [size] |
shr edx, 12 |
mov ebx, edx |
dec edx |
bsr ecx, edx |
inc ecx |
mov ecx, [size] |
shr ecx, 12 |
call @core_alloc@4 |
mov ebx, ecx |
call @frame_alloc@4 |
test eax, eax |
jz .mm_fail |
/kernel/branches/kolibri_pe/core/mm.c |
---|
13,18 → 13,7 |
zone_t z_core; |
static inline u32_t save_edx(void) |
{ |
u32_t val; |
asm volatile ("movl %%edx, %0":"=r"(val)); |
return val; |
}; |
static inline void restore_edx(u32_t val) |
{ |
asm volatile (""::"d" (val) ); |
}; |
static void buddy_system_create(zone_t *z); |
static void __fastcall buddy_system_free(zone_t *z, link_t *block); |
static void zone_mark_unavailable(zone_t *zone, index_t frame_idx); |
36,9 → 25,7 |
static inline void frame_initialize(frame_t *frame); |
void init_mm(); |
static void zone_create(zone_t *z, pfn_t start, count_t count); |
static void zone_reserve(zone_t *z, pfn_t base, count_t count); |
static void zone_release(zone_t *z, pfn_t base, count_t count); |
558,8 → 545,8 |
spinlock_unlock(&z_core.lock); |
safe_sti(efl); |
DBG("core alloc: %x, size %x remain %d\n", v << FRAME_WIDTH, |
((1<<order)<<12), z_core.free_count); |
DBG("core alloc at: 0x%x, size 0x%x remain %d\n", v << FRAME_WIDTH, |
((1<<order)<<FRAME_WIDTH), z_core.free_count); |
return (v << FRAME_WIDTH); |
}; |
568,11 → 555,11 |
{ |
eflags_t efl; |
DBG("core free %x", frame); |
DBG("core free 0x%x", frame); |
efl = safe_cli(); |
spinlock_lock(&z_core.lock); |
zone_free(&z_core, frame>>12); |
// zone_free(&z_core, frame>>12); |
spinlock_unlock(&z_core.lock); |
safe_sti(efl); |
/kernel/branches/kolibri_pe/core/pe.c |
---|
42,7 → 42,6 |
bool link_image(addr_t img_base); |
md_t* __fastcall load_image(const char *path); |
/* |
void* __fastcall load_pe(const char *path) |
109,12 → 108,12 |
return true; |
} |
md_t* __fastcall load_image(const char *path) |
addr_t __fastcall load_image(const char *path) |
{ |
PIMAGE_DOS_HEADER dos; |
PIMAGE_NT_HEADERS32 nt; |
md_t *img_md; |
// md_t *img_md; |
size_t img_size; |
addr_t img_base; |
147,16 → 146,17 |
img_size = nt->OptionalHeader.SizeOfImage; |
img_md = md_alloc(img_size, PG_SW); |
// img_md = md_alloc(img_size, PG_SW); |
img_base = mem_alloc(img_size, PG_SW); |
if( !img_md) |
if( !img_base) |
{ |
mem_free(raw); |
return NULL; |
}; |
img_base = img_md->base; |
// img_base = img_md->base; |
create_image(img_base, (addr_t)raw, true); |
165,7 → 165,7 |
// dos = (PIMAGE_DOS_HEADER)img_base; |
// nt = MakePtr( PIMAGE_NT_HEADERS32, dos, dos->e_lfanew); |
return img_md; |
return img_base; |
}; |
/kernel/branches/kolibri_pe/core/slab.c |
---|
33,13 → 33,15 |
unsigned int i; |
u32_t p; |
data = (void*)PA2KA(core_alloc(cache->order)); |
DBG("%s order %d\n", __FUNCTION__, cache->order); |
data = (void*)PA2KA(frame_alloc(1 << cache->order)); |
if (!data) { |
return NULL; |
} |
slab = (slab_t*)slab_create(); |
if (!slab) { |
core_free(KA2PA(data)); |
frame_free(KA2PA(data)); |
return NULL; |
} |
74,12 → 76,6 |
spinlock_lock(&cache->slablock); |
if (list_empty(&cache->partial_slabs)) { |
/* Allow recursion and reclaiming |
* - this should work, as the slab control structures |
* are small and do not need to allocate with anything |
* other than frame_alloc when they are allocating, |
* that's why we should get recursion at most 1-level deep |
*/ |
slab = slab_space_alloc(cache, flags); |
if (!slab) |
{ |
211,7 → 207,7 |
/* Minimum slab order */ |
pages = SIZE2FRAMES(cache->size); |
/* We need the 2^order >= pages */ |
if (pages == 1) |
if (pages <= 1) |
cache->order = 0; |
else |
cache->order = fnzb(pages-1)+1; |
241,6 → 237,8 |
{ |
slab_cache_t *cache; |
DBG("%s\n", __FUNCTION__); |
cache = (slab_cache_t*)slab_cache_alloc(); |
_slab_cache_create(cache, size, align, constructor, destructor, flags); |
return cache; |
337,15 → 335,11 |
void *obj; |
u32_t p; |
DBG("%s\n", __FUNCTION__); |
// spinlock_lock(&cache->slablock); |
if (list_empty(&slab_cache->partial_slabs)) { |
/* Allow recursion and reclaiming |
* - this should work, as the slab control structures |
* are small and do not need to allocate with anything |
* other than frame_alloc when they are allocating, |
* that's why we should get recursion at most 1-level deep |
*/ |
// spinlock_unlock(&cache->slablock); |
// slab = slab_create(); |
352,7 → 346,7 |
void *data; |
unsigned int i; |
data = (void*)PA2KA(core_alloc(0)); |
data = (void*)PA2KA(alloc_page()); |
if (!data) { |
return NULL; |
} |
400,13 → 394,10 |
void *obj; |
u32_t *p; |
if (list_empty(&slab_cache_cache.partial_slabs)) { |
/* Allow recursion and reclaiming |
* - this should work, as the slab control structures |
* are small and do not need to allocate with anything |
* other than frame_alloc when they are allocating, |
* that's why we should get recursion at most 1-level deep |
*/ |
DBG("%s\n", __FUNCTION__); |
if (list_empty(&slab_cache_cache.partial_slabs)) |
{ |
// spinlock_unlock(&cache->slablock); |
// slab = slab_create(); |
413,7 → 404,7 |
void *data; |
unsigned int i; |
data = (void*)(PA2KA(core_alloc(0))); |
data = (void*)(PA2KA(alloc_page())); |
if (!data) { |
return NULL; |
} |
437,7 → 428,8 |
atomic_inc(&slab_cache_cache.allocated_slabs); |
// spinlock_lock(&cache->slablock); |
} else { |
} |
else { |
slab = list_get_instance(slab_cache_cache.partial_slabs.next, slab_t, link); |
list_remove(&slab->link); |
} |
457,6 → 449,7 |
void slab_cache_init(void) |
{ |
DBG("%s\n", __FUNCTION__); |
_slab_cache_create(&slab_cache_cache, sizeof(slab_cache_t), |
sizeof(void *), NULL, NULL, |
/kernel/branches/kolibri_pe/core/sys32.inc |
---|
638,23 → 638,23 |
mov ecx,[edi+APPDATA.pl0_stack] |
sub ecx, OS_BASE |
call @core_free@4 |
call @frame_free@4 |
mov ecx,[edi+APPDATA.cur_dir] |
sub ecx, OS_BASE |
call @core_free@4 |
call @frame_free@4 |
mov ecx, [edi+APPDATA.io_map] |
cmp ecx, (tss._io_map_0-OS_BASE+PG_MAP) |
je @F |
call @core_free@4 |
call @frame_free@4 |
@@: |
mov ecx, [edi+APPDATA.io_map+4] |
cmp ecx, (tss._io_map_1-OS_BASE+PG_MAP) |
je @F |
call @core_free@4 |
call @frame_free@4 |
@@: |
mov eax, 0x20202020 |
stosd |
/kernel/branches/kolibri_pe/core/taskman.inc |
---|
222,10 → 222,9 |
mov ebx,[slot_base] |
mov [ebx+APPDATA.dir_table],eax |
mov eax,[hdr_mem] |
mov [ebx+APPDATA.mem_size],eax |
mov ecx,[hdr_mem] |
mov [ebx+APPDATA.mem_size],ecx |
mov ecx, [hdr_mem] |
mov edi, [file_size] |
; add edi, 4095 |
; and edi, not 4095 |
337,8 → 336,8 |
;mov [ebx+APPDATA.mem_size],eax |
mov ecx, 1 |
call @core_alloc@4 |
mov ecx, 2 |
call @frame_alloc@4 |
lea edi, [eax+OS_BASE] |
mov [pl0_stack], edi |
375,8 → 374,7 |
mov ecx, [def_cursor] |
mov [ebx+APPDATA.cursor],ecx |
xor ecx, ecx |
call @core_alloc@4 |
call _alloc_page |
lea edi, [eax+OS_BASE] ; FIXME |
mov esi,[current_slot] |
542,7 → 540,9 |
cld |
rep stosd |
mov ecx, 512 |
mov esi, [img_base] |
mov ecx, 512 ; FIX only core tabs |
mov esi, _sys_pdbr+(HEAP_BASE shr 20) |
rep movsd |
553,7 → 553,9 |
mov eax, edi |
call set_cr3 |
mov edx, [app_tabs] |
mov esi, [img_base] |
mov ebx, [app_tabs] |
mov edi, master_tab |
@@: |
call _alloc_page |
562,7 → 564,7 |
or eax, PG_UW |
stosd |
dec edx |
dec ebx |
jnz @B |
mov edi, page_tabs |
571,11 → 573,9 |
xor eax, eax |
rep stosd |
mov ecx, [app_pages] |
xor ebx, ebx |
.alloc: |
xor ecx, ecx |
call @core_alloc@4 |
call _alloc_page |
test eax, eax |
jz .fail |
615,8 → 615,7 |
lea ebx, [ecx+0x3FFFFF] |
xor ecx, ecx |
call @core_alloc@4 |
call _alloc_page |
test eax, eax |
mov [esp], eax |
jz .fail |
627,7 → 626,7 |
cld |
rep stosd |
mov ecx, 512 |
mov ecx, 512 ; FIX only core tabs |
mov esi, _sys_pdbr+(HEAP_BASE shr 20) |
rep movsd |
634,8 → 633,7 |
mov esi, [esp] |
shr ebx, 22 |
.new_ptab: |
xor ecx, ecx |
call @core_alloc@4 |
call _alloc_page |
test eax, eax |
jz .fail |
651,8 → 649,7 |
dec ebx |
jnz .new_ptab |
xor ecx, ecx |
call @core_alloc@4 |
call _alloc_page |
test eax, eax |
jz .fail |
701,7 → 698,7 |
test ecx, 1 shl 9 |
jnz .next ;skip shared pages |
call @core_free@4 |
call @frame_free@4 |
.next: |
add esi, 4 |
dec ebx |
759,7 → 756,7 |
stdcall destroy_page_table, eax |
mov ecx, [esi] |
call @core_free@4 |
call @frame_free@4 |
.next: |
add esi, 4 |
dec edi |
766,7 → 763,7 |
jnz .destroy |
mov ecx, [pg_dir] |
call @core_free@4 |
call @frame_free@4 |
.exit: |
dec [pg_data.pg_mutex] |
ret |
1150,8 → 1147,8 |
pl0_stack dd ? |
endl |
mov ecx, 1 ;(RING0_STACK_SIZE+512) shr 12 |
call @core_alloc@4 |
mov ecx, 2 ;(RING0_STACK_SIZE+512) shr 12 |
call @frame_alloc@4 |
add eax, OS_BASE |
mov [pl0_stack], eax |
1196,12 → 1193,11 |
mov [SLOT_BASE+APPDATA.saved_esp0+ebx], eax |
call _alloc_page |
add eax, OS_BASE |
lea edi, [eax + OS_BASE] |
mov [ebx+SLOT_BASE+APPDATA.cur_dir], edi |
mov esi,[current_slot] |
mov esi,[esi+APPDATA.cur_dir] |
mov ecx,0x1000/4 |
mov edi,eax |
mov [ebx+SLOT_BASE+APPDATA.cur_dir],eax |
rep movsd |
shr ebx,3 |
1302,7 → 1298,7 |
mov [SLOT_BASE+ebx*8+APPDATA.debugger_slot],eax |
.no_debug: |
mov [CURRENT_TASK+ebx+TASKDATA.state], cl |
DEBUGF 1,"%s",new_process_running |
; DEBUGF 1,"%s",new_process_running |
ret |
endp |
/kernel/branches/kolibri_pe/core/thread.c |
---|
14,6 → 14,8 |
void init_threads() |
{ |
DBG("%s\n", __FUNCTION__); |
thr_slab = slab_cache_create(sizeof(thr_t), 16, |
NULL,NULL,SLAB_CACHE_MAGDEFERRED); |
}; |
27,8 → 29,10 |
thr_t *thr; |
addr_t thr_stack; |
DBG("%s\n", __FUNCTION__); |
thr = (thr_t*)slab_alloc(thr_slab,0); |
thr_stack = PA2KA(core_alloc(1)); |
thr_stack = PA2KA(frame_alloc(2)); |
thr_cnt++; |
/kernel/branches/kolibri_pe/fs/parse_fn.inc |
---|
85,8 → 85,7 |
proc load_file_parse_table |
xor eac, ecx |
call @core_alloc@4 |
call _alloc_page |
add eax, OS_BASE |
mov [tmp_file_name_table],eax |
mov edi,eax |
/kernel/branches/kolibri_pe/include/core.h |
---|
4,7 → 4,7 |
#define LOAD_BASE 0x00100000 |
#define page_tabs 0xDDC00000 |
#define page_tabs 0xDD800000 |
#define master_tab (page_tabs+(page_tabs>>10)) |
20,6 → 20,10 |
#define sel_srv_stack 0x39 |
#define __export __attribute__ ((dllexport)) |
void printf (const char *format, ...); |
#define CALLER ((addr_t) __builtin_return_address(0)) |
74,11 → 78,10 |
asm volatile ( |
"pushl %0\n\t" |
"popfl\n" |
: : "r" (efl) |
); |
: : "r" (efl)); |
} |
static inline count_t fnzb(u32_t arg) |
static inline index_t fnzb(u32_t arg) |
{ |
count_t n; |
asm volatile ( |
88,7 → 91,7 |
return n; |
} |
static inline count_t _bsf(u32_t arg) |
static inline index_t _bsf(u32_t arg) |
{ |
count_t n; |
asm volatile ( |
201,7 → 204,7 |
}ioctl_t; |
typedef struct |
typedef struct __attribute__ ((packed)) |
{ |
u32_t code; |
union |
214,7 → 217,7 |
u16_t x; /* cursor x */ |
u16_t y; /* cursor y */ |
u32_t unused; |
}__attribute__ ((packed)); |
}; |
struct /* realtime io */ |
{ |
237,6 → 240,7 |
}event_t; |
void __fastcall dump_file(addr_t addr, size_t size); |
/kernel/branches/kolibri_pe/include/mm.h |
---|
1,13 → 1,24 |
typedef struct |
{ |
link_t link; |
addr_t base; |
size_t size; |
addr_t pte[0]; |
}mmap_t; |
typedef struct |
{ |
link_t buddy_link; /**< link to the next free block inside one order */ |
count_t refcount; /**< tracking of shared frames */ |
u32_t buddy_order; /**< buddy system block order */ |
u16_t refcount; /**< tracking of shared frames */ |
u16_t buddy_order; /**< buddy system block order */ |
void *parent; /**< If allocated by slab, this points there */ |
} frame_t; |
typedef struct { |
typedef struct |
{ |
SPINLOCK_DECLARE(lock); /**< this lock protects everything below */ |
pfn_t base; /**< frame_no of the first frame in the frames array */ |
count_t count; /**< Size of zone */ |
44,11 → 55,9 |
#define PAGE_SIZE 4096 |
#define FRAME_WIDTH 12 |
#define PAGE_WIDTH 12 |
#define BUDDY_SYSTEM_INNER_BLOCK 0xff |
# define PA2KA(x) (((addr_t) (x)) + OS_BASE) |
# define KA2PA(x) (((addr_t) (x)) - OS_BASE) |
56,39 → 65,39 |
{ |
if (!size) |
return 0; |
return (count_t) ((size - 1) >> FRAME_WIDTH) + 1; |
return (count_t) ((size - 1) >> PAGE_WIDTH) + 1; |
} |
static inline addr_t PFN2ADDR(pfn_t frame) |
{ |
return (addr_t) (frame << FRAME_WIDTH); |
return (addr_t) (frame << PAGE_WIDTH); |
} |
static inline pfn_t ADDR2PFN(addr_t addr) |
{ |
return (pfn_t) (addr >> FRAME_WIDTH); |
return (pfn_t) (addr >> PAGE_WIDTH); |
}; |
void init_mm(); |
void init_pg_slab(); |
void* __fastcall frame_get_parent(pfn_t pfn); |
void __fastcall frame_set_parent(pfn_t pfn, void *data); |
void frame_free(pfn_t frame); |
addr_t __fastcall core_alloc(u32_t order); |
void __fastcall core_free(addr_t frame); |
pfn_t alloc_page() __attribute__ ((deprecated)); |
addr_t alloc_page(void); |
#define __export __attribute__ ((dllexport)) |
md_t* __fastcall md_alloc(size_t size, u32_t flags) ; |
void __fastcall md_free(md_t *md); |
void* __fastcall __export mem_alloc(size_t size, u32_t flags) asm ("MemAlloc"); |
void __fastcall __export mem_free(void *mem) asm ("MemFree"); |
addr_t __fastcall __export mem_alloc(size_t size, u32_t flags) asm ("MemAlloc"); |
void __fastcall __export mem_free(addr_t mem) asm ("MemFree"); |
addr_t __fastcall frame_alloc(size_t size); |
size_t __fastcall frame_free(addr_t addr); |
/kernel/branches/kolibri_pe/include/pe.h |
---|
201,7 → 201,7 |
dll_t * find_dll(link_t *list, const char *name); |
md_t* __fastcall load_image(const char *path); |
addr_t __fastcall load_image(const char *path); |
void create_image(addr_t img_base, addr_t raw, bool force_clear) asm ("CreateImage"); |
/kernel/branches/kolibri_pe/kernel.asm |
---|
122,6 → 122,7 |
public __hlt |
public _panic_printf |
public _printf |
public _dump |
public _pg_balloc |
public _mem_amount |
public @balloc@4 |
215,11 → 216,16 |
extrn _poweroff |
extrn @core_alloc@4 |
extrn @core_free@4 |
extrn @pf_dump@8 |
extrn @frame_alloc@4 |
extrn @frame_free@4 |
extrn @find_large_md@4 |
extrn @heap_fault@8 |
extrn _MemAlloc |
extrn _MemFree |
486,7 → 492,7 |
call rerouteirqs |
; Initialize system V86 machine |
call init_sys_v86 |
; call init_sys_v86 |
; TIMER SET TO 1/100 S |
3429,7 → 3435,7 |
jz newdw8 |
test al,al |
jz .az |
lea eax,[edi+draw_data+(0x100000000-OS_BASE)] |
lea eax,[edi+draw_data-window_data] |
mov ebx,[dlx] |
cmp ebx,[eax+RECT.left] |
jae @f |
3454,7 → 3460,7 |
.az: |
mov eax,edi |
add eax, draw_data+(0x100000000-OS_BASE) |
add eax, draw_data-window_data |
mov ebx,[dlx] ; set limits |
mov [eax + RECT.left], ebx |
3465,7 → 3471,7 |
mov ebx,[dlye] |
mov [eax + RECT.bottom], ebx |
sub eax,draw_data+(0x100000000-OS_BASE) |
sub eax,draw_data - window_data |
cmp dword [esp],1 |
jne nobgrd |
/kernel/branches/kolibri_pe/ld.x |
---|
25,9 → 25,10 |
.flat . + 0x00400000: |
{ |
*(.flat) *(.data) |
. = ALIGN(4096); |
} |
.edata ALIGN(32): |
.edata : |
{ |
*(.edata) |
_code_end = .; |
/kernel/branches/kolibri_pe/makefile |
---|
6,7 → 6,7 |
DEFS = -DUSE_SMP -DCONFIG_DEBUG |
CFLAGS = -c -O2 -DCONFIG_DEBUG -I $(INCLUDE) -fomit-frame-pointer -fno-builtin |
CFLAGS = -c -O2 $(DEFS) -I $(INCLUDE) -fomit-frame-pointer -fno-builtin-printf |
LDFLAGS = -shared -s -Map kernel.map --image-base 0x100000 --file-alignment 32 |
KERNEL_SRC:= \ |
33,9 → 33,9 |
PE_SRC:= \ |
init.asm \ |
mbi.c \ |
mm.c \ |
heap.c \ |
slab.c \ |
heap.c \ |
frame.c \ |
pe.c \ |
dll.c \ |
spinlock.c \ |
70,6 → 70,9 |
bin/%.obj : core/%.c $(H_SRC) Makefile |
$(CC) $(CFLAGS) -o $@ $< |
bin/%.obj : gui/%.c $(H_SRC) Makefile |
$(CC) $(CFLAGS) -o $@ $< |
bin/%.obj: core/%.asm Makefile |
$(FASM) $< $@ |
/kernel/branches/kolibri_pe/printf.inc |
---|
5,6 → 5,24 |
PTR equ |
align 4 |
_dump: |
mov ecx, DWORD PTR [esp+4] |
@@: |
mov edx, 0x3FD |
in al, dx |
test al, 96 |
je @B |
mov dl, -8 |
mov eax, ecx |
out dx, al |
ret |
align 4 |
_putc: |
mov ecx, DWORD PTR [esp+4] |
.L13: |
/kernel/branches/kolibri_pe/unpacker.inc |
---|
28,7 → 28,11 |
popad |
ret 8 |
.lzma: |
pushfd |
cli |
call .lzma_unpack |
popfd |
.common: |
pop eax |
test al, 0x80 |