Subversion Repositories Kolibri OS

Compare Revisions

Rev 887 → Rev 888

/kernel/branches/kolibri_pe/core/test.c
File deleted
/kernel/branches/kolibri_pe/core/exports.inc
103,7 → 103,6
dd szPciWrite32 , pci_write32
 
dd szAllocPage , _alloc_page ;stdcall
dd szFreePage , free_page
dd szMapPage , map_page ;stdcall
dd szMapSpace , map_space
dd szMapIoMem , map_io_mem ;stdcall
/kernel/branches/kolibri_pe/core/heap.c
6,6 → 6,8
#include <mm.h>
#include <slab.h>
 
#define page_tabs 0xDF800000
 
typedef struct
{
link_t link;
20,12 → 22,15
#define MD_USED 2
 
typedef struct {
SPINLOCK_DECLARE(lock); /**< this lock protects everything below */
u32_t av_mapped;
u32_t av_unmapped;
 
u32_t availmask;
link_t free[32];
link_t mapped[32];
link_t unmapped[32];
 
link_t used;
 
SPINLOCK_DECLARE(lock); /**< this lock protects everything below */
}heap_t;
 
 
37,20 → 42,25
heap_t sheap;
 
 
static inline void _set_lavu(count_t idx)
{ asm volatile ("bts %0, _lheap+4"::"r"(idx):"cc"); }
 
static inline void _set_lmask(count_t idx)
{ asm volatile ("bts %0, _lheap"::"r"(idx):"cc"); }
static inline void _reset_lavu(count_t idx)
{ asm volatile ("btr %0, _lheap+4"::"r"(idx):"cc"); }
 
static inline void _reset_lmask(count_t idx)
{ asm volatile ("btr %0, _lheap"::"r"(idx):"cc"); }
 
static inline void _set_smask(count_t idx)
static inline void _set_savm(count_t idx)
{ asm volatile ("bts %0, _sheap"::"r"(idx):"cc"); }
 
static inline void _reset_smask(count_t idx)
static inline void _reset_savm(count_t idx)
{ asm volatile ("btr %0, _sheap"::"r"(idx):"cc"); }
 
static inline void _set_savu(count_t idx)
{ asm volatile ("bts %0, _sheap+4"::"r"(idx):"cc"); }
 
static inline void _reset_savu(count_t idx)
{ asm volatile ("btr %0, _sheap+4"::"r"(idx):"cc"); }
 
 
int __fastcall init_heap(addr_t base, size_t size)
{
md_t *md;
63,8 → 73,11
 
for (i = 0; i < 32; i++)
{
list_initialize(&lheap.free[i]);
list_initialize(&sheap.free[i]);
list_initialize(&lheap.mapped[i]);
list_initialize(&lheap.unmapped[i]);
 
list_initialize(&sheap.mapped[i]);
list_initialize(&sheap.unmapped[i]);
};
 
list_initialize(&lheap.used);
81,12 → 94,12
md->parent = NULL;
md->state = MD_FREE;
 
list_prepend(&md->link, &lheap.free[31]);
lheap.availmask = 0x80000000;
sheap.availmask = 0x00000000;
list_prepend(&md->link, &lheap.unmapped[31]);
lheap.av_mapped = 0x00000000;
lheap.av_unmapped = 0x80000000;
sheap.av_mapped = 0x00000000;
sheap.av_unmapped = 0x00000000;
 
// phm_slab = slab_cache_create(sizeof(phismem_t), 32,NULL,NULL,SLAB_CACHE_MAGDEFERRED);
 
return 1;
};
 
100,14 → 113,14
ASSERT((size & 0x3FFFFF) == 0);
 
idx0 = (size>>22) - 1 < 32 ? (size>>22) - 1 : 31;
mask = lheap.availmask & ( -1<<idx0 );
mask = lheap.av_unmapped & ( -1<<idx0 );
 
if(mask)
{
if(idx0 == 31)
{
md_t *tmp = (md_t*)lheap.free[31].next;
while((link_t*)tmp != &lheap.free[31])
md_t *tmp = (md_t*)lheap.unmapped[31].next;
while((link_t*)tmp != &lheap.unmapped[31])
{
if(tmp->size >= size)
{
123,9 → 136,9
{
idx0 = _bsf(mask);
 
ASSERT( !list_empty(&lheap.free[idx0]))
ASSERT( !list_empty(&lheap.unmapped[idx0]))
 
md = (md_t*)lheap.free[idx0].next;
md = (md_t*)lheap.unmapped[idx0].next;
};
}
else
134,8 → 147,8
ASSERT(md->state == MD_FREE);
 
list_remove((link_t*)md);
if(list_empty(&lheap.free[idx0]))
_reset_lmask(idx0);
if(list_empty(&lheap.unmapped[idx0]))
_reset_lavu(idx0);
 
if(md->size > size)
{
147,6 → 160,7
 
new_md->base = md->base;
new_md->size = size;
new_md->parent = NULL;
new_md->state = MD_USED;
 
md->base+= size;
154,8 → 168,8
 
idx1 = (md->size>>22) - 1 < 32 ? (md->size>>22) - 1 : 31;
 
list_prepend(&md->link, &lheap.free[idx1]);
_set_lmask(idx1);
list_prepend(&md->link, &lheap.unmapped[idx1]);
_set_lavu(idx1);
 
return new_md;
};
164,7 → 178,7
return md;
}
 
md_t* __fastcall find_small_md(size_t size)
md_t* __fastcall find_unmapped_md(size_t size)
{
eflags_t efl;
 
178,18 → 192,18
efl = safe_cli();
 
idx0 = (size>>12) - 1 < 32 ? (size>>12) - 1 : 31;
mask = sheap.availmask & ( -1<<idx0 );
mask = sheap.av_unmapped & ( -1<<idx0 );
 
DBG("smask %x size %x idx0 %x mask %x\n",sheap.availmask, size, idx0, mask);
DBG("smask %x size %x idx0 %x mask %x\n",sheap.av_unmapped, size, idx0, mask);
 
if(mask)
{
if(idx0 == 31)
{
ASSERT( !list_empty(&sheap.free[31]));
ASSERT( !list_empty(&sheap.unmapped[31]));
 
md_t *tmp = (md_t*)sheap.free[31].next;
while((link_t*)tmp != &sheap.free[31])
md_t *tmp = (md_t*)sheap.unmapped[31].next;
while((link_t*)tmp != &sheap.unmapped[31])
{
if(tmp->size >= size)
{
203,9 → 217,9
{
idx0 = _bsf(mask);
 
ASSERT( !list_empty(&sheap.free[idx0]));
ASSERT( !list_empty(&sheap.unmapped[idx0]));
 
md = (md_t*)sheap.free[idx0].next;
md = (md_t*)sheap.unmapped[idx0].next;
}
};
 
214,10 → 228,11
DBG("remove md %x\n", md);
 
ASSERT(md->state==MD_FREE);
ASSERT(md->parent != NULL);
 
list_remove((link_t*)md);
if(list_empty(&sheap.free[idx0]))
_reset_smask(idx0);
if(list_empty(&sheap.unmapped[idx0]))
_reset_savu(idx0);
}
else
{
232,6 → 247,11
return NULL;
};
 
ASSERT(lmd->size != 0);
ASSERT(lmd->base != 0);
ASSERT((lmd->base & 0x3FFFFF) == 0);
ASSERT(lmd->parent == NULL);
 
md = (md_t*)slab_alloc(md_slab,0); /* FIXME check */
 
link_initialize(&md->link);
264,16 → 284,16
DBG("insert md %x, base %x size %x idx %x\n", md,md->base, md->size,idx1);
 
if( idx1 < 31)
list_prepend(&md->link, &sheap.free[idx1]);
list_prepend(&md->link, &sheap.unmapped[idx1]);
else
{
if( list_empty(&sheap.free[31]))
list_prepend(&md->link, &sheap.free[31]);
if( list_empty(&sheap.unmapped[31]))
list_prepend(&md->link, &sheap.unmapped[31]);
else
{
md_t *tmp = (md_t*)sheap.free[31].next;
md_t *tmp = (md_t*)sheap.unmapped[31].next;
 
while((link_t*)tmp != &sheap.free[31])
while((link_t*)tmp != &sheap.unmapped[31])
{
if(md->base < tmp->base)
break;
283,7 → 303,7
};
};
 
_set_smask(idx1);
_set_savu(idx1);
 
safe_sti(efl);
 
297,13 → 317,167
return md;
}
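find_large_md and find_unmapped_md share one lookup scheme: class i of the list array holds descriptors of exactly i+1 granules (4 MiB granules for lheap, 4 KiB pages for sheap), while class 31 collects everything larger, kept sorted by base and scanned linearly for a first fit. Masking the availability word with -1<<idx0 keeps only classes that can satisfy the request, and _bsf picks the smallest of them. A worked sketch in C, using the names from the code above:

size_t  size = 3*4096;                           /* request: three pages       */
count_t idx0 = (size>>12) - 1;                   /* = 2, the three-page class  */
u32_t   mask = sheap.av_unmapped & ( -1<<idx0 ); /* non-empty classes >= 2     */
if (mask)                                        /* (class 31 would instead be */
    md = (md_t*)sheap.unmapped[_bsf(mask)].next; /*  scanned for a first fit)  */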
 
void __fastcall free_small_md(md_t *md)
md_t* __fastcall find_mapped_md(size_t size)
{
eflags_t efl ;
 
md_t *md = NULL;
 
count_t idx0;
u32_t mask;
 
ASSERT((size & 0xFFF) == 0);
 
efl = safe_cli();
 
idx0 = (size>>12) - 1 < 32 ? (size>>12) - 1 : 31;
mask = sheap.av_mapped & ( -1<<idx0 );
 
DBG("small av_mapped %x size %x idx0 %x mask %x\n",sheap.av_mapped, size,
idx0, mask);
 
if(mask)
{
if(idx0 == 31)
{
ASSERT( !list_empty(&sheap.mapped[31]));
 
md_t *tmp = (md_t*)sheap.mapped[31].next;
while((link_t*)tmp != &sheap.mapped[31])
{
if(tmp->size >= size)
{
md = tmp;
break;
};
tmp = (md_t*)tmp->link.next;
};
}
else
{
idx0 = _bsf(mask);
 
ASSERT( !list_empty(&sheap.mapped[idx0]));
 
md = (md_t*)sheap.mapped[idx0].next;
}
};
 
if(md)
{
DBG("remove md %x\n", md);
 
ASSERT(md->state==MD_FREE);
 
list_remove((link_t*)md);
if(list_empty(&sheap.mapped[idx0]))
_reset_savm(idx0);
}
else
{
md_t *lmd;
addr_t frame;
addr_t *pte;
int i;
 
lmd = find_large_md((size+0x3FFFFF)&~0x3FFFFF);
 
DBG("get large md %x\n", lmd);
 
if( !lmd)
{
safe_sti(efl);
return NULL;
};
 
ASSERT(lmd->size != 0);
ASSERT(lmd->base != 0);
ASSERT((lmd->base & 0x3FFFFF) == 0);
ASSERT(lmd->parent == NULL);
 
frame = core_alloc(10); /* FIXME check */
 
lmd->parent = (void*)frame;
 
pte = &((addr_t*)page_tabs)[lmd->base>>12]; /* FIXME remove */
 
for(i = 0; i<1024; i++)
{
*pte++ = frame;
frame+= 4096;
}
 
md = (md_t*)slab_alloc(md_slab,0); /* FIXME check */
 
link_initialize(&md->link);
list_initialize(&md->adj);
md->base = lmd->base;
md->size = lmd->size;
md->parent = lmd;
md->state = MD_USED;
};
 
if(md->size > size)
{
count_t idx1;
md_t *new_md = (md_t*)slab_alloc(md_slab,0); /* FIXME check */
 
link_initialize(&new_md->link);
list_insert(&new_md->adj, &md->adj);
 
new_md->base = md->base;
new_md->size = size;
new_md->parent = md->parent;
 
md->base+= size;
md->size-= size;
md->state = MD_FREE;
 
idx1 = (md->size>>12) - 1 < 32 ? (md->size>>12) - 1 : 31;
 
DBG("insert md %x, base %x size %x idx %x\n", md,md->base, md->size,idx1);
 
if( idx1 < 31)
list_prepend(&md->link, &sheap.mapped[idx1]);
else
{
if( list_empty(&sheap.mapped[31]))
list_prepend(&md->link, &sheap.mapped[31]);
else
{
md_t *tmp = (md_t*)sheap.mapped[31].next;
 
while((link_t*)tmp != &sheap.mapped[31])
{
if(md->base < tmp->base)
break;
tmp = (md_t*)tmp->link.next;
}
list_insert(&md->link, &tmp->link);
};
};
 
_set_savm(idx1);
 
md = new_md;
};
 
md->state = MD_USED;
 
safe_sti(efl);
 
return md;
}
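When no mapped descriptor is free, find_mapped_md backs a new large MD with physical memory: core_alloc(10) returns an order-10 block, (1<<10) frames * 4096 bytes = 4 MiB, exactly one large-MD granule, and the loop fills its 1024 page-table entries. The resulting parent chain is what mem_alloc later uses to compute frames; with addresses invented for illustration:

/*  small md                  large md (lmd)             physical backing
 *  md->base   = 0xDD002000   lmd->base   = 0xDD000000   lmd->parent = (void*)0x01000000
 *  md->parent = lmd          lmd->parent = frame base   from core_alloc(10)
 */
addr_t phys = md->base - lmd->base + (addr_t)lmd->parent;   /* = 0x01002000 */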
 
void __fastcall free_unmapped_md(md_t *md)
{
eflags_t efl ;
md_t *fd;
md_t *bk;
count_t idx;
 
ASSERT(md->parent != NULL);
 
efl = safe_cli();
spinlock_lock(&sheap.lock);
 
317,8 → 491,8
idx = (fd->size>>12) - 1 < 32 ? (fd->size>>12) - 1 : 31;
 
list_remove((link_t*)fd);
if(list_empty(&sheap.free[idx]))
_reset_smask(idx);
if(list_empty(&sheap.unmapped[idx]))
_reset_savu(idx);
 
md->size+= fd->size;
md->adj.next = fd->adj.next;
330,8 → 504,8
idx = (bk->size>>12) - 1 < 32 ? (bk->size>>12) - 1 : 31;
 
list_remove((link_t*)bk);
if(list_empty(&sheap.free[idx]))
_reset_smask(idx);
if(list_empty(&sheap.unmapped[idx]))
_reset_savu(idx);
 
bk->size+= md->size;
bk->adj.next = md->adj.next;
345,19 → 519,19
 
idx = (md->size>>12) - 1 < 32 ? (md->size>>12) - 1 : 31;
 
_set_smask(idx);
_set_savu(idx);
 
if( idx < 31)
list_prepend(&md->link, &sheap.free[idx]);
list_prepend(&md->link, &sheap.unmapped[idx]);
else
{
if( list_empty(&sheap.free[31]))
list_prepend(&md->link, &sheap.free[31]);
if( list_empty(&sheap.unmapped[31]))
list_prepend(&md->link, &sheap.unmapped[31]);
else
{
md_t *tmp = (md_t*)sheap.free[31].next;
md_t *tmp = (md_t*)sheap.unmapped[31].next;
 
while((link_t*)tmp != &sheap.free[31])
while((link_t*)tmp != &sheap.unmapped[31])
{
if(md->base < tmp->base)
break;
371,60 → 545,83
 
};
 
void __fastcall free_mapped_md(md_t *md)
{
eflags_t efl ;
md_t *fd;
md_t *bk;
count_t idx;
 
#define page_tabs 0xDF800000
ASSERT(md->parent != NULL);
ASSERT( ((md_t*)(md->parent))->parent != NULL);
 
/*
phismem_t* __fastcall phis_alloc(count_t count)
efl = safe_cli();
spinlock_lock(&sheap.lock);
 
if( !list_empty(&md->adj))
{
phismem_t *phm;
count_t tmp;
phm = (phismem_t*)slab_alloc(phm_slab, 0);
bk = (md_t*)md->adj.prev;
fd = (md_t*)md->adj.next;
 
phm->count = count;
tmp = count;
while(tmp)
if(fd->state == MD_FREE)
{
u32_t order;
idx = (fd->size>>12) - 1 < 32 ? (fd->size>>12) - 1 : 31;
 
asm volatile ("bsr %0, %1":"=&r"(order):"r"(tmp):"cc");
asm volatile ("btr %0, %1" :"=r"(tmp):"r"(order):"cc");
list_remove((link_t*)fd);
if(list_empty(&sheap.mapped[idx]))
_reset_savm(idx);
 
phm->frames[order] = core_alloc(order);
md->size+= fd->size;
md->adj.next = fd->adj.next;
md->adj.next->prev = (link_t*)md;
slab_free(md_slab, fd);
};
if(bk->state == MD_FREE)
{
idx = (bk->size>>12) - 1 < 32 ? (bk->size>>12) - 1 : 31;
 
list_remove((link_t*)bk);
if(list_empty(&sheap.mapped[idx]))
_reset_savm(idx);
 
bk->size+= md->size;
bk->adj.next = md->adj.next;
bk->adj.next->prev = (link_t*)bk;
slab_free(md_slab, md);
md = fd;
};
};
 
return phm;
}
md->state = MD_FREE;
 
void map_phm(addr_t base, phismem_t *phm, u32_t mapflags)
{
count_t count;
addr_t *pte;
idx = (md->size>>12) - 1 < 32 ? (md->size>>12) - 1 : 31;
 
count = phm->count;
pte = &((addr_t*)page_tabs)[base>>12];
_set_savm(idx);
 
while(count)
if( idx < 31)
list_prepend(&md->link, &sheap.mapped[idx]);
else
{
u32_t order;
addr_t frame;
count_t size;
if( list_empty(&sheap.mapped[31]))
list_prepend(&md->link, &sheap.mapped[31]);
else
{
md_t *tmp = (md_t*)sheap.mapped[31].next;
 
asm volatile ("bsr %0, %1":"=&r"(order):"r"(count):"cc");
asm volatile ("btr %0, %1" :"=r"(count):"r"(order):"cc");
 
frame = phm->frames[order] | mapflags;
size = (1 << order);
while(size--)
while((link_t*)tmp != &sheap.mapped[31])
{
*pte++ = frame;
frame+= 4096;
if(md->base < tmp->base)
break;
tmp = (md_t*)tmp->link.next;
}
}
list_insert(&md->link, &tmp->link);
};
*/
};
spinlock_unlock(&sheap.lock);
safe_sti(efl);
};
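Both free paths coalesce through the adj list, which chains descriptors in address order. Condensed C sketch of the successor merge performed above (the predecessor case is symmetric):

if (fd->state == MD_FREE)                   /* next block by address is free */
{
    idx = (fd->size>>12) - 1 < 32 ? (fd->size>>12) - 1 : 31;

    list_remove((link_t*)fd);               /* unlink from its size class    */
    if (list_empty(&sheap.mapped[idx]))
        _reset_savm(idx);                   /* class drained: clear its bit  */

    md->size += fd->size;                   /* md now spans both ranges      */
    slab_free(md_slab, fd);                 /* descriptor back to the slab   */
};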
 
 
void * __fastcall mem_alloc(size_t size, u32_t flags)
{
eflags_t efl;
437,37 → 634,42
 
size = (size+4095)&~4095;
 
md = find_small_md(size);
 
if( md )
{
ASSERT(md->state == MD_USED);
 
if( flags & PG_MAP )
{
count_t tmp = size >> 12;
addr_t *pte = &((addr_t*)page_tabs)[md->base>>12];
md = find_mapped_md(size);
 
while(tmp)
{
u32_t order;
addr_t frame;
size_t size;
if( !md )
return NULL;
 
asm volatile ("bsr %1, %0":"=&r"(order):"r"(tmp):"cc");
asm volatile ("btr %1, %0" :"=r"(tmp):"r"(order):"cc");
md_t *lmd = (md_t*)md->parent;
 
frame = core_alloc(order) | flags; /* FIXME check */
ASSERT( lmd != NULL);
ASSERT( lmd->parent != NULL);
 
size = (1 << order);
while(size--)
addr_t frame = (md->base - lmd->base + (addr_t)lmd->parent)|
(flags & 0xFFF);
DBG("frame %x\n", frame);
ASSERT(frame != 0);
 
count_t tmp = size >> 12;
addr_t *pte = &((addr_t*)page_tabs)[md->base>>12];
 
while(tmp--)
{
*pte++ = frame;
frame+= 4096;
};
};
};
}
else
md = find_unmapped_md(size);
 
if( !md )
return NULL;
 
ASSERT(md->parent != NULL);
ASSERT(md->state == MD_USED);
 
 
efl = safe_cli();
spinlock_lock(&sheap.lock);
 
492,8 → 694,6
DBG("allocate: %x size %x\n\n",md->base, size);
return (void*)md->base;
};
return NULL;
};
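mem_alloc now routes PG_MAP requests to the pre-mapped pool and derives every PTE from the parent chain instead of calling core_alloc per order. A hypothetical usage sketch (the size and flag combination are invented; PG_UW is the user read/write flag used elsewhere in this diff):

void *buf = mem_alloc(8192, PG_MAP | PG_UW);  /* two pages, mapped path      */
void *rgn = mem_alloc(8192, 0);               /* unmapped path: address space
                                                 only, caller maps it itself */
mem_free(buf);                                /* returns via free_mapped_md  */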
 
void __fastcall mem_free(void *mem)
{
524,10 → 724,20
 
if( md )
{
md_t *lmd;
 
DBG("\tmd: %x base: %x size: %x\n",md, md->base, md->size);
 
ASSERT(md->state == MD_USED);
 
list_remove((link_t*)md);
 
lmd = (md_t*)md->parent;
 
ASSERT(lmd != 0);
 
if(lmd->parent != 0)
{
count_t tmp = md->size >> 12;
addr_t *pte = &((addr_t*)page_tabs)[md->base>>12];
 
536,17 → 746,17
*pte++ = 0;
asm volatile (
"invlpg (%0)"
:
:"r" (mem) );
::"r" (mem) );
mem+= 4096;
};
list_remove((link_t*)md);
free_small_md( md );
 
free_mapped_md( md );
}
else
{
free_unmapped_md( md );
}
else
DBG("\tERROR: invalid base address: %x\n", mem);
};
 
safe_sti(efl);
};
/kernel/branches/kolibri_pe/core/heap.inc
149,30 → 149,31
test al, DONT_FREE_BLOCK
jnz .cantfree
 
push edi
 
and eax, not 4095
mov ecx, eax
mov edi, eax
or al, FREE_BLOCK
mov [page_tabs+(esi-1)*4], eax
sub ecx, 4096
mov ebx, ecx
shr ecx, 12
sub edi, 4096
mov ebx, edi
shr edi, 12
jz .released
.release:
xor eax, eax
xchg eax, [page_tabs+esi*4]
test al, 1
xor ecx, ecx
xchg ecx, [page_tabs+esi*4]
test cl, 1
jz @F
call free_page
 
call @core_free@4
mov eax, esi
shl eax, 12
invlpg [eax]
@@:
inc esi
dec ecx
dec edi
jnz .release
.released:
push edi
 
mov edx, [current_slot]
mov esi, dword [edx+APPDATA.heap_base]
mov edi, dword [edx+APPDATA.heap_top]
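The rewritten .release loop frees user-heap pages through the kernel allocator: @core_free@4 is __fastcall, taking its argument in ecx (hence the xor/xchg into ecx instead of eax), the raw PTE can be passed as-is because core_free shifts the low flag bits away, and each page is invalidated after its entry is cleared. In C the loop amounts to the following sketch (block_base, pages, swap_pte and invalidate are hypothetical stand-ins for the registers, xchg and invlpg):

addr_t *ptab = (addr_t*)page_tabs;
for (count_t page = block_base >> 12; pages--; page++)
{
    addr_t pte = swap_pte(&ptab[page], 0);  /* grab and clear the entry   */
    if (pte & 1)                            /* present?                   */
    {
        core_free(pte);                     /* low flag bits shift out    */
        invalidate(page << 12);             /* invlpg on the linear page  */
    }
}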
276,20 → 277,28
cmp edx, ebx
jb .realloc_add
; release part of allocated memory
 
push ecx
.loop:
cmp edx, ebx
jz .release_done
dec edx
xor eax, eax
xchg eax, [page_tabs+edx*4]
xor ecx, ecx
xchg ecx, [page_tabs+edx*4]
test al, 1
jz .loop
call free_page
 
push edx
call @core_free@4
pop edx
mov eax, edx
shl eax, 12
invlpg [eax]
jmp .loop
.release_done:
 
pop ecx
 
sub ebx, ecx
cmp ebx, 1
jnz .nofreeall
/kernel/branches/kolibri_pe/core/memory.inc
29,12 → 29,6
 
ret
 
 
align 4
free_page:
 
ret
 
proc map_io_mem stdcall, base:dword, size:dword, flags:dword
 
push edi
246,18 → 240,16
shr edi, 12
shr esi, 12
@@:
mov eax, [app_page_tabs+edi*4]
test eax, 1
mov ecx, [app_page_tabs+edi*4]
test ecx, 1
jz .next
mov dword [app_page_tabs+edi*4], 2
mov ebx, edi
shl ebx, 12
push eax
invlpg [ebx]
pop eax
call free_page
 
.next: add edi, 1
call @core_free@4
.next:
add edi, 1
cmp edi, esi
jb @B
 
/kernel/branches/kolibri_pe/core/mm.c
549,7 → 549,7
return (v << FRAME_WIDTH);
}
 
addr_t __fastcall core_alloc(u32_t order) //export
addr_t __fastcall core_alloc(u32_t order)
{
eflags_t efl;
pfn_t v;
559,11 → 559,14
v = zone_frame_alloc(&z_core, order);
spinlock_unlock(&z_core.lock);
safe_sti(efl);
DBG("core alloc: %x, size %x\n", v << FRAME_WIDTH, (1<<order)<<12);
 
DBG("core alloc: %x, size %x remain %d\n", v << FRAME_WIDTH,
((1<<order)<<12), z_core.free_count);
 
return (v << FRAME_WIDTH);
};
 
void __fastcall core_free(addr_t frame) //export
void __fastcall core_free(addr_t frame)
{
eflags_t efl;
 
572,6 → 575,9
zone_free(&z_core, frame>>12);
spinlock_unlock(&z_core.lock);
safe_sti(efl);
 
DBG("core free %x remain %d\n", frame, z_core.free_count);
 
}
 
addr_t alloc_page() //obsolete
587,7 → 593,7
spinlock_unlock(&z_core.lock);
safe_sti(efl);
 
DBG("alloc_page: %x\n", v << FRAME_WIDTH);
DBG("alloc_page: %x remain %d\n", v << FRAME_WIDTH, z_core.free_count);
 
restore_edx(edx);
return (v << FRAME_WIDTH);
605,7 → 611,8
 
ASSERT(frame->refcount);
 
if (!--frame->refcount) {
if (!--frame->refcount)
{
buddy_system_free(zone, &frame->buddy_link);
 
/* Update zone information. */
/kernel/branches/kolibri_pe/core/sys32.inc
500,7 → 500,7
mov eax, [.slot]
shl eax, 8
mov eax,[SLOT_BASE+eax+APPDATA.dir_table]
; stdcall destroy_app_space, eax
stdcall destroy_app_space, eax
 
mov esi, [.slot]
cmp [fpu_owner],esi ; if user fpu last -> fpu user = 1
629,23 → 629,23
shl edi, 8
add edi,SLOT_BASE
 
mov eax,[edi+APPDATA.pl0_stack]
sub eax, OS_BASE
call free_page
mov ecx,[edi+APPDATA.pl0_stack]
sub ecx, OS_BASE
call @core_free@4
 
mov eax,[edi+APPDATA.cur_dir]
sub eax, OS_BASE
call free_page
mov ecx,[edi+APPDATA.cur_dir]
sub ecx, OS_BASE
call @core_free@4
 
mov eax, [edi+APPDATA.io_map]
cmp eax, (tss._io_map_0-OS_BASE+PG_MAP)
mov ecx, [edi+APPDATA.io_map]
cmp ecx, (tss._io_map_0-OS_BASE+PG_MAP)
je @F
call free_page
call @core_free@4
@@:
mov eax, [edi+APPDATA.io_map+4]
cmp eax, (tss._io_map_1-OS_BASE+PG_MAP)
mov ecx, [edi+APPDATA.io_map+4]
cmp ecx, (tss._io_map_1-OS_BASE+PG_MAP)
je @F
call free_page
call @core_free@4
@@:
mov eax, 0x20202020
stosd
/kernel/branches/kolibri_pe/core/taskman.inc
200,8 → 200,6
mov eax,[hdr_mem]
mov [ebx+APPDATA.mem_size],eax
 
if GREEDY_KERNEL
else
mov ecx, [hdr_mem]
mov edi, [file_size]
add edi, 4095
213,11 → 211,9
cld
rep stosb
@@:
end if
mov ecx, [file_base]
call @mem_free@4
 
; release only virtual space, not physical memory
 
stdcall free_kernel_space, [file_base] ;
lea eax, [hdr_cmdline]
lea ebx, [cmdline]
lea ecx, [filename]
375,14 → 371,6
shr ecx, 12
mov [img_pages], ecx
 
; if GREEDY_KERNEL
; lea eax, [ecx+ebx+2] ;only image size
; else
; lea eax, [eax+ebx+2] ;all requested memory
; end if
; cmp eax, [pg_data.pages_free]
; ja .fail
 
call _alloc_page
test eax, eax
mov [dir_addr], eax
434,42 → 422,25
xor eax, eax
rep stosd
 
mov ecx, [img_pages]
mov ebx, PG_UW
mov esi, [img_base]
shr esi, 10
add esi, page_tabs
xor edx, edx
mov edi, page_tabs
.remap:
lodsd
or eax, ebx ; force user level r/w access
stosd
add edx, 0x1000
dec [app_pages]
dec ecx
jnz .remap
 
mov ecx, [app_pages]
test ecx, ecx
jz .done
 
if GREEDY_KERNEL
mov eax, 0x02
rep stosd
else
 
xor ebx, ebx
.alloc:
call _alloc_page
xor ecx, ecx
call @core_alloc@4
test eax, eax
jz .fail
 
stdcall map_page,edx,eax,dword PG_UW
add edx, 0x1000
stdcall map_page,ebx,eax,dword PG_UW
add ebx, 0x1000
dec [app_pages]
jnz .alloc
end if
 
mov ecx, [img_size] ; FIXME remap md
mov esi, [img_base]
xor edi, edi
 
rep movsb
 
.done:
dec [pg_data.pg_mutex]
mov eax, [dir_addr]
495,24 → 466,26
align 4
proc destroy_page_table stdcall, pg_tab:dword
 
push ebx
push esi
 
mov esi, [pg_tab]
mov ecx, 1024
mov ebx, 1024
.free:
mov eax, [esi]
test eax, 1
mov ecx, [esi]
test ecx, 1
jz .next
 
test eax, 1 shl 9
test ecx, 1 shl 9
jnz .next ;skip shared pages
 
call free_page
call @core_free@4
.next:
add esi, 4
dec ecx
dec ebx
jnz .free
pop esi
pop ebx
ret
endp
 
563,15 → 536,15
 
stdcall destroy_page_table, eax
 
mov eax, [esi]
call free_page
mov ecx, [esi]
call @core_free@4
.next:
add esi, 4
dec edi
jnz .destroy
 
mov eax, [pg_dir]
call free_page
mov ecx, [pg_dir]
call @core_free@4
.exit:
dec [pg_data.pg_mutex]
ret
744,7 → 717,6
mov eax, [slot]
shl eax,8
mov ebx, [offset]
; add ebx, new_app_base
push ecx
stdcall map_memEx, [proc_mem_map],\
[SLOT_BASE+eax+0xB8],\
/kernel/branches/kolibri_pe/include/core.h
30,6 → 30,13
 
# define DBG(format,...)
 
# define PANIC(expr) \
if (!(expr)) { \
panic_printf("Kernel panic in %s() at %s:%u: " \
"assertion failed (%s)",__func__ ,__FILE__,__LINE__, \
#expr); \
};
 
#endif
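The PANIC macro lands beside the no-op DBG branch shown here, so the check presumably stays active in builds where DBG is compiled out. A hypothetical call site:

/* On failure prints, e.g.:
 * "Kernel panic in mem_alloc() at heap.c:123: assertion failed (lmd != 0)" */
PANIC(lmd != 0);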
 
 
/kernel/branches/kolibri_pe/include/mm.h
71,4 → 71,4
void __fastcall frame_set_parent(pfn_t pfn, void *data);
void* __fastcall frame_get_parent(pfn_t pfn);
 
void* __fastcall heap_alloc(size_t size, u32_t flags) ;
void* __fastcall mem_alloc(size_t size, u32_t flags) ;
/kernel/branches/kolibri_pe/kernel.asm
146,7 → 146,6
 
extrn @init_heap@8
extrn @find_large_md@4
extrn @find_small_md@4
extrn @phis_alloc@4
 
extrn @mem_alloc@8
2397,11 → 2396,6
add ecx, 0xFFF
shr ecx, 12
.z:
mov eax, [page_tabs+ebx*4]
test al, 1
jz @f
call free_page
@@:
mov eax, [page_tabs+esi*4]
or al, PG_UW
mov [page_tabs+ebx*4], eax