Subversion Repositories Kolibri OS

Compare Revisions

Rev 885 → Rev 886

/kernel/branches/kolibri_pe/core/dll.inc
67,7 → 67,6
.err:
xor eax, eax
ret
 
endp
 
align 4
179,6 → 178,7
cmp dword [esp + 32], 8
mov al, 0x20
jb @f
 
out 0xa0, al
@@:
out 0x20, al
195,6 → 195,7
mov ebx,[current_slot]
test dword [ebx+APPDATA.event_mask],EVENT_NOTIFY
jz @f
 
and dword [ebx+APPDATA.event_mask], not EVENT_NOTIFY
mov edi, [p_ev]
mov dword [edi], EV_INTR
465,6 → 466,7
add [pSym], 18
dec [count]
jnz @b
 
xor eax, eax
ret
.ok:
474,11 → 476,10
endp
 
align 4
proc get_curr_task
get_curr_task:
mov eax,[CURRENT_TASK]
shl eax, 8
ret
endp
 
align 4
proc get_fileinfo stdcall, file_name:dword, info:dword
612,7 → 613,10
 
mov [file2], eax
stdcall unpack, [file], eax
stdcall kernel_free, [file]
 
mov ecx, [file]
call @mem_free@4
 
mov eax, [file2]
mov ebx, [file_size]
.exit:
844,6 → 848,7
call @mem_alloc@8
test eax, eax
jz .fail
 
mov [img_base], eax
 
mov edi, eax
1015,7 → 1020,6
jmp .next
.copy:
add esi, edx
; add edi, new_app_base
mov ecx, [eax+CFS.SizeOfRawData]
cld
rep movsb
/kernel/branches/kolibri_pe/core/exports.inc
112,7 → 112,7
dd szReleasePages , release_pages
 
dd szFreeKernelSpace , free_kernel_space ;stdcall
dd szHeapAlloc , @heap_alloc@8 ;fastcall
dd szHeapAlloc , @mem_alloc@8 ;fastcall
dd szKernelFree , kernel_free ;stdcall
dd szUserAlloc , user_alloc ;stdcall
dd szUserFree , user_free ;stdcall
/kernel/branches/kolibri_pe/core/heap.c
13,33 → 13,42
addr_t base;
size_t size;
void* parent;
u32_t reserved;
u32_t state;
}md_t;
 
#define MD_FREE 1
#define MD_USED 2
 
typedef struct {
SPINLOCK_DECLARE(lock); /**< this lock protects everything below */
 
u32_t availmask;
link_t list[32];
link_t free[32];
 
link_t used;
}heap_t;
 
 
slab_cache_t *md_slab;
slab_cache_t *phm_slab;
 
 
heap_t lheap;
heap_t sheap;
 
 
 
static inline void _set_lmask(count_t idx)
{ asm volatile ("bts DWORD PTR [_lheap], %0"::"r"(idx):"cc"); }
{ asm volatile ("bts %0, _lheap"::"r"(idx):"cc"); }
 
static inline void _reset_lmask(count_t idx)
{ asm volatile ("btr DWORD PTR [_lheap], %0"::"r"(idx):"cc"); }
{ asm volatile ("btr %0, _lheap"::"r"(idx):"cc"); }
 
static inline void _set_smask(count_t idx)
{ asm volatile ("bts DWORD PTR [_sheap], %0"::"r"(idx):"cc"); }
{ asm volatile ("bts %0, _sheap"::"r"(idx):"cc"); }
 
static inline void _reset_smask(count_t idx)
{ asm volatile ("btr DWORD PTR [_sheap], %0"::"r"(idx):"cc"); }
{ asm volatile ("btr %0, _sheap"::"r"(idx):"cc"); }
 
 
int __fastcall init_heap(addr_t base, size_t size)
54,10 → 63,14
 
for (i = 0; i < 32; i++)
{
list_initialize(&lheap.list[i]);
list_initialize(&sheap.list[i]);
list_initialize(&lheap.free[i]);
list_initialize(&sheap.free[i]);
};
 
list_initialize(&lheap.used);
list_initialize(&sheap.used);
 
 
md_slab = slab_cache_create(sizeof(md_t), 32,NULL,NULL,SLAB_CACHE_MAGDEFERRED);
 
md = (md_t*)slab_alloc(md_slab,0);
66,9 → 79,9
md->base = base;
md->size = size;
md->parent = NULL;
md->reserved = 0;
md->state = MD_FREE;
 
list_prepend(&md->link, &lheap.list[31]);
list_prepend(&md->link, &lheap.free[31]);
lheap.availmask = 0x80000000;
sheap.availmask = 0x00000000;
 
93,8 → 106,8
{
if(idx0 == 31)
{
md_t *tmp = (md_t*)lheap.list[31].next;
while((link_t*)tmp != &lheap.list[31])
md_t *tmp = (md_t*)lheap.free[31].next;
while((link_t*)tmp != &lheap.free[31])
{
if(tmp->size >= size)
{
110,22 → 123,24
{
idx0 = _bsf(mask);
 
ASSERT( !list_empty(&lheap.list[idx0]))
ASSERT( !list_empty(&lheap.free[idx0]))
 
md = (md_t*)lheap.list[idx0].next;
md = (md_t*)lheap.free[idx0].next;
};
}
else
return NULL;
 
ASSERT(md->state == MD_FREE);
 
list_remove((link_t*)md);
if(list_empty(&lheap.list[idx0]))
if(list_empty(&lheap.free[idx0]))
_reset_lmask(idx0);
 
if(md->size > size)
{
count_t idx1;
md_t *new_md = (md_t*)slab_alloc(md_slab,0);
md_t *new_md = (md_t*)slab_alloc(md_slab,0); /* FIXME check */
 
link_initialize(&new_md->link);
list_insert(&new_md->adj, &md->adj);
132,6 → 147,7
 
new_md->base = md->base;
new_md->size = size;
new_md->state = MD_USED;
 
md->base+= size;
md->size-= size;
138,11 → 154,13
 
idx1 = (md->size>>22) - 1 < 32 ? (md->size>>22) - 1 : 31;
 
list_prepend(&md->link, &lheap.list[idx1]);
list_prepend(&md->link, &lheap.free[idx1]);
_set_lmask(idx1);
 
return new_md;
}
};
md->state = MD_USED;
 
return md;
}
 
168,8 → 186,10
{
if(idx0 == 31)
{
md_t *tmp = (md_t*)sheap.list[31].next;
while((link_t*)tmp != &sheap.list[31])
ASSERT( !list_empty(&sheap.free[31]));
 
md_t *tmp = (md_t*)sheap.free[31].next;
while((link_t*)tmp != &sheap.free[31])
{
if(tmp->size >= size)
{
182,8 → 202,10
else
{
idx0 = _bsf(mask);
ASSERT( !list_empty(&sheap.list[idx0]))
md = (md_t*)sheap.list[idx0].next;
 
ASSERT( !list_empty(&sheap.free[idx0]));
 
md = (md_t*)sheap.free[idx0].next;
}
};
 
191,8 → 213,10
{
DBG("remove md %x\n", md);
 
ASSERT(md->state==MD_FREE);
 
list_remove((link_t*)md);
if(list_empty(&sheap.list[idx0]))
if(list_empty(&sheap.free[idx0]))
_reset_smask(idx0);
}
else
208,19 → 232,20
return NULL;
};
 
md = (md_t*)slab_alloc(md_slab,0);
md = (md_t*)slab_alloc(md_slab,0); /* FIXME check */
 
link_initialize(&md->link);
list_initialize(&md->adj);
md->base = lmd->base;
md->size = lmd->size;
md->parent = lmd;
md->reserved = 0;
md->state = MD_USED;
};
 
if(md->size > size)
{
count_t idx1;
md_t *new_md = (md_t*)slab_alloc(md_slab,0);
md_t *new_md = (md_t*)slab_alloc(md_slab,0); /* FIXME check */
 
link_initialize(&new_md->link);
list_insert(&new_md->adj, &md->adj);
228,10 → 253,11
new_md->base = md->base;
new_md->size = size;
new_md->parent = md->parent;
new_md->reserved = 0;
new_md->state = MD_USED;
 
md->base+= size;
md->size-= size;
md->state = MD_FREE;
 
idx1 = (md->size>>12) - 1 < 32 ? (md->size>>12) - 1 : 31;
 
238,16 → 264,16
DBG("insert md %x, base %x size %x idx %x\n", md,md->base, md->size,idx1);
 
if( idx1 < 31)
list_prepend(&md->link, &sheap.list[idx1]);
list_prepend(&md->link, &sheap.free[idx1]);
else
{
if( list_empty(&sheap.list[31]))
list_prepend(&md->link, &sheap.list[31]);
if( list_empty(&sheap.free[31]))
list_prepend(&md->link, &sheap.free[31]);
else
{
md_t *tmp = (md_t*)sheap.list[31].next;
md_t *tmp = (md_t*)sheap.free[31].next;
 
while((link_t*)tmp != &sheap.list[31])
while((link_t*)tmp != &sheap.free[31])
{
if(md->base < tmp->base)
break;
262,11 → 288,93
safe_sti(efl);
 
return new_md;
}
};
 
md->state = MD_USED;
 
safe_sti(efl);
 
return md;
}
 
void __fastcall free_small_md(md_t *md)
{
eflags_t efl ;
md_t *fd;
md_t *bk;
count_t idx;
 
efl = safe_cli();
spinlock_lock(&sheap.lock);
 
if( !list_empty(&md->adj))
{
bk = (md_t*)md->adj.prev;
fd = (md_t*)md->adj.next;
 
if(fd->state == MD_FREE)
{
idx = (fd->size>>12) - 1 < 32 ? (fd->size>>12) - 1 : 31;
 
list_remove((link_t*)fd);
if(list_empty(&sheap.free[idx]))
_reset_smask(idx);
 
md->size+= fd->size;
md->adj.next = fd->adj.next;
md->adj.next->prev = (link_t*)md;
slab_free(md_slab, fd);
};
if(bk->state == MD_FREE)
{
idx = (bk->size>>12) - 1 < 32 ? (bk->size>>12) - 1 : 31;
 
list_remove((link_t*)bk);
if(list_empty(&sheap.free[idx]))
_reset_smask(idx);
 
bk->size+= md->size;
bk->adj.next = md->adj.next;
bk->adj.next->prev = (link_t*)bk;
slab_free(md_slab, md);
md = fd;
};
};
 
md->state = MD_FREE;
 
idx = (md->size>>12) - 1 < 32 ? (md->size>>12) - 1 : 31;
 
_set_smask(idx);
 
if( idx < 31)
list_prepend(&md->link, &sheap.free[idx]);
else
{
if( list_empty(&sheap.free[31]))
list_prepend(&md->link, &sheap.free[31]);
else
{
md_t *tmp = (md_t*)sheap.free[31].next;
 
while((link_t*)tmp != &sheap.free[31])
{
if(md->base < tmp->base)
break;
tmp = (md_t*)tmp->link.next;
}
list_insert(&md->link, &tmp->link);
};
};
spinlock_unlock(&sheap.lock);
safe_sti(efl);
 
};
 
 
#define page_tabs 0xDF800000
 
/*
phismem_t* __fastcall phis_alloc(count_t count)
{
phismem_t *phm;
289,8 → 397,6
return phm;
}
 
#define page_tabs 0xDF800000
 
void map_phm(addr_t base, phismem_t *phm, u32_t mapflags)
{
count_t count;
317,28 → 423,18
}
}
};
*/
 
void* __fastcall mem_alloc(size_t size, u32_t flags)
{
eflags_t efl;
 
md_t *md;
phismem_t *phm;
 
size = (size+4095)&~4095;
DBG("\nmem_alloc: %x bytes\n", size);
 
md = find_small_md(size);
if( md )
{
phm = phis_alloc(size>>12);
map_phm(md->base , phm, flags);
return (void*)md->base;
}
return NULL;
};
ASSERT(size != 0);
 
void * __fastcall heap_alloc(size_t size, u32_t flags)
{
md_t *md;
 
size = (size+4095)&~4095;
 
md = find_small_md(size);
345,6 → 441,8
 
if( md )
{
ASSERT(md->state == MD_USED);
 
if( flags & PG_MAP )
{
count_t tmp = size >> 12;
356,10 → 454,10
addr_t frame;
size_t size;
 
asm volatile ("bsr %0, %1":"=&r"(order):"r"(tmp):"cc");
asm volatile ("btr %0, %1" :"=r"(tmp):"r"(order):"cc");
asm volatile ("bsr %1, %0":"=&r"(order):"r"(tmp):"cc");
asm volatile ("btr %1, %0" :"=r"(tmp):"r"(order):"cc");
 
frame = core_alloc(order) | flags;
frame = core_alloc(order) | flags; /* FIXME check */
 
size = (1 << order);
while(size--)
369,10 → 467,86
};
};
};
DBG("alloc_heap: %x size %x\n\n",md->base, size);
 
efl = safe_cli();
spinlock_lock(&sheap.lock);
 
if( list_empty(&sheap.used) )
list_prepend(&md->link, &sheap.used);
else
{
md_t *tmp = (md_t*)sheap.used.next;
 
while((link_t*)tmp != &sheap.used)
{
if(md->base < tmp->base)
break;
tmp = (md_t*)tmp->link.next;
}
list_insert(&md->link, &tmp->link);
};
 
spinlock_unlock(&sheap.lock);
safe_sti(efl);
 
DBG("allocate: %x size %x\n\n",md->base, size);
return (void*)md->base;
};
return NULL;
};
 
void __fastcall mem_free(void *mem)
{
eflags_t efl;
 
md_t *tmp;
md_t *md = NULL;
 
DBG("mem_free: %x\n",mem);
 
ASSERT( mem != 0 );
ASSERT( ((addr_t)mem & 0xFFF) == 0 );
ASSERT( ! list_empty(&sheap.used));
 
efl = safe_cli();
 
tmp = (md_t*)sheap.used.next;
 
while((link_t*)tmp != &sheap.used)
{
if( tmp->base == (addr_t)mem )
{
md = tmp;
break;
};
tmp = (md_t*)tmp->link.next;
}
 
if( md )
{
DBG("\tmd: %x base: %x size: %x\n",md, md->base, md->size);
 
ASSERT(md->state == MD_USED);
 
count_t tmp = md->size >> 12;
addr_t *pte = &((addr_t*)page_tabs)[md->base>>12];
 
while(tmp--)
{
*pte++ = 0;
asm volatile (
"invlpg (%0)"
:
:"r" (mem) );
mem+= 4096;
};
list_remove((link_t*)md);
free_small_md( md );
}
else
{
DBG("\tERROR: invalid base address: %x\n", mem);
};
 
safe_sti(efl);
};
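
A minimal standalone sketch of the size-class lookup the rewritten small-heap code above relies on: free descriptors sit in sheap.free[0..31], indexed by (size>>12)-1 capped at 31, and availmask records which lists are non-empty so a bsf picks the first usable class. The _bsf() helper is copied from the converted include/core.h; deriving mask as availmask & (~0u << idx0) is an assumption, since the diff only shows the idx0 = _bsf(mask) step. Assumes GCC on x86 with default AT&T syntax; this is a sketch, not kernel code.

#include <stdio.h>

typedef unsigned int u32_t;
typedef unsigned int count_t;

static inline count_t _bsf(u32_t arg)           /* lowest set bit, as in core.h */
{
    count_t n;
    asm volatile ("xorl %0, %0 \n\t"
                  "bsf %1, %0"
                  :"=&r"(n)
                  :"r"(arg));
    return n;
}

static count_t size_to_idx(u32_t size)          /* size already page-aligned */
{
    count_t idx = (size >> 12) - 1;
    return idx < 32 ? idx : 31;
}

int main(void)
{
    u32_t availmask = (1u << 3) | (1u << 31);   /* classes 3 and 31 non-empty */
    u32_t size      = 2 * 4096;                 /* two-page request */

    count_t idx0 = size_to_idx(size);           /* preferred class: 1 */
    u32_t   mask = availmask & (~0u << idx0);   /* non-empty classes >= idx0 (assumed derivation) */

    if (mask)
        printf("request %u bytes -> take a descriptor from sheap.free[%u]\n",
               size, _bsf(mask));
    else
        printf("no free descriptor large enough\n");
    return 0;
}
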
/kernel/branches/kolibri_pe/core/mm.c
16,7 → 16,7
static inline u32_t save_edx(void)
{
u32_t val;
asm volatile ("mov %0, edx":"=r"(val));
asm volatile ("movl %%edx, %0":"=r"(val));
return val;
};
 
524,9 → 524,9
{
int n;
asm volatile (
"xor eax, eax \n\t"
"bsr eax, edx \n\t"
"inc eax"
"xorl %eax, %eax \n\t"
"bsr %edx, %eax \n\t"
"incl %eax"
:"=a" (n)
:"d"(arg)
);
/kernel/branches/kolibri_pe/core/slab.c
20,9 → 20,7
 
static slab_cache_t * slab_cache_alloc();
 
void slab_free(slab_cache_t *cache, void *obj);
 
 
/**
* Allocate frames for slab space and initialize
*
313,22 → 311,22
/** Return object to cache, use slab if known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
// ipl_t ipl;
eflags_t efl;
 
// ipl = interrupts_disable();
efl = safe_cli();
 
// if ((cache->flags & SLAB_CACHE_NOMAGAZINE) \
|| magazine_obj_put(cache, obj)) {
// || magazine_obj_put(cache, obj)) {
 
slab_obj_destroy(cache, obj, slab);
 
// }
// interrupts_restore(ipl);
// atomic_dec(&cache->allocated_objs);
safe_sti(efl);
atomic_dec(&cache->allocated_objs);
}
 
/** Return slab object to cache */
void slab_free(slab_cache_t *cache, void *obj)
void __fastcall slab_free(slab_cache_t *cache, void *obj)
{
_slab_free(cache, obj, NULL);
}
/kernel/branches/kolibri_pe/core/taskman.inc
187,7 → 187,6
loop .copy_process_name_loop
.copy_process_name_done:
 
 
mov ebx, cr3
mov [save_cr3], ebx
 
218,7 → 217,7
 
; release only virtual space, not physical memory
 
stdcall free_kernel_space, [file_base]
stdcall free_kernel_space, [file_base] ;
lea eax, [hdr_cmdline]
lea ebx, [cmdline]
lea ecx, [filename]
415,7 → 414,6
mov eax, edi
call set_cr3
 
 
mov edx, [app_tabs]
mov edi, master_tab
@@:
504,8 → 502,10
mov eax, [esi]
test eax, 1
jz .next
 
test eax, 1 shl 9
jnz .next ;skip shared pages
 
call free_page
.next:
add esi, 4
530,9 → 530,11
shl ecx,5
cmp byte [CURRENT_TASK+ecx+0xa],9 ;if process running?
jz @f ;skip empty slots
 
shl ecx,3
cmp [SLOT_BASE+ecx+0xB8],ebx ;compare page directory addresses
jnz @f
 
inc edx ;thread found
@@:
inc eax
858,6 → 860,7
.wait_lock:
cmp [application_table_status],0
je .get_lock
 
call change_task
jmp .wait_lock
 
930,6 → 933,7
.do_wait:
cmp dword [ebx],0
je .get_lock
 
call change_task
jmp .do_wait
.get_lock:
937,6 → 941,7
xchg eax, [ebx]
test eax, eax
jnz .do_wait
 
pop ebx
pop eax
ret
1101,13 → 1106,12
; set if debuggee
test byte [flags], 1
jz .no_debug
 
inc ecx ; process state - suspended
mov eax,[CURRENT_TASK]
mov [SLOT_BASE+ebx*8+APPDATA.debugger_slot],eax
.no_debug:
mov [CURRENT_TASK+ebx+TASKDATA.state], cl
;mov esi,new_process_running
;call sys_msg_board_str ;output information about successful startup
DEBUGF 1,"%s",new_process_running
ret
endp
/kernel/branches/kolibri_pe/include/atomic.h
36,17 → 36,17
 
static inline void atomic_inc(atomic_t *val) {
#ifdef USE_SMP
asm volatile ("lock inc %0\n" : "+m" (val->count));
asm volatile ("lock incl %0\n" : "+m" (val->count));
#else
asm volatile ("inc %0\n" : "+m" (val->count));
asm volatile ("incl %0\n" : "+m" (val->count));
#endif /* USE_SMP */
}
 
static inline void atomic_dec(atomic_t *val) {
#ifdef USE_SMP
asm volatile ("lock dec %0\n" : "+m" (val->count));
asm volatile ("lock decl %0\n" : "+m" (val->count));
#else
asm volatile ("dec %0\n" : "+m" (val->count));
asm volatile ("decl %0\n" : "+m" (val->count));
#endif /* USE_SMP */
}
 
98,22 → 98,20
u32_t tmp;
 
// preemption_disable();
 
asm volatile (
"0:\n"
"pause\n\t" /* Pentium 4's HT love this instruction */
"mov %1, [%0]\n\t"
"test %1, %1\n\t"
"jnz 0b\n\t" /* lightweight looping on locked spinlock */
"pause\n" /* Pentium 4's HT love this instruction */
"mov %0, %1\n"
"testl %1, %1\n"
"jnz 0b\n" /* lightweight looping on locked spinlock */
 
"inc %1\n\t" /* now use the atomic operation */
"xchg [%0], %1\n\t"
"test %1, %1\n\t"
"jnz 0b\n\t"
: "+m" (val->count), "=r"(tmp)
"incl %1\n" /* now use the atomic operation */
"xchgl %0, %1\n"
"testl %1, %1\n"
"jnz 0b\n"
: "+m" (val->count), "=&r"(tmp)
);
/*
* Prevent critical section code from bleeding out this way up.
*/
// CS_ENTER_BARRIER();
}
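
A user-mode sketch of the test-and-test-and-set loop above, with the asm body copied from the rewritten AT&T version; the local atomic_t stand-in and the plain-store unlock are assumptions (the kernel's own spinlock_unlock is not part of this diff). Assumes GCC on x86.

#include <stdio.h>

typedef struct { volatile unsigned int count; } atomic_t;   /* local stand-in */

static void spinlock_lock(atomic_t *val)
{
    unsigned int tmp;
    asm volatile (
        "0:\n"
        "pause\n"               /* be polite to the sibling hyper-thread    */
        "mov %0, %1\n"          /* plain load of the lock word              */
        "testl %1, %1\n"
        "jnz 0b\n"              /* spin with cheap loads while it is taken  */
        "incl %1\n"             /* tmp = 1                                  */
        "xchgl %0, %1\n"        /* atomic swap: try to take the lock        */
        "testl %1, %1\n"
        "jnz 0b\n"              /* another CPU got it first, spin again     */
        : "+m" (val->count), "=&r"(tmp)
    );
}

static void spinlock_unlock(atomic_t *val)
{
    val->count = 0;             /* release; a plain store suffices on x86 (assumption) */
}

int main(void)
{
    atomic_t lock = { 0 };
    spinlock_lock(&lock);
    printf("locked, count = %u\n", lock.count);
    spinlock_unlock(&lock);
    printf("unlocked, count = %u\n", lock.count);
    return 0;
}
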
 
/kernel/branches/kolibri_pe/include/core.h
37,8 → 37,8
{
eflags_t tmp;
asm volatile (
"pushf\n\t"
"pop %0\n\t"
"pushfl\n\t"
"popl %0\n\t"
"cli\n"
: "=r" (tmp)
);
48,8 → 48,8
static inline void safe_sti(eflags_t efl)
{
asm volatile (
"push %0\n\t"
"popf\n"
"pushl %0\n\t"
"popfl\n"
: : "r" (efl)
);
}
57,8 → 57,8
static inline count_t fnzb(u32_t arg)
{
count_t n;
asm volatile ("xor %0, %0 \n\t"
"bsr %0, %1"
asm volatile ("xorl %0, %0 \n\t"
"bsr %1, %0"
:"=&r" (n)
:"r"(arg)
);
68,8 → 68,8
static inline count_t _bsf(u32_t arg)
{
count_t n;
asm volatile ("xor %0, %0 \n\t"
"bsf %0, %1"
asm volatile ("xorl %0, %0 \n\t"
"bsf %1, %0"
:"=&r" (n)
:"r"(arg)
);
/kernel/branches/kolibri_pe/include/slab.h
78,3 → 78,5
int flags);
 
void* __fastcall slab_alloc(slab_cache_t *cache, int flags);
void __fastcall slab_free(slab_cache_t *cache, void *obj);
 
/kernel/branches/kolibri_pe/kernel.asm
147,10 → 147,10
extrn @find_large_md@4
extrn @find_small_md@4
extrn @phis_alloc@4
 
extrn @mem_alloc@8
extrn @mem_free@4
 
extrn @heap_alloc@8
 
extrn _slab_cache_init
extrn _alloc_page
 
159,9 → 159,6
extrn _bx_from_load
 
 
@mem_alloc@8 equ @heap_alloc@8
 
 
section '.flat' code readable align 4096
 
use32
/kernel/branches/kolibri_pe/makefile
6,7 → 6,7
 
DEFS = -DUSE_SMP -DCONFIG_DEBUG
 
CFLAGS = -c -O2 -I $(INCLUDE) -fomit-frame-pointer -fno-builtin-printf -masm=intel
CFLAGS = -c -O2 -I $(INCLUDE) -fomit-frame-pointer -fno-builtin-printf
LDFLAGS = -shared -s -Map kernel.map --image-base 0x100000 --file-alignment 32
 
KERNEL_SRC:= \
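
The dropped -masm=intel switch above is what drives the C-side changes in this revision: every inline-asm block in core/mm.c, core/heap.c, include/core.h and include/atomic.h was rewritten from Intel operand order to GCC's default AT&T order (source first, destination second). A minimal standalone check, with fnzb() copied from the converted include/core.h and an illustrative main(); assumes GCC on x86/x86_64.

#include <stdio.h>

typedef unsigned int u32_t;
typedef unsigned int count_t;

static inline count_t fnzb(u32_t arg)           /* index of the highest set bit */
{
    count_t n;
    asm volatile ("xorl %0, %0 \n\t"
                  "bsr %1, %0"                  /* AT&T: bsr source, destination */
                  :"=&r" (n)
                  :"r"(arg));
    return n;
}

int main(void)
{
    printf("fnzb(0x80000010) = %u\n", fnzb(0x80000010u));   /* prints 31 */
    return 0;
}
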