/kernel/branches/Kolibri-acpi/core/apic.inc |
---|
1,11 → 1,13 |
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; |
;; ;; |
;; Copyright (C) KolibriOS team 2004-2012. All rights reserved. ;; |
;; Copyright (C) KolibriOS team 2004-2014. All rights reserved. ;; |
;; Distributed under terms of the GNU General Public License ;; |
;; ;; |
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; |
$Revision: 4850 $ |
iglobal |
IRQ_COUNT dd 24 |
endg |
/kernel/branches/Kolibri-acpi/core/clipboard.inc |
---|
1,3 → 1,13 |
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; |
;; ;; |
;; Copyright (C) KolibriOS team 2013-2014. All rights reserved. ;; |
;; Distributed under terms of the GNU General Public License ;; |
;; ;; |
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; |
$Revision: 4850 $ |
;------------------------------------------------------------------------------ |
align 4 |
sys_clipboard: |
/kernel/branches/Kolibri-acpi/core/conf_lib-sp.inc |
---|
1,3 → 1,13 |
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; |
;; ;; |
;; Copyright (C) KolibriOS team 2013-2014. All rights reserved. ;; |
;; Distributed under terms of the GNU General Public License ;; |
;; ;; |
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; |
$Revision: 4850 $ |
; Éste archivo debe ser editado con codificación CP866 |
ugui_mouse_speed cp850 'velocidad del ratón',0 |
/kernel/branches/Kolibri-acpi/core/debug.inc |
---|
136,9 → 136,18 |
; ecx=pid |
; edx=sizeof(CONTEXT) |
; esi->CONTEXT |
; destroys eax,ecx,edx,esi,edi |
cmp edx, 28h |
jnz .ret |
; destroys eax,ebx,ecx,edx,esi,edi |
xor ebx, ebx ; 0 - get only gp regs |
cmp edx, 40 |
je .std_ctx |
cmp edx, 48+288 |
jne .ret |
inc ebx ; 1 - get sse context |
; TODO legacy 32-bit FPU/MMX context |
.std_ctx: |
; push ecx |
; mov ecx, esi |
call check_region |
147,8 → 156,15 |
jnz .ret |
call get_debuggee_slot |
jc .ret |
shr eax, 5 |
cmp eax, [fpu_owner] |
jne @f |
inc bh ; set swap context flag |
@@: |
shl eax, 8 |
mov edi, esi |
mov eax, [eax*8+SLOT_BASE+APPDATA.pl0_stack] |
mov eax, [eax+SLOT_BASE+APPDATA.pl0_stack] |
lea esi, [eax+RING0_STACK_SIZE] |
.ring0: |
178,6 → 194,29 |
mov [edi+4], eax |
lodsd ;esp |
mov [edi+18h], eax |
dec bl |
js .ret |
dec bl |
jns .ret |
test bh, bh ; check swap flag |
jz @F |
ffree st0 ; swap context |
@@: |
add esi, 4 ;top of ring0 stack |
;fpu/sse context saved here |
add edi, 40 |
mov eax, 1 ;sse context |
stosd |
xor eax, eax ;reserved dword |
stosd |
mov ecx, 288/4 |
rep movsd ;copy sse context |
.ret: |
sti |
ret |
/kernel/branches/Kolibri-acpi/core/dll.inc |
---|
123,13 → 123,13 |
stdcall strncmp, edx, [sz_name], 16 |
test eax, eax |
je .ok |
mov eax, edx |
je .nothing |
mov edx, [edx+SRV.fd] |
jmp @B |
.not_load: |
mov eax, [sz_name] |
; Try to load .dll driver first. If not, fallback to .obj. |
push edi |
sub esp, 36 |
mov edi, esp |
150,12 → 150,6 |
stdcall load_pe_driver, edi, 0 |
add esp, 36 |
pop edi |
test eax, eax |
jnz .nothing |
pop ebp |
jmp load_driver |
.ok: |
mov eax, edx |
.nothing: |
ret |
endp |
794,177 → 788,6 |
ret |
endp |
align 4 |
proc load_driver stdcall, driver_name:dword |
locals |
coff dd ? |
sym dd ? |
strings dd ? |
img_size dd ? |
img_base dd ? |
start dd ? |
file_name rb 13+16+4+1 ; '/sys/drivers/<up-to-16-chars>.obj' |
endl |
lea edx, [file_name] |
mov dword [edx], '/sys' |
mov dword [edx+4], '/dri' |
mov dword [edx+8], 'vers' |
mov byte [edx+12], '/' |
mov esi, [driver_name] |
.redo: |
lea edx, [file_name] |
lea edi, [edx+13] |
mov ecx, 16 |
@@: |
lodsb |
test al, al |
jz @f |
stosb |
loop @b |
@@: |
mov dword [edi], '.obj' |
mov byte [edi+4], 0 |
stdcall load_file, edx |
test eax, eax |
jz .exit |
mov [coff], eax |
movzx ecx, [eax+COFF_HEADER.nSections] |
xor ebx, ebx |
lea edx, [eax+20] |
@@: |
add ebx, [edx+COFF_SECTION.SizeOfRawData] |
add ebx, 15 |
and ebx, not 15 |
add edx, sizeof.COFF_SECTION |
dec ecx |
jnz @B |
mov [img_size], ebx |
stdcall kernel_alloc, ebx |
test eax, eax |
jz .fail |
mov [img_base], eax |
mov edi, eax |
xor eax, eax |
mov ecx, [img_size] |
add ecx, 4095 |
and ecx, not 4095 |
shr ecx, 2 |
cld |
rep stosd |
mov edx, [coff] |
movzx ebx, [edx+COFF_HEADER.nSections] |
mov edi, [img_base] |
lea eax, [edx+20] |
@@: |
mov [eax+COFF_SECTION.VirtualAddress], edi |
mov esi, [eax+COFF_SECTION.PtrRawData] |
test esi, esi |
jnz .copy |
add edi, [eax+COFF_SECTION.SizeOfRawData] |
jmp .next |
.copy: |
add esi, edx |
mov ecx, [eax+COFF_SECTION.SizeOfRawData] |
cld |
rep movsb |
.next: |
add edi, 15 |
and edi, not 15 |
add eax, sizeof.COFF_SECTION |
dec ebx |
jnz @B |
mov ebx, [edx+COFF_HEADER.pSymTable] |
add ebx, edx |
mov [sym], ebx |
mov ecx, [edx+COFF_HEADER.nSymbols] |
add ecx, ecx |
lea ecx, [ecx+ecx*8];ecx*=18 = nSymbols*CSYM_SIZE |
add ecx, [sym] |
mov [strings], ecx |
lea eax, [edx+20] |
stdcall fix_coff_symbols, eax, [sym], [edx+COFF_HEADER.nSymbols], \ |
[strings], __exports |
test eax, eax |
jz .link_fail |
mov ebx, [coff] |
stdcall fix_coff_relocs, ebx, [sym], 0 |
stdcall get_coff_sym, [sym], [ebx+COFF_HEADER.nSymbols], szVersion |
test eax, eax |
jz .link_fail |
mov eax, [eax] |
shr eax, 16 |
cmp eax, DRV_COMPAT |
jb .ver_fail |
cmp eax, DRV_CURRENT |
ja .ver_fail |
mov ebx, [coff] |
stdcall get_coff_sym, [sym], [ebx+COFF_HEADER.nSymbols], szSTART |
mov [start], eax |
stdcall kernel_free, [coff] |
mov ebx, [start] |
stdcall ebx, DRV_ENTRY |
test eax, eax |
jnz .ok |
stdcall kernel_free, [img_base] |
xor eax, eax |
ret |
.ok: |
mov ebx, [img_base] |
mov [eax+SRV.base], ebx |
mov ecx, [start] |
mov [eax+SRV.entry], ecx |
ret |
.ver_fail: |
mov esi, msg_CR |
call sys_msg_board_str |
mov esi, [driver_name] |
call sys_msg_board_str |
mov esi, msg_CR |
call sys_msg_board_str |
mov esi, msg_version |
call sys_msg_board_str |
mov esi, msg_www |
call sys_msg_board_str |
jmp .cleanup |
.link_fail: |
mov esi, msg_module |
call sys_msg_board_str |
mov esi, [driver_name] |
call sys_msg_board_str |
mov esi, msg_CR |
call sys_msg_board_str |
.cleanup: |
stdcall kernel_free, [img_base] |
.fail: |
stdcall kernel_free, [coff] |
.exit: |
xor eax, eax |
ret |
endp |
; in: edx -> COFF_SECTION struct |
; out: eax = alignment as mask for bits to drop |
coff_get_align: |
1009,10 → 832,9 |
; ignore timestamp |
cli |
mov esi, [CURRENT_TASK] |
shl esi, 8 |
mov esi, [current_process] |
lea edi, [fullname] |
mov ebx, [esi+SLOT_BASE+APPDATA.dlls_list_ptr] |
mov ebx, [esi+PROC.dlls_list_ptr] |
test ebx, ebx |
jz .not_in_process |
mov esi, [ebx+HDLL.fd] |
1372,28 → 1194,21 |
; out: eax = APPDATA.dlls_list_ptr if all is OK, |
; NULL if memory allocation failed |
init_dlls_in_thread: |
mov ebx, [current_slot] |
mov eax, [ebx+APPDATA.dlls_list_ptr] |
mov ebx, [current_process] |
mov eax, [ebx+PROC.dlls_list_ptr] |
test eax, eax |
jnz .ret |
push [ebx+APPDATA.dir_table] |
mov eax, 8 |
call malloc |
pop edx |
call malloc ; FIXME |
test eax, eax |
jz .ret |
mov [eax], eax |
mov [eax+4], eax |
mov ecx, [TASK_COUNT] |
mov ebx, SLOT_BASE+256 |
.set: |
cmp [ebx+APPDATA.dir_table], edx |
jnz @f |
mov [ebx+APPDATA.dlls_list_ptr], eax |
@@: |
add ebx, 256 |
dec ecx |
jnz .set |
mov ebx, [current_process] |
mov [ebx+PROC.dlls_list_ptr], eax |
.ret: |
ret |
1414,60 → 1229,11 |
destroy_hdll: |
push ebx ecx esi edi |
push eax |
mov ebx, [eax+HDLL.base] |
mov esi, [eax+HDLL.parent] |
mov edx, [esi+DLLDESCR.size] |
; The following actions require the context of application where HDLL is mapped. |
; However, destroy_hdll can be called in the context of OS thread when |
; cleaning up objects created by the application which is destroyed. |
; So remember current cr3 and set it to page table of target. |
mov eax, [ecx+APPDATA.dir_table] |
; Because we cheat with cr3, disable interrupts: task switch would restore |
; page table from APPDATA of current thread. |
; Also set [current_slot] because it is used by user_free. |
pushf |
cli |
push [current_slot] |
mov [current_slot], ecx |
mov ecx, cr3 |
push ecx |
mov cr3, eax |
push ebx ; argument for user_free |
mov eax, ebx |
shr ebx, 12 |
push ebx |
mov esi, [esi+DLLDESCR.data] |
shr esi, 12 |
.unmap_loop: |
push eax |
mov eax, 2 |
xchg eax, [page_tabs+ebx*4] |
mov ecx, [page_tabs+esi*4] |
and eax, not 0xFFF |
and ecx, not 0xFFF |
cmp eax, ecx |
jz @f |
call free_page |
@@: |
pop eax |
invlpg [eax] |
add eax, 0x1000 |
inc ebx |
inc esi |
sub edx, 0x1000 |
ja .unmap_loop |
pop ebx |
and dword [page_tabs+(ebx-1)*4], not DONT_FREE_BLOCK |
call user_free |
; Restore context. |
pop eax |
mov cr3, eax |
pop [current_slot] |
popf |
; Ok, cheating is done. |
pop eax |
push eax |
mov esi, [eax+HDLL.parent] |
mov eax, [eax+HDLL.refcount] |
call dereference_dll |
/kernel/branches/Kolibri-acpi/core/exports.inc |
---|
7,11 → 7,6 |
$Revision$ |
iglobal |
szKernel db 'KERNEL', 0 |
szVersion db 'version',0 |
endg |
align 4 |
__exports: |
export 'KERNEL', \ |
48,6 → 43,7 |
get_phys_addr, 'GetPhysAddr', \ ; eax |
map_space, 'MapSpace', \ |
release_pages, 'ReleasePages', \ |
alloc_dma24, 'AllocDMA24', \ ; stdcall |
\ |
mutex_init, 'MutexInit', \ ; gcc fastcall |
mutex_lock, 'MutexLock', \ ; gcc fastcall |
94,6 → 90,7 |
load_cursor, 'LoadCursor', \ ;stdcall |
\ |
get_curr_task, 'GetCurrentTask', \ |
change_task, 'ChangeTask', \ |
load_file, 'LoadFile', \ ;retval eax, ebx |
delay_ms, 'Sleep', \ |
\ |
105,6 → 102,7 |
strrchr, 'strrchr', \ |
\ |
timer_hs, 'TimerHS', \ |
timer_hs, 'TimerHs', \ ; shit happens |
cancel_timer_hs, 'CancelTimerHS', \ |
\ |
reg_usb_driver, 'RegUSBDriver', \ |
121,6 → 119,8 |
NET_link_changed, 'NetLinkChanged', \ |
ETH_input, 'Eth_input', \ |
\ |
get_pcidev_list, 'GetPCIList', \ |
\ |
0, 'LFBAddress' ; must be the last one |
load kernel_exports_count dword from __exports + 24 |
load kernel_exports_addresses dword from __exports + 28 |
/kernel/branches/Kolibri-acpi/core/heap.inc |
---|
129,15 → 129,11 |
loop @B |
stdcall alloc_pages, dword 32 |
or eax, PG_SW |
mov ebx, HEAP_BASE |
mov ecx, 32 |
mov edx, eax |
mov edi, HEAP_BASE |
.l1: |
stdcall map_page, edi, edx, PG_SW |
add edi, 0x1000 |
add edx, 0x1000 |
dec ecx |
jnz .l1 |
call commit_pages |
mov edi, HEAP_BASE ;descriptors |
mov ebx, HEAP_BASE+sizeof.MEM_BLOCK ;free space |
480,46 → 476,39 |
mov [pages_count], ebx |
stdcall alloc_kernel_space, eax |
mov [lin_addr], eax |
mov ebx, [pages_count] |
test eax, eax |
jz .err |
mov [lin_addr], eax |
mov ecx, [pages_count] |
mov edx, eax |
mov ebx, ecx |
shr ecx, 3 |
jz .next |
shr ebx, 3 |
jz .tail |
and ebx, not 7 |
push ebx |
shl ebx, 3 |
stdcall alloc_pages, ebx |
pop ecx ; yes ecx!!! |
and eax, eax |
test eax, eax |
jz .err |
mov edi, eax |
mov edx, [lin_addr] |
@@: |
stdcall map_page, edx, edi, dword PG_SW |
add edx, 0x1000 |
add edi, 0x1000 |
dec ecx |
jnz @B |
.next: |
mov ecx, [pages_count] |
and ecx, 7 |
mov ecx, ebx |
or eax, PG_SW |
mov ebx, [lin_addr] |
call commit_pages |
mov edx, ebx ; this dirty hack |
.tail: |
mov ebx, [pages_count] |
and ebx, 7 |
jz .end |
@@: |
push ecx |
call alloc_page |
pop ecx |
test eax, eax |
jz .err |
stdcall map_page, edx, eax, dword PG_SW |
add edx, 0x1000 |
dec ecx |
dec ebx |
jnz @B |
.end: |
mov eax, [lin_addr] |
569,7 → 558,7 |
restore block_size |
restore block_flags |
;;;;;;;;;;;;;; USER ;;;;;;;;;;;;;;;;; |
;;;;;;;;;;;;;; USER HEAP ;;;;;;;;;;;;;;;;; |
HEAP_TOP equ 0x80000000 |
576,26 → 565,29 |
align 4 |
proc init_heap |
mov ebx, [current_slot] |
mov eax, [ebx+APPDATA.heap_top] |
mov ebx, [current_process] |
mov eax, [ebx+PROC.heap_top] |
test eax, eax |
jz @F |
sub eax, [ebx+APPDATA.heap_base] |
sub eax, 4096 |
sub eax, [ebx+PROC.heap_base] |
sub eax, PAGE_SIZE |
ret |
@@: |
mov esi, [ebx+APPDATA.mem_size] |
lea ecx, [ebx+PROC.heap_lock] |
call mutex_init |
mov esi, [ebx+PROC.mem_used] |
add esi, 4095 |
and esi, not 4095 |
mov [ebx+APPDATA.mem_size], esi |
mov [ebx+PROC.mem_used], esi |
mov eax, HEAP_TOP |
mov [ebx+APPDATA.heap_base], esi |
mov [ebx+APPDATA.heap_top], eax |
mov [ebx+PROC.heap_base], esi |
mov [ebx+PROC.heap_top], eax |
sub eax, esi |
shr esi, 10 |
mov ecx, eax |
sub eax, 4096 |
sub eax, PAGE_SIZE |
or ecx, FREE_BLOCK |
mov [page_tabs+esi], ecx |
ret |
608,25 → 600,28 |
push esi |
push edi |
mov ebx, [current_process] |
lea ecx, [ebx+PROC.heap_lock] |
call mutex_lock |
mov ecx, [alloc_size] |
add ecx, (4095+4096) |
add ecx, (4095+PAGE_SIZE) |
and ecx, not 4095 |
mov ebx, [current_slot] |
mov esi, dword [ebx+APPDATA.heap_base] ; heap_base |
mov edi, dword [ebx+APPDATA.heap_top] ; heap_top |
l_0: |
mov esi, dword [ebx+PROC.heap_base] ; heap_base |
mov edi, dword [ebx+PROC.heap_top] ; heap_top |
.scan: |
cmp esi, edi |
jae m_exit |
jae .m_exit |
mov ebx, esi |
shr ebx, 12 |
mov eax, [page_tabs+ebx*4] |
test al, FREE_BLOCK |
jz test_used |
jz .test_used |
and eax, 0xFFFFF000 |
cmp eax, ecx ;alloc_size |
jb m_next |
jb .m_next |
jz @f |
lea edx, [esi+ecx] |
648,13 → 643,15 |
jnz @B |
.no: |
mov edx, [current_slot] |
mov edx, [current_process] |
mov ebx, [alloc_size] |
add ebx, 0xFFF |
and ebx, not 0xFFF |
add ebx, [edx+APPDATA.mem_size] |
call update_mem_size |
add [edx+PROC.mem_used], ebx |
lea ecx, [edx+PROC.heap_lock] |
call mutex_unlock |
lea eax, [esi+4096] |
pop edi |
661,15 → 658,19 |
pop esi |
pop ebx |
ret |
test_used: |
.test_used: |
test al, USED_BLOCK |
jz m_exit |
jz .m_exit |
and eax, 0xFFFFF000 |
m_next: |
.m_next: |
add esi, eax |
jmp l_0 |
m_exit: |
jmp .scan |
.m_exit: |
mov ecx, [current_process] |
lea ecx, [ecx+PROC.heap_lock] |
call mutex_unlock |
xor eax, eax |
pop edi |
pop esi |
684,14 → 685,17 |
push esi |
push edi |
mov ebx, [current_slot] |
mov ebx, [current_process] |
lea ecx, [ebx+PROC.heap_lock] |
call mutex_lock |
mov edx, [address] |
and edx, not 0xFFF |
mov [address], edx |
sub edx, 0x1000 |
jb .error |
mov esi, [ebx+APPDATA.heap_base] |
mov edi, [ebx+APPDATA.heap_top] |
mov esi, [ebx+PROC.heap_base] |
mov edi, [ebx+PROC.heap_top] |
cmp edx, esi |
jb .error |
.scan: |
708,6 → 712,10 |
mov esi, ecx |
jmp .scan |
.error: |
mov ecx, [current_process] |
lea ecx, [ecx+PROC.heap_lock] |
call mutex_unlock |
xor eax, eax |
pop edi |
pop esi |
759,14 → 767,15 |
mov [page_tabs+ebx*4], ecx |
.nothird: |
mov edx, [current_slot] |
mov edx, [current_process] |
mov ebx, [alloc_size] |
add ebx, 0xFFF |
and ebx, not 0xFFF |
add ebx, [edx+APPDATA.mem_size] |
call update_mem_size |
add [edx+PROC.mem_used], ebx |
lea ecx, [edx+PROC.heap_lock] |
call mutex_unlock |
mov eax, [address] |
pop edi |
782,10 → 791,14 |
mov esi, [base] |
test esi, esi |
jz .exit |
jz .fail |
push ebx |
mov ebx, [current_process] |
lea ecx, [ebx+PROC.heap_lock] |
call mutex_lock |
xor ebx, ebx |
shr esi, 12 |
mov eax, [page_tabs+(esi-1)*4] |
821,25 → 834,30 |
.released: |
push edi |
mov edx, [current_slot] |
mov esi, dword [edx+APPDATA.heap_base] |
mov edi, dword [edx+APPDATA.heap_top] |
sub ebx, [edx+APPDATA.mem_size] |
mov edx, [current_process] |
lea ecx, [edx+PROC.heap_lock] |
mov esi, dword [edx+PROC.heap_base] |
mov edi, dword [edx+PROC.heap_top] |
sub ebx, [edx+PROC.mem_used] |
neg ebx |
call update_mem_size |
mov [edx+PROC.mem_used], ebx |
call user_normalize |
pop edi |
pop ebx |
pop esi |
ret |
.exit: |
call mutex_unlock |
xor eax, eax |
inc eax |
pop ebx |
pop esi |
ret |
.cantfree: |
mov ecx, [current_process] |
lea ecx, [ecx+PROC.heap_lock] |
jmp .exit |
.fail: |
xor eax, eax |
pop ebx |
pop esi |
ret |
endp |
968,6 → 986,13 |
ret |
@@: |
push ecx edx |
push eax |
mov ecx, [current_process] |
lea ecx, [ecx+PROC.heap_lock] |
call mutex_lock |
pop eax |
lea ecx, [eax - 0x1000] |
shr ecx, 12 |
mov edx, [page_tabs+ecx*4] |
975,6 → 1000,10 |
jnz @f |
; attempt to realloc invalid pointer |
.ret0: |
mov ecx, [current_process] |
lea ecx, [ecx+PROC.heap_lock] |
call mutex_unlock |
pop edx ecx |
xor eax, eax |
ret |
1009,16 → 1038,16 |
jnz .nofreeall |
mov eax, [page_tabs+ecx*4] |
and eax, not 0xFFF |
mov edx, [current_slot] |
mov ebx, [APPDATA.mem_size+edx] |
mov edx, [current_process] |
mov ebx, [edx+PROC.mem_used] |
sub ebx, eax |
add ebx, 0x1000 |
or al, FREE_BLOCK |
mov [page_tabs+ecx*4], eax |
push esi edi |
mov esi, [APPDATA.heap_base+edx] |
mov edi, [APPDATA.heap_top+edx] |
call update_mem_size |
mov esi, [edx+PROC.heap_base] |
mov edi, [edx+PROC.heap_top] |
mov [edx+PROC.mem_used], ebx |
call user_normalize |
pop edi esi |
jmp .ret0 ; all freed |
1030,11 → 1059,11 |
shr ebx, 12 |
sub ebx, edx |
push ebx ecx edx |
mov edx, [current_slot] |
mov edx, [current_process] |
shl ebx, 12 |
sub ebx, [APPDATA.mem_size+edx] |
sub ebx, [edx+PROC.mem_used] |
neg ebx |
call update_mem_size |
mov [edx+PROC.mem_used], ebx |
pop edx ecx ebx |
lea eax, [ecx+1] |
shl eax, 12 |
1044,8 → 1073,8 |
shl ebx, 12 |
jz .ret |
push esi |
mov esi, [current_slot] |
mov esi, [APPDATA.heap_top+esi] |
mov esi, [current_process] |
mov esi, [esi+PROC.heap_top] |
shr esi, 12 |
@@: |
cmp edx, esi |
1064,12 → 1093,16 |
or ebx, FREE_BLOCK |
mov [page_tabs+ecx*4], ebx |
.ret: |
mov ecx, [current_process] |
lea ecx, [ecx+PROC.heap_lock] |
call mutex_unlock |
pop eax edx ecx |
ret |
.realloc_add: |
; get some additional memory |
mov eax, [current_slot] |
mov eax, [APPDATA.heap_top+eax] |
mov eax, [current_process] |
mov eax, [eax+PROC.heap_top] |
shr eax, 12 |
cmp edx, eax |
jae .cant_inplace |
1101,17 → 1134,21 |
cld |
rep stosd |
pop edi |
mov edx, [current_slot] |
mov edx, [current_process] |
shl ebx, 12 |
add ebx, [APPDATA.mem_size+edx] |
call update_mem_size |
add [edx+PROC.mem_used], ebx |
mov ecx, [current_process] |
lea ecx, [ecx+PROC.heap_lock] |
call mutex_unlock |
pop eax edx ecx |
ret |
.cant_inplace: |
push esi edi |
mov eax, [current_slot] |
mov esi, [APPDATA.heap_base+eax] |
mov edi, [APPDATA.heap_top+eax] |
mov eax, [current_process] |
mov esi, [eax+PROC.heap_base] |
mov edi, [eax+PROC.heap_top] |
shr esi, 12 |
shr edi, 12 |
sub ebx, ecx |
1174,10 → 1211,9 |
jnz @b |
.no: |
push ebx |
mov edx, [current_slot] |
mov edx, [current_process] |
shl ebx, 12 |
add ebx, [APPDATA.mem_size+edx] |
call update_mem_size |
add [edx+PROC.mem_used], ebx |
pop ebx |
@@: |
mov dword [page_tabs+esi*4], 2 |
1184,50 → 1220,18 |
inc esi |
dec ebx |
jnz @b |
mov ecx, [current_process] |
lea ecx, [ecx+PROC.heap_lock] |
call mutex_unlock |
pop eax edi esi edx ecx |
ret |
if 0 |
align 4 |
proc alloc_dll |
pushf |
cli |
bsf eax, [dll_map] |
jnz .find |
popf |
xor eax, eax |
ret |
.find: |
btr [dll_map], eax |
popf |
shl eax, 5 |
add eax, dll_tab |
ret |
endp |
align 4 |
proc alloc_service |
pushf |
cli |
bsf eax, [srv_map] |
jnz .find |
popf |
xor eax, eax |
ret |
.find: |
btr [srv_map], eax |
popf |
shl eax, 0x02 |
lea eax, [srv_tab+eax+eax*8] ;srv_tab+eax*36 |
ret |
endp |
end if |
;;;;;;;;;;;;;; SHARED MEMORY ;;;;;;;;;;;;;;;;; |
;;;;;;;;;;;;;; SHARED ;;;;;;;;;;;;;;;;; |
; param |
; eax= shm_map object |
/kernel/branches/Kolibri-acpi/core/irq.inc |
---|
1,10 → 1,13 |
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; |
;; ;; |
;; Copyright (C) KolibriOS team 2004-2012. All rights reserved. ;; |
;; Copyright (C) KolibriOS team 2004-2014. All rights reserved. ;; |
;; Distributed under terms of the GNU General Public License ;; |
;; ;; |
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; |
$Revision: 4850 $ |
IRQ_RESERVED equ 24 |
IRQ_POOL_SIZE equ 48 |
/kernel/branches/Kolibri-acpi/core/memory.inc |
---|
123,19 → 123,19 |
endp |
align 4 |
proc map_page stdcall,lin_addr:dword,phis_addr:dword,flags:dword |
;proc map_page stdcall,lin_addr:dword,phis_addr:dword,flags:dword |
map_page: |
push ebx |
mov eax, [phis_addr] |
mov eax, [esp+12] ; phis_addr |
and eax, not 0xFFF |
or eax, [flags] |
mov ebx, [lin_addr] |
or eax, [esp+16] ; flags |
mov ebx, [esp+8] ; lin_addr |
shr ebx, 12 |
mov [page_tabs+ebx*4], eax |
mov eax, [lin_addr] |
mov eax, [esp+8] ; lin_addr |
pop ebx |
invlpg [eax] |
pop ebx |
ret |
endp |
ret 12 |
align 4 |
map_space: ;not implemented |
350,9 → 350,64 |
ret |
endp |
uglobal |
sb16_buffer_allocated db 0 |
endg |
; Allocates [.size] bytes so that the target memory block |
; is inside one 64K page for 24-bit DMA controller, |
; that is, somewhere between 00xx0000h and 00xxFFFFh. |
proc alloc_dma24 |
; Implementation note. |
; The only user of that function is SB16 driver, |
; so just return a statically allocated buffer. |
virtual at esp |
dd ? ; return address |
.size dd ? |
end virtual |
cmp [sb16_buffer_allocated], 0 |
jnz .fail |
inc [sb16_buffer_allocated] |
mov eax, SB16Buffer |
ret 4 |
.fail: |
xor eax, eax |
ret 4 |
endp |
; Allocates a physical page for master page table |
; that duplicates first Mb of OS_BASE at address 0; |
; used for starting APs and for shutting down, |
; where it is important to execute code in trivial-mapped pages. |
; Returns eax = allocated physical page. |
proc create_trampoline_pgmap |
; The only non-trivial moment: |
; we need a linear address to fill information, |
; but we don't need it outside of this function, |
; so we're returning physical address. |
; Therefore, allocate memory with kernel_alloc, |
; this will allocate physical page and a linear address somewhere, |
; and deallocate only linear address with free_kernel_space. |
stdcall kernel_alloc, 0x1000 |
mov edi, eax |
mov esi, master_tab |
mov ecx, 1024 |
rep movsd |
mov ecx, [master_tab+(OS_BASE shr 20)] |
mov [eax], ecx |
mov edi, eax |
call get_pg_addr |
push eax |
stdcall free_kernel_space, edi |
pop eax |
ret |
endp |
align 4 |
init_LFB: |
xchg bx, bx |
proc init_LFB |
locals |
pg_count dd ? |
endl |
cmp dword [LFBAddress], -1 |
jne @f |
381,33 → 436,61 |
@@: |
call init_mtrr |
xor edx, edx |
mov eax, [LFBAddress] |
mov edx, LFB_BASE |
mov esi, [LFBAddress] |
mov edi, 0x00C00000 |
mov dword [exp_lfb+4], edx |
shr edi, 12 |
mov [pg_count], edi |
shr edi, 10 |
bt [cpu_caps], CAPS_PSE |
jnc .map_page_tables |
or esi, PG_LARGE+PG_UW |
mov edx, sys_proc+PROC.pdt_0+(LFB_BASE shr 20) |
@@: |
mov [edx], esi |
add edx, 4 |
add esi, 0x00400000 |
dec edi |
jnz @B |
bt [cpu_caps], CAPS_PGE |
setc dh ;eliminate branch and |
mov ecx, LFB_SIZE/4096 |
mov edi, lfb_pd_0 |
lea eax, [eax+edx+PG_UW] ;set PG_GLOBAL if supported |
jnc @F |
or dword [sys_proc+PROC.pdt_0+(LFB_BASE shr 20)], PG_GLOBAL |
@@: |
mov dword [LFBAddress], LFB_BASE |
mov eax, cr3 ;flush TLB |
mov cr3, eax |
ret |
.map_pte: |
stosd |
add eax, 0x1000 |
loop .map_pte |
.map_page_tables: |
mov ecx, (LFB_SIZE/4096)/1024 |
mov edi, sys_pgdir+(LFB_BASE shr 20) |
lea eax, [(lfb_pd_0-OS_BASE)+PG_UW] |
@@: |
call alloc_page |
stdcall map_page_table, edx, eax |
add edx, 0x00400000 |
dec edi |
jnz @B |
.map_pde: |
mov eax, [LFBAddress] |
mov edi, page_tabs + (LFB_BASE shr 10) |
or eax, PG_UW |
mov ecx, [pg_count] |
cld |
@@: |
stosd |
add eax, 0x1000 |
loop .map_pde |
dec ecx |
jnz @B |
mov dword [exp_lfb+4], LFB_BASE |
mov dword [LFBAddress], LFB_BASE |
mov eax, cr3 ;flush TLB |
mov cr3, eax |
ret |
endp |
align 4 |
proc new_mem_resize stdcall, new_size:dword |
417,7 → 500,9 |
push edi |
mov edx, [current_slot] |
cmp [edx+APPDATA.heap_base], 0 |
mov ebx, [edx+APPDATA.process] |
cmp [ebx+PROC.heap_base], 0 |
jne .exit |
mov edi, [new_size] |
425,7 → 510,7 |
and edi, not 4095 |
mov [new_size], edi |
mov esi, [edx+APPDATA.mem_size] |
mov esi, [ebx+PROC.mem_used] |
add esi, 4095 |
and esi, not 4095 |
460,7 → 545,8 |
.update_size: |
mov edx, [current_slot] |
mov ebx, [new_size] |
call update_mem_size |
mov edx, [edx+APPDATA.process] |
mov [edx+PROC.mem_used], ebx |
.exit: |
pop edi |
pop esi |
536,38 → 622,6 |
endp |
align 4 |
update_mem_size: |
; in: edx = slot base |
; ebx = new memory size |
; destroys eax,ecx,edx |
mov [APPDATA.mem_size+edx], ebx |
;search threads and update |
;application memory size infomation |
mov ecx, [APPDATA.dir_table+edx] |
mov eax, 2 |
.search_threads: |
;eax = current slot |
;ebx = new memory size |
;ecx = page directory |
cmp eax, [TASK_COUNT] |
jg .search_threads_end |
mov edx, eax |
shl edx, 5 |
cmp word [CURRENT_TASK+edx+TASKDATA.state], 9 ;if slot empty? |
jz .search_threads_next |
shl edx, 3 |
cmp [SLOT_BASE+edx+APPDATA.dir_table], ecx ;if it is our thread? |
jnz .search_threads_next |
mov [SLOT_BASE+edx+APPDATA.mem_size], ebx ;update memory size |
.search_threads_next: |
inc eax |
jmp .search_threads |
.search_threads_end: |
ret |
; param |
; eax= linear address |
; |
624,11 → 678,6 |
pop ebx ;restore exception number (#PF) |
ret |
; xchg bx, bx |
; add esp,12 ;clear in stack: locals(.err_addr) + #PF + ret_to_caller |
; restore_ring3_context |
; iretd |
.user_space: |
test eax, PG_MAP |
jnz .err_access ;Страница присутствует |
668,9 → 717,8 |
; access denied? this may be a result of copy-on-write protection for DLL |
; check list of HDLLs |
and ebx, not 0xFFF |
mov eax, [CURRENT_TASK] |
shl eax, 8 |
mov eax, [SLOT_BASE+eax+APPDATA.dlls_list_ptr] |
mov eax, [current_process] |
mov eax, [eax+PROC.dlls_list_ptr] |
test eax, eax |
jz .fail |
mov esi, [eax+HDLL.fd] |
746,35 → 794,31 |
endp |
; returns number of mapped bytes |
proc map_mem stdcall, lin_addr:dword,slot:dword,\ |
proc map_mem_ipc stdcall, lin_addr:dword,slot:dword,\ |
ofs:dword,buf_size:dword,req_access:dword |
push 0 ; initialize number of mapped bytes |
locals |
count dd ? |
process dd ? |
endl |
mov [count], 0 |
cmp [buf_size], 0 |
jz .exit |
mov eax, [slot] |
shl eax, 8 |
mov eax, [SLOT_BASE+eax+APPDATA.dir_table] |
and eax, 0xFFFFF000 |
mov eax, [SLOT_BASE+eax+APPDATA.process] |
test eax, eax |
jz .exit |
stdcall map_page, [ipc_pdir], eax, PG_UW |
mov [process], eax |
mov ebx, [ofs] |
shr ebx, 22 |
mov esi, [ipc_pdir] |
mov edi, [ipc_ptab] |
mov eax, [esi+ebx*4] |
mov eax, [eax+PROC.pdt_0+ebx*4] ;get page table |
mov esi, [ipc_ptab] |
and eax, 0xFFFFF000 |
jz .exit |
stdcall map_page, edi, eax, PG_UW |
; inc ebx |
; add edi, 0x1000 |
; mov eax, [esi+ebx*4] |
; test eax, eax |
; jz @f |
; and eax, 0xFFFFF000 |
; stdcall map_page, edi, eax |
stdcall map_page, esi, eax, PG_SW |
@@: |
mov edi, [lin_addr] |
and edi, 0xFFFFF000 |
781,61 → 825,63 |
mov ecx, [buf_size] |
add ecx, 4095 |
shr ecx, 12 |
inc ecx |
inc ecx ; ??????????? |
mov edx, [ofs] |
shr edx, 12 |
and edx, 0x3FF |
mov esi, [ipc_ptab] |
.map: |
stdcall safe_map_page, [slot], [req_access], [ofs] |
jnc .exit |
add dword [ebp-4], 4096 |
add [ofs], 4096 |
add [count], PAGE_SIZE |
add [ofs], PAGE_SIZE |
dec ecx |
jz .exit |
add edi, 0x1000 |
add edi, PAGE_SIZE |
inc edx |
cmp edx, 0x400 |
cmp edx, 1024 |
jnz .map |
inc ebx |
mov eax, [ipc_pdir] |
mov eax, [eax+ebx*4] |
mov eax, [process] |
mov eax, [eax+PROC.pdt_0+ebx*4] |
and eax, 0xFFFFF000 |
jz .exit |
stdcall map_page, esi, eax, PG_UW |
stdcall map_page, esi, eax, PG_SW |
xor edx, edx |
jmp .map |
.exit: |
pop eax |
mov eax, [count] |
ret |
endp |
proc map_memEx stdcall, lin_addr:dword,slot:dword,\ |
ofs:dword,buf_size:dword,req_access:dword |
push 0 ; initialize number of mapped bytes |
locals |
count dd ? |
process dd ? |
endl |
mov [count], 0 |
cmp [buf_size], 0 |
jz .exit |
mov eax, [slot] |
shl eax, 8 |
mov eax, [SLOT_BASE+eax+APPDATA.dir_table] |
and eax, 0xFFFFF000 |
mov eax, [SLOT_BASE+eax+APPDATA.process] |
test eax, eax |
jz .exit |
stdcall map_page, [proc_mem_pdir], eax, PG_UW |
mov [process], eax |
mov ebx, [ofs] |
shr ebx, 22 |
mov esi, [proc_mem_pdir] |
mov edi, [proc_mem_tab] |
mov eax, [esi+ebx*4] |
mov eax, [eax+PROC.pdt_0+ebx*4] ;get page table |
mov esi, [proc_mem_tab] |
and eax, 0xFFFFF000 |
test eax, eax |
jz .exit |
stdcall map_page, edi, eax, PG_UW |
stdcall map_page, esi, eax, PG_SW |
@@: |
mov edi, [lin_addr] |
and edi, 0xFFFFF000 |
842,24 → 888,35 |
mov ecx, [buf_size] |
add ecx, 4095 |
shr ecx, 12 |
inc ecx |
inc ecx ; ??????????? |
mov edx, [ofs] |
shr edx, 12 |
and edx, 0x3FF |
mov esi, [proc_mem_tab] |
.map: |
stdcall safe_map_page, [slot], [req_access], [ofs] |
jnc .exit |
add dword [ebp-4], 0x1000 |
add edi, 0x1000 |
add [ofs], 0x1000 |
add [count], PAGE_SIZE |
add [ofs], PAGE_SIZE |
dec ecx |
jz .exit |
add edi, PAGE_SIZE |
inc edx |
dec ecx |
cmp edx, 1024 |
jnz .map |
inc ebx |
mov eax, [process] |
mov eax, [eax+PROC.pdt_0+ebx*4] |
and eax, 0xFFFFF000 |
jz .exit |
stdcall map_page, esi, eax, PG_SW |
xor edx, edx |
jmp .map |
.exit: |
pop eax |
mov eax, [count] |
ret |
endp |
905,7 → 962,8 |
push ebx ecx |
mov eax, [slot] |
shl eax, 8 |
mov eax, [SLOT_BASE+eax+APPDATA.dlls_list_ptr] |
mov eax, [SLOT_BASE+eax+APPDATA.process] |
mov eax, [eax+PROC.dlls_list_ptr] |
test eax, eax |
jz .no_hdll |
mov ecx, [eax+HDLL.fd] |
992,29 → 1050,6 |
mov [esp+32], eax |
ret |
;align 4 |
;proc set_ipc_buff |
; mov eax,[current_slot] |
; pushf |
; cli |
; mov [eax+APPDATA.ipc_start],ebx ;set fields in extended information area |
; mov [eax+APPDATA.ipc_size],ecx |
; |
; add ecx, ebx |
; add ecx, 4095 |
; and ecx, not 4095 |
; |
;.touch: mov eax, [ebx] |
; add ebx, 0x1000 |
; cmp ebx, ecx |
; jb .touch |
; |
; popf |
; xor eax, eax |
; ret |
;endp |
proc sys_ipc_send stdcall, PID:dword, msg_addr:dword, msg_size:dword |
locals |
dst_slot dd ? |
1033,7 → 1068,7 |
mov [dst_slot], eax |
shl eax, 8 |
mov edi, [eax+SLOT_BASE+0xa0] ;is ipc area defined? |
mov edi, [eax+SLOT_BASE+APPDATA.ipc_start] ;is ipc area defined? |
test edi, edi |
jz .no_ipc_area |
1041,7 → 1076,7 |
and ebx, 0xFFF |
mov [dst_offset], ebx |
mov esi, [eax+SLOT_BASE+0xa4] |
mov esi, [eax+SLOT_BASE+APPDATA.ipc_size] |
mov [buf_size], esi |
mov ecx, [ipc_tmp] |
1054,7 → 1089,7 |
pop edi esi |
@@: |
mov [used_buf], ecx |
stdcall map_mem, ecx, [dst_slot], \ |
stdcall map_mem_ipc, ecx, [dst_slot], \ |
edi, esi, PG_SW |
mov edi, [dst_offset] |
1125,7 → 1160,7 |
.ret: |
mov eax, [used_buf] |
cmp eax, [ipc_tmp] |
jz @f |
je @f |
stdcall free_kernel_space, eax |
@@: |
pop eax |
1330,113 → 1365,6 |
endp |
align 4 |
proc init_mtrr |
cmp [BOOT_VARS+BOOT_MTRR], byte 2 |
je .exit |
bt [cpu_caps], CAPS_MTRR |
jnc .exit |
mov eax, cr0 |
or eax, 0x60000000 ;disable caching |
mov cr0, eax |
wbinvd ;invalidate cache |
mov ecx, 0x2FF |
rdmsr ; |
; has BIOS already initialized MTRRs? |
test ah, 8 |
jnz .skip_init |
; rarely needed, so mainly placeholder |
; main memory - cached |
push eax |
mov eax, [MEM_AMOUNT] |
; round eax up to next power of 2 |
dec eax |
bsr ecx, eax |
mov ebx, 2 |
shl ebx, cl |
dec ebx |
; base of memory range = 0, type of memory range = MEM_WB |
xor edx, edx |
mov eax, MEM_WB |
mov ecx, 0x200 |
wrmsr |
; mask of memory range = 0xFFFFFFFFF - (size - 1), ebx = size - 1 |
mov eax, 0xFFFFFFFF |
mov edx, 0x0000000F |
sub eax, ebx |
sbb edx, 0 |
or eax, 0x800 |
inc ecx |
wrmsr |
; clear unused MTRRs |
xor eax, eax |
xor edx, edx |
@@: |
inc ecx |
wrmsr |
cmp ecx, 0x20F |
jb @b |
; enable MTRRs |
pop eax |
or ah, 8 |
and al, 0xF0; default memtype = UC |
mov ecx, 0x2FF |
wrmsr |
.skip_init: |
stdcall set_mtrr, [LFBAddress], [LFBSize], MEM_WC |
wbinvd ;again invalidate |
mov eax, cr0 |
and eax, not 0x60000000 |
mov cr0, eax ; enable caching |
.exit: |
ret |
endp |
align 4 |
proc set_mtrr stdcall, base:dword,size:dword,mem_type:dword |
; find unused register |
mov ecx, 0x201 |
@@: |
rdmsr |
dec ecx |
test ah, 8 |
jz .found |
rdmsr |
mov al, 0; clear memory type field |
cmp eax, [base] |
jz .ret |
add ecx, 3 |
cmp ecx, 0x210 |
jb @b |
; no free registers, ignore the call |
.ret: |
ret |
.found: |
; found, write values |
xor edx, edx |
mov eax, [base] |
or eax, [mem_type] |
wrmsr |
mov ebx, [size] |
dec ebx |
mov eax, 0xFFFFFFFF |
mov edx, 0x00000000 |
sub eax, ebx |
sbb edx, 0 |
or eax, 0x800 |
inc ecx |
wrmsr |
ret |
endp |
align 4 |
proc create_ring_buffer stdcall, size:dword, flags:dword |
locals |
buf_ptr dd ? |
/kernel/branches/Kolibri-acpi/core/mtrr.inc |
---|
0,0 → 1,881 |
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; |
;; ;; |
;; Copyright (C) KolibriOS team 2004-2014. All rights reserved. ;; |
;; Distributed under terms of the GNU General Public License ;; |
;; ;; |
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; |
$Revision: 5130 $ |
; Initializes MTRRs. |
proc init_mtrr |
cmp [BOOT_VARS+BOOT_MTRR], byte 2 |
je .exit |
bt [cpu_caps], CAPS_MTRR |
jnc .exit |
call mtrr_reconfigure |
stdcall set_mtrr, [LFBAddress], 0x1000000, MEM_WC |
.exit: |
ret |
endp |
; Helper procedure for mtrr_reconfigure and set_mtrr, |
; called before changes in MTRRs. |
proc mtrr_begin_change |
mov eax, cr0 |
or eax, 0x60000000 ;disable caching |
mov cr0, eax |
wbinvd ;invalidate cache |
ret |
endp |
; Helper procedure for mtrr_reconfigure and set_mtrr, |
; called after changes in MTRRs. |
proc mtrr_end_change |
wbinvd ;again invalidate |
mov eax, cr0 |
and eax, not 0x60000000 |
mov cr0, eax ; enable caching |
ret |
endp |
; Some limits to number of structures located in the stack.
MAX_USEFUL_MTRRS = 16
MAX_RANGES = 16
; mtrr_reconfigure keeps a list of MEM_WB ranges.
; This structure describes one item in the list.
struct mtrr_range
next dd ? ; next item (0 = end of list)
start dq ? ; first byte (64-bit physical address)
length dq ? ; length in bytes
ends
uglobal
align 4
num_variable_mtrrs dd 0 ; number of variable-range MTRRs, low byte of MTRRCAP (MSR 0xFE)
endg
; Helper procedure for MTRR initialization. |
; Takes MTRRs configured by the BIOS and tries to reconfigure them
; in order to allow non-UC data at top of 4G memory. |
; Example: if low part of physical memory is 3.5G = 0xE0000000 bytes wide, |
; BIOS can configure two MTRRs so that the first MTRR describes [0, 4G) as WB |
; and the second MTRR describes [3.5G, 4G) as UC; |
; WB+UC=UC, so the resulting memory map would be as needed, |
; but in this configuration our attempts to map LFB at (say) 0xE8000000 as WC |
; would be ignored, WB+UC+WC is still UC. |
; So we must keep top of 4G memory not covered by MTRRs, |
; using three WB MTRRs [0,2G) + [2G,3G) + [3G,3.5G), |
; this gives the same memory map, but allows to add further entries. |
; See mtrrtest.asm for detailed input/output from real hardware+BIOS. |
; In:  variable-range MTRRs as programmed by the BIOS.
; Out: nothing; on success the variable-range MTRRs and MTRR_DEF_TYPE are
;      rewritten, on any inconsistency the MSRs are left untouched (.abort).
; Clobbers: eax, ebx, ecx, edx, esi, edi, flags; ebp is preserved.
proc mtrr_reconfigure
push ebp ; we're called from init_LFB, and it feels hurt when ebp is destroyed
; 1. Prepare local variables.
; 1a. Create list of MAX_RANGES free (aka not yet allocated) ranges.
; Each iteration pushes one mtrr_range whose .next field (the final push)
; points to the previously pushed item; eax = current list head, 0 at start.
xor eax, eax
lea ecx, [eax+MAX_RANGES]
.init_ranges:
sub esp, sizeof.mtrr_range - 4
push eax
mov eax, esp
dec ecx
jnz .init_ranges
mov eax, esp
; 1b. Fill individual local variables.
xor edx, edx
sub esp, MAX_USEFUL_MTRRS * 16 ; .mtrrs
push edx ; .mtrrs_end
push edx ; .num_used_mtrrs
push eax ; .first_free_range
push edx ; .first_range: no ranges yet
mov cl, [cpu_phys_addr_width]
or eax, -1
shl eax, cl ; note: this uses cl&31 = cl-32, not the entire cl
push eax ; .phys_reserved_mask
virtual at esp
.phys_reserved_mask dd ?
.first_range dd ?
.first_free_range dd ?
.num_used_mtrrs dd ?
.mtrrs_end dd ?
; each .mtrrs entry is 16 bytes: qword (base | memory type), qword length
.mtrrs rq MAX_USEFUL_MTRRS * 2
.local_vars_size = $ - esp
end virtual
; 2. Get the number of variable-range MTRRs from MTRRCAP register.
; Abort if zero.
mov ecx, 0xFE
rdmsr
test al, al
jz .abort
mov byte [num_variable_mtrrs], al
; 3. Validate MTRR_DEF_TYPE register.
mov ecx, 0x2FF
rdmsr
; If BIOS has not initialized variable-range MTRRs, fallback to step 7.
test ah, 8
jz .fill_ranges_from_memory_map
; If the default memory type (not covered by MTRRs) is not UC,
; then probably BIOS did something strange, so it is better to exit immediately
; hoping for the best.
cmp al, MEM_UC
jnz .abort
; 4. Validate all variable-range MTRRs
; and copy configured MTRRs to the local array [.mtrrs].
; 4a. Prepare for the loop over existing variable-range MTRRs.
mov ecx, 0x200
lea edi, [.mtrrs]
.get_used_mtrrs_loop:
; 4b. For every MTRR, read PHYSBASEn and PHYSMASKn.
; In PHYSBASEn, clear upper bits and copy to ebp:ebx.
rdmsr
or edx, [.phys_reserved_mask]
xor edx, [.phys_reserved_mask]
mov ebp, edx
mov ebx, eax
inc ecx
; If PHYSMASKn is not active, ignore this MTRR.
rdmsr
inc ecx
test ah, 8
jz .get_used_mtrrs_next
; 4c. For every active MTRR, check that number of local entries is not too large.
inc [.num_used_mtrrs]
cmp [.num_used_mtrrs], MAX_USEFUL_MTRRS
ja .abort
; 4d. For every active MTRR, store PHYSBASEn with upper bits cleared.
; This contains the MTRR base and the memory type in low byte.
mov [edi], ebx
mov [edi+4], ebp
; 4e. For every active MTRR, check that the range is continuous:
; PHYSMASKn with upper bits set must be negated power of two, and
; low bits of PHYSBASEn must be zeroes:
; PHYSMASKn = 1...10...0,
; PHYSBASEn = x...x0...0,
; this defines a continuous range from x...x0...0 to x...x1...1,
; length = 10...0 = negated PHYSMASKn.
; Store length in the local array.
and eax, not 0xFFF
or edx, [.phys_reserved_mask]
mov dword [edi+8], 0
mov dword [edi+12], 0
sub [edi+8], eax
sbb [edi+12], edx
; (x and -x) is the maximum power of two that divides x.
; Condition for powers of two: (x and -x) equals x.
and eax, [edi+8]
and edx, [edi+12]
cmp eax, [edi+8]
jnz .abort
cmp edx, [edi+12]
jnz .abort
; also require that the length divides the base (aligned range)
sub eax, 1
sbb edx, 0
and eax, not 0xFFF
and eax, ebx
jnz .abort
and edx, ebp
jnz .abort
; 4f. For every active MTRR, validate memory type: it must be either WB or UC.
add edi, 16
cmp bl, MEM_UC
jz .get_used_mtrrs_next
cmp bl, MEM_WB
jnz .abort
.get_used_mtrrs_next:
; 4g. Repeat the loop at 4b-4f for all [num_variable_mtrrs] entries.
mov eax, [num_variable_mtrrs]
lea eax, [0x200+eax*2]
cmp ecx, eax
jb .get_used_mtrrs_loop
; 4h. If no active MTRRs were detected, fallback to step 7.
cmp [.num_used_mtrrs], 0
jz .fill_ranges_from_memory_map
mov [.mtrrs_end], edi
; 5. Generate sorted list of ranges marked as WB.
; 5a. Prepare for the loop over configured MTRRs filled at step 4.
lea ecx, [.mtrrs]
.fill_wb_ranges:
; 5b. Ignore non-WB MTRRs.
mov ebx, [ecx]
cmp bl, MEM_WB
jnz .next_wb_range
mov ebp, [ecx+4]
and ebx, not 0xFFF ; clear memory type and reserved bits
; ebp:ebx = start of the range described by the current MTRR.
; 5c. Find the first existing range containing a point greater than ebp:ebx.
lea esi, [.first_range]
.find_range_wb:
; If there is no next range or start of the next range is greater than ebp:ebx,
; exit the loop to 5d.
mov edi, [esi]
test edi, edi
jz .found_place_wb
mov eax, ebx
mov edx, ebp
sub eax, dword [edi+mtrr_range.start]
sbb edx, dword [edi+mtrr_range.start+4]
jb .found_place_wb
; Otherwise, if end of the next range is greater than or equal to ebp:ebx,
; exit the loop to 5e.
mov esi, edi
sub eax, dword [edi+mtrr_range.length]
sbb edx, dword [edi+mtrr_range.length+4]
jb .expand_wb
or eax, edx
jnz .find_range_wb
jmp .expand_wb
.found_place_wb:
; 5d. ebp:ebx is not within any existing range.
; Insert a new range between esi and edi.
; (Later, during 5e, it can be merged with the following ranges.)
mov eax, [.first_free_range]
test eax, eax
jz .abort
mov [esi], eax
mov edx, [eax+mtrr_range.next]
mov [.first_free_range], edx
mov dword [eax+mtrr_range.start], ebx
mov dword [eax+mtrr_range.start+4], ebp
; Don't fill [eax+mtrr_range.next] and [eax+mtrr_range.length] yet,
; they will be calculated including merges at step 5e.
mov esi, edi
mov edi, eax
.expand_wb:
; 5e. The range at edi contains ebp:ebx, and esi points to the first range
; to be checked for merge: esi=edi if ebp:ebx was found in an existing range,
; esi is next after edi if a new range with ebp:ebx was created.
; Merge it with following ranges while start of the next range is not greater
; than the end of the new range.
add ebx, [ecx+8]
adc ebp, [ecx+12]
; ebp:ebx = end of the range described by the current MTRR.
.expand_wb_loop:
; If there is no next range or start of the next range is greater than ebp:ebx,
; exit the loop to 5g.
test esi, esi
jz .expand_wb_done
mov eax, ebx
mov edx, ebp
sub eax, dword [esi+mtrr_range.start]
sbb edx, dword [esi+mtrr_range.start+4]
jb .expand_wb_done
; Otherwise, if end of the next range is greater than or equal to ebp:ebx,
; exit the loop to 5f.
sub eax, dword [esi+mtrr_range.length]
sbb edx, dword [esi+mtrr_range.length+4]
jb .expand_wb_last
; Otherwise, the current range is completely within the new range.
; Free it and continue the loop.
mov edx, [esi+mtrr_range.next]
cmp esi, edi
jz @f
mov eax, [.first_free_range]
mov [esi+mtrr_range.next], eax
mov [.first_free_range], esi
@@:
mov esi, edx
jmp .expand_wb_loop
.expand_wb_last:
; 5f. Start of the new range is inside range described by esi,
; end of the new range is inside range described by edi.
; If esi is equal to edi, the new range is completely within
; an existing range, so proceed to the next range.
cmp esi, edi
jz .next_wb_range
; Otherwise, set end of interval at esi to end of interval at edi
; and free range described by edi.
mov ebx, dword [esi+mtrr_range.start]
mov ebp, dword [esi+mtrr_range.start+4]
add ebx, dword [esi+mtrr_range.length]
adc ebp, dword [esi+mtrr_range.length+4]
mov edx, [esi+mtrr_range.next]
mov eax, [.first_free_range]
mov [esi+mtrr_range.next], eax
mov [.first_free_range], esi
mov esi, edx
.expand_wb_done:
; 5g. We have found the next range (maybe 0) after merging and
; the new end of range (maybe ebp:ebx from the new range
; or end of another existing interval calculated at step 5f).
; Write them to range at edi.
mov [edi+mtrr_range.next], esi
sub ebx, dword [edi+mtrr_range.start]
sbb ebp, dword [edi+mtrr_range.start+4]
mov dword [edi+mtrr_range.length], ebx
mov dword [edi+mtrr_range.length+4], ebp
.next_wb_range:
; 5h. Continue the loop 5b-5g over all configured MTRRs.
add ecx, 16
cmp ecx, [.mtrrs_end]
jb .fill_wb_ranges
; 6. Exclude all ranges marked as UC.
; 6a. Prepare for the loop over configured MTRRs filled at step 4.
lea ecx, [.mtrrs]
.fill_uc_ranges:
; 6b. Ignore non-UC MTRRs.
mov ebx, [ecx]
cmp bl, MEM_UC
jnz .next_uc_range
mov ebp, [ecx+4]
and ebx, not 0xFFF ; clear memory type and reserved bits
; ebp:ebx = start of the range described by the current MTRR.
lea esi, [.first_range]
; 6c. Find the first existing range containing a point greater than ebp:ebx.
.find_range_uc:
; If there is no next range, ignore this MTRR,
; exit the loop and continue to next MTRR.
mov edi, [esi]
test edi, edi
jz .next_uc_range
; If start of the next range is greater than or equal to ebp:ebx,
; exit the loop to 6e.
mov eax, dword [edi+mtrr_range.start]
mov edx, dword [edi+mtrr_range.start+4]
sub eax, ebx
sbb edx, ebp
jnb .truncate_uc
; Otherwise, continue the loop if end of the next range is less than ebp:ebx,
; exit the loop to 6d otherwise.
mov esi, edi
add eax, dword [edi+mtrr_range.length]
adc edx, dword [edi+mtrr_range.length+4]
jnb .find_range_uc
; 6d. ebp:ebx is inside (or at end of) an existing range.
; Split the range. (The second range, maybe containing completely within UC-range,
; maybe of zero length, can be removed at step 6e, if needed.)
mov edi, [.first_free_range]
test edi, edi
jz .abort
; edx:eax here = bytes remaining from ebp:ebx to end of the range at esi,
; which becomes the length of the split-off second part at edi
mov dword [edi+mtrr_range.start], ebx
mov dword [edi+mtrr_range.start+4], ebp
mov dword [edi+mtrr_range.length], eax
mov dword [edi+mtrr_range.length+4], edx
mov eax, [edi+mtrr_range.next]
mov [.first_free_range], eax
mov eax, [esi+mtrr_range.next]
mov [edi+mtrr_range.next], eax
; don't change [esi+mtrr_range.next] yet, it will be filled at step 6e
mov eax, ebx
mov edx, ebp
sub eax, dword [esi+mtrr_range.start]
sbb edx, dword [esi+mtrr_range.start+4]
mov dword [esi+mtrr_range.length], eax
mov dword [esi+mtrr_range.length+4], edx
.truncate_uc:
; 6e. edi is the first range after ebp:ebx, check it and next ranges
; for intersection with the new range, truncate heads.
add ebx, [ecx+8]
adc ebp, [ecx+12]
; ebp:ebx = end of the range described by the current MTRR.
.truncate_uc_loop:
; If start of the next range is greater than ebp:ebx,
; exit the loop to 6g.
mov eax, ebx
mov edx, ebp
sub eax, dword [edi+mtrr_range.start]
sbb edx, dword [edi+mtrr_range.start+4]
jb .truncate_uc_done
; Otherwise, if end of the next range is greater than ebp:ebx,
; exit the loop to 6f.
sub eax, dword [edi+mtrr_range.length]
sbb edx, dword [edi+mtrr_range.length+4]
jb .truncate_uc_last
; Otherwise, the current range is completely within the new range.
; Free it and continue the loop if there is a next range.
; If that was a last range, exit the loop to 6g.
mov edx, [edi+mtrr_range.next]
mov eax, [.first_free_range]
mov [.first_free_range], edi
mov [edi+mtrr_range.next], eax
mov edi, edx
test edi, edi
jnz .truncate_uc_loop
jmp .truncate_uc_done
.truncate_uc_last:
; 6f. The range at edi partially intersects with the UC-range described by MTRR.
; Truncate it from the head.
mov dword [edi+mtrr_range.start], ebx
mov dword [edi+mtrr_range.start+4], ebp
; edx:eax = (end of UC range) - (end of range at edi), negative here;
; negate the 64-bit value to get the remaining length
neg eax
adc edx, 0
neg edx
mov dword [edi+mtrr_range.length], eax
mov dword [edi+mtrr_range.length+4], edx
.truncate_uc_done:
; 6g. We have found the next range (maybe 0) after intersection.
; Write it to [esi+mtrr_range.next].
mov [esi+mtrr_range.next], edi
.next_uc_range:
; 6h. Continue the loop 6b-6g over all configured MTRRs.
add ecx, 16
cmp ecx, [.mtrrs_end]
jb .fill_uc_ranges
; Sanity check: if there are no ranges after steps 5-6,
; fallback to step 7. Otherwise, go to 8.
cmp [.first_range], 0
jnz .ranges_ok
.fill_ranges_from_memory_map:
; 7. BIOS has not configured variable-range MTRRs.
; Create one range from 0 to [MEM_AMOUNT].
mov eax, [.first_free_range]
mov edx, [eax+mtrr_range.next]
mov [.first_free_range], edx
mov [.first_range], eax
xor edx, edx
mov [eax+mtrr_range.next], edx
mov dword [eax+mtrr_range.start], edx
mov dword [eax+mtrr_range.start+4], edx
mov ecx, [MEM_AMOUNT]
mov dword [eax+mtrr_range.length], ecx
mov dword [eax+mtrr_range.length+4], edx
.ranges_ok:
; 8. We have calculated list of WB-ranges.
; Now we should calculate a list of MTRRs so that
; * every MTRR describes a range with length = power of 2 and start that is aligned,
; * every MTRR can be WB or UC
; * (sum of all WB ranges) minus (sum of all UC ranges) equals the calculated list
; * top of 4G memory must not be covered by any ranges
; Example: range [0,0xBC000000) can be converted to
; [0,0x80000000)+[0x80000000,0xC0000000)-[0xBC000000,0xC0000000)
; WB +WB -UC
; but not to [0,0x100000000)-[0xC0000000,0x100000000)-[0xBC000000,0xC0000000).
; 8a. Check that list of ranges is [0,something) plus, optionally, [4G,something).
; This holds in practice (see mtrrtest.asm for real-life examples)
; and significantly simplifies the code: ranges are independent, start of range
; is almost always aligned (the only exception >4G upper memory can be easily covered),
; there is no need to consider adding holes before start of range, only
; append them to end of range.
xor eax, eax
mov edi, [.first_range]
cmp dword [edi+mtrr_range.start], eax
jnz .abort
cmp dword [edi+mtrr_range.start+4], eax
jnz .abort
cmp dword [edi+mtrr_range.length+4], eax
jnz .abort
mov edx, [edi+mtrr_range.next]
test edx, edx
jz @f
cmp dword [edx+mtrr_range.start], eax
jnz .abort
cmp dword [edx+mtrr_range.start+4], 1
jnz .abort
cmp [edx+mtrr_range.next], eax
jnz .abort
@@:
; 8b. Initialize: no MTRRs filled.
mov [.num_used_mtrrs], eax
lea esi, [.mtrrs]
.range2mtrr_loop:
; 8c. If we are dealing with upper-memory range (after 4G)
; with length > start, create one WB MTRR with [start,2*start),
; reset start to 2*start and return to this step.
; Example: [4G,24G) -> [4G,8G) {returning} + [8G,16G) {returning}
; + [16G,24G) {advancing to ?}.
mov eax, dword [edi+mtrr_range.length+4]
test eax, eax
jz .less4G
mov edx, dword [edi+mtrr_range.start+4]
cmp eax, edx
jb .start_aligned
inc [.num_used_mtrrs]
cmp [.num_used_mtrrs], MAX_USEFUL_MTRRS
ja .abort
mov dword [esi], MEM_WB
mov dword [esi+4], edx
mov dword [esi+8], 0
mov dword [esi+12], edx
add esi, 16
add dword [edi+mtrr_range.start+4], edx
sub dword [edi+mtrr_range.length+4], edx
jnz .range2mtrr_loop
cmp dword [edi+mtrr_range.length], 0
jz .range2mtrr_next
.less4G:
; 8d. If we are dealing with low-memory range (before 4G)
; and appending a maximal-size hole would create a range covering top of 4G,
; create a maximal-size WB range and return to this step.
; Example: for [0,0xBC000000) the following steps would consider
; variants [0,0x80000000)+(another range to be splitted) and
; [0,0x100000000)-(another range to be splitted); we forbid the last variant,
; so the first variant must be used.
bsr ecx, dword [edi+mtrr_range.length]
xor edx, edx
inc edx
shl edx, cl
; edx = highest power of two in length; eax = start + 2*edx,
; zero iff the maximal hole variant would wrap past the 4G boundary
lea eax, [edx*2]
add eax, dword [edi+mtrr_range.start]
jnz .start_aligned
inc [.num_used_mtrrs]
cmp [.num_used_mtrrs], MAX_USEFUL_MTRRS
ja .abort
mov eax, dword [edi+mtrr_range.start]
mov dword [esi], eax
or dword [esi], MEM_WB
mov dword [esi+4], 0
mov dword [esi+8], edx
mov dword [esi+12], 0
add esi, 16
add dword [edi+mtrr_range.start], edx
sub dword [edi+mtrr_range.length], edx
jnz .less4G
jmp .range2mtrr_next
.start_aligned:
; Start is aligned for any allowed length, maximum-size hole is allowed.
; Select the best MTRR configuration for one range.
; length=...101101
; Without hole at the end, we need one WB MTRR for every 1-bit in length:
; length=...100000 + ...001000 + ...000100 + ...000001
; We can also append one hole at the end so that one 0-bit (selected by us)
; becomes 1 and all lower bits become 0 for WB-range:
; length=...110000 - (...00010 + ...00001)
; In this way, we need one WB MTRR for every 1-bit higher than the selected bit,
; one WB MTRR for the selected bit, one UC MTRR for every 0-bit between
; the selected bit and lowest 1-bit (they become 1-bits after negation)
; and one UC MTRR for lowest 1-bit.
; So we need to select 0-bit with the maximal difference
; (number of 0-bits) - (number of 1-bits) between selected and lowest 1-bit,
; this equals the gain from using a hole. If the difference is negative for
; all 0-bits, don't append hole.
; Note that lowest 1-bit is not included when counting, but selected 0-bit is.
; 8e. Find the optimal bit position for hole.
; eax = current difference, ebx = best difference,
; ecx = hole bit position, edx = current bit position.
xor eax, eax
xor ebx, ebx
xor ecx, ecx
bsf edx, dword [edi+mtrr_range.length]
jnz @f
bsf edx, dword [edi+mtrr_range.length+4]
add edx, 32
@@:
push edx ; save position of lowest 1-bit for step 8f
.calc_stat:
inc edx
cmp edx, 64
jae .stat_done
inc eax ; increment difference in hope for 1-bit
; Note: bt conveniently works with both .length and .length+4,
; depending on whether edx>=32.
bt dword [edi+mtrr_range.length], edx
jc .calc_stat
dec eax ; hope was wrong, decrement difference to correct 'inc'
dec eax ; and again, now getting the real difference
cmp eax, ebx
jle .calc_stat
mov ebx, eax
mov ecx, edx
jmp .calc_stat
.stat_done:
; 8f. If we decided to create a hole, flip all bits between lowest and selected.
pop edx ; restore position of lowest 1-bit saved at step 8e
test ecx, ecx
jz .fill_hi_init
@@:
inc edx
cmp edx, ecx
ja .fill_hi_init
btc dword [edi+mtrr_range.length], edx
jmp @b
.fill_hi_init:
; 8g. Create MTRR ranges corresponding to upper 32 bits.
; ecx = hole bit position relative to bit 32: bits >= ecx are WB, below are UC
sub ecx, 32
.fill_hi_loop:
bsr edx, dword [edi+mtrr_range.length+4]
jz .fill_hi_done
inc [.num_used_mtrrs]
cmp [.num_used_mtrrs], MAX_USEFUL_MTRRS
ja .abort
mov eax, dword [edi+mtrr_range.start]
mov [esi], eax
mov eax, dword [edi+mtrr_range.start+4]
mov [esi+4], eax
xor eax, eax
mov [esi+8], eax
bts eax, edx
mov [esi+12], eax
cmp edx, ecx
jl .fill_hi_uc
or dword [esi], MEM_WB
add dword [edi+mtrr_range.start+4], eax
jmp @f
.fill_hi_uc:
; UC entry: the hole sits below the current start, step start back
sub dword [esi+4], eax
sub dword [edi+mtrr_range.start+4], eax
@@:
add esi, 16
sub dword [edi+mtrr_range.length], eax
jmp .fill_hi_loop
.fill_hi_done:
; 8h. Create MTRR ranges corresponding to lower 32 bits.
add ecx, 32
.fill_lo_loop:
bsr edx, dword [edi+mtrr_range.length]
jz .range2mtrr_next
inc [.num_used_mtrrs]
cmp [.num_used_mtrrs], MAX_USEFUL_MTRRS
ja .abort
mov eax, dword [edi+mtrr_range.start]
mov [esi], eax
mov eax, dword [edi+mtrr_range.start+4]
mov [esi+4], eax
xor eax, eax
mov [esi+12], eax
bts eax, edx
mov [esi+8], eax
cmp edx, ecx
jl .fill_lo_uc
or dword [esi], MEM_WB
add dword [edi+mtrr_range.start], eax
jmp @f
.fill_lo_uc:
; UC entry: the hole sits below the current start, step start back
sub dword [esi], eax
sub dword [edi+mtrr_range.start], eax
@@:
add esi, 16
sub dword [edi+mtrr_range.length], eax
jmp .fill_lo_loop
.range2mtrr_next:
; 8i. Repeat the loop at 8c-8h for all ranges.
mov edi, [edi+mtrr_range.next]
test edi, edi
jnz .range2mtrr_loop
; 9. We have calculated needed MTRRs, now setup them in the CPU.
; 9a. Abort if number of MTRRs is too large.
mov eax, [num_variable_mtrrs]
cmp [.num_used_mtrrs], eax
ja .abort
; 9b. Prepare for changes.
call mtrr_begin_change
; 9c. Prepare for loop over MTRRs.
lea esi, [.mtrrs]
mov ecx, 0x200
@@:
; 9d. For every MTRR, copy PHYSBASEn as is: step 8 has configured
; start value and type bits as needed.
mov eax, [esi]
mov edx, [esi+4]
wrmsr
inc ecx
; 9e. For every MTRR, calculate PHYSMASKn = -(length) or 0x800
; with upper bits cleared, 0x800 = MTRR is valid.
xor eax, eax
xor edx, edx
sub eax, [esi+8]
sbb edx, [esi+12]
or eax, 0x800
or edx, [.phys_reserved_mask]
xor edx, [.phys_reserved_mask]
wrmsr
inc ecx
; 9f. Continue steps 9d and 9e for all MTRRs calculated at step 8.
add esi, 16
dec [.num_used_mtrrs]
jnz @b
; 9g. Zero other MTRRs.
xor eax, eax
xor edx, edx
mov ebx, [num_variable_mtrrs]
lea ebx, [0x200+ebx*2]
@@:
cmp ecx, ebx
jae @f
wrmsr
inc ecx
wrmsr
inc ecx
jmp @b
@@:
; 9i. Configure MTRR_DEF_TYPE.
mov ecx, 0x2FF
rdmsr
or ah, 8 ; enable variable-ranges MTRR
and al, 0xF0; default memtype = UC
wrmsr
; 9j. Changes are done.
call mtrr_end_change
.abort:
; common exit: drop all stack locals and restore callee-saved ebp
add esp, .local_vars_size + MAX_RANGES * sizeof.mtrr_range
pop ebp
ret
endp
; Allocate&set one MTRR for given range.
; size must be power of 2 that divides base.
; In (stdcall): base = 32-bit physical base, size = length in bytes,
;               mem_type = memory type for the range (e.g. MEM_WC).
; Scans PHYSMASKn of all [num_variable_mtrrs] variable-range MTRRs for a
; free pair (valid bit 11 clear); silently returns when base is already
; programmed in a below-4G MTRR or when no pair is free.
; Clobbers: eax, ecx, edx, flags.
proc set_mtrr stdcall, base:dword,size:dword,mem_type:dword
; find unused register
mov ecx, 0x201
.scan:
rdmsr
dec ecx
test ah, 8
jz .found
; pair in use: if it is a below-4G MTRR with the same base, nothing to do
rdmsr
test edx, edx
jnz @f
and eax, not 0xFFF ; clear reserved bits
cmp eax, [base]
jz .ret
@@:
add ecx, 3
mov eax, [num_variable_mtrrs]
lea eax, [0x200+eax*2]
cmp ecx, eax
jb .scan
; no free registers, ignore the call
.ret:
ret
.found:
; found, write values
call mtrr_begin_change
; PHYSBASEn = base | mem_type
xor edx, edx
mov eax, [base]
or eax, [mem_type]
wrmsr
; PHYSMASKn = 2^[cpu_phys_addr_width] - size, plus valid bit (0x800);
; bts on a register takes the offset mod 32, so for width in 32..63
; this sets bit (width-32) of the high dword edx
mov al, [cpu_phys_addr_width]
xor edx, edx
bts edx, eax
xor eax, eax
sub eax, [size]
sbb edx, 0
or eax, 0x800
inc ecx
wrmsr
call mtrr_end_change
ret
endp
; Helper procedure for mtrr_validate.
; Calculates memory type for given address according to variable-range MTRRs.
; Assumes that MTRRs are enabled.
; in: ebx = 32-bit physical address
; out: eax = memory type for ebx
; Clobbers: ecx, edx, flags; ebx is preserved.
proc mtrr_get_real_type
; 1. Initialize: we have not yet found any MTRRs covering ebx.
; [esp] = bitmask accumulator, bit N set <=> some MTRR reports type N.
push 0
mov ecx, 0x201
.mtrr_loop:
; 2. For every MTRR, check whether it is valid; if not, continue to the next MTRR.
rdmsr
dec ecx
test ah, 8
jz .next
; 3. For every valid MTRR, check whether (ebx and PHYSMASKn) == PHYSBASEn,
; excluding low 12 bits.
and eax, ebx
push eax
rdmsr
test edx, edx
pop edx
jnz .next ; base above 4G cannot match a 32-bit address
xor edx, eax
and edx, not 0xFFF
jnz .next
; 4. If so, set the bit corresponding to memory type defined by this MTRR.
and eax, 7
bts [esp], eax
.next:
; 5. Continue loop at 2-4 for all variable-range MTRRs.
add ecx, 3
mov eax, [num_variable_mtrrs]
lea eax, [0x200+eax*2]
cmp ecx, eax
jb .mtrr_loop
; 6. If no MTRRs cover address in ebx, use default type from MTRR_DEF_TYPE.
pop edx
test edx, edx
jz .default
; 7. Find&clear 1-bit in edx.
bsf eax, edx
btr edx, eax
; 8. If there was only one 1-bit, then all MTRRs are consistent, return that bit.
test edx, edx
jz .nothing
; Otherwise, return MEM_UC (e.g. WB+UC is UC).
xor eax, eax
.nothing:
ret
.default:
mov ecx, 0x2FF
rdmsr
movzx eax, al
ret
endp
; If MTRRs are configured improperly, this is not obvious to the user;
; everything works, but the performance can be horrible.
; Try to detect this and let the user know that the low performance
; is caused by some problem and is not a global property of the system.
; Let's hope he would report it to developers...
; In: nothing. Out: nothing; on failure launches the 'notifyapp' helper
; with mtrr_user_message.
; Clobbers: eax, ebx, ecx, edx, flags; also ebp on the failure path.
proc mtrr_validate
; 1. If MTRRs are not supported, they cannot be configured improperly.
; Note: VirtualBox claims MTRR support in cpuid, but emulates MTRRCAP=0,
; which is efficiently equivalent to absent MTRRs.
; So check [num_variable_mtrrs] instead of CAPS_MTRR in [cpu_caps].
cmp [num_variable_mtrrs], 0
jz .exit
; 2. If variable-range MTRRs are not configured, this is a problem.
mov ecx, 0x2FF
rdmsr
test ah, 8
jz .fail
; 3. Get the memory type for address somewhere inside working memory.
; It must be write-back.
mov ebx, 0x27FFFF
call mtrr_get_real_type
cmp al, MEM_WB
jnz .fail
; 4. If we're using a mode with LFB,
; get the memory type for last pixel of the framebuffer.
; It must be write-combined.
test word [SCR_MODE], 0x4000
jz .exit
; eax = offset of the last framebuffer byte = pitch*height - 1
mov eax, [_display.pitch]
mul [_display.height]
dec eax
; LFB is mapped to virtual address LFB_BASE,
; it uses global pages if supported by CPU.
; Translate LFB_BASE to a physical address via the page tables.
mov ebx, [sys_proc+PROC.pdt_0+(LFB_BASE shr 20)]
test ebx, PG_LARGE
jnz @f
mov ebx, [page_tabs+(LFB_BASE shr 10)]
@@:
and ebx, not 0xFFF
add ebx, eax
call mtrr_get_real_type
cmp al, MEM_WC
jz .exit
; 5. The check at step 4 fails on Bochs:
; Bochs BIOS configures MTRRs in a strange way not respecting [cpu_phys_addr_width],
; so mtrr_reconfigure avoids to touch anything.
; However, Bochs core ignores MTRRs (keeping them only for rdmsr/wrmsr),
; so we don't care about proper setting for Bochs.
; Use northbridge PCI id to detect Bochs: it emulates either i440fx or i430fx
; depending on configuration file.
mov eax, [pcidev_list.fd]
cmp eax, pcidev_list ; sanity check: fail if no PCI devices
jz .fail
cmp [eax+PCIDEV.vendor_device_id], 0x12378086
jz .exit
cmp [eax+PCIDEV.vendor_device_id], 0x01228086
jnz .fail
.exit:
ret
.fail:
; warn the user via the notification application
mov ebx, mtrr_user_message
mov ebp, notifyapp
call fs_execute_from_sysdir_param
ret
endp
/kernel/branches/Kolibri-acpi/core/peload.inc |
---|
24,15 → 24,30 |
mov [image], eax |
mov edx, [eax+STRIPPED_PE_HEADER.SizeOfImage] |
; mov cl, [eax+STRIPPED_PE_HEADER.Subsystem] |
cmp word [eax], STRIPPED_PE_SIGNATURE |
jz @f |
mov edx, [eax+60] |
; mov cl, [eax+5Ch+edx] |
mov edx, [eax+80+edx] |
stdcall kernel_alloc, [eax+80+edx] |
@@: |
mov [entry], 0 |
; cmp cl, 1 |
; jnz .cleanup |
stdcall kernel_alloc, edx |
test eax, eax |
jz .cleanup |
mov [base], eax |
stdcall map_PE, eax, [image] |
push ebx ebp |
mov ebx, [image] |
mov ebp, eax |
call map_PE |
pop ebp ebx |
mov [entry], eax |
test eax, eax |
48,199 → 63,200 |
ret |
endp |
DWORD equ dword |
PTR equ |
align 4 |
map_PE: ;stdcall base:dword, image:dword |
cld |
push ebp |
map_PE: ;ebp=base:dword, ebx=image:dword |
push edi |
push esi |
push ebx |
sub esp, 60 |
mov ebx, DWORD PTR [esp+84] |
mov ebp, DWORD PTR [esp+80] |
sub esp, .locals_size |
virtual at esp |
.numsections dd ? |
.import_names dd ? |
.import_targets dd ? |
.peheader dd ? |
.bad_import dd ? |
.import_idx dd ? |
.import_descr dd ? |
.relocs_rva dd ? |
.relocs_size dd ? |
.section_header_size dd ? |
.AddressOfEntryPoint dd ? |
.ImageBase dd ? |
.locals_size = $ - esp |
end virtual |
cmp word [ebx], STRIPPED_PE_SIGNATURE |
jz .stripped |
mov edx, ebx |
add edx, [ebx+60] |
movzx eax, word [edx+6] |
mov [.numsections], eax |
mov eax, [edx+40] |
mov [.AddressOfEntryPoint], eax |
mov eax, [edx+52] |
mov [.ImageBase], eax |
mov ecx, [edx+84] |
mov [.section_header_size], 40 |
mov eax, [edx+128] |
mov [.import_descr], eax |
mov eax, [edx+160] |
mov [.relocs_rva], eax |
mov eax, [edx+164] |
mov [.relocs_size], eax |
add edx, 256 |
jmp .common |
.stripped: |
mov eax, [ebx+STRIPPED_PE_HEADER.AddressOfEntryPoint] |
mov [.AddressOfEntryPoint], eax |
mov eax, [ebx+STRIPPED_PE_HEADER.ImageBase] |
mov [.ImageBase], eax |
movzx eax, [ebx+STRIPPED_PE_HEADER.NumberOfSections] |
mov [.numsections], eax |
movzx ecx, [ebx+STRIPPED_PE_HEADER.NumberOfRvaAndSizes] |
xor eax, eax |
mov [.relocs_rva], eax |
mov [.relocs_size], eax |
test ecx, ecx |
jz @f |
mov eax, [ebx+sizeof.STRIPPED_PE_HEADER+SPE_DIRECTORY_IMPORT*8] |
@@: |
mov [.import_descr], eax |
cmp ecx, SPE_DIRECTORY_BASERELOC |
jbe @f |
mov eax, [ebx+sizeof.STRIPPED_PE_HEADER+SPE_DIRECTORY_BASERELOC*8] |
mov [.relocs_rva], eax |
mov eax, [ebx+sizeof.STRIPPED_PE_HEADER+SPE_DIRECTORY_BASERELOC*8+4] |
mov [.relocs_size], eax |
@@: |
mov [.section_header_size], 28 |
lea edx, [ebx+ecx*8+sizeof.STRIPPED_PE_HEADER+8] |
mov ecx, [ebx+STRIPPED_PE_HEADER.SizeOfHeaders] |
.common: |
mov esi, ebx |
add edx, DWORD PTR [ebx+60] |
mov edi, ebp |
mov DWORD PTR [esp+32], edx |
mov ecx, DWORD PTR [edx+84] |
shr ecx, 2 |
rep movsd |
movzx eax, WORD PTR [edx+6] |
mov DWORD PTR [esp+36], 0 |
mov DWORD PTR [esp+16], eax |
jmp L2 |
L3: |
mov eax, DWORD PTR [edx+264] |
cmp [.numsections], 0 |
jz .nosections |
.copy_sections: |
mov eax, [edx+8] |
test eax, eax |
je L4 |
je .no_section_data |
mov esi, ebx |
mov edi, ebp |
add esi, DWORD PTR [edx+268] |
add esi, [edx+12] |
mov ecx, eax |
add edi, DWORD PTR [edx+260] |
add edi, [edx+4] |
add ecx, 3 |
shr ecx, 2 |
rep movsd |
L4: |
mov ecx, DWORD PTR [edx+256] |
.no_section_data: |
mov ecx, [edx] |
cmp ecx, eax |
jbe L6 |
jbe .no_section_fill |
sub ecx, eax |
add eax, DWORD PTR [edx+260] |
add eax, [edx+4] |
lea edi, [eax+ebp] |
xor eax, eax |
rep stosb |
L6: |
inc DWORD PTR [esp+36] |
add edx, 40 |
L2: |
mov esi, DWORD PTR [esp+16] |
cmp DWORD PTR [esp+36], esi |
jne L3 |
mov edi, DWORD PTR [esp+32] |
cmp DWORD PTR [edi+164], 0 |
je L9 |
pushd [edi+164] |
.no_section_fill: |
add edx, [.section_header_size] |
dec [.numsections] |
jnz .copy_sections |
.nosections: |
cmp [.relocs_size], 0 |
je .no_relocations |
mov esi, ebp |
mov ecx, ebp |
sub esi, DWORD PTR [edi+52] |
add ecx, DWORD PTR [edi+160] |
mov eax, esi |
shr eax, 16 |
mov DWORD PTR [esp+16], eax |
L12: |
mov eax, [ecx+4] |
sub [esp], eax |
lea ebx, [eax-8] |
xor edi, edi |
sub esi, [.ImageBase] |
add ecx, [.relocs_rva] |
.relocs_block: |
mov edi, [ecx] |
add edi, ebp |
mov ebx, [ecx+4] |
add ecx, 8 |
sub [.relocs_size], ebx |
sub ebx, 8 |
shr ebx, 1 |
jmp L13 |
L14: |
movzx eax, WORD PTR [ecx+8+edi*2] |
jz .relocs_next_block |
.one_reloc: |
movzx eax, word [ecx] |
add ecx, 2 |
mov edx, eax |
shr eax, 12 |
and edx, 4095 |
add edx, DWORD PTR [ecx] |
cmp ax, 2 |
je L17 |
cmp ax, 3 |
je L18 |
dec ax |
jne L15 |
mov eax, DWORD PTR [esp+16] |
add WORD PTR [edx+ebp], ax |
L17: |
add WORD PTR [edx+ebp], si |
L18: |
add DWORD PTR [edx+ebp], esi |
L15: |
inc edi |
L13: |
cmp edi, ebx |
jne L14 |
add ecx, DWORD PTR [ecx+4] |
L11: |
cmp dword [esp], 0 |
jg L12 |
pop eax |
L9: |
mov edx, DWORD PTR [esp+32] |
cmp DWORD PTR [edx+132], 0 |
je L20 |
mov eax, ebp |
add eax, DWORD PTR [edx+128] |
mov DWORD PTR [esp+40], 0 |
add eax, 20 |
mov DWORD PTR [esp+56], eax |
L22: |
mov ecx, DWORD PTR [esp+56] |
cmp DWORD PTR [ecx-16], 0 |
jne L23 |
cmp DWORD PTR [ecx-8], 0 |
je L25 |
L23: |
mov edi, DWORD PTR [__exports+32] |
mov esi, DWORD PTR [__exports+28] |
mov eax, DWORD PTR [esp+56] |
mov DWORD PTR [esp+20], edi |
add edi, OS_BASE |
add esi, OS_BASE |
mov DWORD PTR [esp+44], esi |
mov ecx, DWORD PTR [eax-4] |
mov DWORD PTR [esp+48], edi |
mov edx, DWORD PTR [eax-20] |
cmp eax, 3 |
jne @f |
add [edx+edi], esi |
@@: |
dec ebx |
jnz .one_reloc |
.relocs_next_block: |
cmp [.relocs_size], 0 |
jg .relocs_block |
.no_relocations: |
cmp [.import_descr], 0 |
je .no_imports |
add [.import_descr], ebp |
mov [.bad_import], 0 |
.import_block: |
mov ecx, [.import_descr] |
cmp dword [ecx+4], 0 |
jne @f |
cmp dword [ecx+12], 0 |
je .done_imports |
@@: |
mov edx, dword [ecx] |
mov ecx, dword [ecx+16] |
test edx, edx |
jnz @f |
mov edx, ecx |
@@: |
mov DWORD PTR [esp+52], 0 |
mov [.import_idx], 0 |
add ecx, ebp |
add edx, ebp |
mov DWORD PTR [esp+24], edx |
mov DWORD PTR [esp+28], ecx |
L26: |
mov esi, DWORD PTR [esp+52] |
mov edi, DWORD PTR [esp+24] |
mov eax, DWORD PTR [edi+esi*4] |
mov [.import_names], edx |
mov [.import_targets], ecx |
.import_func: |
mov esi, [.import_idx] |
mov edi, [.import_names] |
mov eax, [edi+esi*4] |
test eax, eax |
je L27 |
test eax, eax |
js L27 |
je .next_import_block |
js .next_import_block |
lea edi, [ebp+eax] |
mov eax, DWORD PTR [esp+28] |
mov DWORD PTR [eax+esi*4], 0 |
mov eax, [.import_targets] |
mov dword [eax+esi*4], 0 |
lea esi, [edi+2] |
push eax |
movzx ebx, word [edi] |
push 32 |
movzx eax, WORD PTR [edi] |
mov edx, DWORD PTR [esp+56] |
mov eax, DWORD PTR [edx+eax*4] |
mov ecx, [__exports+32] |
mov eax, [ecx+OS_BASE+ebx*4] |
add eax, OS_BASE |
push eax |
push esi |
call strncmp |
pop ebx |
test eax, eax |
jz .import_func_found |
xor ebx, ebx |
test eax, eax |
jne L32 |
jmp L30 |
L33: |
push ecx |
.import_func_candidate: |
push 32 |
mov ecx, DWORD PTR [esp+28] |
mov eax, DWORD PTR [ecx+OS_BASE+ebx*4] |
mov ecx, [__exports+32] |
mov eax, [ecx+OS_BASE+ebx*4] |
add eax, OS_BASE |
push eax |
push esi |
call strncmp |
pop edx |
test eax, eax |
jne L34 |
mov esi, DWORD PTR [esp+44] |
mov edx, DWORD PTR [esp+52] |
mov ecx, DWORD PTR [esp+28] |
mov eax, DWORD PTR [esi+ebx*4] |
add eax, OS_BASE |
mov DWORD PTR [ecx+edx*4], eax |
jmp L36 |
L34: |
je .import_func_found |
inc ebx |
L32: |
cmp ebx, DWORD PTR [__exports+24] |
jb L33 |
L36: |
cmp ebx, DWORD PTR [__exports+24] |
jne L37 |
cmp ebx, [__exports+24] |
jb .import_func_candidate |
mov esi, msg_unresolved |
call sys_msg_board_str |
249,34 → 265,30 |
mov esi, msg_CR |
call sys_msg_board_str |
mov DWORD PTR [esp+40], 1 |
jmp L37 |
L30: |
movzx eax, WORD PTR [edi] |
mov esi, DWORD PTR [esp+44] |
mov edi, DWORD PTR [esp+52] |
mov edx, DWORD PTR [esp+28] |
mov eax, DWORD PTR [esi+eax*4] |
mov [.bad_import], 1 |
jmp .next_import_func |
.import_func_found: |
mov esi, [__exports+28] |
mov edx, [.import_idx] |
mov ecx, [.import_targets] |
mov eax, [esi+OS_BASE+ebx*4] |
add eax, OS_BASE |
mov DWORD PTR [edx+edi*4], eax |
L37: |
inc DWORD PTR [esp+52] |
jmp L26 |
L27: |
add DWORD PTR [esp+56], 20 |
jmp L22 |
L25: |
mov [ecx+edx*4], eax |
.next_import_func: |
inc [.import_idx] |
jmp .import_func |
.next_import_block: |
add [.import_descr], 20 |
jmp .import_block |
.done_imports: |
xor eax, eax |
cmp DWORD PTR [esp+40], 0 |
jne L40 |
L20: |
mov ecx, DWORD PTR [esp+32] |
cmp [.bad_import], 0 |
jne @f |
.no_imports: |
mov eax, ebp |
add eax, DWORD PTR [ecx+40] |
L40: |
add esp, 60 |
pop ebx |
add eax, [.AddressOfEntryPoint] |
@@: |
add esp, .locals_size |
pop esi |
pop edi |
pop ebp |
ret 8 |
ret |
/kernel/branches/Kolibri-acpi/core/sched.inc |
---|
29,8 → 29,7 |
.nocounter: |
xor ecx, ecx ; send End Of Interrupt signal |
call irq_eoi |
; btr dword[DONT_SWITCH], 0 |
; jc .return |
mov bl, SCHEDULE_ANY_PRIORITY |
call find_next_task |
jz .return ; if there is only one running process |
44,26 → 43,10 |
pushfd |
cli |
pushad |
if 0 |
; \begin{Mario79} ; <- must be refractoried, if used... |
cmp [dma_task_switched], 1 |
jne .find_next_task |
mov [dma_task_switched], 0 |
mov ebx, [dma_process] |
cmp [CURRENT_TASK], ebx |
je .return |
mov edi, [dma_slot_ptr] |
mov [CURRENT_TASK], ebx |
mov [TASK_BASE], edi |
jmp @f |
.find_next_task: |
; \end{Mario79} |
end if |
mov bl, SCHEDULE_ANY_PRIORITY |
call find_next_task |
jz .return ; the same task -> skip switch |
@@: |
; mov byte[DONT_SWITCH], 1 |
call do_change_task |
.return: |
popad |
121,10 → 104,11 |
Mov dword [page_tabs+((tss._io_map_0 and -4096) shr 10)],eax,[ebx+APPDATA.io_map] |
Mov dword [page_tabs+((tss._io_map_1 and -4096) shr 10)],eax,[ebx+APPDATA.io_map+4] |
; set new thread memory-map |
mov ecx, APPDATA.dir_table |
mov eax, [ebx+ecx] ;offset>0x7F |
cmp eax, [esi+ecx] ;offset>0x7F |
mov eax, [ebx+APPDATA.process] |
cmp eax, [current_process] |
je @f |
mov [current_process], eax |
mov eax, [eax+PROC.pdt_0_phys] |
mov cr3, eax |
@@: |
; set tss.esp0 |
159,7 → 143,7 |
jz @f |
xor eax, eax |
mov dr6, eax |
lea esi, [ebx+ecx+APPDATA.dbg_regs-APPDATA.dir_table];offset>0x7F |
lea esi, [ebx+APPDATA.dbg_regs] |
cld |
macro lodsReg [reg] { |
lodsd |
/kernel/branches/Kolibri-acpi/core/sys32-sp.inc |
---|
1,3 → 1,13 |
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; |
;; ;; |
;; Copyright (C) KolibriOS team 2013-2014. All rights reserved. ;; |
;; Distributed under terms of the GNU General Public License ;; |
;; ;; |
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; |
$Revision: 4850 $ |
; Éste archivo debe ser editado con codificación CP866 |
msg_sel_ker cp850 "núcleo", 0 |
/kernel/branches/Kolibri-acpi/core/sys32.inc |
---|
413,23 → 413,26 |
align 4 |
terminate: ; terminate application |
destroy_thread: |
.slot equ esp ;locals |
.slot equ esp+4 ;locals |
.process equ esp ;ptr to parent process |
push esi ;save .slot |
shl esi, 8 |
cmp [SLOT_BASE+esi+APPDATA.dir_table], 0 |
jne @F |
mov edx, [SLOT_BASE+esi+APPDATA.process] |
test edx, edx |
jnz @F |
pop esi |
shl esi, 5 |
mov [CURRENT_TASK+esi+TASKDATA.state], 9 |
ret |
@@: |
push edx ;save .process |
lea edx, [SLOT_BASE+esi] |
call scheduler_remove_thread |
;mov esi,process_terminating |
;call sys_msg_board_str |
call lock_application_table |
; if the process is in V86 mode... |
442,7 → 445,7 |
; ...it has page directory for V86 mode |
mov esi, [eax+SLOT_BASE+APPDATA.saved_esp0] |
mov ecx, [esi+4] |
mov [eax+SLOT_BASE+APPDATA.dir_table], ecx |
mov [eax+SLOT_BASE+APPDATA.process], ecx |
; ...and I/O permission map for V86 mode |
mov ecx, [esi+12] |
mov [eax+SLOT_BASE+APPDATA.io_map], ecx |
449,7 → 452,7 |
mov ecx, [esi+8] |
mov [eax+SLOT_BASE+APPDATA.io_map+4], ecx |
.nov86: |
;destroy per-thread kernel objects |
mov esi, [.slot] |
shl esi, 8 |
add esi, SLOT_BASE+APP_OBJ_OFFSET |
467,11 → 470,6 |
pop esi |
jmp @B |
@@: |
mov eax, [.slot] |
shl eax, 8 |
stdcall destroy_app_space, [SLOT_BASE+eax+APPDATA.dir_table], [SLOT_BASE+eax+APPDATA.dlls_list_ptr] |
mov esi, [.slot] |
cmp [fpu_owner], esi ; if user fpu last -> fpu user = 2 |
jne @F |
630,6 → 628,9 |
je @F |
call free_page |
@@: |
lea ebx, [edi+APPDATA.list] |
list_del ebx ;destroys edx, ecx |
mov eax, 0x20202020 |
stosd |
stosd |
745,7 → 746,17 |
add ecx, 0x100 |
jmp .xd0 |
.xd1: |
; call systest |
;release slot |
bts [thr_slot_map], esi |
mov ecx, [.process] |
lea eax, [ecx+PROC.thr_list] |
cmp eax, [eax+LHEAD.next] |
jne @F |
call destroy_process.internal |
@@: |
sti ; .. and life goes on |
mov eax, [draw_limits.left] |
760,19 → 771,11 |
call unlock_application_table |
;mov esi,process_terminated |
;call sys_msg_board_str |
add esp, 4 |
add esp, 8 |
ret |
restore .slot |
restore .process |
;build_scheduler: |
; mov esi, boot_sched_1 |
; call boot_log |
; call build_process_gdt_tss_pointer |
; mov esi,boot_sched_2 |
; call boot_log |
; ret |
; Three following procedures are used to guarantee that |
; some part of kernel code will not be terminated from outside |
; while it is running. |
/kernel/branches/Kolibri-acpi/core/taskman.inc |
---|
70,7 → 70,7 |
filename rd 256 ;1024/4 |
flags dd ? |
save_cr3 dd ? |
save_proc dd ? |
slot dd ? |
slot_base dd ? |
file_base dd ? |
215,7 → 215,7 |
call lock_application_table |
call get_new_process_place |
call alloc_thread_slot |
test eax, eax |
mov esi, -0x20 ; too many processes |
jz .err |
248,19 → 248,24 |
loop .copy_process_name_loop |
.copy_process_name_done: |
mov ebx, cr3 |
mov [save_cr3], ebx |
mov ebx, [current_process] |
mov [save_proc], ebx |
stdcall create_app_space, [hdr_mem], [file_base], [file_size] |
stdcall create_process, [hdr_mem], [file_base], [file_size] |
mov esi, -30; no memory |
test eax, eax |
jz .failed |
mov ebx, [hdr_mem] |
mov [eax+PROC.mem_used], ebx |
mov ebx, [slot_base] |
mov [ebx+APPDATA.dir_table], eax |
mov eax, [hdr_mem] |
mov [ebx+APPDATA.mem_size], eax |
mov [ebx+APPDATA.process], eax |
lea edx, [ebx+APPDATA.list] |
lea ecx, [eax+PROC.thr_list] |
list_add_tail edx, ecx |
xor edx, edx |
cmp word [6], '02' |
jne @f |
292,7 → 297,7 |
lea ecx, [filename] |
stdcall set_app_params , [slot], eax, ebx, ecx, [flags] |
mov eax, [save_cr3] |
mov eax, [save_proc] |
call set_cr3 |
mov eax, [process_number];set result |
301,7 → 306,7 |
jmp .final |
.failed: |
mov eax, [save_cr3] |
mov eax, [save_proc] |
call set_cr3 |
.err: |
.err_hdr: |
385,53 → 390,55 |
ret |
align 4 |
proc get_new_process_place |
alloc_thread_slot: |
;input: |
; none |
;result: |
; eax=[new_process_place]<>0 - ok |
; eax=[new_thread_slot]<>0 - ok |
; 0 - failed. |
;This function find least empty slot. |
;It doesn't increase [TASK_COUNT]! |
mov eax, CURRENT_TASK |
mov ebx, [TASK_COUNT] |
inc ebx |
shl ebx, 5 |
add ebx, eax ;ebx - address of process information for (last+1) slot |
.newprocessplace: |
;eax = address of process information for current slot |
cmp eax, ebx |
jz .endnewprocessplace ;empty slot after high boundary |
add eax, 0x20 |
cmp word [eax+0xa], 9;check process state, 9 means that process slot is empty |
jnz .newprocessplace |
.endnewprocessplace: |
mov ebx, eax |
sub eax, CURRENT_TASK |
shr eax, 5 ;calculate slot index |
cmp eax, 256 |
jge .failed ;it should be <256 |
mov word [ebx+0xa], 9;set process state to 9 (for slot after hight boundary) |
ret |
.failed: |
mov edx, thr_slot_map |
pushfd |
cli |
.l1: |
bsf eax, [edx] |
jnz .found |
add edx, 4 |
cmp edx, thr_slot_map+32 |
jb .l1 |
popfd |
xor eax, eax |
ret |
endp |
.found: |
btr [edx], eax |
sub edx, thr_slot_map |
lea eax, [eax+edx*8] |
popfd |
ret |
align 4 |
proc create_app_space stdcall, app_size:dword,img_base:dword,img_size:dword |
proc create_process stdcall, app_size:dword,img_base:dword,img_size:dword |
locals |
app_pages dd ? |
img_pages dd ? |
dir_addr dd ? |
process dd ? |
app_tabs dd ? |
endl |
push ebx |
push esi |
push edi |
mov ecx, pg_data.mutex |
call mutex_lock |
xor eax, eax |
mov [dir_addr], eax |
mov [process], eax |
mov eax, [app_size] |
add eax, 4095 |
454,39 → 461,59 |
shr ecx, 12 |
mov [img_pages], ecx |
if GREEDY_KERNEL |
lea eax, [ecx+ebx+2];only image size |
else |
lea eax, [eax+ebx+2];all requested memory |
end if |
cmp eax, [pg_data.pages_free] |
ja .fail |
call alloc_page |
stdcall kernel_alloc, 0x2000 |
test eax, eax |
jz .fail |
mov [dir_addr], eax |
stdcall map_page, [tmp_task_pdir], eax, dword PG_SW |
mov [process], eax |
mov edi, [tmp_task_pdir] |
mov ecx, (OS_BASE shr 20)/4 |
lea edi, [eax+PROC.heap_lock] |
mov ecx, (PROC.ht_next-PROC.heap_lock)/4 |
list_init eax |
add eax, PROC.thr_list |
list_init eax |
xor eax, eax |
cld |
rep stosd |
mov ecx, (PROC.pdt_0 - PROC.htab)/4 |
@@: |
stosd |
inc eax |
cmp eax, ecx |
jbe @B |
mov [edi-4096+PROC.ht_next], 1 ;reserve handle 0 |
mov eax, edi |
call get_pg_addr |
mov [edi-4096+PROC.pdt_0_phys], eax |
mov ecx, (OS_BASE shr 20)/4 |
mov esi, sys_pgdir+(OS_BASE shr 20) |
xor eax, eax |
rep stosd |
mov ecx, (OS_BASE shr 20)/4 |
mov esi, sys_proc+PROC.pdt_0+(OS_BASE shr 20) |
rep movsd |
mov eax, [dir_addr] |
mov eax, [edi-8192+PROC.pdt_0_phys] |
or eax, PG_SW |
mov [edi-4096+(page_tabs shr 20)], eax |
and eax, -4096 |
lea eax, [edi-8192] |
call set_cr3 |
mov edx, [app_tabs] |
mov edi, new_app_base |
mov ecx, [app_tabs] |
test ecx, ecx |
jz .done |
xor edi, edi |
@@: |
call alloc_page |
test eax, eax |
494,12 → 521,9 |
stdcall map_page_table, edi, eax |
add edi, 0x00400000 |
dec edx |
jnz @B |
loop @B |
mov edi, new_app_base |
shr edi, 10 |
add edi, page_tabs |
mov edi, page_tabs |
mov ecx, [app_tabs] |
shl ecx, 10 |
506,15 → 530,17 |
xor eax, eax |
rep stosd |
xor edx, edx |
mov ecx, [img_pages] |
jcxz .bss |
sub [app_pages], ecx |
mov ebx, PG_UW |
mov edx, new_app_base |
mov esi, [img_base] |
mov edi, new_app_base |
shr esi, 10 |
shr edi, 10 |
add esi, page_tabs |
add edi, page_tabs |
mov edi, page_tabs |
.remap: |
lodsd |
and eax, 0xFFFFF000 |
521,20 → 547,13 |
or eax, ebx; force user level r/w access |
stosd |
add edx, 0x1000 |
dec [app_pages] |
dec ecx |
jnz .remap |
mov ecx, [app_pages] |
test ecx, ecx |
loop .remap |
.bss: |
mov ebx, [app_pages] |
test ebx, ebx |
jz .done |
if GREEDY_KERNEL |
mov eax, 0x02 |
rep stosd |
else |
.alloc: |
.map_bss: |
call alloc_page |
test eax, eax |
jz .fail |
541,34 → 560,42 |
stdcall map_page, edx, eax, dword PG_UW |
add edx, 0x1000 |
dec [app_pages] |
jnz .alloc |
end if |
dec ebx |
jnz .map_bss |
.done: |
stdcall map_page, [tmp_task_pdir], dword 0, dword PG_UNMAP |
mov ecx, pg_data.mutex |
call mutex_unlock |
mov eax, [dir_addr] |
mov eax, [process] |
pop edi |
pop esi |
pop ebx |
ret |
.fail: |
mov ecx, pg_data.mutex |
call mutex_unlock |
cmp [dir_addr], 0 |
cmp [process], 0 |
je @f |
stdcall destroy_app_space, [dir_addr], 0 |
;; stdcall destroy_app_space, [dir_addr], 0 |
@@: |
xor eax, eax |
pop edi |
pop esi |
pop ebx |
ret |
endp |
align 4 |
set_cr3: |
pushfd |
cli |
mov ebx, [current_slot] |
mov [ebx+APPDATA.dir_table], eax |
mov [current_process], eax |
mov [ebx+APPDATA.process], eax |
mov eax, [eax+PROC.pdt_0_phys] |
mov cr3, eax |
popfd |
ret |
align 4 |
582,6 → 609,8 |
mov eax, [esi] |
test eax, 1 |
jz .next |
test eax, 2 |
jz .next |
test eax, 1 shl 9 |
jnz .next ;skip shared pages |
call free_page |
594,46 → 623,25 |
endp |
align 4 |
proc destroy_app_space stdcall, pg_dir:dword, dlls_list:dword |
destroy_process: ;fastcall ecx= ptr to process |
xor edx, edx |
push edx |
mov eax, 0x1 |
mov ebx, [pg_dir] |
.loop: |
;eax = current slot of process |
mov ecx, eax |
shl ecx, 5 |
cmp byte [CURRENT_TASK+ecx+0xa], 9;if process running? |
jz @f ;skip empty slots |
shl ecx, 3 |
add ecx, SLOT_BASE |
cmp [ecx+APPDATA.dir_table], ebx;compare page directory addresses |
jnz @f |
mov [ebp-4], ecx |
inc edx ;thread found |
@@: |
inc eax |
cmp eax, [TASK_COUNT] ;exit loop if we look through all processes |
jle .loop |
lea eax, [ecx+PROC.thr_list] |
cmp eax, [eax+LHEAD.next] |
jne .exit |
;edx = number of threads |
;our process is zombi so it isn't counted |
pop ecx |
cmp edx, 1 |
jg .ret |
;if there isn't threads then clear memory. |
mov esi, [dlls_list] |
call destroy_all_hdlls;ecx=APPDATA |
align 4 |
.internal: |
push ecx |
mov ecx, pg_data.mutex |
call mutex_lock |
mov esi, [ecx+PROC.dlls_list_ptr] |
call destroy_all_hdlls |
mov eax, [pg_dir] |
and eax, not 0xFFF |
stdcall map_page, [tmp_task_pdir], eax, PG_SW |
mov esi, [tmp_task_pdir] |
mov edi, (OS_BASE shr 20)/4 |
; mov ecx, pg_data.mutex |
; call mutex_lock |
mov esi, [esp] |
add esi, PROC.pdt_0 |
mov edi, (0x80000000 shr 20)/4 |
.destroy: |
mov eax, [esi] |
test eax, 1 |
648,16 → 656,13 |
dec edi |
jnz .destroy |
mov eax, [pg_dir] |
call free_page |
call kernel_free ;ecx still in stack |
stdcall map_page, [tmp_task_ptab], 0, PG_UNMAP |
; mov ecx, pg_data.mutex |
; call mutex_unlock |
.exit: |
stdcall map_page, [tmp_task_ptab], 0, PG_UNMAP |
stdcall map_page, [tmp_task_pdir], 0, PG_UNMAP |
mov ecx, pg_data.mutex |
call mutex_unlock |
.ret: |
ret |
endp |
align 4 |
get_pid: |
708,6 → 713,10 |
;result: |
; eax = 1 region lays in app memory |
; eax = 0 region don't lays in app memory |
mov eax, 1 |
ret |
if 0 |
mov eax, [CURRENT_TASK] |
; jmp check_process_region |
;----------------------------------------------------------------------------- |
732,57 → 741,13 |
mov eax, 1 |
ret |
; call MEM_Get_Linear_Address |
; push ebx |
; push ecx |
; push edx |
; mov edx,ebx |
; and edx,not (4096-1) |
; sub ebx,edx |
; add ecx,ebx |
; mov ebx,edx |
; add ecx,(4096-1) |
; and ecx,not (4096-1) |
;.loop: |
;;eax - linear address of page directory |
;;ebx - current page |
;;ecx - current size |
; mov edx,ebx |
; shr edx,22 |
; mov edx,[eax+4*edx] |
; and edx,not (4096-1) |
; test edx,edx |
; jz .failed1 |
; push eax |
; mov eax,edx |
; call MEM_Get_Linear_Address |
; mov edx,ebx |
; shr edx,12 |
; and edx,(1024-1) |
; mov eax,[eax+4*edx] |
; and eax,not (4096-1) |
; test eax,eax |
; pop eax |
; jz .failed1 |
; add ebx,4096 |
; sub ecx,4096 |
; jg .loop |
; pop edx |
; pop ecx |
; pop ebx |
.ok: |
mov eax, 1 |
ret |
; |
;.failed1: |
; pop edx |
; pop ecx |
; pop ebx |
.failed: |
xor eax, eax |
ret |
end if |
align 4 |
proc read_process_memory |
954,7 → 919,7 |
call lock_application_table |
call get_new_process_place |
call alloc_thread_slot |
test eax, eax |
jz .failed |
976,21 → 941,13 |
mov ecx, 11 |
rep movsb ;copy process name |
mov eax, [ebx+APPDATA.heap_base] |
mov [edx+APPDATA.heap_base], eax |
mov eax, [ebx+APPDATA.process] |
mov [edx+APPDATA.process], eax |
mov ecx, [ebx+APPDATA.heap_top] |
mov [edx+APPDATA.heap_top], ecx |
lea ebx, [edx+APPDATA.list] |
lea ecx, [eax+PROC.thr_list] |
list_add_tail ebx, ecx ;add thread to process child's list |
mov eax, [ebx+APPDATA.mem_size] |
mov [edx+APPDATA.mem_size], eax |
mov ecx, [ebx+APPDATA.dir_table] |
mov [edx+APPDATA.dir_table], ecx;copy page directory |
mov eax, [ebx+APPDATA.dlls_list_ptr] |
mov [edx+APPDATA.dlls_list_ptr], eax |
mov eax, [ebx+APPDATA.tls_base] |
test eax, eax |
jz @F |
1118,8 → 1075,8 |
add eax, 256 |
jc @f |
cmp eax, [SLOT_BASE+APPDATA.mem_size+ebx*8] |
ja @f |
; cmp eax, [SLOT_BASE+APPDATA.mem_size+ebx*8] |
; ja @f |
mov eax, [cmd_line] |
1158,8 → 1115,8 |
mov eax, edx |
add eax, 1024 |
jc @f |
cmp eax, [SLOT_BASE+APPDATA.mem_size+ebx*8] |
ja @f |
; cmp eax, [SLOT_BASE+APPDATA.mem_size+ebx*8] |
; ja @f |
stdcall strncpy, edx, [app_path], 1024 |
@@: |
mov ebx, [slot] |
1188,9 → 1145,9 |
xor eax, eax |
mov [ecx+0], dword eax |
mov [ecx+4], dword eax |
mov eax, [_display.width] |
mov eax, [Screen_Max_X] |
mov [ecx+8], eax |
mov eax, [_display.height] |
mov eax, [Screen_Max_Y] |
mov [ecx+12], eax |
mov ebx, [pl0_stack] |
/kernel/branches/Kolibri-acpi/core/v86.inc |
---|
14,9 → 14,7 |
struct V86_machine |
; page directory |
pagedir dd ? |
; translation table: V86 address -> flat linear address |
pages dd ? |
process dd ? |
; mutex to protect all data from writing by multiple threads at one time |
mutex dd ? |
; i/o permission map |
38,91 → 36,87 |
and dword [eax+V86_machine.mutex], 0 |
; allocate tables |
mov ebx, eax |
; We allocate 4 pages. |
; First is main page directory for V86 mode. |
; Second page: |
; first half (0x800 bytes) is page table for addresses 0 - 0x100000, |
; second half is for V86-to-linear translation. |
; Third and fourth are for I/O permission map. |
push 8000h ; blocks less than 8 pages are discontinuous |
stdcall create_process, 4096, eax, 4096 ;FIXME |
test eax, eax |
jz .fail2 |
mov [eax+PROC.mem_used], 4096 |
mov [ebx+V86_machine.process], eax |
push 2000h |
call kernel_alloc |
test eax, eax |
jz .fail2 |
mov [ebx+V86_machine.pagedir], eax |
push edi eax |
mov [ebx+V86_machine.iopm], eax |
; initialize tables |
push edi |
mov edi, eax |
add eax, 1800h |
mov [ebx+V86_machine.pages], eax |
; initialize tables |
mov eax, -1 |
mov ecx, 2000h/4 |
xor eax, eax |
rep stosd |
mov [ebx+V86_machine.iopm], edi |
dec eax |
mov ecx, 2000h/4 |
rep stosd |
pop eax |
; page directory: first entry is page table... |
mov edi, eax |
add eax, 1000h |
push eax |
call get_pg_addr |
or al, PG_UW |
stosd |
; ...and also copy system page tables |
; thx to Serge, system is located at high addresses |
add edi, (OS_BASE shr 20) - 4 |
push esi |
mov esi, (OS_BASE shr 20) + sys_pgdir |
mov ecx, 0x80000000 shr 22 |
rep movsd |
mov eax, [ebx+V86_machine.pagedir] ;root dir also is |
call get_pg_addr ;used as page table |
or al, PG_SW |
mov [edi-4096+(page_tabs shr 20)], eax |
mov eax, [ebx+V86_machine.process] |
mov eax, [eax+PROC.pdt_0_phys] |
pop esi |
pushfd |
cli |
mov cr3, eax |
; now V86 specific: initialize known addresses in first Mb |
pop eax |
; first page - BIOS data (shared between all machines!) |
; physical address = 0 |
; linear address = OS_BASE |
mov dword [eax], 111b |
mov dword [eax+800h], OS_BASE |
; page before 0xA0000 - Extended BIOS Data Area (shared between all machines!) |
; physical address = 0x9C000 |
; linear address = 0x8009C000 |
; (I have seen one computer with EBDA segment = 0x9D80, |
; all other computers use less memory) |
mov ecx, 4 |
mov edx, 0x9C000 |
push eax |
lea edi, [eax+0x9C*4] |
mov eax, PG_UW |
mov [page_tabs], eax |
invlpg [eax] |
mov byte [0x500], 0xCD |
mov byte [0x501], 0x13 |
mov byte [0x502], 0xF4 |
mov byte [0x503], 0xCD |
mov byte [0x504], 0x10 |
mov byte [0x505], 0xF4 |
mov eax, 0x99000+PG_UW |
mov edi, page_tabs+0x99*4 |
mov edx, 0x1000 |
mov ecx, 7 |
@@: |
lea eax, [edx + OS_BASE] |
mov [edi+800h], eax |
lea eax, [edx + 111b] |
stosd |
add edx, 0x1000 |
add eax, edx |
loop @b |
pop eax |
pop edi |
; addresses 0xC0000 - 0xFFFFF - BIOS code (shared between all machines!) |
; physical address = 0xC0000 |
; linear address = 0x800C0000 |
mov ecx, 0xC0 |
mov eax, 0xC0000+PG_UW |
mov edi, page_tabs+0xC0*4 |
mov ecx, 64 |
@@: |
mov edx, ecx |
shl edx, 12 |
push edx |
or edx, 111b |
mov [eax+ecx*4], edx |
pop edx |
add edx, OS_BASE |
mov [eax+ecx*4+0x800], edx |
inc cl |
jnz @b |
stosd |
add eax, edx |
loop @b |
mov eax, sys_proc |
push ebx |
call set_cr3 |
pop ebx |
popfd |
pop edi |
mov eax, ebx |
ret |
.fail2: |
132,15 → 126,16 |
xor eax, eax |
ret |
;not used |
; Destroy V86 machine |
; in: eax = handle |
; out: nothing |
; destroys: eax, ebx, ecx, edx (due to free) |
v86_destroy: |
push eax |
stdcall kernel_free, [eax+V86_machine.pagedir] |
pop eax |
jmp free |
;v86_destroy: |
; push eax |
; stdcall kernel_free, [eax+V86_machine.pagedir] |
; pop eax |
; jmp free |
; Translate V86-address to linear address |
; in: eax=V86 address |
150,13 → 145,15 |
v86_get_lin_addr: |
push ecx edx |
mov ecx, eax |
mov edx, [esi+V86_machine.pages] |
shr ecx, 12 |
mov edx, [page_tabs+ecx*4] |
and eax, 0xFFF |
add eax, [edx+ecx*4] ; atomic operation, no mutex needed |
and edx, 0xFFFFF000 |
or eax, edx |
pop edx ecx |
ret |
;not used |
; Sets linear address for V86-page |
; in: eax=linear address (must be page-aligned) |
; ecx=V86 page (NOT address!) |
163,15 → 160,15 |
; esi=handle |
; out: nothing |
; destroys: nothing |
v86_set_page: |
push eax ebx |
mov ebx, [esi+V86_machine.pagedir] |
mov [ebx+ecx*4+0x1800], eax |
call get_pg_addr |
or al, 111b |
mov [ebx+ecx*4+0x1000], eax |
pop ebx eax |
ret |
;v86_set_page: |
; push eax ebx |
; mov ebx, [esi+V86_machine.pagedir] |
; mov [ebx+ecx*4+0x1800], eax |
; call get_pg_addr |
; or al, 111b |
; mov [ebx+ecx*4+0x1000], eax |
; pop ebx eax |
; ret |
; Allocate memory in V86 machine |
; in: eax=size (in bytes) |
214,21 → 211,7 |
mov [sys_v86_machine], eax |
test eax, eax |
jz .ret |
mov byte [OS_BASE + 0x500], 0xCD |
mov byte [OS_BASE + 0x501], 0x13 |
mov byte [OS_BASE + 0x502], 0xF4 |
mov byte [OS_BASE + 0x503], 0xCD |
mov byte [OS_BASE + 0x504], 0x10 |
mov byte [OS_BASE + 0x505], 0xF4 |
mov esi, eax |
mov ebx, [eax+V86_machine.pagedir] |
; one page for stack, two pages for results (0x2000 bytes = 16 sectors) |
mov dword [ebx+0x99*4+0x1000], 0x99000 or 111b |
mov dword [ebx+0x99*4+0x1800], OS_BASE + 0x99000 |
mov dword [ebx+0x9A*4+0x1000], 0x9A000 or 111b |
mov dword [ebx+0x9A*4+0x1800], OS_BASE + 0x9A000 |
mov dword [ebx+0x9B*4+0x1000], 0x9B000 or 111b |
mov dword [ebx+0x9B*4+0x1800], OS_BASE + 0x9B000 |
if ~DEBUG_SHOW_IO |
; allow access to all ports |
mov ecx, [esi+V86_machine.iopm] |
272,37 → 255,39 |
; eax = 3 - IRQ is already hooked by another VM |
; destroys: nothing |
v86_start: |
pushad |
cli |
mov ecx, [CURRENT_TASK] |
shl ecx, 8 |
add ecx, SLOT_BASE |
mov ecx, [current_slot] |
push dword [ecx+APPDATA.io_map] |
push dword [ecx+APPDATA.io_map+4] |
push [ecx+APPDATA.process] |
push [ecx+APPDATA.saved_esp0] |
mov [ecx+APPDATA.saved_esp0], esp |
mov [tss._esp0], esp |
mov eax, [esi+V86_machine.iopm] |
call get_pg_addr |
inc eax |
push dword [ecx+APPDATA.io_map] |
push dword [ecx+APPDATA.io_map+4] |
mov dword [ecx+APPDATA.io_map], eax |
mov dword [page_tabs + (tss._io_map_0 shr 10)], eax |
mov eax, [esi+V86_machine.iopm] |
add eax, 0x1000 |
call get_pg_addr |
inc eax |
mov dword [ecx+APPDATA.io_map+4], eax |
mov dword [page_tabs + (tss._io_map_1 shr 10)], eax |
push [ecx+APPDATA.dir_table] |
push [ecx+APPDATA.saved_esp0] |
mov [ecx+APPDATA.saved_esp0], esp |
mov [tss._esp0], esp |
mov eax, [esi+V86_machine.pagedir] |
call get_pg_addr |
mov [ecx+APPDATA.dir_table], eax |
mov eax, [esi+V86_machine.process] |
mov [ecx+APPDATA.process], eax |
mov [current_process], eax |
mov eax, [eax+PROC.pdt_0_phys] |
mov cr3, eax |
; mov [irq_tab+5*4], my05 |
; We do not enable interrupts, because V86 IRQ redirector assumes that |
; machine is running |
; They will be enabled by IRET. |
782,19 → 767,21 |
mov esp, esi |
cli |
mov ecx, [CURRENT_TASK] |
shl ecx, 8 |
mov ecx, [current_slot] |
pop eax |
mov [SLOT_BASE+ecx+APPDATA.saved_esp0], eax |
mov [ecx+APPDATA.saved_esp0], eax |
mov [tss._esp0], eax |
pop eax |
mov [SLOT_BASE+ecx+APPDATA.dir_table], eax |
mov [ecx+APPDATA.process], eax |
mov [current_process], eax |
pop ebx |
mov dword [SLOT_BASE+ecx+APPDATA.io_map+4], ebx |
mov dword [ecx+APPDATA.io_map+4], ebx |
mov dword [page_tabs + (tss._io_map_1 shr 10)], ebx |
pop ebx |
mov dword [SLOT_BASE+ecx+APPDATA.io_map], ebx |
mov dword [ecx+APPDATA.io_map], ebx |
mov dword [page_tabs + (tss._io_map_0 shr 10)], ebx |
mov eax, [eax+PROC.pdt_0_phys] |
mov cr3, eax |
sti |
843,11 → 830,10 |
pop eax |
v86_irq2: |
mov esi, [v86_irqhooks+edi*8] ; get VM handle |
mov eax, [esi+V86_machine.pagedir] |
call get_pg_addr |
mov eax, [esi+V86_machine.process] |
mov ecx, [CURRENT_TASK] |
shl ecx, 8 |
cmp [SLOT_BASE+ecx+APPDATA.dir_table], eax |
cmp [SLOT_BASE+ecx+APPDATA.process], eax |
jnz .notcurrent |
lea eax, [edi+8] |
cmp al, 10h |
860,7 → 846,7 |
mov ebx, SLOT_BASE + 0x100 |
mov ecx, [TASK_COUNT] |
.scan: |
cmp [ebx+APPDATA.dir_table], eax |
cmp [ebx+APPDATA.process], eax |
jnz .cont |
push ecx |
mov ecx, [ebx+APPDATA.saved_esp0] |
895,6 → 881,7 |
popad |
iretd |
.found: |
mov eax, [eax+PROC.pdt_0_phys] |
mov cr3, eax |
mov esi, [ebx+APPDATA.saved_esp0] |
sub word [esi-sizeof.v86_regs+v86_regs.esp], 6 |