Subversion Repositories Kolibri OS

Compare Revisions

Regard whitespace Rev 6527 → Rev 6521

/contrib/toolchain/gcc/5x/libgcc/Makefile
29,7 → 29,7
 
DECNUMINC = -Iconfig/libbid -DENABLE_DECIMAL_BID_FORMAT
 
INCLUDES = -I. -I../gcc -I../include -I$(SDK_DIR)/sources/newlib/libc/include $(DECNUMINC)
INCLUDES = -I. -I../gcc -I../include $(DECNUMINC)
 
gcc_compile = $(CC) $(INCLUDES) $(CFLAGS)
 
66,10 → 66,7
LIB2ADDEH = unwind-dw2.c unwind-dw2-fde.c unwind-sjlj.c unwind-c.c
LIB2ADDEHSTATIC = $(LIB2ADDEH)
 
LIB2ADD = config/i386/gthr-kos32.c \
config/i386/kos32-app.c \
config/i386/libc-loader.c
 
LIB2ADD = config/i386/gthr-kos32.c
EH_MODEL = dw2
CUSTOM_CRTSTUFF = yes
 
100,15 → 97,13
 
$(gcc_compile) -mfxsr -msse -c $<
 
LIB1ASMSRC = i386/start.S
LIB1ASMFUNCS = _chkstk _chkstk_ms _start
LIB1ASMSRC = i386/chkstk.S
LIB1ASMFUNCS = _chkstk _chkstk_ms
 
DFP_ENABLE = true
 
LIB2ADD += config/i386/cpuinfo.c
LIB2ADD += config/i386/sfp-exceptions.c
 
 
softfp_float_modes := tf
softfp_int_modes := si di ti
 
/contrib/toolchain/gcc/5x/libgcc/config/i386/libc-loader.c
File deleted
/contrib/toolchain/gcc/5x/libgcc/config/i386/kos32-app.c
File deleted
/contrib/toolchain/gcc/5x/libgcc/config/i386/pe.h
File deleted
/contrib/toolchain/gcc/5x/libgcc/config/i386/start.S
File deleted
/contrib/toolchain/gcc/5x/libgcc/config/i386/list.h
File deleted
/contrib/toolchain/gcc/5x/libgcc/config/i386/gthr-kos32.c
20,7 → 20,6
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
 
#include <kos32sys.h>
#include "gthr-kos32.h"
 
#define FUTEX_INIT 0
28,6 → 27,11
#define FUTEX_WAIT 2
#define FUTEX_WAKE 3
 
unsigned int tls_alloc(void);
int tls_free(unsigned int key);
void *tls_get(unsigned int key);
void *tls_set(unsigned int key, void *val);
 
#define exchange_acquire(ptr, new) \
__atomic_exchange_4((ptr), (new), __ATOMIC_ACQUIRE)
 
34,6 → 38,16
#define exchange_release(ptr, new) \
__atomic_exchange_4((ptr), (new), __ATOMIC_RELEASE)
 
 
static inline void yield(void)
{
__asm__ __volatile__(
"int $0x40"
::"a"(68), "b"(1));
};
 
 
 
int __gthr_kos32_once (__gthread_once_t *once, void (*func) (void))
{
if (once == NULL || func == NULL)
81,12 → 95,15
 
/* Fetch the calling thread's value for thread-specific-storage key KEY.
   Thin wrapper over the platform tls_get() helper (declared earlier in
   this file); returns whatever tls_get() yields for an unset key —
   presumably NULL, TODO confirm against the kos32 TLS implementation.
   NOTE(review): the revision-diff rendering had left an unreachable
   duplicate body after the first `return`; collapsed to one body.  */
void* __gthr_kos32_getspecific (__gthread_key_t key)
{
return tls_get(key);
}
 
/* Store PTR as the calling thread's value for key KEY.
   The gthread contract expects 0 on success; tls_set() returns a
   `void *` (the previous value), so its result must not be returned
   directly from this `int` function — that was the defect in the
   interleaved diff (`return tls_set(...)` is a constraint violation
   and the following statements were unreachable).  CONST_CAST2 strips
   the const qualifier that the gthread API adds to PTR.  */
int __gthr_kos32_setspecific (__gthread_key_t key, const void *ptr)
{
tls_set(key, CONST_CAST2(void *, const void *, ptr));
return 0;
}
 
void __gthr_kos32_mutex_init_function (__gthread_mutex_t *mutex)
134,7 → 151,7
{
int zero = 0;
 
return !__atomic_compare_exchange_4(&mutex->lock, &zero, 1,0,__ATOMIC_ACQUIRE,__ATOMIC_RELAXED);
return __atomic_compare_exchange_4(&mutex->lock, &zero, 1,0,__ATOMIC_ACQUIRE,__ATOMIC_RELAXED);
}
 
int __gthr_kos32_mutex_unlock (__gthread_mutex_t *mutex)
156,57 → 173,47
 
/* Initialise a recursive mutex backed by a KolibriOS futex object.
   Creates a kernel futex bound to MUTEX and records its handle, then
   clears the recursion bookkeeping (depth counter and owning thread).
   The commented-out counter/sema lines are leftovers from the win32
   gthread implementation this file was ported from.  */
void __gthr_kos32_recursive_mutex_init_function (__gthread_recursive_mutex_t *mutex)
{
int handle;

mutex->lock = 0;

/* NOTE(review): int $0x40 with eax=77 looks like the KolibriOS futex
   syscall; ebx=FUTEX_INIT, ecx=mutex — the kernel returns a futex
   handle in eax.  Confirm against the KolibriOS syscall reference.  */
__asm__ volatile(
"int $0x40\t"
:"=a"(handle)
:"a"(77),"b"(FUTEX_INIT),"c"(mutex));
mutex->handle = handle;

// mutex->counter = -1;
mutex->depth = 0;   /* recursion depth: 0 == unlocked */
mutex->owner = 0;   /* id of owning thread; 0 == none */
// mutex->sema = CreateSemaphoreW (NULL, 0, 65535, NULL);
}
 
int __gthr_kos32_recursive_mutex_lock (__gthread_recursive_mutex_t *mutex)
#if 0
int
__gthr_win32_recursive_mutex_lock (__gthread_recursive_mutex_t *mutex)
{
int tmp;
 
unsigned long me = (unsigned long)tls_get(TLS_KEY_LOW_STACK);
 
if( __sync_fetch_and_add(&mutex->lock, 1) == 0)
DWORD me = GetCurrentThreadId();
if (InterlockedIncrement (&mutex->counter) == 0)
{
mutex->depth = 1;
mutex->owner = me;
return 0;
}
else if (mutex->owner == me)
{
__sync_fetch_and_sub(&mutex->lock, 1);
InterlockedDecrement (&mutex->counter);
++(mutex->depth);
}
else while (exchange_acquire (&mutex->lock, 2) != 0)
else if (WaitForSingleObject (mutex->sema, INFINITE) == WAIT_OBJECT_0)
{
__asm__ volatile(
"int $0x40\t\n"
:"=a"(tmp)
:"a"(77),"b"(FUTEX_WAIT),
"c"(mutex->handle),"d"(2),"S"(0));
mutex->depth = 1;
mutex->owner = me;
};
 
}
else
{
/* WaitForSingleObject returns WAIT_FAILED, and we can only do
some best-effort cleanup here. */
InterlockedDecrement (&mutex->counter);
return 1;
}
return 0;
}
 
int __gthr_kos32_recursive_mutex_trylock (__gthread_recursive_mutex_t *mutex)
int
__gthr_win32_recursive_mutex_trylock (__gthread_recursive_mutex_t *mutex)
{
unsigned long me = (unsigned long)tls_get(TLS_KEY_LOW_STACK);
int zero = 0;
 
if(__atomic_compare_exchange_4(&mutex->lock, &zero, 1,0,__ATOMIC_ACQUIRE,__ATOMIC_RELAXED))
DWORD me = GetCurrentThreadId();
if (__GTHR_W32_InterlockedCompareExchange (&mutex->counter, 0, -1) < 0)
{
mutex->depth = 1;
mutex->owner = me;
219,39 → 226,26
return 0;
}
 
int __gthr_kos32_recursive_mutex_unlock (__gthread_recursive_mutex_t *mutex)
int
__gthr_win32_recursive_mutex_unlock (__gthread_recursive_mutex_t *mutex)
{
--(mutex->depth);
 
if (mutex->depth == 0)
{
int prev;
mutex->owner = 0;
 
prev = exchange_release (&mutex->lock, 0);
if (InterlockedDecrement (&mutex->counter) >= 0)
return ReleaseSemaphore (mutex->sema, 1, NULL) ? 0 : 1;
}
 
if (prev != 1)
{
__asm__ volatile(
"int $0x40\t"
:"=a"(prev)
:"a"(77),"b"(FUTEX_WAKE),
"c"(mutex->handle),"d"(1));
};
mutex->owner = 0;
};
 
return 0;
}
 
int __gthr_kos32_recursive_mutex_destroy (__gthread_recursive_mutex_t *mutex)
int
__gthr_win32_recursive_mutex_destroy (__gthread_recursive_mutex_t *mutex)
{
int retval;
 
__asm__ volatile(
"int $0x40\t"
:"=a"(retval)
:"a"(77),"b"(FUTEX_DESTROY),"c"(mutex->handle));
 
CloseHandle ((HANDLE) mutex->sema);
return 0;
}
 
#endif
/contrib/toolchain/gcc/5x/libgcc/config/i386/chkstk.S
0,0 → 1,60
 
.section .text
 
#ifdef L_chkstk
/* ___chkstk / __alloca: allocate %eax bytes on the stack.
   On entry: %eax = byte count; return address on top of stack.
   On exit:  %esp lowered by %eax (past the return address slot),
             %eax = old %esp, %ecx preserved.
   Traps with int3 if the new stack pointer would fall below the
   low stack limit kept at %fs:8 (NOTE(review): presumably the
   KolibriOS per-thread TLS stack-limit slot — confirm).  */
.global ___chkstk
.global __alloca

___chkstk:
__alloca:
pushl %ecx /* save temp */
leal 8(%esp), %ecx /* point past return addr */
subl %eax, %ecx /* %ecx = prospective new stack pointer */
cmpl %fs:8, %ecx # check low stack limit
jb 1f /* below the limit -> overflow trap */

movl %esp, %eax /* save old stack pointer */
movl %ecx, %esp /* decrement stack */
movl (%eax), %ecx /* recover saved temp */
movl 4(%eax), %eax /* recover return address */

/* Push the return value back. Doing this instead of just
jumping to %eax preserves the cached call-return stack
used by most modern processors. */
pushl %eax
ret
1:
int3 #trap to debugger
.ascii "Stack overflow" /* marker bytes after the trap for the debugger */
#endif
 
#ifdef L_chkstk_ms
/* ___chkstk_ms: MS-style stack probe.  Unlike ___chkstk it does NOT
   move %esp; it only touches ("probes") each 4 KiB page between the
   current stack pointer and %esp - %eax so the guard page is hit in
   order, leaving the actual allocation to the caller.
   On entry: %eax = byte count.  All registers preserved.
   Traps with int3 if a probe would go below the low stack limit at
   %fs:8 (NOTE(review): presumably the KolibriOS TLS stack-limit
   slot — confirm).  */
.global ___chkstk_ms

___chkstk_ms:
pushl %ecx /* save temp */
pushl %eax /* save requested size (restored on exit) */
cmpl $0x1000, %eax /* > 4k ?*/
leal 12(%esp), %ecx /* point past return addr */
jb 2f /* small request: single probe suffices */
1:
subl $0x1000, %ecx /* yes, move pointer down 4k*/
cmpl %fs:8, %ecx /* check low stack limit */
jb 3f /* crossed the limit -> overflow trap */

orl $0x0, (%ecx) /* probe there */
subl $0x1000, %eax /* decrement count */
cmpl $0x1000, %eax
ja 1b /* and do it again */

2:
subl %eax, %ecx /* final partial page */
orl $0x0, (%ecx) /* less than 4k, just peek here */

popl %eax
popl %ecx
ret
3:
int3 #trap to debugger
.ascii "Stack overflow" /* marker bytes after the trap for the debugger */
#endif