Subversion Repositories Kolibri OS

Rev

Details | Last modification | View Log | RSS feed

Rev Author Line No. Line
6429 siemargl 1
/* TCC runtime library.
2
   Parts of this code are (c) 2002 Fabrice Bellard
3
 
4
   Copyright (C) 1987, 1988, 1992, 1994, 1995 Free Software Foundation, Inc.
5
 
6
This file is free software; you can redistribute it and/or modify it
7
under the terms of the GNU General Public License as published by the
8
Free Software Foundation; either version 2, or (at your option) any
9
later version.
10
 
11
In addition to the permissions in the GNU General Public License, the
12
Free Software Foundation gives you unlimited permission to link the
13
compiled version of this file into combinations with other programs,
14
and to distribute those combinations without any restriction coming
15
from the use of this file.  (The General Public License restrictions
16
do apply in other respects; for example, they cover modification of
17
the file, and distribution when not linked into a combine
18
executable.)
19
 
20
This file is distributed in the hope that it will be useful, but
21
WITHOUT ANY WARRANTY; without even the implied warranty of
22
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
23
General Public License for more details.
24
 
25
You should have received a copy of the GNU General Public License
26
along with this program; see the file COPYING.  If not, write to
27
the Free Software Foundation, 59 Temple Place - Suite 330,
28
Boston, MA 02111-1307, USA.
29
*/
30
 
31
//#include  /* NOTE(review): include target lost in extraction -- confirm against upstream before re-enabling */

/* Word geometry for the 32-bit i386 target. */
#define W_TYPE_SIZE   32
#define BITS_PER_UNIT 8

typedef int Wtype;                  /* signed machine word */
typedef unsigned int UWtype;        /* unsigned machine word */
typedef unsigned int USItype;       /* 32-bit unsigned, used by the asm macros below */
typedef long long DWtype;           /* signed double word */
typedef unsigned long long UDWtype; /* unsigned double word */

/* Low word first: little-endian word order (i386). */
struct DWstruct {
    Wtype low, high;
};

/* View a 64-bit value either whole (ll) or as two 32-bit words (s). */
typedef union
{
  struct DWstruct s;
  DWtype ll;
} DWunion;

typedef long double XFtype;   /* x86 80-bit extended precision */
#define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
#define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)

/* the following deal with IEEE single-precision numbers */
#define EXCESS		126                     /* exponent bias minus 1 (mantissa treated as integer) */
#define SIGNBIT		0x80000000
#define HIDDEN		(1 << 23)               /* implicit leading mantissa bit */
#define SIGN(fp)	((fp) & SIGNBIT)
#define EXP(fp)		(((fp) >> 23) & 0xFF)   /* biased 8-bit exponent */
#define MANT(fp)	(((fp) & 0x7FFFFF) | HIDDEN) /* 24-bit mantissa, hidden bit restored */
#define PACK(s,e,m)	((s) | ((e) << 23) | (m))

/* the following deal with IEEE double-precision numbers */
#define EXCESSD		1022
#define HIDDEND		(1 << 20)
#define EXPD(fp)	(((fp.l.upper) >> 20) & 0x7FF)
#define SIGND(fp)	((fp.l.upper) & SIGNBIT)
#define MANTD(fp)	(((((fp.l.upper) & 0xFFFFF) | HIDDEND) << 10) | \
				(fp.l.lower >> 22))
#define HIDDEND_LL	((long long)1 << 52)
/* Full 53-bit mantissa (hidden bit restored) taken from the 64-bit image. */
#define MANTD_LL(fp)	((fp.ll & (HIDDEND_LL-1)) | HIDDEND_LL)
#define PACKD_LL(s,e,m)	(((long long)((s)+((e)<<20))<<32)|(m))

/* the following deal with x86 long double-precision numbers */
#define EXCESSLD	16382
#define EXPLD(fp)	(fp.l.upper & 0x7fff)   /* biased 15-bit exponent */
#define SIGNLD(fp)	((fp.l.upper) & 0x8000)
80
 
81
/* only for x86 */
/* x86 80-bit long double viewed as raw fields: 64-bit explicit
   mantissa (lower) followed by sign + 15-bit biased exponent (upper).
   Layout assumes little-endian x86 storage. */
union ldouble_long {
    long double ld;
    struct {
        unsigned long long lower;  /* full 64-bit mantissa */
        unsigned short upper;      /* sign bit | biased exponent */
    } l;
};
89
 
90
/* IEEE double viewed as raw words.  The #if chooses the word order;
   the active branch is the little-endian layout (low word first),
   the disabled one the big-endian alternative. */
union double_long {
    double d;
#if 1
    struct {
        unsigned int lower;   /* low 32 bits of the mantissa */
        int upper;            /* sign | exponent | mantissa top bits */
    } l;
#else
    struct {
        int upper;
        unsigned int lower;
    } l;
#endif
    long long ll;             /* whole 64-bit bit image */
};
105
 
106
/* IEEE single-precision float viewed as its 32-bit bit image. */
union float_long {
    float f;
    unsigned int l;   /* raw bits: sign | exponent | mantissa */
};
110
 
111
/* XXX: we don't support several builtin supports for now */
#if !defined(TCC_TARGET_X86_64) && !defined(TCC_TARGET_ARM)

/* XXX: use gcc/tcc intrinsic ? */
#if defined(TCC_TARGET_I386)
/* sub_ddmmss(sh, sl, ah, al, bh, bl): double-word subtraction with
   borrow, sh:sl = ah:al - bh:bl (subl then sbbl). */
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subl %5,%1\n\tsbbl %3,%0"					\
	   : "=r" ((USItype) (sh)),					\
	     "=&r" ((USItype) (sl))					\
	   : "0" ((USItype) (ah)),					\
	     "g" ((USItype) (bh)),					\
	     "1" ((USItype) (al)),					\
	     "g" ((USItype) (bl)))
/* umul_ppmm(w1, w0, u, v): 32x32 -> 64 unsigned multiply; mull leaves
   the low word in eax (w0) and the high word in edx (w1). */
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("mull %3"							\
	   : "=a" ((USItype) (w0)),					\
	     "=d" ((USItype) (w1))					\
	   : "%0" ((USItype) (u)),					\
	     "rm" ((USItype) (v)))
/* udiv_qrnnd(q, r, n1, n0, dv): divide the double word n1:n0 by dv;
   divl puts the quotient in eax (q) and the remainder in edx (r).
   Caller must ensure the quotient fits in 32 bits. */
#define udiv_qrnnd(q, r, n1, n0, dv) \
  __asm__ ("divl %4"							\
	   : "=a" ((USItype) (q)),					\
	     "=d" ((USItype) (r))					\
	   : "0" ((USItype) (n0)),					\
	     "1" ((USItype) (n1)),					\
	     "rm" ((USItype) (dv)))
/* count_leading_zeros(count, x): leading zero bits of x; bsrl finds
   the highest set bit and xor 31 converts that index to a zero count.
   x must be non-zero (bsrl result is undefined for 0). */
#define count_leading_zeros(count, x) \
  do {									\
    USItype __cbtmp;							\
    __asm__ ("bsrl %1,%0"						\
	     : "=r" (__cbtmp) : "rm" ((USItype) (x)));			\
    (count) = __cbtmp ^ 31;						\
  } while (0)
#else
#error unsupported CPU type
#endif
147
 
148
/* most of this code is taken from libgcc2.c from gcc */

/* 64-bit unsigned division with remainder, built from the 32-bit
   primitives above (udiv_qrnnd, umul_ppmm, sub_ddmmss,
   count_leading_zeros).  Returns n / d; if rp != NULL the remainder
   is stored through it.  Word halves: n = n1:n0, d = d1:d0 (high:low).
   Division by zero is raised deliberately via the 1/d0 trick.
   Note the unusual shape: the `else' after the #endif pairs with
   whichever `if (d1 == 0)' branch the preprocessor kept. */
static UDWtype __udivmoddi4 (UDWtype n, UDWtype d, UDWtype *rp)
{
  DWunion ww;            /* assembled quotient */
  DWunion nn, dd;        /* word-split views of n and d */
  DWunion rr;            /* assembled remainder */
  UWtype d0, d1, n0, n1, n2;
  UWtype q0, q1;
  UWtype b, bm;          /* normalization shift counts */

  nn.ll = n;
  dd.ll = d;

  d0 = dd.s.low;
  d1 = dd.s.high;
  n0 = nn.s.low;
  n1 = nn.s.high;

#if !defined(UDIV_NEEDS_NORMALIZATION)
  /* Single-word divisor, udiv_qrnnd accepts it unnormalized. */
  if (d1 == 0)
    {
      if (d0 > n1)
	{
	  /* 0q = nn / 0D */

	  udiv_qrnnd (q0, n0, n1, n0, d0);
	  q1 = 0;

	  /* Remainder in n0.  */
	}
      else
	{
	  /* qq = NN / 0d */

	  if (d0 == 0)
	    d0 = 1 / d0;	/* Divide intentionally by zero.  */

	  udiv_qrnnd (q1, n1, 0, n1, d0);
	  udiv_qrnnd (q0, n0, n1, n0, d0);

	  /* Remainder in n0.  */
	}

      if (rp != 0)
	{
	  rr.s.low = n0;
	  rr.s.high = 0;
	  *rp = rr.ll;
	}
    }

#else /* UDIV_NEEDS_NORMALIZATION */

  /* Single-word divisor; shift divisor and dividend left until the
     divisor's top bit is set, as udiv_qrnnd then requires. */
  if (d1 == 0)
    {
      if (d0 > n1)
	{
	  /* 0q = nn / 0D */

	  count_leading_zeros (bm, d0);

	  if (bm != 0)
	    {
	      /* Normalize, i.e. make the most significant bit of the
		 denominator set.  */

	      d0 = d0 << bm;
	      n1 = (n1 << bm) | (n0 >> (W_TYPE_SIZE - bm));
	      n0 = n0 << bm;
	    }

	  udiv_qrnnd (q0, n0, n1, n0, d0);
	  q1 = 0;

	  /* Remainder in n0 >> bm.  */
	}
      else
	{
	  /* qq = NN / 0d */

	  if (d0 == 0)
	    d0 = 1 / d0;	/* Divide intentionally by zero.  */

	  count_leading_zeros (bm, d0);

	  if (bm == 0)
	    {
	      /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
		 conclude (the most significant bit of n1 is set) /\ (the
		 leading quotient digit q1 = 1).

		 This special case is necessary, not an optimization.
		 (Shifts counts of W_TYPE_SIZE are undefined.)  */

	      n1 -= d0;
	      q1 = 1;
	    }
	  else
	    {
	      /* Normalize.  */

	      b = W_TYPE_SIZE - bm;

	      d0 = d0 << bm;
	      n2 = n1 >> b;
	      n1 = (n1 << bm) | (n0 >> b);
	      n0 = n0 << bm;

	      udiv_qrnnd (q1, n1, n2, n1, d0);
	    }

	  /* n1 != d0...  */

	  udiv_qrnnd (q0, n0, n1, n0, d0);

	  /* Remainder in n0 >> bm.  */
	}

      if (rp != 0)
	{
	  rr.s.low = n0 >> bm;
	  rr.s.high = 0;
	  *rp = rr.ll;
	}
    }
#endif /* UDIV_NEEDS_NORMALIZATION */

  else
    {
      /* Two-word divisor. */
      if (d1 > n1)
	{
	  /* 00 = nn / DD */

	  q0 = 0;
	  q1 = 0;

	  /* Remainder in n1n0.  */
	  if (rp != 0)
	    {
	      rr.s.low = n0;
	      rr.s.high = n1;
	      *rp = rr.ll;
	    }
	}
      else
	{
	  /* 0q = NN / dd */

	  count_leading_zeros (bm, d1);
	  if (bm == 0)
	    {
	      /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
		 conclude (the most significant bit of n1 is set) /\ (the
		 quotient digit q0 = 0 or 1).

		 This special case is necessary, not an optimization.  */

	      /* The condition on the next line takes advantage of that
		 n1 >= d1 (true due to program flow).  */
	      if (n1 > d1 || n0 >= d0)
		{
		  q0 = 1;
		  sub_ddmmss (n1, n0, n1, n0, d1, d0);
		}
	      else
		q0 = 0;

	      q1 = 0;

	      if (rp != 0)
		{
		  rr.s.low = n0;
		  rr.s.high = n1;
		  *rp = rr.ll;
		}
	    }
	  else
	    {
	      UWtype m1, m0;
	      /* Normalize.  */

	      b = W_TYPE_SIZE - bm;

	      d1 = (d1 << bm) | (d0 >> b);
	      d0 = d0 << bm;
	      n2 = n1 >> b;
	      n1 = (n1 << bm) | (n0 >> b);
	      n0 = n0 << bm;

	      /* Estimate q0 from the top words, then correct it if the
		 product q0*d0 shows the estimate was one too large. */
	      udiv_qrnnd (q0, n1, n2, n1, d1);
	      umul_ppmm (m1, m0, q0, d0);

	      if (m1 > n1 || (m1 == n1 && m0 > n0))
		{
		  q0--;
		  sub_ddmmss (m1, m0, m1, m0, d1, d0);
		}

	      q1 = 0;

	      /* Remainder in (n1n0 - m1m0) >> bm.  */
	      if (rp != 0)
		{
		  sub_ddmmss (n1, n0, n1, n0, m1, m0);
		  rr.s.low = (n1 << b) | (n0 >> bm);
		  rr.s.high = n1 >> bm;
		  *rp = rr.ll;
		}
	    }
	}
    }

  ww.s.low = q0;
  ww.s.high = q1;
  return ww.ll;
}
365
 
366
#define __negdi2(a) (-(a))
367
 
368
long long __divdi3(long long u, long long v)
369
{
370
    int c = 0;
371
    DWunion uu, vv;
372
    DWtype w;
373
 
374
    uu.ll = u;
375
    vv.ll = v;
376
 
377
    if (uu.s.high < 0) {
378
        c = ~c;
379
        uu.ll = __negdi2 (uu.ll);
380
    }
381
    if (vv.s.high < 0) {
382
        c = ~c;
383
        vv.ll = __negdi2 (vv.ll);
384
    }
385
    w = __udivmoddi4 (uu.ll, vv.ll, (UDWtype *) 0);
386
    if (c)
387
        w = __negdi2 (w);
388
    return w;
389
}
390
 
391
long long __moddi3(long long u, long long v)
392
{
393
    int c = 0;
394
    DWunion uu, vv;
395
    DWtype w;
396
 
397
    uu.ll = u;
398
    vv.ll = v;
399
 
400
    if (uu.s.high < 0) {
401
        c = ~c;
402
        uu.ll = __negdi2 (uu.ll);
403
    }
404
    if (vv.s.high < 0)
405
        vv.ll = __negdi2 (vv.ll);
406
 
407
    __udivmoddi4 (uu.ll, vv.ll, (UDWtype *) &w);
408
    if (c)
409
        w = __negdi2 (w);
410
    return w;
411
}
412
 
413
unsigned long long __udivdi3(unsigned long long u, unsigned long long v)
414
{
415
    return __udivmoddi4 (u, v, (UDWtype *) 0);
416
}
417
 
418
unsigned long long __umoddi3(unsigned long long u, unsigned long long v)
419
{
420
    UDWtype w;
421
 
422
    __udivmoddi4 (u, v, &w);
423
    return w;
424
}
425
 
426
/* XXX: fix tcc's code generator to do this instead */
/* Arithmetic (sign-extending) right shift of a 64-bit value, 0 <= b < 64. */
long long __ashrdi3(long long a, int b)
{
#ifdef __TINYC__
    DWunion x;

    x.ll = a;
    if (b >= 32) {
        /* Whole high word moves into the low word; the high word
           becomes pure sign fill. */
        x.s.low = x.s.high >> (b - 32);
        x.s.high = x.s.high >> 31;
    } else if (b != 0) {
        /* Low word receives bits from both halves. */
        x.s.low = ((unsigned)x.s.low >> b) | (x.s.high << (32 - b));
        x.s.high = x.s.high >> b;
    }
    return x.ll;
#else
    return a >> b;
#endif
}
444
 
445
/* XXX: fix tcc's code generator to do this instead */
/* Logical (zero-filling) right shift of a 64-bit value, 0 <= b < 64. */
unsigned long long __lshrdi3(unsigned long long a, int b)
{
#ifdef __TINYC__
    DWunion x;

    x.ll = a;
    if (b >= 32) {
        x.s.low = (unsigned)x.s.high >> (b - 32);
        x.s.high = 0;   /* zero fill, unlike the arithmetic shift */
    } else if (b != 0) {
        x.s.low = ((unsigned)x.s.low >> b) | (x.s.high << (32 - b));
        x.s.high = (unsigned)x.s.high >> b;
    }
    return x.ll;
#else
    return a >> b;
#endif
}
463
 
464
/* XXX: fix tcc's code generator to do this instead */
/* Left shift of a 64-bit value, 0 <= b < 64. */
long long __ashldi3(long long a, int b)
{
#ifdef __TINYC__
    DWunion x;

    x.ll = a;
    if (b >= 32) {
        /* Whole low word moves into the high word; low word clears. */
        x.s.high = (unsigned)x.s.low << (b - 32);
        x.s.low = 0;
    } else if (b != 0) {
        /* High word receives bits from both halves. */
        x.s.high = ((unsigned)x.s.high << b) | ((unsigned)x.s.low >> (32 - b));
        x.s.low = (unsigned)x.s.low << b;
    }
    return x.ll;
#else
    return a << b;
#endif
}
482
 
483
#ifndef COMMIT_4ad186c5ef61_IS_FIXED
/* Convert a long double to long long with C truncation semantics:
   save the x87 control word, force the rounding-control field to
   round-toward-zero (bits 10-11 set), store with fistpll, restore.
   NOTE(review): the parameter x is never referenced -- this appears to
   rely on the value already sitting on the x87 stack per tcc's calling
   convention; confirm against the code generator. */
long long __tcc_cvt_ftol(long double x)
{
    unsigned c0, c1;
    long long ret;
    __asm__ __volatile__ ("fnstcw %0" : "=m" (c0));
    c1 = c0 | 0x0C00;   /* RC = 11b: truncate toward zero */
    __asm__ __volatile__ ("fldcw %0" : : "m" (c1));
    __asm__ __volatile__ ("fistpll %0"  : "=m" (ret));
    __asm__ __volatile__ ("fldcw %0" : : "m" (c0));
    return ret;
}
#endif
496
 
497
#endif /* !__x86_64__ */
498
 
499
/* XXX: fix tcc's code generator to do this instead */
500
float __floatundisf(unsigned long long a)
501
{
502
    DWunion uu;
503
    XFtype r;
504
 
505
    uu.ll = a;
506
    if (uu.s.high >= 0) {
507
        return (float)uu.ll;
508
    } else {
509
        r = (XFtype)uu.ll;
510
        r += 18446744073709551616.0;
511
        return (float)r;
512
    }
513
}
514
 
515
double __floatundidf(unsigned long long a)
516
{
517
    DWunion uu;
518
    XFtype r;
519
 
520
    uu.ll = a;
521
    if (uu.s.high >= 0) {
522
        return (double)uu.ll;
523
    } else {
524
        r = (XFtype)uu.ll;
525
        r += 18446744073709551616.0;
526
        return (double)r;
527
    }
528
}
529
 
530
long double __floatundixf(unsigned long long a)
531
{
532
    DWunion uu;
533
    XFtype r;
534
 
535
    uu.ll = a;
536
    if (uu.s.high >= 0) {
537
        return (long double)uu.ll;
538
    } else {
539
        r = (XFtype)uu.ll;
540
        r += 18446744073709551616.0;
541
        return (long double)r;
542
    }
543
}
544
 
545
unsigned long long __fixunssfdi (float a1)
546
{
547
    register union float_long fl1;
548
    register int exp;
549
    register unsigned long l;
550
 
551
    fl1.f = a1;
552
 
553
    if (fl1.l == 0)
554
	return (0);
555
 
556
    exp = EXP (fl1.l) - EXCESS - 24;
557
 
558
    l = MANT(fl1.l);
559
    if (exp >= 41)
560
	return (unsigned long long)-1;
561
    else if (exp >= 0)
562
        return (unsigned long long)l << exp;
563
    else if (exp >= -23)
564
        return l >> -exp;
565
    else
566
        return 0;
567
}
568
 
569
unsigned long long __fixunsdfdi (double a1)
570
{
571
    register union double_long dl1;
572
    register int exp;
573
    register unsigned long long l;
574
 
575
    dl1.d = a1;
576
 
577
    if (dl1.ll == 0)
578
	return (0);
579
 
580
    exp = EXPD (dl1) - EXCESSD - 53;
581
 
582
    l = MANTD_LL(dl1);
583
 
584
    if (exp >= 12)
585
	return (unsigned long long)-1;
586
    else if (exp >= 0)
587
        return l << exp;
588
    else if (exp >= -52)
589
        return l >> -exp;
590
    else
591
        return 0;
592
}
593
 
594
unsigned long long __fixunsxfdi (long double a1)
595
{
596
    register union ldouble_long dl1;
597
    register int exp;
598
    register unsigned long long l;
599
 
600
    dl1.ld = a1;
601
 
602
    if (dl1.l.lower == 0 && dl1.l.upper == 0)
603
	return (0);
604
 
605
    exp = EXPLD (dl1) - EXCESSLD - 64;
606
 
607
    l = dl1.l.lower;
608
 
609
    if (exp > 0)
610
	return (unsigned long long)-1;
611
    else if (exp >= -63)
612
        return l >> -exp;
613
    else
614
        return 0;
615
}
616
 
617
/* Convert float to signed 64-bit: push the magnitude through
   __fixunssfdi, then reapply the sign. */
long long __fixsfdi (float a1)
{
    int nonneg = (a1 >= 0);
    long long mag = __fixunssfdi(nonneg ? a1 : -a1);
    return nonneg ? mag : -mag;
}
623
 
624
/* Convert double to signed 64-bit: push the magnitude through
   __fixunsdfdi, then reapply the sign. */
long long __fixdfdi (double a1)
{
    int nonneg = (a1 >= 0);
    long long mag = __fixunsdfdi(nonneg ? a1 : -a1);
    return nonneg ? mag : -mag;
}
630
 
631
/* Convert long double to signed 64-bit: push the magnitude through
   __fixunsxfdi, then reapply the sign. */
long long __fixxfdi (long double a1)
{
    int nonneg = (a1 >= 0);
    long long mag = __fixunsxfdi(nonneg ? a1 : -a1);
    return nonneg ? mag : -mag;
}
637
 
638
#if defined(TCC_TARGET_X86_64) && !defined(_WIN64)
639
 
640
#ifndef __TINYC__
/* Host compiler build: use the real C library headers.
   (The header operands of these #include lines were lost in a web
   extraction; restored per upstream tinycc lib/libtcc1.c.) */
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#else
/* Avoid including stdlib.h because it is not easily available when
   cross compiling */
#include <stddef.h> /* size_t definition is needed for a x86_64-tcc to parse memset() */
extern void *malloc(unsigned long long);
extern void *memset(void *s, int c, size_t n);
extern void free(void*);
extern void abort(void);
#endif
653
 
654
/* Where an argument class is passed on x86-64 SysV: general-purpose
   register, floating-point (SSE) register, or the stack.
   NOTE(review): values are presumably emitted by tcc's code generator
   and must stay in this order -- confirm before reordering. */
enum __va_arg_type {
    __va_gen_reg, __va_float_reg, __va_stack
};
657
 
658
//This should be in sync with the declaration on our include/stdarg.h
/* GCC compatible definition of va_list. */
typedef struct {
    unsigned int gp_offset;   /* byte offset into reg_save_area of the next GP-register arg */
    unsigned int fp_offset;   /* byte offset into reg_save_area of the next SSE-register arg */
    union {
        unsigned int overflow_offset;   /* offset from fp to the stack args (read once in __va_start) */
        char *overflow_arg_area;        /* pointer to the next stack-passed arg (after __va_start) */
    };
    char *reg_save_area;      /* register arguments spilled by the prologue */
} __va_list_struct;
669
 
670
#undef __va_start
671
#undef __va_arg
672
#undef __va_copy
673
#undef __va_end
674
 
675
/* Initialize ap from the caller's frame pointer.  A template
   __va_list_struct sits 16 bytes below fp; the 176-byte register
   save area (6 GP * 8 + 8 SSE * 16 bytes) sits just below that.
   NOTE(review): layout constants come from tcc's prologue -- confirm
   against the code generator when changing either side. */
void __va_start(__va_list_struct *ap, void *fp)
{
    memset(ap, 0, sizeof(__va_list_struct));
    /* Copy the template the function prologue stored below fp. */
    *ap = *(__va_list_struct *)((char *)fp - 16);
    /* The template carries overflow_offset relative to fp; turn it
       into an absolute pointer to the stack-passed arguments. */
    ap->overflow_arg_area = (char *)fp + ap->overflow_offset;
    ap->reg_save_area = (char *)fp - 176 - 16;
}
682
 
683
/* Return a pointer to the next variadic argument.  arg_type says
   where this argument class is passed; size/align are the argument's
   size and alignment in bytes. */
void *__va_arg(__va_list_struct *ap,
               enum __va_arg_type arg_type,
               int size, int align)
{
    /* All slots are handled at 8-byte granularity. */
    size = (size + 7) & ~7;
    align = (align + 7) & ~7;
    switch (arg_type) {
    case __va_gen_reg:
        /* GP region: first 48 bytes (6 registers * 8) of the save area. */
        if (ap->gp_offset + size <= 48) {
            ap->gp_offset += size;
            return ap->reg_save_area + ap->gp_offset - size;
        }
        goto use_overflow_area;

    case __va_float_reg:
        /* SSE region: 16-byte slots in [48, 48+128) of the save area. */
        if (ap->fp_offset < 128 + 48) {
            ap->fp_offset += 16;
            return ap->reg_save_area + ap->fp_offset - 16;
        }
        size = 8;   /* when spilled to the stack, an SSE arg takes 8 bytes */
        goto use_overflow_area;

    case __va_stack:
    use_overflow_area:
        /* Advance past this argument, round up to its alignment, and
           return the start of the slot just consumed. */
        ap->overflow_arg_area += size;
        ap->overflow_arg_area = (char*)((intptr_t)(ap->overflow_arg_area + align - 1) & -(intptr_t)align);
        return ap->overflow_arg_area - size;

    default:
#ifndef __TINYC__
        fprintf(stderr, "unknown ABI type for __va_arg\n");
#endif
        abort();
    }
}
718
 
719
#endif /* __x86_64__ */
720
 
721
/* Flushing for tccrun */
722
#if defined(TCC_TARGET_X86_64) || defined(TCC_TARGET_I386)
723
 
724
/* Intentionally empty on x86 targets (see the #if above): no explicit
   instruction-cache flush is performed before running generated code. */
void __clear_cache(void *beginning, void *end)
{
}
727
 
728
#elif defined(TCC_TARGET_ARM)
729
 
730
/* _GNU_SOURCE is needed for syscall() from unistd.h.  (The header
   operands of these #include lines were lost in a web extraction;
   restored per upstream tinycc lib/libtcc1.c.) */
#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>
#include <stdio.h>
734
 
735
/* Flush the caches for [beginning, end) so freshly generated code can
   be executed (used by tcc -run on ARM). */
void __clear_cache(void *beginning, void *end)
{
/* __ARM_NR_cacheflush is kernel private and should not be used in user space.
 * However, there is no ARM asm parser in tcc so we use it for now */
#if 1
    syscall(__ARM_NR_cacheflush, beginning, end, 0);
#else
    /* Disabled alternative: issue the same cacheflush syscall directly
       via swi (see the comment above for why the syscall() form is used). */
    __asm__ ("push {r7}\n\t"
             "mov r7, #0xf0002\n\t"
             "mov r2, #0\n\t"
             "swi 0\n\t"
             "pop {r7}\n\t"
             "ret");
#endif
}
750
 
751
#else
752
#warning __clear_cache not defined for this architecture, avoid using tcc -run
753
#endif