/* Imported from the KolibriOS SVN web viewer (rev 8718, author turbocat);
   HTML navigation chrome removed from the export. */
/* TCC runtime library.
   Parts of this code are (c) 2002 Fabrice Bellard

   Copyright (C) 1987, 1988, 1992, 1994, 1995 Free Software Foundation, Inc.

This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)

This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
*/

//#include   /* NOTE(review): header name was lost in the HTML export — TODO confirm against upstream */
#define TCC_TARGET_I386

#define W_TYPE_SIZE   32        /* bits per machine word on the i386 target */
#define BITS_PER_UNIT 8

/* Word / double-word integer types for the 32-bit target. */
typedef int Wtype;                  /* signed 32-bit word */
typedef unsigned int UWtype;        /* unsigned 32-bit word */
typedef unsigned int USItype;       /* used by the inline-asm primitives below */
typedef long long DWtype;           /* signed 64-bit double word */
typedef unsigned long long UDWtype; /* unsigned 64-bit double word */

/* Little-endian split of a 64-bit value into two 32-bit halves. */
struct DWstruct {
    Wtype low, high;
};

/* View a 64-bit integer either whole (ll) or as halves (s). */
typedef union
{
  struct DWstruct s;
  DWtype ll;
} DWunion;

typedef long double XFtype;
#define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
#define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)

/* the following deal with IEEE single-precision numbers */
#define EXCESS		126             /* exponent bias minus 1 */
#define SIGNBIT		0x80000000
#define HIDDEN		(1 << 23)       /* implicit leading mantissa bit */
#define SIGN(fp)	((fp) & SIGNBIT)
#define EXP(fp)		(((fp) >> 23) & 0xFF)
#define MANT(fp)	(((fp) & 0x7FFFFF) | HIDDEN)
#define PACK(s,e,m)	((s) | ((e) << 23) | (m))

/* the following deal with IEEE double-precision numbers */
#define EXCESSD		1022
#define HIDDEND		(1 << 20)       /* implicit bit within the upper word */
#define EXPD(fp)	(((fp.l.upper) >> 20) & 0x7FF)
#define SIGND(fp)	((fp.l.upper) & SIGNBIT)
#define MANTD(fp)	(((((fp.l.upper) & 0xFFFFF) | HIDDEND) << 10) | \
				(fp.l.lower >> 22))
#define HIDDEND_LL	((long long)1 << 52)
#define MANTD_LL(fp)	((fp.ll & (HIDDEND_LL-1)) | HIDDEND_LL)
#define PACKD_LL(s,e,m)	(((long long)((s)+((e)<<20))<<32)|(m))

/* the following deal with x86 long double-precision numbers */
#define EXCESSLD	16382
#define EXPLD(fp)	(fp.l.upper & 0x7fff)
#define SIGNLD(fp)	((fp.l.upper) & 0x8000)

/* only for x86 */
union ldouble_long {
    long double ld;
    struct {
        unsigned long long lower;   /* 64-bit explicit mantissa */
        unsigned short upper;       /* sign bit + 15-bit exponent */
    } l;
};

union double_long {
    double d;
#if 1
    /* little-endian half order (matches the i386 target) */
    struct {
        unsigned int lower;
        int upper;
    } l;
#else
    struct {
        int upper;
        unsigned int lower;
    } l;
#endif
    long long ll;
};

union float_long {
    float f;
    unsigned int l;     /* raw IEEE-754 bit pattern */
};

/* XXX: we don't support several builtin supports for now */
#if !defined(TCC_TARGET_X86_64) && !defined(TCC_TARGET_ARM)

/* XXX: use gcc/tcc intrinsic ? */
#if defined(TCC_TARGET_I386)
/* (sh,sl) = (ah,al) - (bh,bl): 64-bit subtract built from sub/sbb. */
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subl %5,%1\n\tsbbl %3,%0"					\
	   : "=r" ((USItype) (sh)),					\
	     "=&r" ((USItype) (sl))					\
	   : "0" ((USItype) (ah)),					\
	     "g" ((USItype) (bh)),					\
	     "1" ((USItype) (al)),					\
	     "g" ((USItype) (bl)))
/* (w1,w0) = u * v: full 32x32 -> 64-bit unsigned multiply (mull). */
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("mull %3"							\
	   : "=a" ((USItype) (w0)),					\
	     "=d" ((USItype) (w1))					\
	   : "%0" ((USItype) (u)),					\
	     "rm" ((USItype) (v)))
/* q = (n1,n0) / dv, r = (n1,n0) % dv; requires n1 < dv or divl faults. */
#define udiv_qrnnd(q, r, n1, n0, dv) \
  __asm__ ("divl %4"							\
	   : "=a" ((USItype) (q)),					\
	     "=d" ((USItype) (r))					\
	   : "0" ((USItype) (n0)),					\
	     "1" ((USItype) (n1)),					\
	     "rm" ((USItype) (dv)))
/* count = leading zero bits of x; x must be non-zero (bsr is undefined on 0). */
#define count_leading_zeros(count, x) \
  do {									\
    USItype __cbtmp;							\
    __asm__ ("bsrl %1,%0"						\
	     : "=r" (__cbtmp) : "rm" ((USItype) (x)));			\
    (count) = __cbtmp ^ 31;						\
  } while (0)
#else
#error unsupported CPU type
#endif

/* most of this code is taken from libgcc2.c from gcc */

/* 64-bit unsigned division: returns n / d and, when rp is non-null,
   stores the remainder through rp.  Built from the 32-bit asm
   primitives above (udiv_qrnnd/umul_ppmm/sub_ddmmss).  Division by
   zero intentionally evaluates 1/0 so the hardware divide trap fires,
   exactly as libgcc does. */
UDWtype __udivmoddi4 (UDWtype n, UDWtype d, UDWtype *rp)
{
  DWunion ww;                   /* result assembled from q1:q0 */
  DWunion nn, dd;
  DWunion rr;
  UWtype d0, d1, n0, n1, n2;    /* 32-bit halves of divisor/dividend */
  UWtype q0, q1;
  UWtype b, bm;                 /* shift counts used for normalization */

  nn.ll = n;
  dd.ll = d;

  d0 = dd.s.low;
  d1 = dd.s.high;
  n0 = nn.s.low;
  n1 = nn.s.high;

#if !defined(UDIV_NEEDS_NORMALIZATION)
  /* i386's divl does not require a pre-normalized divisor. */
  if (d1 == 0)
    {
      if (d0 > n1)
	{
	  /* 0q = nn / 0D */

	  udiv_qrnnd (q0, n0, n1, n0, d0);
	  q1 = 0;

	  /* Remainder in n0.  */
	}
      else
	{
	  /* qq = NN / 0d */

	  if (d0 == 0)
	    d0 = 1 / d0;	/* Divide intentionally by zero.  */

	  udiv_qrnnd (q1, n1, 0, n1, d0);
	  udiv_qrnnd (q0, n0, n1, n0, d0);

	  /* Remainder in n0.  */
	}

      if (rp != 0)
	{
	  rr.s.low = n0;
	  rr.s.high = 0;
	  *rp = rr.ll;
	}
    }

#else /* UDIV_NEEDS_NORMALIZATION */

  if (d1 == 0)
    {
      if (d0 > n1)
	{
	  /* 0q = nn / 0D */

	  count_leading_zeros (bm, d0);

	  if (bm != 0)
	    {
	      /* Normalize, i.e. make the most significant bit of the
		 denominator set.  */

	      d0 = d0 << bm;
	      n1 = (n1 << bm) | (n0 >> (W_TYPE_SIZE - bm));
	      n0 = n0 << bm;
	    }

	  udiv_qrnnd (q0, n0, n1, n0, d0);
	  q1 = 0;

	  /* Remainder in n0 >> bm.  */
	}
      else
	{
	  /* qq = NN / 0d */

	  if (d0 == 0)
	    d0 = 1 / d0;	/* Divide intentionally by zero.  */

	  count_leading_zeros (bm, d0);

	  if (bm == 0)
	    {
	      /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
		 conclude (the most significant bit of n1 is set) /\ (the
		 leading quotient digit q1 = 1).

		 This special case is necessary, not an optimization.
		 (Shifts counts of W_TYPE_SIZE are undefined.)  */

	      n1 -= d0;
	      q1 = 1;
	    }
	  else
	    {
	      /* Normalize.  */

	      b = W_TYPE_SIZE - bm;

	      d0 = d0 << bm;
	      n2 = n1 >> b;
	      n1 = (n1 << bm) | (n0 >> b);
	      n0 = n0 << bm;

	      udiv_qrnnd (q1, n1, n2, n1, d0);
	    }

	  /* n1 != d0...  */

	  udiv_qrnnd (q0, n0, n1, n0, d0);

	  /* Remainder in n0 >> bm.  */
	}

      if (rp != 0)
	{
	  rr.s.low = n0 >> bm;
	  rr.s.high = 0;
	  *rp = rr.ll;
	}
    }
#endif /* UDIV_NEEDS_NORMALIZATION */

  else
    {
      if (d1 > n1)
	{
	  /* 00 = nn / DD */

	  q0 = 0;
	  q1 = 0;

	  /* Remainder in n1n0.  */
	  if (rp != 0)
	    {
	      rr.s.low = n0;
	      rr.s.high = n1;
	      *rp = rr.ll;
	    }
	}
      else
	{
	  /* 0q = NN / dd */

	  count_leading_zeros (bm, d1);
	  if (bm == 0)
	    {
	      /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
		 conclude (the most significant bit of n1 is set) /\ (the
		 quotient digit q0 = 0 or 1).

		 This special case is necessary, not an optimization.  */

	      /* The condition on the next line takes advantage of that
		 n1 >= d1 (true due to program flow).  */
	      if (n1 > d1 || n0 >= d0)
		{
		  q0 = 1;
		  sub_ddmmss (n1, n0, n1, n0, d1, d0);
		}
	      else
		q0 = 0;

	      q1 = 0;

	      if (rp != 0)
		{
		  rr.s.low = n0;
		  rr.s.high = n1;
		  *rp = rr.ll;
		}
	    }
	  else
	    {
	      UWtype m1, m0;
	      /* Normalize.  */

	      b = W_TYPE_SIZE - bm;

	      d1 = (d1 << bm) | (d0 >> b);
	      d0 = d0 << bm;
	      n2 = n1 >> b;
	      n1 = (n1 << bm) | (n0 >> b);
	      n0 = n0 << bm;

	      udiv_qrnnd (q0, n1, n2, n1, d1);
	      umul_ppmm (m1, m0, q0, d0);

	      /* If the estimated quotient digit was one too large,
		 correct it and the partial remainder.  */
	      if (m1 > n1 || (m1 == n1 && m0 > n0))
		{
		  q0--;
		  sub_ddmmss (m1, m0, m1, m0, d1, d0);
		}

	      q1 = 0;

	      /* Remainder in (n1n0 - m1m0) >> bm.  */
	      if (rp != 0)
		{
		  sub_ddmmss (n1, n0, n1, n0, m1, m0);
		  rr.s.low = (n1 << b) | (n0 >> bm);
		  rr.s.high = n1 >> bm;
		  *rp = rr.ll;
		}
	    }
	}
    }

  ww.s.low = q0;
  ww.s.high = q1;
  return ww.ll;
}

#define __negdi2(a) (-(a))
367
 
368
long long __divdi3(long long u, long long v)
369
{
370
    int c = 0;
371
    DWunion uu, vv;
372
    DWtype w;
373
 
374
    uu.ll = u;
375
    vv.ll = v;
376
 
377
    if (uu.s.high < 0) {
378
        c = ~c;
379
        uu.ll = __negdi2 (uu.ll);
380
    }
381
    if (vv.s.high < 0) {
382
        c = ~c;
383
        vv.ll = __negdi2 (vv.ll);
384
    }
385
    w = __udivmoddi4 (uu.ll, vv.ll, (UDWtype *) 0);
386
    if (c)
387
        w = __negdi2 (w);
388
    return w;
389
}
390
 
391
// https://github.com/KaMeHb-UA/UE4m/blob/1d9ad5bfead06520570c7f24dad062f9f8717c1a/Engine/Extras/ThirdPartyNotUE/emsdk/emscripten/incoming/system/lib/compiler-rt/lib/builtins/divmoddi4.c
/* Combined signed 64-bit divide-and-remainder: returns a / b and
   stores a % b through rem (remainder has the sign of a). */
long long __divmoddi4(long long a, long long b, long long* rem)
{
    long long quot = __divdi3(a, b);
    *rem = a - (quot * b);
    return quot;
}

long long __moddi3(long long u, long long v)
402
{
403
    int c = 0;
404
    DWunion uu, vv;
405
    DWtype w;
406
 
407
    uu.ll = u;
408
    vv.ll = v;
409
 
410
    if (uu.s.high < 0) {
411
        c = ~c;
412
        uu.ll = __negdi2 (uu.ll);
413
    }
414
    if (vv.s.high < 0)
415
        vv.ll = __negdi2 (vv.ll);
416
 
417
    __udivmoddi4 (uu.ll, vv.ll, (UDWtype *) &w);
418
    if (c)
419
        w = __negdi2 (w);
420
    return w;
421
}
422
 
423
unsigned long long __udivdi3(unsigned long long u, unsigned long long v)
424
{
425
    return __udivmoddi4 (u, v, (UDWtype *) 0);
426
}
427
 
428
unsigned long long __umoddi3(unsigned long long u, unsigned long long v)
429
{
430
    UDWtype w;
431
 
432
    __udivmoddi4 (u, v, &w);
433
    return w;
434
}
435
 
436
/* XXX: fix tcc's code generator to do this instead */
/* Arithmetic (sign-propagating) right shift of a 64-bit value.
   Valid for 0 <= b < 64, matching the compiler-runtime contract. */
long long __ashrdi3(long long a, int b)
{
#ifdef __TINYC__
    DWunion x;

    x.ll = a;
    if (b >= 32) {
        /* Whole high word shifts into the low word. */
        x.s.low = x.s.high >> (b - 32);
        x.s.high = x.s.high >> 31;  /* replicate the sign bit */
    } else if (b != 0) {
        /* Split shift: low word receives bits falling out of high. */
        x.s.low = ((unsigned)x.s.low >> b) | (x.s.high << (32 - b));
        x.s.high = x.s.high >> b;
    }
    return x.ll;
#else
    return a >> b;
#endif
}

/* XXX: fix tcc's code generator to do this instead */
/* Logical (zero-filling) right shift of a 64-bit value, 0 <= b < 64. */
unsigned long long __lshrdi3(unsigned long long a, int b)
{
#ifdef __TINYC__
    DWunion x;

    x.ll = a;
    if (b >= 32) {
        /* High word moves down; top half becomes zero. */
        x.s.low = (unsigned)x.s.high >> (b - 32);
        x.s.high = 0;
    } else if (b != 0) {
        /* Split shift across the word boundary. */
        x.s.low = ((unsigned)x.s.low >> b) | (x.s.high << (32 - b));
        x.s.high = (unsigned)x.s.high >> b;
    }
    return x.ll;
#else
    return a >> b;
#endif
}

/* XXX: fix tcc's code generator to do this instead */
/* Left shift of a 64-bit value, 0 <= b < 64. */
long long __ashldi3(long long a, int b)
{
#ifdef __TINYC__
    DWunion x;

    x.ll = a;
    if (b >= 32) {
        /* Low word moves up; bottom half becomes zero. */
        x.s.high = (unsigned)x.s.low << (b - 32);
        x.s.low = 0;
    } else if (b != 0) {
        /* Split shift: high word receives bits rising out of low. */
        x.s.high = ((unsigned)x.s.high << b) | ((unsigned)x.s.low >> (32 - b));
        x.s.low = (unsigned)x.s.low << b;
    }
    return x.ll;
#else
    return a << b;
#endif
}

#ifndef COMMIT_4ad186c5ef61_IS_FIXED
/* Truncating conversion of a long double to a 64-bit integer.
   NOTE(review): the parameter x is never referenced — this assumes
   tcc's code generator leaves the value in st(0) on entry; confirm
   against tcc's calling convention before reusing elsewhere. */
long long __tcc_cvt_ftol(long double x)
{
    unsigned c0, c1;
    long long ret;
    /* Save the x87 control word, then set rounding control bits
       (10-11) to 11b = truncate toward zero. */
    __asm__ __volatile__ ("fnstcw %0" : "=m" (c0));
    c1 = c0 | 0x0C00;
    __asm__ __volatile__ ("fldcw %0" : : "m" (c1));
    /* Pop st(0) into ret as a 64-bit integer using the new rounding mode. */
    __asm__ __volatile__ ("fistpll %0"  : "=m" (ret));
    /* Restore the caller's control word. */
    __asm__ __volatile__ ("fldcw %0" : : "m" (c0));
    return ret;
}
#endif

#endif /* !__x86_64__ */
508
 
509
/* XXX: fix tcc's code generator to do this instead */
510
float __floatundisf(unsigned long long a)
511
{
512
    DWunion uu;
513
    XFtype r;
514
 
515
    uu.ll = a;
516
    if (uu.s.high >= 0) {
517
        return (float)uu.ll;
518
    } else {
519
        r = (XFtype)uu.ll;
520
        r += 18446744073709551616.0;
521
        return (float)r;
522
    }
523
}
524
 
525
double __floatundidf(unsigned long long a)
526
{
527
    DWunion uu;
528
    XFtype r;
529
 
530
    uu.ll = a;
531
    if (uu.s.high >= 0) {
532
        return (double)uu.ll;
533
    } else {
534
        r = (XFtype)uu.ll;
535
        r += 18446744073709551616.0;
536
        return (double)r;
537
    }
538
}
539
 
540
long double __floatundixf(unsigned long long a)
541
{
542
    DWunion uu;
543
    XFtype r;
544
 
545
    uu.ll = a;
546
    if (uu.s.high >= 0) {
547
        return (long double)uu.ll;
548
    } else {
549
        r = (XFtype)uu.ll;
550
        r += 18446744073709551616.0;
551
        return (long double)r;
552
    }
553
}
554
 
555
unsigned long long __fixunssfdi (float a1)
556
{
557
    register union float_long fl1;
558
    register int exp;
559
    register unsigned long l;
560
 
561
    fl1.f = a1;
562
 
563
    if (fl1.l == 0)
564
	return (0);
565
 
566
    exp = EXP (fl1.l) - EXCESS - 24;
567
 
568
    l = MANT(fl1.l);
569
    if (exp >= 41)
570
	return (unsigned long long)-1;
571
    else if (exp >= 0)
572
        return (unsigned long long)l << exp;
573
    else if (exp >= -23)
574
        return l >> -exp;
575
    else
576
        return 0;
577
}
578
 
579
unsigned long long __fixunsdfdi (double a1)
580
{
581
    register union double_long dl1;
582
    register int exp;
583
    register unsigned long long l;
584
 
585
    dl1.d = a1;
586
 
587
    if (dl1.ll == 0)
588
	return (0);
589
 
590
    exp = EXPD (dl1) - EXCESSD - 53;
591
 
592
    l = MANTD_LL(dl1);
593
 
594
    if (exp >= 12)
595
	return (unsigned long long)-1;
596
    else if (exp >= 0)
597
        return l << exp;
598
    else if (exp >= -52)
599
        return l >> -exp;
600
    else
601
        return 0;
602
}
603
 
604
unsigned long long __fixunsxfdi (long double a1)
605
{
606
    register union ldouble_long dl1;
607
    register int exp;
608
    register unsigned long long l;
609
 
610
    dl1.ld = a1;
611
 
612
    if (dl1.l.lower == 0 && dl1.l.upper == 0)
613
	return (0);
614
 
615
    exp = EXPLD (dl1) - EXCESSLD - 64;
616
 
617
    l = dl1.l.lower;
618
 
619
    if (exp > 0)
620
	return (unsigned long long)-1;
621
    else if (exp >= -63)
622
        return l >> -exp;
623
    else
624
        return 0;
625
}
626
 
627
/* Signed float -> long long: convert the magnitude, then restore the sign. */
long long __fixsfdi (float a1)
{
    int nonneg = a1 >= 0;
    long long mag = __fixunssfdi(nonneg ? a1 : -a1);
    return nonneg ? mag : -mag;
}

/* Signed double -> long long: convert the magnitude, then restore the sign. */
long long __fixdfdi (double a1)
{
    int nonneg = a1 >= 0;
    long long mag = __fixunsdfdi(nonneg ? a1 : -a1);
    return nonneg ? mag : -mag;
}

/* Signed long double -> long long: convert the magnitude, then restore the sign. */
long long __fixxfdi (long double a1)
{
    int nonneg = a1 >= 0;
    long long mag = __fixunsxfdi(nonneg ? a1 : -a1);
    return nonneg ? mag : -mag;
}

#if defined(TCC_TARGET_X86_64) && !defined(_WIN64)

#ifndef __TINYC__
/* Hosted build: use the normal C library headers.  The header names
   below were destroyed by the HTML export of this file and have been
   restored from the upstream TinyCC lib/libtcc1.c. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#else
/* Avoid including stdlib.h because it is not easily available when
   cross compiling */
#include <stddef.h> /* size_t definition is needed for a x86_64-tcc to parse memset() */
 void *malloc(unsigned long long);
 void *memset(void *s, int c, size_t n);
 void free(void*);
 void abort(void);
#endif

/* Argument classification used by tcc's generated x86-64 varargs code:
   integer register, SSE register, or stack. */
enum __va_arg_type {
    __va_gen_reg, __va_float_reg, __va_stack
};

//This should be in sync with the declaration on our include/stdarg.h
/* GCC compatible definition of va_list. */
typedef struct {
    unsigned int gp_offset;         /* byte offset into reg_save_area of the next integer arg */
    unsigned int fp_offset;         /* byte offset into reg_save_area of the next SSE arg */
    union {
        unsigned int overflow_offset;   /* before __va_start: stack-arg offset from the frame pointer */
        char *overflow_arg_area;        /* after __va_start: pointer to the next stack argument */
    };
    char *reg_save_area;            /* saved integer/SSE argument registers */
} __va_list_struct;

/* Make sure any macro versions from stdarg.h don't shadow the
   runtime implementations below. */
#undef __va_start
#undef __va_arg
#undef __va_copy
#undef __va_end

/* Initialize ap from the caller's frame pointer fp.  The template
   __va_list_struct is read from fp-16, and the register save area sits
   176+16 bytes below fp.  NOTE(review): the constants 16 and 176 match
   tcc's x86-64 prologue layout — verify against tcc's code generator
   before changing. */
void __va_start(__va_list_struct *ap, void *fp)
{
    memset(ap, 0, sizeof(__va_list_struct));
    *ap = *(__va_list_struct *)((char *)fp - 16);
    /* Convert the stored offset into an absolute stack pointer. */
    ap->overflow_arg_area = (char *)fp + ap->overflow_offset;
    ap->reg_save_area = (char *)fp - 176 - 16;
}

/* Fetch the next variadic argument of the given classification.
   Returns a pointer to the argument's storage inside the register
   save area or the stack overflow area.  size and align are in bytes. */
void *__va_arg(__va_list_struct *ap,
               enum __va_arg_type arg_type,
               int size, int align)
{
    /* Round size and alignment up to 8-byte multiples. */
    size = (size + 7) & ~7;
    align = (align + 7) & ~7;
    switch (arg_type) {
    case __va_gen_reg:
        /* Integer register save area is the first 48 bytes (6 regs * 8). */
        if (ap->gp_offset + size <= 48) {
            ap->gp_offset += size;
            return ap->reg_save_area + ap->gp_offset - size;
        }
        goto use_overflow_area;

    case __va_float_reg:
        /* SSE save area follows at offset 48, one 16-byte slot per register. */
        if (ap->fp_offset < 128 + 48) {
            ap->fp_offset += 16;
            return ap->reg_save_area + ap->fp_offset - 16;
        }
        size = 8;  /* on the stack a float/double occupies one 8-byte slot */
        goto use_overflow_area;

    case __va_stack:
    use_overflow_area:
        /* Advance past the argument, then round the cursor up to align. */
        ap->overflow_arg_area += size;
        ap->overflow_arg_area = (char*)((intptr_t)(ap->overflow_arg_area + align - 1) & -(intptr_t)align);
        return ap->overflow_arg_area - size;

    default:
#ifndef __TINYC__
        fprintf(stderr, "unknown ABI type for __va_arg\n");
#endif
        abort();
    }
}

#endif /* __x86_64__ */

/* Flushing for tccrun */
#if defined(TCC_TARGET_X86_64) || defined(TCC_TARGET_I386)

/* Intentionally empty: on x86 the instruction cache stays coherent
   with data writes, so no explicit flush is needed after tccrun
   generates code in memory. */
void __clear_cache(void *beginning, void *end)
{
}

#elif defined(TCC_TARGET_ARM)
739
 
740
#define _GNU_SOURCE
741
#include 
742
#include 
743
#include 
744
 
745
void __clear_cache(void *beginning, void *end)
746
{
747
/* __ARM_NR_cacheflush is kernel private and should not be used in user space.
748
 * However, there is no ARM asm parser in tcc so we use it for now */
749
#if 1
750
    syscall(__ARM_NR_cacheflush, beginning, end, 0);
751
#else
752
    __asm__ ("push {r7}\n\t"
753
             "mov r7, #0xf0002\n\t"
754
             "mov r2, #0\n\t"
755
             "swi 0\n\t"
756
             "pop {r7}\n\t"
757
             "ret");
758
#endif
759
}
760
 
761
#else
762
#warning __clear_cache not defined for this architecture, avoid using tcc -run
763
#endif