Subversion Repositories Kolibri OS

Compare Revisions

Regard whitespace Rev 6514 → Rev 6515

/contrib/toolchain/gcc/5x/libgcc/config/i386/32/sfp-machine.h
0,0 → 1,113
/* soft-fp machine configuration for 32-bit x86 (IA-32): word size,
   multi-word fraction primitives and rounding support used by libgcc's
   software floating-point emulation.  */
#define _FP_W_TYPE_SIZE 32
#define _FP_W_TYPE unsigned int
#define _FP_WS_TYPE signed int
#define _FP_I_TYPE int
 
/* r = x + y on a 4-word (128-bit) fraction, via an add/adc carry chain.
   The {att|intel} braces select the operand order for whichever
   assembler dialect is active.  */
#define __FP_FRAC_ADD_4(r3,r2,r1,r0,x3,x2,x1,x0,y3,y2,y1,y0) \
__asm__ ("add{l} {%11,%3|%3,%11}\n\t" \
"adc{l} {%9,%2|%2,%9}\n\t" \
"adc{l} {%7,%1|%1,%7}\n\t" \
"adc{l} {%5,%0|%0,%5}" \
: "=r" ((USItype) (r3)), \
"=&r" ((USItype) (r2)), \
"=&r" ((USItype) (r1)), \
"=&r" ((USItype) (r0)) \
: "%0" ((USItype) (x3)), \
"g" ((USItype) (y3)), \
"%1" ((USItype) (x2)), \
"g" ((USItype) (y2)), \
"%2" ((USItype) (x1)), \
"g" ((USItype) (y1)), \
"%3" ((USItype) (x0)), \
"g" ((USItype) (y0)))
/* r = x + y on a 3-word (96-bit) fraction.  */
#define __FP_FRAC_ADD_3(r2,r1,r0,x2,x1,x0,y2,y1,y0) \
__asm__ ("add{l} {%8,%2|%2,%8}\n\t" \
"adc{l} {%6,%1|%1,%6}\n\t" \
"adc{l} {%4,%0|%0,%4}" \
: "=r" ((USItype) (r2)), \
"=&r" ((USItype) (r1)), \
"=&r" ((USItype) (r0)) \
: "%0" ((USItype) (x2)), \
"g" ((USItype) (y2)), \
"%1" ((USItype) (x1)), \
"g" ((USItype) (y1)), \
"%2" ((USItype) (x0)), \
"g" ((USItype) (y0)))
/* r = x - y on a 4-word fraction, via a sub/sbb borrow chain.  Unlike
   the commutative adds above, the "0".."3" constraints do not carry
   the "%" (commutative) modifier.  */
#define __FP_FRAC_SUB_4(r3,r2,r1,r0,x3,x2,x1,x0,y3,y2,y1,y0) \
__asm__ ("sub{l} {%11,%3|%3,%11}\n\t" \
"sbb{l} {%9,%2|%2,%9}\n\t" \
"sbb{l} {%7,%1|%1,%7}\n\t" \
"sbb{l} {%5,%0|%0,%5}" \
: "=r" ((USItype) (r3)), \
"=&r" ((USItype) (r2)), \
"=&r" ((USItype) (r1)), \
"=&r" ((USItype) (r0)) \
: "0" ((USItype) (x3)), \
"g" ((USItype) (y3)), \
"1" ((USItype) (x2)), \
"g" ((USItype) (y2)), \
"2" ((USItype) (x1)), \
"g" ((USItype) (y1)), \
"3" ((USItype) (x0)), \
"g" ((USItype) (y0)))
/* r = x - y on a 3-word fraction.  */
#define __FP_FRAC_SUB_3(r2,r1,r0,x2,x1,x0,y2,y1,y0) \
__asm__ ("sub{l} {%8,%2|%2,%8}\n\t" \
"sbb{l} {%6,%1|%1,%6}\n\t" \
"sbb{l} {%4,%0|%0,%4}" \
: "=r" ((USItype) (r2)), \
"=&r" ((USItype) (r1)), \
"=&r" ((USItype) (r0)) \
: "0" ((USItype) (x2)), \
"g" ((USItype) (y2)), \
"1" ((USItype) (x1)), \
"g" ((USItype) (y1)), \
"2" ((USItype) (x0)), \
"g" ((USItype) (y0)))
/* x += i on a 4-word fraction, propagating the carry through the
   upper words with adc-immediate-zero.  */
#define __FP_FRAC_ADDI_4(x3,x2,x1,x0,i) \
__asm__ ("add{l} {%4,%3|%3,%4}\n\t" \
"adc{l} {$0,%2|%2,0}\n\t" \
"adc{l} {$0,%1|%1,0}\n\t" \
"adc{l} {$0,%0|%0,0}" \
: "+r" ((USItype) (x3)), \
"+&r" ((USItype) (x2)), \
"+&r" ((USItype) (x1)), \
"+&r" ((USItype) (x0)) \
: "g" ((USItype) (i)))
 
 
/* Fraction multiplication uses the generic N-word widening helpers
   built on umul_ppmm.  */
#define _FP_MUL_MEAT_S(R,X,Y) \
_FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm)
#define _FP_MUL_MEAT_D(R,X,Y) \
_FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
#define _FP_MUL_MEAT_Q(R,X,Y) \
_FP_MUL_MEAT_4_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
 
/* Division strategies: bit loop for single, udiv-based for double/quad.  */
#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_loop(S,R,X,Y)
#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y)
#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_4_udiv(Q,R,X,Y)
 
/* Default quiet-NaN fraction patterns, one initializer per 32-bit word.  */
#define _FP_NANFRAC_S _FP_QNANBIT_S
#define _FP_NANFRAC_D _FP_QNANBIT_D, 0
/* Even if XFmode is 12byte, we have to pad it to
16byte since soft-fp emulation is done in 16byte. */
#define _FP_NANFRAC_E _FP_QNANBIT_E, 0, 0, 0
#define _FP_NANFRAC_Q _FP_QNANBIT_Q, 0, 0, 0
 
#ifndef _SOFT_FLOAT
#define FP_EX_SHIFT 0
 
/* Per-operation local holding the x87 control word.  */
#define _FP_DECL_EX \
unsigned short _fcw __attribute__ ((unused)) = FP_RND_NEAREST;
 
/* Values of the x87 control-word rounding field (bits 10-11).  */
#define FP_RND_NEAREST 0
#define FP_RND_ZERO 0xc00
#define FP_RND_PINF 0x800
#define FP_RND_MINF 0x400
 
#define FP_RND_MASK 0xc00
 
/* Read the live x87 control word so soft-fp honours the rounding mode
   currently selected in hardware.  */
#define FP_INIT_ROUNDMODE \
do { \
__asm__ __volatile__ ("fnstcw\t%0" : "=m" (_fcw)); \
} while (0)
#endif
/contrib/toolchain/gcc/5x/libgcc/config/i386/32/tf-signs.c
0,0 → 1,62
/* Copyright (C) 2008-2015 Free Software Foundation, Inc.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
 
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
 
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
 
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
 
/* Overlay giving access to the IEEE binary128 bit-fields of a
   __float128: fraction words lowest-first, then the 15-bit biased
   exponent and the sign bit.  */
union _FP_UNION_Q
{
__float128 flt;
struct
{
unsigned long frac0 : 32; /* least-significant fraction word */
unsigned long frac1 : 32;
unsigned long frac2 : 32;
unsigned long frac3 : 16; /* most-significant fraction bits */
unsigned exp : 15;
unsigned sign : 1;
} bits __attribute__((packed));
};
 
__float128 __copysigntf3 (__float128, __float128);
__float128 __fabstf2 (__float128);
 
__float128
__copysigntf3 (__float128 a, __float128 b)
{
union _FP_UNION_Q A, B;
 
A.flt = a;
B.flt = b;
A.bits.sign = B.bits.sign;
 
return A.flt;
}
 
__float128
__fabstf2 (__float128 a)
{
union _FP_UNION_Q A;
 
A.flt = a;
A.bits.sign = 0;
 
return A.flt;
}
/contrib/toolchain/gcc/5x/libgcc/config/i386/cpuinfo.c
0,0 → 1,429
/* Get CPU type and Features for x86 processors.
Copyright (C) 2012-2015 Free Software Foundation, Inc.
Contributed by Sriraman Tallam (tmsriram@google.com)
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
 
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
 
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
 
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
 
#include "cpuid.h"
#include "tsystem.h"
#include "auto-target.h"
 
#ifdef HAVE_INIT_PRIORITY
#define CONSTRUCTOR_PRIORITY (101)
#else
#define CONSTRUCTOR_PRIORITY
#endif
 
int __cpu_indicator_init (void)
__attribute__ ((constructor CONSTRUCTOR_PRIORITY));
 
/* Processor Vendor and Models. */
 
/* CPU vendor as decoded from the CPUID leaf-0 vendor string.  */
enum processor_vendor
{
VENDOR_INTEL = 1,
VENDOR_AMD,
VENDOR_OTHER, /* anything that is neither Intel nor AMD */
VENDOR_MAX
};
 
/* Any new types or subtypes have to be inserted at the end. */
 
/* Coarse CPU family identifiers stored in __cpu_model.__cpu_type.
   Values start at 1 so 0 means "not yet initialized / unknown".  */
enum processor_types
{
INTEL_BONNELL = 1,
INTEL_CORE2,
INTEL_COREI7,
AMDFAM10H,
AMDFAM15H,
INTEL_SILVERMONT,
AMD_BTVER1,
AMD_BTVER2,
CPU_TYPE_MAX
};
 
/* Finer-grained microarchitecture identifiers stored in
   __cpu_model.__cpu_subtype; 0 means unknown.  */
enum processor_subtypes
{
INTEL_COREI7_NEHALEM = 1,
INTEL_COREI7_WESTMERE,
INTEL_COREI7_SANDYBRIDGE,
AMDFAM10H_BARCELONA,
AMDFAM10H_SHANGHAI,
AMDFAM10H_ISTANBUL,
AMDFAM15H_BDVER1,
AMDFAM15H_BDVER2,
AMDFAM15H_BDVER3,
AMDFAM15H_BDVER4,
INTEL_COREI7_IVYBRIDGE,
INTEL_COREI7_HASWELL,
INTEL_COREI7_BROADWELL,
CPU_SUBTYPE_MAX
};
 
/* ISA Features supported. */
 
/* ISA feature flags; each value is the bit index used in
   __cpu_model.__cpu_features[0].  */
enum processor_features
{
FEATURE_CMOV = 0,
FEATURE_MMX,
FEATURE_POPCNT,
FEATURE_SSE,
FEATURE_SSE2,
FEATURE_SSE3,
FEATURE_SSSE3,
FEATURE_SSE4_1,
FEATURE_SSE4_2,
FEATURE_AVX,
FEATURE_AVX2,
FEATURE_SSE4_A,
FEATURE_FMA4,
FEATURE_XOP,
FEATURE_FMA,
FEATURE_AVX512F,
FEATURE_BMI,
FEATURE_BMI2
};
 
/* CPU identification record filled in once by __cpu_indicator_init
   and consumed by the __builtin_cpu_is/__builtin_cpu_supports
   machinery.  */
struct __processor_model
{
unsigned int __cpu_vendor; /* enum processor_vendor value */
unsigned int __cpu_type; /* enum processor_types value */
unsigned int __cpu_subtype; /* enum processor_subtypes value */
unsigned int __cpu_features[1]; /* FEATURE_* bit-mask */
} __cpu_model;
 
 
/* Get the specific type of AMD CPU. */
 
/* Classify an AMD CPU from its (already extended-adjusted) FAMILY and
   MODEL numbers, recording the result in __cpu_model.  Unknown
   families/models leave the corresponding fields untouched.  */
static void
get_amd_cpu (unsigned int family, unsigned int model)
{
  switch (family)
    {
    case 0x10:
      /* AMD Family 10h.  */
      __cpu_model.__cpu_type = AMDFAM10H;
      if (model == 0x2)
	__cpu_model.__cpu_subtype = AMDFAM10H_BARCELONA;
      else if (model == 0x4)
	__cpu_model.__cpu_subtype = AMDFAM10H_SHANGHAI;
      else if (model == 0x8)
	__cpu_model.__cpu_subtype = AMDFAM10H_ISTANBUL;
      break;

    case 0x14:
      /* AMD Family 14h "btver1".  */
      __cpu_model.__cpu_type = AMD_BTVER1;
      break;

    case 0x15:
      /* AMD Family 15h "Bulldozer"; the model ranges below are
	 disjoint, so an else-if chain is equivalent to the original
	 sequence of independent tests.  */
      __cpu_model.__cpu_type = AMDFAM15H;
      if (model <= 0xf)
	__cpu_model.__cpu_subtype = AMDFAM15H_BDVER1; /* Bulldozer.  */
      else if (model >= 0x10 && model <= 0x2f)
	__cpu_model.__cpu_subtype = AMDFAM15H_BDVER2; /* Piledriver.  */
      else if (model >= 0x30 && model <= 0x4f)
	__cpu_model.__cpu_subtype = AMDFAM15H_BDVER3; /* Steamroller.  */
      else if (model >= 0x60 && model <= 0x7f)
	__cpu_model.__cpu_subtype = AMDFAM15H_BDVER4; /* Excavator.  */
      break;

    case 0x16:
      /* AMD Family 16h "btver2".  */
      __cpu_model.__cpu_type = AMD_BTVER2;
      break;

    default:
      break;
    }
}
 
/* Get the specific type of Intel CPU. */
 
/* Classify an Intel CPU from its display FAMILY/MODEL (the caller has
   already folded in the extended family/model fields) and BRAND_ID,
   recording the result in __cpu_model.  Unknown models leave the
   fields untouched.  */
static void
get_intel_cpu (unsigned int family, unsigned int model, unsigned int brand_id)
{
/* Parse family and model only if brand ID is 0. */
if (brand_id == 0)
{
switch (family)
{
case 0x5:
/* Pentium. */
break;
case 0x6:
switch (model)
{
case 0x1c:
case 0x26:
/* Bonnell. */
__cpu_model.__cpu_type = INTEL_BONNELL;
break;
case 0x37:
case 0x4a:
case 0x4d:
case 0x5a:
case 0x5d:
/* Silvermont. */
__cpu_model.__cpu_type = INTEL_SILVERMONT;
break;
case 0x1a:
case 0x1e:
case 0x1f:
case 0x2e:
/* Nehalem. */
__cpu_model.__cpu_type = INTEL_COREI7;
__cpu_model.__cpu_subtype = INTEL_COREI7_NEHALEM;
break;
case 0x25:
case 0x2c:
case 0x2f:
/* Westmere. */
__cpu_model.__cpu_type = INTEL_COREI7;
__cpu_model.__cpu_subtype = INTEL_COREI7_WESTMERE;
break;
case 0x2a:
case 0x2d:
/* Sandy Bridge. */
__cpu_model.__cpu_type = INTEL_COREI7;
__cpu_model.__cpu_subtype = INTEL_COREI7_SANDYBRIDGE;
break;
case 0x3a:
case 0x3e:
/* Ivy Bridge. */
__cpu_model.__cpu_type = INTEL_COREI7;
__cpu_model.__cpu_subtype = INTEL_COREI7_IVYBRIDGE;
break;
case 0x3c:
case 0x3f:
case 0x45:
case 0x46:
/* Haswell. */
__cpu_model.__cpu_type = INTEL_COREI7;
__cpu_model.__cpu_subtype = INTEL_COREI7_HASWELL;
break;
case 0x3d:
case 0x4f:
case 0x56:
/* Broadwell. */
__cpu_model.__cpu_type = INTEL_COREI7;
__cpu_model.__cpu_subtype = INTEL_COREI7_BROADWELL;
break;
case 0x17:
case 0x1d:
/* Penryn. */
/* fallthrough: Penryn is reported as Core 2, no subtype. */
case 0x0f:
/* Merom. */
__cpu_model.__cpu_type = INTEL_CORE2;
break;
default:
break;
}
break;
default:
/* We have no idea. */
break;
}
}
}
 
/* ECX and EDX are output of CPUID at level one. MAX_CPUID_LEVEL is
the max possible level of CPUID insn. */
/* Build the FEATURE_* bit-mask from the CPUID level-1 outputs ECX/EDX,
   plus level 7 (if MAX_CPUID_LEVEL allows) and the extended level
   0x80000001, and store it in __cpu_model.__cpu_features[0].
   NOTE: the ecx/edx parameters are deliberately reused as scratch
   outputs for the extended __cpuid calls below.  */
static void
get_available_features (unsigned int ecx, unsigned int edx,
int max_cpuid_level)
{
unsigned int features = 0;

if (edx & bit_CMOV)
features |= (1 << FEATURE_CMOV);
if (edx & bit_MMX)
features |= (1 << FEATURE_MMX);
if (edx & bit_SSE)
features |= (1 << FEATURE_SSE);
if (edx & bit_SSE2)
features |= (1 << FEATURE_SSE2);
if (ecx & bit_POPCNT)
features |= (1 << FEATURE_POPCNT);
if (ecx & bit_SSE3)
features |= (1 << FEATURE_SSE3);
if (ecx & bit_SSSE3)
features |= (1 << FEATURE_SSSE3);
if (ecx & bit_SSE4_1)
features |= (1 << FEATURE_SSE4_1);
if (ecx & bit_SSE4_2)
features |= (1 << FEATURE_SSE4_2);
if (ecx & bit_AVX)
features |= (1 << FEATURE_AVX);
if (ecx & bit_FMA)
features |= (1 << FEATURE_FMA);

/* Get Advanced Features at level 7 (eax = 7, ecx = 0). */
if (max_cpuid_level >= 7)
{
/* These locals shadow the parameters only inside this branch. */
unsigned int eax, ebx, ecx, edx;
__cpuid_count (7, 0, eax, ebx, ecx, edx);
if (ebx & bit_BMI)
features |= (1 << FEATURE_BMI);
if (ebx & bit_AVX2)
features |= (1 << FEATURE_AVX2);
if (ebx & bit_BMI2)
features |= (1 << FEATURE_BMI2);
if (ebx & bit_AVX512F)
features |= (1 << FEATURE_AVX512F);
}

unsigned int ext_level;
unsigned int eax, ebx;
/* Check cpuid level of extended features. */
/* This clobbers the ecx/edx parameter values.  */
__cpuid (0x80000000, ext_level, ebx, ecx, edx);

if (ext_level > 0x80000000)
{
__cpuid (0x80000001, eax, ebx, ecx, edx);

if (ecx & bit_SSE4a)
features |= (1 << FEATURE_SSE4_A);
if (ecx & bit_FMA4)
features |= (1 << FEATURE_FMA4);
if (ecx & bit_XOP)
features |= (1 << FEATURE_XOP);
}
__cpu_model.__cpu_features[0] = features;
}
 
/* A noinline function calling __get_cpuid. Having many calls to
cpuid in one function in 32-bit mode causes GCC to complain:
"can't find a register in class CLOBBERED_REGS". This is
related to PR rtl-optimization 44174. */
 
/* Thin noinline wrapper around __get_cpuid (see comment above for why
   it must not be inlined).  Returns nonzero on success with the level
   __LEVEL outputs stored through the four pointers.  */
static int __attribute__ ((noinline))
__get_cpuid_output (unsigned int __level,
unsigned int *__eax, unsigned int *__ebx,
unsigned int *__ecx, unsigned int *__edx)
{
return __get_cpuid (__level, __eax, __ebx, __ecx, __edx);
}
 
 
/* A constructor function that is sets __cpu_model and __cpu_features with
the right values. This needs to run only once. This constructor is
given the highest priority and it should run before constructors without
the priority set. However, it still runs after ifunc initializers and
needs to be called explicitly there. */
 
/* Populate __cpu_model (vendor, type, subtype, feature mask) from
   CPUID.  Returns 0 on success, -1 if CPUID output is unusable; in
   the failure cases the vendor is set to VENDOR_OTHER so the function
   still runs only once.  */
int __attribute__ ((constructor CONSTRUCTOR_PRIORITY))
__cpu_indicator_init (void)
{
unsigned int eax, ebx, ecx, edx;

int max_level = 5; /* overwritten below with the CPUID level-0 EAX */
unsigned int vendor;
unsigned int model, family, brand_id;
unsigned int extended_model, extended_family;

/* This function needs to run just once. */
if (__cpu_model.__cpu_vendor)
return 0;

/* Assume cpuid insn present. Run in level 0 to get vendor id. */
if (!__get_cpuid_output (0, &eax, &ebx, &ecx, &edx))
{
__cpu_model.__cpu_vendor = VENDOR_OTHER;
return -1;
}

vendor = ebx;
max_level = eax;

if (max_level < 1)
{
__cpu_model.__cpu_vendor = VENDOR_OTHER;
return -1;
}

if (!__get_cpuid_output (1, &eax, &ebx, &ecx, &edx))
{
__cpu_model.__cpu_vendor = VENDOR_OTHER;
return -1;
}

/* Decode the version-information fields of level-1 EAX/EBX.  The
   extended model is kept pre-shifted left by 4 so it can simply be
   added to the base model below. */
model = (eax >> 4) & 0x0f;
family = (eax >> 8) & 0x0f;
brand_id = ebx & 0xff;
extended_model = (eax >> 12) & 0xf0;
extended_family = (eax >> 20) & 0xff;

if (vendor == signature_INTEL_ebx)
{
/* Adjust model and family for Intel CPUS. */
if (family == 0x0f)
{
family += extended_family;
model += extended_model;
}
else if (family == 0x06)
model += extended_model;

/* Get CPU type. */
get_intel_cpu (family, model, brand_id);
/* Find available features. */
get_available_features (ecx, edx, max_level);
__cpu_model.__cpu_vendor = VENDOR_INTEL;
}
else if (vendor == signature_AMD_ebx)
{
/* Adjust model and family for AMD CPUS. */
if (family == 0x0f)
{
family += extended_family;
model += extended_model;
}

/* Get CPU type. */
get_amd_cpu (family, model);
/* Find available features. */
get_available_features (ecx, edx, max_level);
__cpu_model.__cpu_vendor = VENDOR_AMD;
}
else
__cpu_model.__cpu_vendor = VENDOR_OTHER;

gcc_assert (__cpu_model.__cpu_vendor < VENDOR_MAX);
gcc_assert (__cpu_model.__cpu_type < CPU_TYPE_MAX);
gcc_assert (__cpu_model.__cpu_subtype < CPU_SUBTYPE_MAX);

return 0;
}
/contrib/toolchain/gcc/5x/libgcc/config/i386/crtfastmath.c
0,0 → 1,96
/*
* Copyright (C) 2005-2015 Free Software Foundation, Inc.
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License and
* a copy of the GCC Runtime Library Exception along with this program;
* see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
* <http://www.gnu.org/licenses/>.
*/
 
#ifndef _SOFT_FLOAT
#define MXCSR_DAZ (1 << 6) /* Enable denormals are zero mode */
#define MXCSR_FTZ (1 << 15) /* Enable flush to zero mode */
 
#ifndef __x86_64__
/* All 64-bit targets have SSE and DAZ;
only check them explicitly for 32-bit ones. */
#include "cpuid.h"
#endif
 
/* Startup constructor (linked in by -ffast-math): turn on the SSE
   flush-to-zero bit and, when the hardware supports it, the
   denormals-are-zero bit in MXCSR.  On 32-bit targets SSE and DAZ
   support must first be probed via CPUID and FXSAVE.  */
static void __attribute__((constructor))
#ifndef __x86_64__
/* The i386 ABI only requires 4-byte stack alignment, so this is necessary
to make sure the fxsave struct gets correct alignment.
See PR27537 and PR28621. */
__attribute__ ((force_align_arg_pointer))
#endif
set_fast_math (void)
{
#ifndef __x86_64__
unsigned int eax, ebx, ecx, edx;

if (!__get_cpuid (1, &eax, &ebx, &ecx, &edx))
return;

if (edx & bit_SSE)
{
unsigned int mxcsr;
if (edx & bit_FXSAVE)
{
/* Check if DAZ is available. */
/* Layout of the FXSAVE image; only mxcsr/mxcsr_mask are read. */
struct
{
unsigned short cwd;
unsigned short swd;
unsigned short twd;
unsigned short fop;
unsigned int fip;
unsigned int fcs;
unsigned int foo;
unsigned int fos;
unsigned int mxcsr;
unsigned int mxcsr_mask;
unsigned int st_space[32];
unsigned int xmm_space[32];
unsigned int padding[56];
} __attribute__ ((aligned (16))) fxsave;

/* This is necessary since some implementations of FXSAVE
do not modify reserved areas within the image. */
fxsave.mxcsr_mask = 0;

__builtin_ia32_fxsave (&fxsave);

mxcsr = fxsave.mxcsr;

/* A zero mask bit means DAZ is not supported.  */
if (fxsave.mxcsr_mask & MXCSR_DAZ)
mxcsr |= MXCSR_DAZ;
}
else
mxcsr = __builtin_ia32_stmxcsr ();

mxcsr |= MXCSR_FTZ;
__builtin_ia32_ldmxcsr (mxcsr);
}
#else
/* 64-bit: SSE2 and DAZ are architecturally guaranteed.  */
unsigned int mxcsr = __builtin_ia32_stmxcsr ();
mxcsr |= MXCSR_DAZ | MXCSR_FTZ;
__builtin_ia32_ldmxcsr (mxcsr);
#endif
}
#endif
/contrib/toolchain/gcc/5x/libgcc/config/i386/crti.S
0,0 → 1,40
/* crti.S for x86.
 
Copyright (C) 1993-2015 Free Software Foundation, Inc.
Written By Fred Fish, Nov 1992
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
 
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
 
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
 
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
 
 
/* This file just supplies labeled starting points for the .init and .fini
sections. It is linked in before the values-Xx.o files and also before
crtbegin.o. */
.ident "GNU C crti.s"

/* Open the .init section and define its entry symbol; bodies are
   contributed by crtbegin/crtend and the closing ret comes from
   crtn.S.  */
.section .init
.globl _init
.type _init,@function
_init:

/* Likewise for the .fini section.  */
.section .fini
.globl _fini
.type _fini,@function
_fini:
/contrib/toolchain/gcc/5x/libgcc/config/i386/crtn.S
0,0 → 1,35
/* crtn.S for x86.
 
Copyright (C) 1993-2015 Free Software Foundation, Inc.
Written By Fred Fish, Nov 1992
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
 
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
 
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
 
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
 
 
/* This file just supplies returns for the .init and .fini sections. It is
linked in after all other files. */
 
.ident "GNU C crtn.o"

/* Terminate the _init body started in crti.S.  */
.section .init
ret $0x0

/* Terminate the _fini body started in crti.S.  */
.section .fini
ret $0x0
/contrib/toolchain/gcc/5x/libgcc/config/i386/crtprec.c
0,0 → 1,49
/*
* Copyright (C) 2007-2015 Free Software Foundation, Inc.
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License and
* a copy of the GCC Runtime Library Exception along with this program;
* see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
* <http://www.gnu.org/licenses/>.
*/
 
#ifndef _SOFT_FLOAT
#if __PREC == 32
#define X87CW (0 << 8) /* Single precision (24 bits) */
#elif __PREC == 64
#define X87CW (2 << 8) /* Double precision (53 bits) */
#elif __PREC == 80
#define X87CW (3 << 8) /* Extended precision (64 bits) */
#else
#error "Wrong precision requested."
#endif
 
#define X87CW_PCMASK (3 << 8)
 
/* Startup constructor: set the x87 control-word precision field to the
   precision (X87CW) selected via __PREC at compile time, leaving all
   other control-word bits unchanged.  */
static void __attribute__((constructor))
set_precision (void)
{
unsigned short int cwd;

/* Read the current x87 control word.  */
asm volatile ("fstcw\t%0" : "=m" (cwd));

/* Replace only the precision-control bits.  */
cwd &= ~X87CW_PCMASK;
cwd |= X87CW;

asm volatile ("fldcw\t%0" : : "m" (cwd));
}
#endif
/contrib/toolchain/gcc/5x/libgcc/config/i386/cygming-crtbegin.c
0,0 → 1,199
/* crtbegin object for windows32 targets.
Copyright (C) 2007-2015 Free Software Foundation, Inc.
 
Contributed by Danny Smith <dannysmith@users.sourceforge.net>
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
 
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
 
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
 
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
 
/* Target machine header files require this define. */
#define IN_LIBGCC2
 
#include "auto-host.h"
#include "tconfig.h"
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"
#include "libgcc_tm.h"
#include "unwind-dw2-fde.h"
 
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
 
#ifndef LIBGCC_SONAME
#define LIBGCC_SONAME "libgcc_s.dll"
#endif
 
#ifndef LIBGCJ_SONAME
#define LIBGCJ_SONAME "libgcj_s.dll"
#endif
 
#if DWARF2_UNWIND_INFO
/* Make the declarations weak. This is critical for
_Jv_RegisterClasses because it lives in libgcj.a */
extern void __register_frame_info (__attribute__((unused)) const void *,
__attribute__((unused)) struct object *)
TARGET_ATTRIBUTE_WEAK;
extern void *__deregister_frame_info (__attribute__((unused)) const void *)
TARGET_ATTRIBUTE_WEAK;
 
/* Work around for current cygwin32 build problems (Bug gas/16858).
Compile weak default functions only for 64-bit systems,
when absolutely necessary. */
#ifdef __x86_64__
TARGET_ATTRIBUTE_WEAK void
__register_frame_info (__attribute__((unused)) const void *p,
__attribute__((unused)) struct object *o)
{
}
 
TARGET_ATTRIBUTE_WEAK void *
__deregister_frame_info (__attribute__((unused)) const void *p)
{
return (void*) 0;
}
#endif
#endif /* DWARF2_UNWIND_INFO */
 
#if TARGET_USE_JCR_SECTION
extern void _Jv_RegisterClasses (__attribute__((unused)) const void *)
TARGET_ATTRIBUTE_WEAK;
 
#ifdef __x86_64__
TARGET_ATTRIBUTE_WEAK void
_Jv_RegisterClasses (__attribute__((unused)) const void *p)
{
}
#endif
#endif /* TARGET_USE_JCR_SECTION */
 
#if defined(HAVE_LD_RO_RW_SECTION_MIXING)
# define EH_FRAME_SECTION_CONST const
#else
# define EH_FRAME_SECTION_CONST
#endif
 
/* Stick a label at the beginning of the frame unwind info so we can
register/deregister it with the exception handling library code. */
#if DWARF2_UNWIND_INFO
static EH_FRAME_SECTION_CONST char __EH_FRAME_BEGIN__[]
__attribute__((used, section(__LIBGCC_EH_FRAME_SECTION_NAME__), aligned(4)))
= { };
 
static struct object obj;
 
/* Handle of libgcc's DLL reference. */
HANDLE hmod_libgcc;
static void * (*deregister_frame_fn) (const void *) = NULL;
#endif
 
#if TARGET_USE_JCR_SECTION
static void *__JCR_LIST__[]
__attribute__ ((used, section(__LIBGCC_JCR_SECTION_NAME__), aligned(4)))
= { };
#endif
 
#ifdef __CYGWIN__
/* Declare the __dso_handle variable. It should have a unique value
in every shared-object; in a main program its value is zero. The
object should in any case be protected. This means the instance
in one DSO or the main program is not used in another object. The
dynamic linker takes care of this. */
 
#ifdef CRTSTUFFS_O
extern void *__ImageBase;
void *__dso_handle = &__ImageBase;
#else
void *__dso_handle = 0;
#endif
 
#endif /* __CYGWIN__ */
 
 
/* Pull in references from libgcc.a(unwind-dw2-fde.o) in the
startfile. These are referenced by a ctor and dtor in crtend.o. */
extern void __gcc_register_frame (void);
extern void __gcc_deregister_frame (void);
 
/* Register this module's DWARF2 EH frame info (and any .jcr Java class
   list) with the runtime, preferring the functions exported by a
   loaded libgcc/libgcj DLL over the weak static-archive fallbacks.
   Called from the crtend constructor.  */
void
__gcc_register_frame (void)
{
#if DWARF2_UNWIND_INFO
/* Weak undefined symbols won't be pulled in from dlls; hence
we first test if the dll is already loaded and, if so,
get the symbol's address at run-time. If the dll is not loaded,
fallback to weak linkage to static archive. */

void (*register_frame_fn) (const void *, struct object *);
HANDLE h = GetModuleHandle (LIBGCC_SONAME);

if (h)
{
/* Increasing the load-count of LIBGCC_SONAME DLL. */
hmod_libgcc = LoadLibrary (LIBGCC_SONAME);
register_frame_fn = (void (*) (const void *, struct object *))
GetProcAddress (h, "__register_frame_info");
/* Saved for __gcc_deregister_frame.  */
deregister_frame_fn = (void* (*) (const void *))
GetProcAddress (h, "__deregister_frame_info");
}
else
{
register_frame_fn = __register_frame_info;
deregister_frame_fn = __deregister_frame_info;
}
if (register_frame_fn)
register_frame_fn (__EH_FRAME_BEGIN__, &obj);
#endif

#if TARGET_USE_JCR_SECTION
/* Only bother when the .jcr list is non-empty.  */
if (__JCR_LIST__[0])
{
void (*register_class_fn) (const void *);
HANDLE h = GetModuleHandle (LIBGCJ_SONAME);
if (h)
register_class_fn = (void (*) (const void *))
GetProcAddress (h, "_Jv_RegisterClasses");
else
register_class_fn = _Jv_RegisterClasses;

if (register_class_fn)
register_class_fn (__JCR_LIST__);
}
#endif

#if DEFAULT_USE_CXA_ATEXIT
/* If we use the __cxa_atexit method to register C++ dtors
at object construction, also use atexit to register eh frame
info cleanup. */
atexit(__gcc_deregister_frame);
#endif /* DEFAULT_USE_CXA_ATEXIT */
}
 
/* Undo __gcc_register_frame: deregister the EH frame info through the
   function pointer captured at registration time, then drop the extra
   libgcc DLL reference taken there.  */
void
__gcc_deregister_frame (void)
{
#if DWARF2_UNWIND_INFO
if (deregister_frame_fn)
deregister_frame_fn (__EH_FRAME_BEGIN__);
if (hmod_libgcc)
FreeLibrary (hmod_libgcc);
#endif
}
/contrib/toolchain/gcc/5x/libgcc/config/i386/cygming-crtend.c
0,0 → 1,83
/* crtend object for windows32 targets.
Copyright (C) 2007-2015 Free Software Foundation, Inc.
 
Contributed by Danny Smith <dannysmith@users.sourceforge.net>
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
 
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
 
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
 
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
 
/* Target machine header files require this define. */
#define IN_LIBGCC2
 
/* auto-host.h is needed by cygming.h for HAVE_GAS_WEAK and here
for HAVE_LD_RO_RW_SECTION_MIXING. */
#include "auto-host.h"
#include "tconfig.h"
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"
#include "libgcc_tm.h"
#include "unwind-dw2-fde.h"
 
#if defined(HAVE_LD_RO_RW_SECTION_MIXING)
# define EH_FRAME_SECTION_CONST const
#else
# define EH_FRAME_SECTION_CONST
#endif
 
#if DWARF2_UNWIND_INFO
/* Terminate the frame unwind info section with a 0 as a sentinel;
this would be the 'length' field in a real FDE. */
 
static EH_FRAME_SECTION_CONST int __FRAME_END__[]
__attribute__ ((used, section(__LIBGCC_EH_FRAME_SECTION_NAME__),
aligned(4)))
= { 0 };
#endif
 
#if TARGET_USE_JCR_SECTION
/* Null terminate the .jcr section array. */
static void *__JCR_END__[1]
__attribute__ ((used, section(__LIBGCC_JCR_SECTION_NAME__),
aligned(sizeof(void *))))
= { 0 };
#endif
 
extern void __gcc_register_frame (void);
extern void __gcc_deregister_frame (void);
 
static void register_frame_ctor (void) __attribute__ ((constructor (0)));
 
/* Priority-0 constructor: register this image's EH frame info before
   other static constructors run.  */
static void
register_frame_ctor (void)
{
__gcc_register_frame ();
}
 
#if !DEFAULT_USE_CXA_ATEXIT
static void deregister_frame_dtor (void) __attribute__ ((destructor (0)));
 
/* Priority-0 destructor counterpart, used only when __cxa_atexit does
   not handle the cleanup registration.  */
static void
deregister_frame_dtor (void)
{
__gcc_deregister_frame ();
}
#endif
/contrib/toolchain/gcc/5x/libgcc/config/i386/cygwin.S
0,0 → 1,187
/* stuff needed for libgcc on win32.
*
* Copyright (C) 1996-2015 Free Software Foundation, Inc.
* Written By Steve Chamberlain
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License and
* a copy of the GCC Runtime Library Exception along with this program;
* see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
* <http://www.gnu.org/licenses/>.
*/
 
#include "auto-host.h"
 
/* CFI helper macros: expand to DWARF call-frame-information directives
   when the assembler supports them, and to nothing otherwise.
   cfi_push/cfi_pop adjust the CFA by the native word size.  */
#ifdef HAVE_GAS_CFI_SECTIONS_DIRECTIVE
.cfi_sections .debug_frame
# define cfi_startproc() .cfi_startproc
# define cfi_endproc() .cfi_endproc
# define cfi_adjust_cfa_offset(X) .cfi_adjust_cfa_offset X
# define cfi_def_cfa_register(X) .cfi_def_cfa_register X
# define cfi_register(D,S) .cfi_register D, S
# ifdef __x86_64__
# define cfi_push(X) .cfi_adjust_cfa_offset 8; .cfi_rel_offset X, 0
# define cfi_pop(X) .cfi_adjust_cfa_offset -8; .cfi_restore X
# else
# define cfi_push(X) .cfi_adjust_cfa_offset 4; .cfi_rel_offset X, 0
# define cfi_pop(X) .cfi_adjust_cfa_offset -4; .cfi_restore X
# endif
#else
# define cfi_startproc()
# define cfi_endproc()
# define cfi_adjust_cfa_offset(X)
# define cfi_def_cfa_register(X)
# define cfi_register(D,S)
# define cfi_push(X)
# define cfi_pop(X)
#endif /* HAVE_GAS_CFI_SECTIONS_DIRECTIVE */
 
#ifdef L_chkstk
/* Function prologue calls __chkstk to probe the stack when allocating more
than CHECK_STACK_LIMIT bytes in one go. Touching the stack at 4K
increments is necessary to ensure that the guard pages used
by the OS virtual memory manger are allocated in correct sequence. */

.global ___chkstk
.global __alloca
#ifdef __x86_64__
/* __alloca is a normal function call, which uses %rcx as the argument. */
cfi_startproc()
__alloca:
movq %rcx, %rax
/* FALLTHRU */

/* ___chkstk is a *special* function call, which uses %rax as the argument.
We avoid clobbering the 4 integer argument registers, %rcx, %rdx,
%r8 and %r9, which leaves us with %rax, %r10, and %r11 to use. */
.align 4
___chkstk:
popq %r11 /* pop return address */
cfi_adjust_cfa_offset(-8) /* indicate return address in r11 */
cfi_register(%rip, %r11)
movq %rsp, %r10
cmpq $0x1000, %rax /* > 4k ?*/
jb 2f

/* Probe loop: touch one word per 4K page until under a page remains. */
1: subq $0x1000, %r10 /* yes, move pointer down 4k*/
orl $0x0, (%r10) /* probe there */
subq $0x1000, %rax /* decrement count */
cmpq $0x1000, %rax
ja 1b /* and do it again */

2: subq %rax, %r10
movq %rsp, %rax /* hold CFA until return */
cfi_def_cfa_register(%rax)
orl $0x0, (%r10) /* less than 4k, just peek here */
movq %r10, %rsp /* decrement stack */

/* Push the return value back. Doing this instead of just
jumping to %r11 preserves the cached call-return stack
used by most modern processors. */
pushq %r11
ret
cfi_endproc()
#else
/* 32-bit variant: the size arrives in %eax; %ecx walks the probe
   pointer relative to the incoming %esp.  */
cfi_startproc()
___chkstk:
__alloca:
pushl %ecx /* save temp */
/* NOTE(review): records %eax, but the insn pushes %ecx — matches
   upstream; confirm whether %ecx was intended here.  */
cfi_push(%eax)
leal 8(%esp), %ecx /* point past return addr */
cmpl $0x1000, %eax /* > 4k ?*/
jb 2f

1: subl $0x1000, %ecx /* yes, move pointer down 4k*/
orl $0x0, (%ecx) /* probe there */
subl $0x1000, %eax /* decrement count */
cmpl $0x1000, %eax
ja 1b /* and do it again */

2: subl %eax, %ecx
orl $0x0, (%ecx) /* less than 4k, just peek here */
movl %esp, %eax /* save current stack pointer */
cfi_def_cfa_register(%eax)
movl %ecx, %esp /* decrement stack */
movl (%eax), %ecx /* recover saved temp */

/* Copy the return register. Doing this instead of just jumping to
the address preserves the cached call-return stack used by most
modern processors. */
pushl 4(%eax)
ret
cfi_endproc()
#endif /* __x86_64__ */
#endif /* L_chkstk */
 
#ifdef L_chkstk_ms
/* ___chkstk_ms is a *special* function call, which uses %rax as the argument.
   We avoid clobbering any registers.  Unlike ___chkstk, it just probes the
   stack and does no stack allocation.  The caller adjusts the stack pointer
   itself afterwards; all registers (including %rax) are preserved here by
   saving and restoring the two temporaries on the stack.  */
	.global ___chkstk_ms
#ifdef __x86_64__
	cfi_startproc()
___chkstk_ms:
	pushq	%rcx		/* save temps */
	cfi_push(%rcx)
	pushq	%rax
	cfi_push(%rax)
	cmpq	$0x1000, %rax	/* > 4k ?*/
	leaq	24(%rsp), %rcx	/* point past return addr */
	jb	2f
 
	/* Touch one word per 4K page so guard pages are committed in order.  */
1:	subq	$0x1000, %rcx	/* yes, move pointer down 4k */
	orq	$0x0, (%rcx)	/* probe there */
	subq	$0x1000, %rax	/* decrement count */
	cmpq	$0x1000, %rax
	ja	1b		/* and do it again */
 
2:	subq	%rax, %rcx
	orq	$0x0, (%rcx)	/* less than 4k, just peek here */
 
	popq	%rax		/* restore temps; stack pointer unchanged */
	cfi_pop(%rax)
	popq	%rcx
	cfi_pop(%rcx)
	ret
	cfi_endproc()
#else
	cfi_startproc()
___chkstk_ms:
	pushl	%ecx		/* save temp */
	cfi_push(%ecx)
	pushl	%eax
	cfi_push(%eax)
	cmpl	$0x1000, %eax	/* > 4k ?*/
	leal	12(%esp), %ecx	/* point past return addr */
	jb	2f
 
1:	subl	$0x1000, %ecx	/* yes, move pointer down 4k*/
	orl	$0x0, (%ecx)	/* probe there */
	subl	$0x1000, %eax	/* decrement count */
	cmpl	$0x1000, %eax
	ja	1b		/* and do it again */
 
2:	subl	%eax, %ecx
	orl	$0x0, (%ecx)	/* less than 4k, just peek here */
 
	popl	%eax		/* restore temps; stack pointer unchanged */
	cfi_pop(%eax)
	popl	%ecx
	cfi_pop(%ecx)
	ret
	cfi_endproc()
#endif /* __x86_64__ */
#endif /* L_chkstk_ms */
/contrib/toolchain/gcc/5x/libgcc/config/i386/darwin-lib.h
0,0 → 1,32
/* Target definitions for x86 running Darwin, library renames.
Copyright (C) 2011-2015 Free Software Foundation, Inc.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
 
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
 
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
 
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
 
/* The system ___divdc3 routine in libSystem on darwin10 is not
   accurate to 1ulp, ours is, so we avoid ever using the system name
   for this routine and instead install a non-conflicting name that is
   accurate.  See darwin_rename_builtins.  */
#ifdef L_divdc3
/* Only takes effect in the translation unit that builds _divdc3:
   emit a ___divdc3 entry that tail-jumps to the accurate
   ___ieee_divdc3 implementation and export it.  */
#define DECLARE_LIBRARY_RENAMES \
  asm(".text; ___divdc3: jmp ___ieee_divdc3 ; .globl ___divdc3");
#endif
/contrib/toolchain/gcc/5x/libgcc/config/i386/dragonfly-unwind.h
0,0 → 1,180
/* DWARF2 EH unwinding support for DragonFly BSD: AMD x86-64 and x86.
Copyright (C) 2014-2015 Free Software Foundation, Inc.
Contributed by John Marino <gnugcc@marino.st>
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
 
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
 
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
 
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
 
/* Do code reading to identify a signal frame, and set the frame
state data appropriately. See unwind-dw2.c for the structs. */
 
#include <sys/types.h>
#include <sys/sysctl.h>
#include <signal.h>
#include <sys/ucontext.h>
#include <machine/sigframe.h>
 
 
#define REG_NAME(reg) sf_uc.uc_mcontext.mc_## reg
 
#ifdef __x86_64__
#define MD_FALLBACK_FRAME_STATE_FOR x86_64_dragonfly_fallback_frame_state
 
 
/* Compute the [start, end) address range of the signal trampoline.
   The trampoline occupies the 32 bytes immediately below the kernel's
   ps_strings structure, whose address is obtained via sysctl.  */
static void
x86_64_sigtramp_range (unsigned char **start, unsigned char **end)
{
  int name[2] = { CTL_KERN, KERN_PS_STRINGS };
  unsigned long ps_strings = 0;
  size_t size = sizeof (ps_strings);

  sysctl (name, 2, &ps_strings, &size, NULL, 0);

  *end = (unsigned char *) ps_strings;
  *start = *end - 32;
}
 
 
/* MD fallback unwinder for DragonFly/amd64: if the return address lies
   inside the signal trampoline, rebuild the frame state from the
   sigframe the kernel pushed on the stack (see unwind-dw2.c for the
   structs); otherwise report end of stack.  */
static _Unwind_Reason_Code
x86_64_dragonfly_fallback_frame_state
(struct _Unwind_Context *context, _Unwind_FrameState *fs)
{
  unsigned char *pc = context->ra;
  unsigned char *sigtramp_start, *sigtramp_end;
  struct sigframe *sf;
  long new_cfa;

  x86_64_sigtramp_range(&sigtramp_start, &sigtramp_end);
  if (pc >= sigtramp_end || pc < sigtramp_start)
    return _URC_END_OF_STACK;

  /* The interrupted frame's CFA is the rsp value saved in the
     machine context inside the sigframe.  */
  sf = (struct sigframe *) context->cfa;
  new_cfa = sf->REG_NAME(rsp);
  fs->regs.cfa_how = CFA_REG_OFFSET;
  /* Register 7 is rsp */
  fs->regs.cfa_reg = 7;
  fs->regs.cfa_offset = new_cfa - (long) context->cfa;

  /* The SVR4 register numbering macros aren't usable in libgcc.
     Indices below are DWARF register numbers; each register is located
     at its saved slot's offset from the new CFA.  */
  fs->regs.reg[0].how = REG_SAVED_OFFSET;
  fs->regs.reg[0].loc.offset = (long)&sf->REG_NAME(rax) - new_cfa;
  fs->regs.reg[1].how = REG_SAVED_OFFSET;
  fs->regs.reg[1].loc.offset = (long)&sf->REG_NAME(rdx) - new_cfa;
  fs->regs.reg[2].how = REG_SAVED_OFFSET;
  fs->regs.reg[2].loc.offset = (long)&sf->REG_NAME(rcx) - new_cfa;
  fs->regs.reg[3].how = REG_SAVED_OFFSET;
  fs->regs.reg[3].loc.offset = (long)&sf->REG_NAME(rbx) - new_cfa;
  fs->regs.reg[4].how = REG_SAVED_OFFSET;
  fs->regs.reg[4].loc.offset = (long)&sf->REG_NAME(rsi) - new_cfa;
  fs->regs.reg[5].how = REG_SAVED_OFFSET;
  fs->regs.reg[5].loc.offset = (long)&sf->REG_NAME(rdi) - new_cfa;
  fs->regs.reg[6].how = REG_SAVED_OFFSET;
  fs->regs.reg[6].loc.offset = (long)&sf->REG_NAME(rbp) - new_cfa;
  fs->regs.reg[8].how = REG_SAVED_OFFSET;
  fs->regs.reg[8].loc.offset = (long)&sf->REG_NAME(r8) - new_cfa;
  fs->regs.reg[9].how = REG_SAVED_OFFSET;
  fs->regs.reg[9].loc.offset = (long)&sf->REG_NAME(r9) - new_cfa;
  fs->regs.reg[10].how = REG_SAVED_OFFSET;
  fs->regs.reg[10].loc.offset = (long)&sf->REG_NAME(r10) - new_cfa;
  fs->regs.reg[11].how = REG_SAVED_OFFSET;
  fs->regs.reg[11].loc.offset = (long)&sf->REG_NAME(r11) - new_cfa;
  fs->regs.reg[12].how = REG_SAVED_OFFSET;
  fs->regs.reg[12].loc.offset = (long)&sf->REG_NAME(r12) - new_cfa;
  fs->regs.reg[13].how = REG_SAVED_OFFSET;
  fs->regs.reg[13].loc.offset = (long)&sf->REG_NAME(r13) - new_cfa;
  fs->regs.reg[14].how = REG_SAVED_OFFSET;
  fs->regs.reg[14].loc.offset = (long)&sf->REG_NAME(r14) - new_cfa;
  fs->regs.reg[15].how = REG_SAVED_OFFSET;
  fs->regs.reg[15].loc.offset = (long)&sf->REG_NAME(r15) - new_cfa;
  fs->regs.reg[16].how = REG_SAVED_OFFSET;
  fs->regs.reg[16].loc.offset = (long)&sf->REG_NAME(rip) - new_cfa;
  /* DWARF column 16 (rip) holds the resumption address.  */
  fs->retaddr_column = 16;
  fs->signal_frame = 1;
  return _URC_NO_REASON;
}
 
#else /* Next section is for i386 */
 
#define MD_FALLBACK_FRAME_STATE_FOR x86_dragonfly_fallback_frame_state
 
 
/* Compute the [start, end) address range of the signal trampoline.
   The trampoline occupies the 128 bytes immediately below the kernel's
   ps_strings structure, whose address is obtained via sysctl.  */
static void
x86_sigtramp_range (unsigned char **start, unsigned char **end)
{
  int name[2] = { CTL_KERN, KERN_PS_STRINGS };
  unsigned long ps_strings = 0;
  size_t size = sizeof (ps_strings);

  sysctl (name, 2, &ps_strings, &size, NULL, 0);

  *end = (unsigned char *) ps_strings;
  *start = *end - 128;
}
 
 
/* MD fallback unwinder for DragonFly/i386: if the return address lies
   inside the signal trampoline, rebuild the frame state from the
   sigframe the kernel pushed on the stack (see unwind-dw2.c for the
   structs); otherwise report end of stack.  */
static _Unwind_Reason_Code
x86_dragonfly_fallback_frame_state
(struct _Unwind_Context *context, _Unwind_FrameState *fs)
{
  unsigned char *pc = context->ra;
  unsigned char *sigtramp_start, *sigtramp_end;
  struct sigframe *sf;
  long new_cfa;

  x86_sigtramp_range(&sigtramp_start, &sigtramp_end);

  if (pc >= sigtramp_end || pc < sigtramp_start)
    return _URC_END_OF_STACK;

  /* The interrupted frame's CFA is the esp value saved in the
     machine context; DWARF register 4 is esp.  */
  sf = (struct sigframe *) context->cfa;
  new_cfa = sf->REG_NAME(esp);
  fs->regs.cfa_how = CFA_REG_OFFSET;
  fs->regs.cfa_reg = 4;
  fs->regs.cfa_offset = new_cfa - (long) context->cfa;

  /* The SVR4 register numbering macros aren't usable in libgcc.
     Indices below are DWARF register numbers.  */
  fs->regs.reg[0].how = REG_SAVED_OFFSET;
  fs->regs.reg[0].loc.offset = (long)&sf->REG_NAME(eax) - new_cfa;
  fs->regs.reg[3].how = REG_SAVED_OFFSET;
  fs->regs.reg[3].loc.offset = (long)&sf->REG_NAME(ebx) - new_cfa;
  fs->regs.reg[1].how = REG_SAVED_OFFSET;
  fs->regs.reg[1].loc.offset = (long)&sf->REG_NAME(ecx) - new_cfa;
  fs->regs.reg[2].how = REG_SAVED_OFFSET;
  fs->regs.reg[2].loc.offset = (long)&sf->REG_NAME(edx) - new_cfa;
  fs->regs.reg[6].how = REG_SAVED_OFFSET;
  fs->regs.reg[6].loc.offset = (long)&sf->REG_NAME(esi) - new_cfa;
  fs->regs.reg[7].how = REG_SAVED_OFFSET;
  fs->regs.reg[7].loc.offset = (long)&sf->REG_NAME(edi) - new_cfa;
  fs->regs.reg[5].how = REG_SAVED_OFFSET;
  fs->regs.reg[5].loc.offset = (long)&sf->REG_NAME(ebp) - new_cfa;
  fs->regs.reg[8].how = REG_SAVED_OFFSET;
  fs->regs.reg[8].loc.offset = (long)&sf->REG_NAME(eip) - new_cfa;
  /* DWARF column 8 (eip) holds the resumption address.  */
  fs->retaddr_column = 8;
  fs->signal_frame = 1;
  return _URC_NO_REASON;
}
#endif /* ifdef __x86_64__ */
/contrib/toolchain/gcc/5x/libgcc/config/i386/elf-lib.h
0,0 → 1,36
/* Definitions for Intel 386 ELF systems.
Copyright (C) 2015 Free Software Foundation, Inc.
 
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
 
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
 
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
 
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
 
#ifdef __i386__
/* Used by crtstuff.c to initialize the base of data-relative relocations.
   These are GOT relative on x86, so return the pic register.  */
/* The %= asm-template operator expands to a number unique to each
   expansion, giving the call/pop label a distinct name every time the
   macro is used.  */
#define CRT_GET_RFIB_DATA(BASE)			\
  __asm__ ("call\t.LPR%=\n"			\
	   ".LPR%=:\n\t"			\
	   "pop{l}\t%0\n\t"			\
	   /* Due to a GAS bug, this cannot use EAX.  That encodes	\
	      smaller than the traditional EBX, which results in the	\
	      offset being off by one.  */				\
	   "add{l}\t{$_GLOBAL_OFFSET_TABLE_+[.-.LPR%=],%0"		\
	   "|%0,_GLOBAL_OFFSET_TABLE_+(.-.LPR%=)}"			\
	   : "=d"(BASE))
#endif
/contrib/toolchain/gcc/5x/libgcc/config/i386/enable-execute-stack-mingw32.c
0,0 → 1,38
/* Implement __enable_execute_stack for Windows32.
Copyright (C) 2011-2015 Free Software Foundation, Inc.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
 
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
 
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
 
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
 
#include <windows.h>

#include <stdlib.h>	/* abort */
 
extern void __enable_execute_stack (void *);

/* Mark the memory region containing ADDR as executable (and writable)
   so that trampolines generated on the stack can be run.  Aborts if
   the region cannot be queried or reprotected — silently ignoring a
   VirtualProtect failure would only defer the crash to the first jump
   into the trampoline, with a far worse diagnostic.  */
void
__enable_execute_stack (void *addr)
{
  MEMORY_BASIC_INFORMATION b;

  if (!VirtualQuery (addr, &b, sizeof (b)))
    abort ();
  /* The previous protection flags are returned through b.Protect,
     which is no longer needed after this point.  */
  if (!VirtualProtect (b.BaseAddress, b.RegionSize, PAGE_EXECUTE_READWRITE,
		       &b.Protect))
    abort ();
}
/contrib/toolchain/gcc/5x/libgcc/config/i386/freebsd-unwind.h
0,0 → 1,173
/* DWARF2 EH unwinding support for FreeBSD: AMD x86-64 and x86.
Copyright (C) 2015 Free Software Foundation, Inc.
Contributed by John Marino <gnugcc@marino.st>
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
 
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
 
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
 
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
 
/* Do code reading to identify a signal frame, and set the frame
state data appropriately. See unwind-dw2.c for the structs. */
 
#include <sys/types.h>
#include <signal.h>
#include <sys/ucontext.h>
#include <machine/sigframe.h>
 
#define REG_NAME(reg) sf_uc.uc_mcontext.mc_## reg
 
#ifdef __x86_64__
#define MD_FALLBACK_FRAME_STATE_FOR x86_64_freebsd_fallback_frame_state
 
/* MD fallback unwinder for FreeBSD/amd64: recognize the signal
   trampoline by its exact instruction bytes and, if matched, rebuild
   the frame state from the kernel-pushed sigframe; otherwise report
   end of stack.  */
static _Unwind_Reason_Code
x86_64_freebsd_fallback_frame_state
(struct _Unwind_Context *context, _Unwind_FrameState *fs)
{
  struct sigframe *sf;
  long new_cfa;

  /* Prior to FreeBSD 9, the signal trampoline was located immediately
     before the ps_strings.  To support non-executable stacks on AMD64,
     the sigtramp was moved to a shared page for FreeBSD 9.  Unfortunately
     this means looking frame patterns again (sys/amd64/amd64/sigtramp.S)
     rather than using the robust and convenient KERN_PS_STRINGS trick.

     <pc + 00>:	lea	0x10(%rsp),%rdi
     <pc + 05>:	pushq	$0x0
     <pc + 17>:	mov	$0x1a1,%rax
     <pc + 14>:	syscall

     If we can't find this pattern, we're at the end of the stack.
  */

  /* The four 32-bit words are the little-endian encoding of the
     instruction sequence above.  */
  if (!(   *(unsigned int *)(context->ra)      == 0x247c8d48
        && *(unsigned int *)(context->ra +  4) == 0x48006a10
        && *(unsigned int *)(context->ra +  8) == 0x01a1c0c7
        && *(unsigned int *)(context->ra + 12) == 0x050f0000 ))
    return _URC_END_OF_STACK;

  /* The interrupted frame's CFA is the rsp value saved in the
     machine context inside the sigframe.  */
  sf = (struct sigframe *) context->cfa;
  new_cfa = sf->REG_NAME(rsp);
  fs->regs.cfa_how = CFA_REG_OFFSET;
  /* Register 7 is rsp */
  fs->regs.cfa_reg = 7;
  fs->regs.cfa_offset = new_cfa - (long) context->cfa;

  /* The SVR4 register numbering macros aren't usable in libgcc.
     Indices below are DWARF register numbers.  */
  fs->regs.reg[0].how = REG_SAVED_OFFSET;
  fs->regs.reg[0].loc.offset = (long)&sf->REG_NAME(rax) - new_cfa;
  fs->regs.reg[1].how = REG_SAVED_OFFSET;
  fs->regs.reg[1].loc.offset = (long)&sf->REG_NAME(rdx) - new_cfa;
  fs->regs.reg[2].how = REG_SAVED_OFFSET;
  fs->regs.reg[2].loc.offset = (long)&sf->REG_NAME(rcx) - new_cfa;
  fs->regs.reg[3].how = REG_SAVED_OFFSET;
  fs->regs.reg[3].loc.offset = (long)&sf->REG_NAME(rbx) - new_cfa;
  fs->regs.reg[4].how = REG_SAVED_OFFSET;
  fs->regs.reg[4].loc.offset = (long)&sf->REG_NAME(rsi) - new_cfa;
  fs->regs.reg[5].how = REG_SAVED_OFFSET;
  fs->regs.reg[5].loc.offset = (long)&sf->REG_NAME(rdi) - new_cfa;
  fs->regs.reg[6].how = REG_SAVED_OFFSET;
  fs->regs.reg[6].loc.offset = (long)&sf->REG_NAME(rbp) - new_cfa;
  fs->regs.reg[8].how = REG_SAVED_OFFSET;
  fs->regs.reg[8].loc.offset = (long)&sf->REG_NAME(r8) - new_cfa;
  fs->regs.reg[9].how = REG_SAVED_OFFSET;
  fs->regs.reg[9].loc.offset = (long)&sf->REG_NAME(r9) - new_cfa;
  fs->regs.reg[10].how = REG_SAVED_OFFSET;
  fs->regs.reg[10].loc.offset = (long)&sf->REG_NAME(r10) - new_cfa;
  fs->regs.reg[11].how = REG_SAVED_OFFSET;
  fs->regs.reg[11].loc.offset = (long)&sf->REG_NAME(r11) - new_cfa;
  fs->regs.reg[12].how = REG_SAVED_OFFSET;
  fs->regs.reg[12].loc.offset = (long)&sf->REG_NAME(r12) - new_cfa;
  fs->regs.reg[13].how = REG_SAVED_OFFSET;
  fs->regs.reg[13].loc.offset = (long)&sf->REG_NAME(r13) - new_cfa;
  fs->regs.reg[14].how = REG_SAVED_OFFSET;
  fs->regs.reg[14].loc.offset = (long)&sf->REG_NAME(r14) - new_cfa;
  fs->regs.reg[15].how = REG_SAVED_OFFSET;
  fs->regs.reg[15].loc.offset = (long)&sf->REG_NAME(r15) - new_cfa;
  fs->regs.reg[16].how = REG_SAVED_OFFSET;
  fs->regs.reg[16].loc.offset = (long)&sf->REG_NAME(rip) - new_cfa;
  /* DWARF column 16 (rip) holds the resumption address.  */
  fs->retaddr_column = 16;
  fs->signal_frame = 1;
  return _URC_NO_REASON;
}
 
#else /* Next section is for i386 */
 
#define MD_FALLBACK_FRAME_STATE_FOR x86_freebsd_fallback_frame_state
 
/*
* We can't use KERN_PS_STRINGS anymore if we want to support FreeBSD32
* compat on AMD64. The sigtramp is in a shared page in that case so the
* x86_sigtramp_range only works on a true i386 system. We have to
* search for the sigtramp frame if we want it working everywhere.
*/
 
/* MD fallback unwinder for FreeBSD/i386: recognize the signal
   trampoline by its instruction bytes (the call/lea pair around the
   return address) and, if matched, rebuild the frame state from the
   kernel-pushed sigframe; otherwise report end of stack.  */
static _Unwind_Reason_Code
x86_freebsd_fallback_frame_state
(struct _Unwind_Context *context, _Unwind_FrameState *fs)
{
  struct sigframe *sf;
  long new_cfa;

  /*
   * i386 sigtramp frame we are looking for follows.
   * Apparently PSL_VM is variable, so we can't look past context->ra + 4
   * <sigcode>:
   *   0:	ff 54 24 10		call   *0x10(%esp)	*SIGF_HANDLER
   *   4:	8d 44 24 20		lea    0x20(%esp),%eax	SIGF_UC
   *   8:	50			push   %eax
   *   9:	f7 40 54 00 00 02 00	testl  $0x20000,0x54(%eax)	$PSL_VM
   *  10:	75 03			jne    15 <sigcode+0x15>
   *  12:	8e 68 14		mov    0x14(%eax),%gs	UC_GS
   *  15:	b8 a1 01 00 00		mov    0x1a1,%eax	$SYS_sigreturn
   */

  /* context->ra points just past the `call'; match the call bytes
     before it and the `lea' bytes at it.  */
  if (!(   *(unsigned int *)(context->ra - 4) == 0x102454ff
        && *(unsigned int *)(context->ra)     == 0x2024448d ))
    return _URC_END_OF_STACK;

  /* The interrupted frame's CFA is the esp value saved in the
     machine context; DWARF register 4 is esp.  */
  sf = (struct sigframe *) context->cfa;
  new_cfa = sf->REG_NAME(esp);
  fs->regs.cfa_how = CFA_REG_OFFSET;
  fs->regs.cfa_reg = 4;
  fs->regs.cfa_offset = new_cfa - (long) context->cfa;

  /* The SVR4 register numbering macros aren't usable in libgcc.
     Indices below are DWARF register numbers.  */
  fs->regs.reg[0].how = REG_SAVED_OFFSET;
  fs->regs.reg[0].loc.offset = (long)&sf->REG_NAME(eax) - new_cfa;
  fs->regs.reg[3].how = REG_SAVED_OFFSET;
  fs->regs.reg[3].loc.offset = (long)&sf->REG_NAME(ebx) - new_cfa;
  fs->regs.reg[1].how = REG_SAVED_OFFSET;
  fs->regs.reg[1].loc.offset = (long)&sf->REG_NAME(ecx) - new_cfa;
  fs->regs.reg[2].how = REG_SAVED_OFFSET;
  fs->regs.reg[2].loc.offset = (long)&sf->REG_NAME(edx) - new_cfa;
  fs->regs.reg[6].how = REG_SAVED_OFFSET;
  fs->regs.reg[6].loc.offset = (long)&sf->REG_NAME(esi) - new_cfa;
  fs->regs.reg[7].how = REG_SAVED_OFFSET;
  fs->regs.reg[7].loc.offset = (long)&sf->REG_NAME(edi) - new_cfa;
  fs->regs.reg[5].how = REG_SAVED_OFFSET;
  fs->regs.reg[5].loc.offset = (long)&sf->REG_NAME(ebp) - new_cfa;
  fs->regs.reg[8].how = REG_SAVED_OFFSET;
  fs->regs.reg[8].loc.offset = (long)&sf->REG_NAME(eip) - new_cfa;
  /* DWARF column 8 (eip) holds the resumption address.  */
  fs->retaddr_column = 8;
  fs->signal_frame = 1;
  return _URC_NO_REASON;
}
#endif /* ifdef __x86_64__ */
/contrib/toolchain/gcc/5x/libgcc/config/i386/gthr-win32.c
0,0 → 1,267
/* Implementation of W32-specific threads compatibility routines for
libgcc2. */
 
/* Copyright (C) 1999-2015 Free Software Foundation, Inc.
Contributed by Mumit Khan <khan@xraylith.wisc.edu>.
Modified and moved to separate file by Danny Smith
<dannysmith@users.sourceforge.net>.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
 
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
 
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
 
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
 
#include <windows.h>
#ifndef __GTHREAD_HIDE_WIN32API
# define __GTHREAD_HIDE_WIN32API 1
#endif
#undef __GTHREAD_I486_INLINE_LOCK_PRIMITIVES
#define __GTHREAD_I486_INLINE_LOCK_PRIMITIVES
#include "gthr-win32.h"
 
/* Windows32 threads specific definitions. The windows32 threading model
does not map well into pthread-inspired gcc's threading model, and so
there are caveats one needs to be aware of.
 
1. The destructor supplied to __gthread_key_create is ignored for
generic x86-win32 ports. This will certainly cause memory leaks
due to unreclaimed eh contexts (sizeof (eh_context) is at least
24 bytes for x86 currently).
 
This memory leak may be significant for long-running applications
that make heavy use of C++ EH.
 
However, Mingw runtime (version 0.3 or newer) provides a mechanism
to emulate pthreads key dtors; the runtime provides a special DLL,
linked in if -mthreads option is specified, that runs the dtors in
the reverse order of registration when each thread exits. If
-mthreads option is not given, a stub is linked in instead of the
DLL, which results in memory leak. Other x86-win32 ports can use
the same technique of course to avoid the leak.
 
2. The error codes returned are non-POSIX like, and cast into ints.
This may cause incorrect error return due to truncation values on
hw where sizeof (DWORD) > sizeof (int).
3. We are currently using a special mutex instead of the Critical
Sections, since Win9x does not support TryEnterCriticalSection
(while NT does).
The basic framework should work well enough. In the long term, GCC
needs to use Structured Exception Handling on Windows32. */
 
/* Run FUNC exactly once across all threads, using ONCE as the guard.
   Returns 0 on success, EINVAL on null arguments.  */
int
__gthr_win32_once (__gthread_once_t *once, void (*func) (void))
{
  if (once == NULL || func == NULL)
    return EINVAL;

  if (! once->done)
    {
      /* NOTE(review): assumes once->started is initialized to -1 (via
	 __GTHREAD_ONCE_INIT) so that only the first incrementer sees
	 0 — confirm against the header's initializer.  */
      if (InterlockedIncrement (&(once->started)) == 0)
	{
	  (*func) ();
	  once->done = TRUE;
	}
      else
	{
	  /* Another thread is currently executing the code, so wait for it
	     to finish; yield the CPU in the meantime.  If performance
	     does become an issue, the solution is to use an Event that
	     we wait on here (and set above), but that implies a place to
	     create the event before this routine is called.  */
	  while (! once->done)
	    Sleep (0);
	}
    }
  return 0;
}
 
/* Windows32 thread local keys don't support destructors; this leads to
leaks, especially in threaded applications making extensive use of
C++ EH. Mingw uses a thread-support DLL to work-around this problem. */
 
/* Allocate a TLS key and store it in *KEY.  DTOR is ignored on plain
   win32 (no native TLS destructors); with mingw's -mthreads runtime it
   is registered so dtors run at thread exit.  Returns 0 on success or
   the Win32 error code.  */
int
__gthr_win32_key_create (__gthread_key_t *key,
			 void (*dtor) (void *) __attribute__((unused)))
{
  DWORD slot = TlsAlloc ();

  if (slot == 0xFFFFFFFF)
    return (int) GetLastError ();

  *key = slot;
#ifdef MINGW32_SUPPORTS_MT_EH
  /* The mingw runtime runs the registered dtors in reverse order for
     each thread when the thread exits.  */
  return __mingwthr_key_dtor (*key, dtor);
#endif
  return 0;
}
 
/* Release a TLS index obtained from __gthr_win32_key_create.
   Returns 0 on success, otherwise the Win32 error code.  */
int
__gthr_win32_key_delete (__gthread_key_t key)
{
  if (TlsFree (key) != 0)
    return 0;
  return (int) GetLastError ();
}
 
/* Fetch this thread's value for KEY.  TlsGetValue clobbers the
   thread's last-error value even on success, so preserve it across
   the call.  */
void *
__gthr_win32_getspecific (__gthread_key_t key)
{
  DWORD saved_error = GetLastError ();
  void *value = TlsGetValue (key);
  SetLastError (saved_error);
  return value;
}
 
/* Store PTR as this thread's value for KEY.  Returns 0 on success,
   otherwise the Win32 error code.  */
int
__gthr_win32_setspecific (__gthread_key_t key, const void *ptr)
{
  if (TlsSetValue (key, CONST_CAST2 (void *, const void *, ptr)) == 0)
    return GetLastError ();
  return 0;
}
 
/* Initialize MUTEX: counter -1 marks it free; the semaphore starts
   with no tokens and is used to park and wake contending threads.  */
void
__gthr_win32_mutex_init_function (__gthread_mutex_t *mutex)
{
  mutex->sema = CreateSemaphoreW (NULL, 0, 65535, NULL);
  mutex->counter = -1;
}
 
/* Destroy MUTEX by closing its kernel semaphore handle.  */
void
__gthr_win32_mutex_destroy (__gthread_mutex_t *mutex)
{
  HANDLE sema = (HANDLE) mutex->sema;
  CloseHandle (sema);
}
 
/* Acquire MUTEX.  The counter starts at -1 when free: the thread whose
   increment yields 0 takes ownership without touching the kernel;
   later threads block on the semaphore until the holder posts a token.
   Returns 0 on success, 1 if the kernel wait fails.  */
int
__gthr_win32_mutex_lock (__gthread_mutex_t *mutex)
{
  if (InterlockedIncrement (&mutex->counter) == 0 ||
      WaitForSingleObject (mutex->sema, INFINITE) == WAIT_OBJECT_0)
    return 0;
  else
    {
      /* WaitForSingleObject returns WAIT_FAILED, and we can only do
	 some best-effort cleanup here.  */
      InterlockedDecrement (&mutex->counter);
      return 1;
    }
}
 
/* Try to acquire MUTEX without blocking: atomically move the counter
   from -1 (free) to 0 (held).  A negative old value means the lock was
   free and is now ours.  Returns 0 on success, 1 if already held.  */
int
__gthr_win32_mutex_trylock (__gthread_mutex_t *mutex)
{
  return (__GTHR_W32_InterlockedCompareExchange (&mutex->counter, 0, -1) < 0)
	 ? 0 : 1;
}
 
/* Release MUTEX.  If the decremented counter is still >= 0, at least
   one thread is blocked on the semaphore; wake exactly one.  Returns 0
   on success, 1 if posting the semaphore token fails.  */
int
__gthr_win32_mutex_unlock (__gthread_mutex_t *mutex)
{
  if (InterlockedDecrement (&mutex->counter) < 0)
    return 0;
  return ReleaseSemaphore (mutex->sema, 1, NULL) ? 0 : 1;
}
 
/* Initialize recursive MUTEX: counter -1 marks it free, no owner, zero
   recursion depth; the semaphore parks and wakes contenders.  */
void
__gthr_win32_recursive_mutex_init_function (__gthread_recursive_mutex_t *mutex)
{
  mutex->sema = CreateSemaphoreW (NULL, 0, 65535, NULL);
  mutex->owner = 0;
  mutex->depth = 0;
  mutex->counter = -1;
}
 
/* Acquire recursive MUTEX.  First incrementer (counter -1 -> 0) takes
   fresh ownership; a re-entering owner undoes its increment and just
   bumps the depth; everyone else blocks on the semaphore.  Returns 0
   on success, 1 if the kernel wait fails.  */
int
__gthr_win32_recursive_mutex_lock (__gthread_recursive_mutex_t *mutex)
{
  DWORD me = GetCurrentThreadId();
  if (InterlockedIncrement (&mutex->counter) == 0)
    {
      mutex->depth = 1;
      mutex->owner = me;
    }
  else if (mutex->owner == me)
    {
      /* Recursive acquisition: cancel the contention increment above
	 and record one more nesting level.  */
      InterlockedDecrement (&mutex->counter);
      ++(mutex->depth);
    }
  else if (WaitForSingleObject (mutex->sema, INFINITE) == WAIT_OBJECT_0)
    {
      mutex->depth = 1;
      mutex->owner = me;
    }
  else
    {
      /* WaitForSingleObject returns WAIT_FAILED, and we can only do
	 some best-effort cleanup here.  */
      InterlockedDecrement (&mutex->counter);
      return 1;
    }
  return 0;
}
 
/* Try to acquire recursive MUTEX without blocking.  Take it fresh via
   the counter CAS (-1 -> 0), or bump the depth if this thread already
   owns it.  Returns 0 on success, 1 if another thread holds it.  */
int
__gthr_win32_recursive_mutex_trylock (__gthread_recursive_mutex_t *mutex)
{
  DWORD me = GetCurrentThreadId();
  if (__GTHR_W32_InterlockedCompareExchange (&mutex->counter, 0, -1) < 0)
    {
      mutex->depth = 1;
      mutex->owner = me;
    }
  else if (mutex->owner == me)
    ++(mutex->depth);
  else
    return 1;

  return 0;
}
 
/* Release one level of recursive MUTEX; only on the outermost release
   (depth reaching 0) is ownership dropped and a waiter woken.  Must be
   called by the owning thread.  Returns 0 on success, 1 if posting the
   semaphore token fails.  */
int
__gthr_win32_recursive_mutex_unlock (__gthread_recursive_mutex_t *mutex)
{
  --(mutex->depth);
  if (mutex->depth == 0)
    {
      mutex->owner = 0;

      /* A still-nonnegative counter means threads are parked on the
	 semaphore; hand the lock to one of them.  */
      if (InterlockedDecrement (&mutex->counter) >= 0)
	return ReleaseSemaphore (mutex->sema, 1, NULL) ? 0 : 1;
    }

  return 0;
}
 
/* Destroy recursive MUTEX by closing its semaphore handle.
   Always returns 0.  */
int
__gthr_win32_recursive_mutex_destroy (__gthread_recursive_mutex_t *mutex)
{
  HANDLE sema = (HANDLE) mutex->sema;
  CloseHandle (sema);
  return 0;
}
/contrib/toolchain/gcc/5x/libgcc/config/i386/gthr-win32.h
0,0 → 1,786
/* Threads compatibility routines for libgcc2 and libobjc. */
/* Compile this one with gcc. */
 
/* Copyright (C) 1999-2015 Free Software Foundation, Inc.
Contributed by Mumit Khan <khan@xraylith.wisc.edu>.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
 
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
 
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
 
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
 
#ifndef GCC_GTHR_WIN32_H
#define GCC_GTHR_WIN32_H
 
/* Make sure CONST_CAST2 (origin in system.h) is declared.  */
#ifndef CONST_CAST2
/* Strip qualifiers from X by punning it through an anonymous union,
   avoiding a cast-away-const warning.  */
#define CONST_CAST2(TOTYPE,FROMTYPE,X) ((__extension__(union {FROMTYPE _q; TOTYPE _nq;})(X))._nq)
#endif
 
/* Windows32 threads specific definitions. The windows32 threading model
does not map well into pthread-inspired gcc's threading model, and so
there are caveats one needs to be aware of.
 
1. The destructor supplied to __gthread_key_create is ignored for
generic x86-win32 ports. This will certainly cause memory leaks
due to unreclaimed eh contexts (sizeof (eh_context) is at least
24 bytes for x86 currently).
 
This memory leak may be significant for long-running applications
that make heavy use of C++ EH.
 
However, Mingw runtime (version 0.3 or newer) provides a mechanism
to emulate pthreads key dtors; the runtime provides a special DLL,
linked in if -mthreads option is specified, that runs the dtors in
the reverse order of registration when each thread exits. If
-mthreads option is not given, a stub is linked in instead of the
DLL, which results in memory leak. Other x86-win32 ports can use
the same technique of course to avoid the leak.
 
2. The error codes returned are non-POSIX like, and cast into ints.
This may cause incorrect error return due to truncation values on
hw where sizeof (DWORD) > sizeof (int).
 
3. We are currently using a special mutex instead of the Critical
Sections, since Win9x does not support TryEnterCriticalSection
(while NT does).
 
The basic framework should work well enough. In the long term, GCC
needs to use Structured Exception Handling on Windows32. */
 
#define __GTHREADS 1
 
#include <errno.h>
#ifdef __MINGW32__
#include <_mingw.h>
#endif
 
#ifndef __UNUSED_PARAM
#define __UNUSED_PARAM(x) x
#endif
 
#ifdef _LIBOBJC
 
/* This is necessary to prevent windef.h (included from windows.h) from
defining its own BOOL as a typedef. */
#ifndef __OBJC__
#define __OBJC__
#endif
#include <windows.h>
/* Now undef the windows BOOL. */
#undef BOOL
 
/* Key structure for maintaining thread specific storage */
static DWORD __gthread_objc_data_tls = (DWORD) -1;
 
/* Backend initialization functions */
 
/* Initialize the threads subsystem. */
int
__gthread_objc_init_thread_system (void)
{
  /* Reserve a TLS index for per-thread ObjC data; (DWORD) -1 means
     TlsAlloc failed.  Returns 0 on success, -1 on failure.  */
  __gthread_objc_data_tls = TlsAlloc ();
  return (__gthread_objc_data_tls == (DWORD) -1) ? -1 : 0;
}
 
/* Close the threads subsystem. */
int
__gthread_objc_close_thread_system (void)
{
if (__gthread_objc_data_tls != (DWORD) -1)
TlsFree (__gthread_objc_data_tls);
return 0;
}
 
/* Backend thread functions */
 
/* Create a new thread of execution. */
objc_thread_t
__gthread_objc_thread_detach (void (*func)(void *arg), void *arg)
{
DWORD thread_id = 0;
HANDLE win32_handle;
 
if (!(win32_handle = CreateThread (NULL, 0, (LPTHREAD_START_ROUTINE) func,
arg, 0, &thread_id)))
thread_id = 0;
 
return (objc_thread_t) (INT_PTR) thread_id;
}
 
/* Set the current thread's priority. */
int
__gthread_objc_thread_set_priority (int priority)
{
  /* Map the ObjC priority level onto a Win32 thread priority and
     apply it to the current thread.  Returns 0 on success, -1 on
     failure.  Unknown levels map to background priority.  */
  int sys_priority;

  switch (priority)
    {
    case OBJC_THREAD_INTERACTIVE_PRIORITY:
      sys_priority = THREAD_PRIORITY_NORMAL;
      break;
    case OBJC_THREAD_LOW_PRIORITY:
      sys_priority = THREAD_PRIORITY_LOWEST;
      break;
    case OBJC_THREAD_BACKGROUND_PRIORITY:
    default:
      sys_priority = THREAD_PRIORITY_BELOW_NORMAL;
      break;
    }

  return SetThreadPriority (GetCurrentThread (), sys_priority) ? 0 : -1;
}
 
/* Return the current thread's priority, folded back into the three
   ObjC priority levels.  Every Win32 priority (and the failure value
   THREAD_PRIORITY_ERROR_RETURN) is absorbed by the default case, so
   the switch returns on all paths; the unreachable trailing
   "return -1" of the previous version has been removed.  */
int
__gthread_objc_thread_get_priority (void)
{
  int sys_priority = GetThreadPriority (GetCurrentThread ());

  switch (sys_priority)
    {
    case THREAD_PRIORITY_HIGHEST:
    case THREAD_PRIORITY_TIME_CRITICAL:
    case THREAD_PRIORITY_ABOVE_NORMAL:
    case THREAD_PRIORITY_NORMAL:
      return OBJC_THREAD_INTERACTIVE_PRIORITY;

    case THREAD_PRIORITY_IDLE:
    case THREAD_PRIORITY_LOWEST:
      return OBJC_THREAD_LOW_PRIORITY;

    default:
    case THREAD_PRIORITY_BELOW_NORMAL:
      /* Also reached on GetThreadPriority failure.  */
      return OBJC_THREAD_BACKGROUND_PRIORITY;
    }
}
 
/* Yield the remainder of this time slice to another ready thread.  */
void
__gthread_objc_thread_yield (void)
{
  Sleep (0);	/* Sleep (0) relinquishes the CPU without blocking.  */
}
 
/* Terminate the current thread with the global ObjC exit status.
   Never returns; the -1 is only reached if ExitThread fails.  */
int
__gthread_objc_thread_exit (void)
{
  ExitThread (__objc_thread_exit_status);

  return -1;	/* Not reached unless ExitThread failed.  */
}
 
/* Returns an integer value which uniquely describes a thread. */
objc_thread_t
__gthread_objc_thread_id (void)
{
return (objc_thread_t) (INT_PTR) GetCurrentThreadId ();
}
 
/* Store VALUE as the calling thread's local data pointer.
   Returns 0 on success, -1 on failure.  */
int
__gthread_objc_thread_set_data (void *value)
{
  return TlsSetValue (__gthread_objc_data_tls, value) ? 0 : -1;
}
 
/* Return the calling thread's local data pointer.  TlsGetValue
   overwrites the thread's last-error value even on success, so it is
   saved and restored around the call.  */
void *
__gthread_objc_thread_get_data (void)
{
  DWORD saved_error = GetLastError ();
  void *result = TlsGetValue (__gthread_objc_data_tls);
  SetLastError (saved_error);

  return result;
}
 
/* Backend mutex functions */
 
/* Allocate the Win32 mutex backing MUTEX.  Returns 0 on success,
   -1 if CreateMutex failed.  */
int
__gthread_objc_mutex_allocate (objc_mutex_t mutex)
{
  HANDLE handle = CreateMutex (NULL, 0, NULL);
  mutex->backend = (void *) handle;
  return (handle == NULL) ? -1 : 0;
}
 
/* Release the Win32 handle backing MUTEX.  Always returns 0.  */
int
__gthread_objc_mutex_deallocate (objc_mutex_t mutex)
{
  HANDLE handle = (HANDLE) (mutex->backend);
  CloseHandle (handle);
  return 0;
}
 
/* Block until MUTEX is acquired.  WAIT_ABANDONED (previous owner died
   while holding the mutex) still counts as an acquisition.  Returns 0
   on success, -1 on failure.  */
int
__gthread_objc_mutex_lock (objc_mutex_t mutex)
{
  DWORD rc = WaitForSingleObject ((HANDLE) (mutex->backend), INFINITE);

  return (rc == WAIT_OBJECT_0 || rc == WAIT_ABANDONED) ? 0 : -1;
}
 
/* Try to acquire MUTEX without blocking (zero timeout).  Returns 0 if
   acquired (including the abandoned-owner case), -1 otherwise.  */
int
__gthread_objc_mutex_trylock (objc_mutex_t mutex)
{
  DWORD rc = WaitForSingleObject ((HANDLE) (mutex->backend), 0);

  return (rc == WAIT_OBJECT_0 || rc == WAIT_ABANDONED) ? 0 : -1;
}
 
/* Release MUTEX.  Returns 0 on success, -1 if ReleaseMutex failed.  */
int
__gthread_objc_mutex_unlock (objc_mutex_t mutex)
{
  return ReleaseMutex ((HANDLE) (mutex->backend)) ? 0 : -1;
}
 
/* Backend condition mutex functions */

/* Condition variables are not provided by this backend; every entry
   point simply reports failure.  */

/* Allocate a condition.  */
int
__gthread_objc_condition_allocate (objc_condition_t __UNUSED_PARAM(condition))
{
  return -1;	/* Unimplemented.  */
}

/* Deallocate a condition.  */
int
__gthread_objc_condition_deallocate (objc_condition_t __UNUSED_PARAM(condition))
{
  return -1;	/* Unimplemented.  */
}

/* Wait on the condition.  */
int
__gthread_objc_condition_wait (objc_condition_t __UNUSED_PARAM(condition),
			       objc_mutex_t __UNUSED_PARAM(mutex))
{
  return -1;	/* Unimplemented.  */
}

/* Wake up all threads waiting on this condition.  */
int
__gthread_objc_condition_broadcast (objc_condition_t __UNUSED_PARAM(condition))
{
  return -1;	/* Unimplemented.  */
}

/* Wake up one thread waiting on this condition.  */
int
__gthread_objc_condition_signal (objc_condition_t __UNUSED_PARAM(condition))
{
  return -1;	/* Unimplemented.  */
}
 
#else /* _LIBOBJC */
 
#ifdef __cplusplus
extern "C" {
#endif
 
/* A key is just a TLS index as returned by TlsAlloc.  */
typedef unsigned long __gthread_key_t;

typedef struct {
  int done;	/* Nonzero once the init routine has completed.  */
  long started;	/* Interlocked counter, -1 until the first caller.  */
} __gthread_once_t;

typedef struct {
  long counter;	/* -1 when free; >= 0 while held/contended.  */
  void *sema;	/* Win32 semaphore where contending threads block.  */
} __gthread_mutex_t;

typedef struct {
  long counter;		/* Same protocol as __gthread_mutex_t.  */
  long depth;		/* Recursion depth of the current owner.  */
  unsigned long owner;	/* Thread id of the owner, 0 when free.  */
  void *sema;		/* Win32 semaphore for contending threads.  */
} __gthread_recursive_mutex_t;
 
#define __GTHREAD_ONCE_INIT {0, -1}
#define __GTHREAD_MUTEX_INIT_FUNCTION __gthread_mutex_init_function
#define __GTHREAD_MUTEX_INIT_DEFAULT {-1, 0}
#define __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION \
__gthread_recursive_mutex_init_function
#define __GTHREAD_RECURSIVE_MUTEX_INIT_DEFAULT {-1, 0, 0, 0}
 
#if defined (_WIN32) && !defined(__CYGWIN__)
#define MINGW32_SUPPORTS_MT_EH 1
/* Mingw runtime >= v0.3 provides a magic variable that is set to nonzero
if -mthreads option was specified, or 0 otherwise. This is to get around
the lack of weak symbols in PE-COFF. */
extern int _CRT_MT;
extern int __mingwthr_key_dtor (unsigned long, void (*) (void *));
#endif /* _WIN32 && !__CYGWIN__ */
 
/* The Windows95 kernel does not export InterlockedCompareExchange.
This provides a substitute. When building apps that reference
gthread_mutex_try_lock, the __GTHREAD_I486_INLINE_LOCK_PRIMITIVES
macro must be defined if Windows95 is a target. Currently
gthread_mutex_try_lock is not referenced by libgcc or libstdc++. */
#ifdef __GTHREAD_I486_INLINE_LOCK_PRIMITIVES
/* i486-compatible compare-and-swap: atomically, if *__DEST equals
   __COMPERAND, store __XCHG into *__DEST.  Returns the previous value
   of *__DEST (in %eax, as cmpxchg requires).  Used as a substitute
   for InterlockedCompareExchange on Win95 kernels that lack it.  */
static inline long
__gthr_i486_lock_cmp_xchg(long *__dest, long __xchg, long __comperand)
{
  long result;
  __asm__ __volatile__ ("\n\
lock\n\
cmpxchg{l} {%4, %1|%1, %4}\n"
	: "=a" (result), "=m" (*__dest)
	: "0" (__comperand), "m" (*__dest), "r" (__xchg)
	: "cc");
  return result;
}
#define __GTHR_W32_InterlockedCompareExchange __gthr_i486_lock_cmp_xchg
#else /* __GTHREAD_I486_INLINE_LOCK_PRIMITIVES */
#define __GTHR_W32_InterlockedCompareExchange InterlockedCompareExchange
#endif /* __GTHREAD_I486_INLINE_LOCK_PRIMITIVES */
 
/* Return nonzero if the program may be multi-threaded.  */
static inline int
__gthread_active_p (void)
{
#ifdef MINGW32_SUPPORTS_MT_EH
  /* Mingw: _CRT_MT is nonzero iff -mthreads was given (see above).  */
  return _CRT_MT;
#else
  /* Other Win32 configurations: assume threads are always active.  */
  return 1;
#endif
}
 
#if __GTHREAD_HIDE_WIN32API
 
/* The implementations are in config/i386/gthr-win32.c in libgcc.a.
Only stubs are exposed to avoid polluting the C++ namespace with
windows api definitions. */
 
extern int __gthr_win32_once (__gthread_once_t *, void (*) (void));
extern int __gthr_win32_key_create (__gthread_key_t *, void (*) (void*));
extern int __gthr_win32_key_delete (__gthread_key_t);
extern void * __gthr_win32_getspecific (__gthread_key_t);
extern int __gthr_win32_setspecific (__gthread_key_t, const void *);
extern void __gthr_win32_mutex_init_function (__gthread_mutex_t *);
extern int __gthr_win32_mutex_lock (__gthread_mutex_t *);
extern int __gthr_win32_mutex_trylock (__gthread_mutex_t *);
extern int __gthr_win32_mutex_unlock (__gthread_mutex_t *);
extern void
__gthr_win32_recursive_mutex_init_function (__gthread_recursive_mutex_t *);
extern int __gthr_win32_recursive_mutex_lock (__gthread_recursive_mutex_t *);
extern int
__gthr_win32_recursive_mutex_trylock (__gthread_recursive_mutex_t *);
extern int __gthr_win32_recursive_mutex_unlock (__gthread_recursive_mutex_t *);
extern void __gthr_win32_mutex_destroy (__gthread_mutex_t *);
extern int
__gthr_win32_recursive_mutex_destroy (__gthread_recursive_mutex_t *);
 
/* Thin veneers over the out-of-line implementations in
   config/i386/gthr-win32.c.  When threading is inactive the locking
   operations degrade to cheap no-ops (and __gthread_once to -1).  */

static inline int
__gthread_once (__gthread_once_t *__once, void (*__func) (void))
{
  return __gthread_active_p () ? __gthr_win32_once (__once, __func) : -1;
}

static inline int
__gthread_key_create (__gthread_key_t *__key, void (*__dtor) (void *))
{
  return __gthr_win32_key_create (__key, __dtor);
}

static inline int
__gthread_key_delete (__gthread_key_t __key)
{
  return __gthr_win32_key_delete (__key);
}

static inline void *
__gthread_getspecific (__gthread_key_t __key)
{
  return __gthr_win32_getspecific (__key);
}

static inline int
__gthread_setspecific (__gthread_key_t __key, const void *__ptr)
{
  return __gthr_win32_setspecific (__key, __ptr);
}

static inline void
__gthread_mutex_init_function (__gthread_mutex_t *__mutex)
{
  __gthr_win32_mutex_init_function (__mutex);
}

static inline void
__gthread_mutex_destroy (__gthread_mutex_t *__mutex)
{
  __gthr_win32_mutex_destroy (__mutex);
}

static inline int
__gthread_mutex_lock (__gthread_mutex_t *__mutex)
{
  return __gthread_active_p () ? __gthr_win32_mutex_lock (__mutex) : 0;
}

static inline int
__gthread_mutex_trylock (__gthread_mutex_t *__mutex)
{
  return __gthread_active_p () ? __gthr_win32_mutex_trylock (__mutex) : 0;
}

static inline int
__gthread_mutex_unlock (__gthread_mutex_t *__mutex)
{
  return __gthread_active_p () ? __gthr_win32_mutex_unlock (__mutex) : 0;
}

static inline void
__gthread_recursive_mutex_init_function (__gthread_recursive_mutex_t *__mutex)
{
  __gthr_win32_recursive_mutex_init_function (__mutex);
}

static inline int
__gthread_recursive_mutex_lock (__gthread_recursive_mutex_t *__mutex)
{
  return __gthread_active_p ()
	 ? __gthr_win32_recursive_mutex_lock (__mutex) : 0;
}

static inline int
__gthread_recursive_mutex_trylock (__gthread_recursive_mutex_t *__mutex)
{
  return __gthread_active_p ()
	 ? __gthr_win32_recursive_mutex_trylock (__mutex) : 0;
}

static inline int
__gthread_recursive_mutex_unlock (__gthread_recursive_mutex_t *__mutex)
{
  return __gthread_active_p ()
	 ? __gthr_win32_recursive_mutex_unlock (__mutex) : 0;
}

static inline int
__gthread_recursive_mutex_destroy (__gthread_recursive_mutex_t *__mutex)
{
  return __gthr_win32_recursive_mutex_destroy (__mutex);
}
 
#else /* ! __GTHREAD_HIDE_WIN32API */
 
#include <windows.h>
#include <errno.h>
 
/* Run __FUNC exactly once, however many threads race through the same
   __ONCE control.  Returns -1 when threading is inactive, EINVAL for
   null arguments (note the mixed error conventions, flagged in the
   comments at the top of this file), 0 otherwise.  */
static inline int
__gthread_once (__gthread_once_t *__once, void (*__func) (void))
{
  if (! __gthread_active_p ())
    return -1;
  else if (__once == NULL || __func == NULL)
    return EINVAL;

  if (! __once->done)
    {
      /* started begins at -1, so the thread that increments it to 0
	 is the first caller and runs __func itself.  */
      if (InterlockedIncrement (&(__once->started)) == 0)
	{
	  (*__func) ();
	  __once->done = TRUE;
	}
      else
	{
	  /* Another thread is currently executing the code, so wait for it
	     to finish; yield the CPU in the meantime.  If performance
	     does become an issue, the solution is to use an Event that
	     we wait on here (and set above), but that implies a place to
	     create the event before this routine is called.  */
	  while (! __once->done)
	    Sleep (0);
	}
    }

  return 0;
}
 
/* Windows32 thread local keys don't support destructors; this leads to
   leaks, especially in threaded applications making extensive use of
   C++ EH.  Mingw uses a thread-support DLL to work-around this problem.
   Returns 0 on success, otherwise a Windows error code (another of the
   non-POSIX error conventions noted at the top of this file).  */
static inline int
__gthread_key_create (__gthread_key_t *__key,
		      void (*__dtor) (void *) __attribute__((__unused__)))
{
  int __status = 0;
  DWORD __tls_index = TlsAlloc ();
  if (__tls_index != 0xFFFFFFFF)	/* i.e. != TLS_OUT_OF_INDEXES.  */
    {
      *__key = __tls_index;
#ifdef MINGW32_SUPPORTS_MT_EH
      /* Mingw runtime will run the dtors in reverse order for each thread
	 when the thread exits.  */
      __status = __mingwthr_key_dtor (*__key, __dtor);
#endif
    }
  else
    __status = (int) GetLastError ();
  return __status;
}
 
/* Free the TLS index behind __KEY.  Returns 0 on success or a
   Windows error code on failure.  */
static inline int
__gthread_key_delete (__gthread_key_t __key)
{
  if (TlsFree (__key) != 0)
    return 0;
  return (int) GetLastError ();
}
 
/* Return the calling thread's value for __KEY.  TlsGetValue clobbers
   the last-error value even on success, so preserve it.  */
static inline void *
__gthread_getspecific (__gthread_key_t __key)
{
  DWORD __saved_error = GetLastError ();
  void *__value = TlsGetValue (__key);
  SetLastError (__saved_error);

  return __value;
}
 
/* Set the calling thread's value for __KEY to __PTR.  Returns 0 on
   success or a Windows error code on failure.  */
static inline int
__gthread_setspecific (__gthread_key_t __key, const void *__ptr)
{
  BOOL __ok = TlsSetValue (__key, CONST_CAST2(void *, const void *, __ptr));
  return __ok ? 0 : (int) GetLastError ();
}
 
/* Initialize __MUTEX: counter -1 means unlocked; the semaphore is
   where contending threads are parked.  */
static inline void
__gthread_mutex_init_function (__gthread_mutex_t *__mutex)
{
  __mutex->sema = CreateSemaphoreW (NULL, 0, 65535, NULL);
  __mutex->counter = -1;
}
 
/* Destroy __MUTEX's backing semaphore.  */
static inline void
__gthread_mutex_destroy (__gthread_mutex_t *__mutex)
{
  HANDLE __sema = (HANDLE) __mutex->sema;
  CloseHandle (__sema);
}
 
/* Acquire __MUTEX, blocking until it is free.  Returns 0 on success,
   1 if the semaphore wait failed.  */
static inline int
__gthread_mutex_lock (__gthread_mutex_t *__mutex)
{
  int __status = 0;

  if (__gthread_active_p ())
    {
      /* counter starts at -1, so incrementing it to 0 means we took
	 the mutex uncontended; any other result means another thread
	 holds it and we must block on the semaphore until the holder
	 releases us from __gthread_mutex_unlock.  */
      if (InterlockedIncrement (&__mutex->counter) == 0 ||
	  WaitForSingleObject (__mutex->sema, INFINITE) == WAIT_OBJECT_0)
	__status = 0;
      else
	{
	  /* WaitForSingleObject returns WAIT_FAILED, and we can only do
	     some best-effort cleanup here.  */
	  InterlockedDecrement (&__mutex->counter);
	  __status = 1;
	}
    }
  return __status;
}
 
/* Try to acquire __MUTEX without blocking.  Returns 0 if acquired,
   1 if it is already held.  */
static inline int
__gthread_mutex_trylock (__gthread_mutex_t *__mutex)
{
  int __status = 0;

  if (__gthread_active_p ())
    {
      /* Atomically: if counter is -1 (free), replace it with 0.  The
	 call returns the previous value, so a negative result means
	 we now own the mutex.  */
      if (__GTHR_W32_InterlockedCompareExchange (&__mutex->counter, 0, -1) < 0)
	__status = 0;
      else
	__status = 1;
    }
  return __status;
}
 
/* Release __MUTEX.  Returns 0 on success, 1 if waking a waiter
   failed.  */
static inline int
__gthread_mutex_unlock (__gthread_mutex_t *__mutex)
{
  if (__gthread_active_p ())
    {
      /* A post-decrement value >= 0 means at least one thread is
	 blocked on the semaphore; release exactly one of them.  */
      if (InterlockedDecrement (&__mutex->counter) >= 0)
	return ReleaseSemaphore (__mutex->sema, 1, NULL) ? 0 : 1;
    }
  return 0;
}
 
/* Initialize __MUTEX: unlocked (counter -1), no owner, zero depth,
   with a semaphore for contending threads.  */
static inline void
__gthread_recursive_mutex_init_function (__gthread_recursive_mutex_t *__mutex)
{
  __mutex->sema = CreateSemaphoreW (NULL, 0, 65535, NULL);
  __mutex->counter = -1;
  __mutex->depth = 0;
  __mutex->owner = 0;
}
 
/* Acquire __MUTEX, blocking until available; nested acquisitions by
   the owning thread just bump the recursion depth.  Returns 0 on
   success, 1 if the semaphore wait failed.  */
static inline int
__gthread_recursive_mutex_lock (__gthread_recursive_mutex_t *__mutex)
{
  if (__gthread_active_p ())
    {
      DWORD __me = GetCurrentThreadId();
      /* counter is -1 when free, so incrementing it to 0 acquires the
	 mutex uncontended.  */
      if (InterlockedIncrement (&__mutex->counter) == 0)
	{
	  __mutex->depth = 1;
	  __mutex->owner = __me;
	}
      else if (__mutex->owner == __me)
	{
	  /* Recursive acquisition: undo the increment and just count
	     the extra nesting level.  */
	  InterlockedDecrement (&__mutex->counter);
	  ++(__mutex->depth);
	}
      else if (WaitForSingleObject (__mutex->sema, INFINITE) == WAIT_OBJECT_0)
	{
	  __mutex->depth = 1;
	  __mutex->owner = __me;
	}
      else
	{
	  /* WaitForSingleObject returns WAIT_FAILED, and we can only do
	     some best-effort cleanup here.  */
	  InterlockedDecrement (&__mutex->counter);
	  return 1;
	}
    }
  return 0;
}
 
/* Try to acquire __MUTEX without blocking; always succeeds for the
   current owner (incrementing the depth).  Returns 0 if acquired,
   1 otherwise.  */
static inline int
__gthread_recursive_mutex_trylock (__gthread_recursive_mutex_t *__mutex)
{
  if (__gthread_active_p ())
    {
      DWORD __me = GetCurrentThreadId();
      /* Atomically: if counter is -1 (free), replace it with 0; a
	 negative return (the old value) means we now own it.  */
      if (__GTHR_W32_InterlockedCompareExchange (&__mutex->counter, 0, -1) < 0)
	{
	  __mutex->depth = 1;
	  __mutex->owner = __me;
	}
      else if (__mutex->owner == __me)
	++(__mutex->depth);
      else
	return 1;
    }
  return 0;
}
 
/* Release one nesting level of __MUTEX; the mutex is handed to a
   waiter (if any) only when the outermost lock is released.
   NOTE(review): depth is decremented without verifying the caller
   owns the mutex -- presumably callers guarantee balanced
   lock/unlock pairs; confirm before relying on misuse detection.  */
static inline int
__gthread_recursive_mutex_unlock (__gthread_recursive_mutex_t *__mutex)
{
  if (__gthread_active_p ())
    {
      --(__mutex->depth);
      if (__mutex->depth == 0)
	{
	  __mutex->owner = 0;

	  /* A post-decrement value >= 0 means some thread is blocked
	     on the semaphore; wake exactly one.  */
	  if (InterlockedDecrement (&__mutex->counter) >= 0)
	    return ReleaseSemaphore (__mutex->sema, 1, NULL) ? 0 : 1;
	}
    }
  return 0;
}
 
/* Destroy __MUTEX's backing semaphore.  Always returns 0.  */
static inline int
__gthread_recursive_mutex_destroy (__gthread_recursive_mutex_t *__mutex)
{
  HANDLE __sema = (HANDLE) __mutex->sema;
  CloseHandle (__sema);
  return 0;
}
 
#endif /* __GTHREAD_HIDE_WIN32API */
 
#ifdef __cplusplus
}
#endif
 
#endif /* _LIBOBJC */
 
#endif /* ! GCC_GTHR_WIN32_H */
/contrib/toolchain/gcc/5x/libgcc/config/i386/linux-unwind.h
0,0 → 1,198
/* DWARF2 EH unwinding support for AMD x86-64 and x86.
Copyright (C) 2004-2015 Free Software Foundation, Inc.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
 
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
 
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
 
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
 
/* Do code reading to identify a signal frame, and set the frame
state data appropriately. See unwind-dw2.c for the structs.
Don't use this at all if inhibit_libc is used. */
 
#ifndef inhibit_libc
 
/* There's no sys/ucontext.h for glibc 2.0, so no
signal-turned-exceptions for them. There's also no configure-run for
the target, so we can't check on (e.g.) HAVE_SYS_UCONTEXT_H. Using the
target libc version macro should be enough. */
#if defined __GLIBC__ && !(__GLIBC__ == 2 && __GLIBC_MINOR__ == 0)
 
#include <signal.h>
#include <sys/ucontext.h>
 
#ifdef __x86_64__
 
#define MD_FALLBACK_FRAME_STATE_FOR x86_64_fallback_frame_state
 
/* If CONTEXT->ra points at the kernel's rt_sigreturn trampoline,
   fill FS with the register save locations recorded in the
   sigcontext the kernel pushed, so unwinding can continue through
   the signal frame.  Otherwise report end-of-stack.  */
static _Unwind_Reason_Code
x86_64_fallback_frame_state (struct _Unwind_Context *context,
			     _Unwind_FrameState *fs)
{
  unsigned char *pc = context->ra;
  struct sigcontext *sc;
  long new_cfa;

  /* movq $__NR_rt_sigreturn, %rax ; syscall.  */
#ifdef __LP64__
#define RT_SIGRETURN_SYSCALL	0x050f0000000fc0c7ULL
#else
#define RT_SIGRETURN_SYSCALL	0x050f40000201c0c7ULL
#endif
  /* 0x48 is the REX.W prefix of the movq; the following 8 bytes are
     the rest of the trampoline encoded in RT_SIGRETURN_SYSCALL.  */
  if (*(unsigned char *)(pc+0) == 0x48
      && *(unsigned long long *)(pc+1) == RT_SIGRETURN_SYSCALL)
    {
      struct ucontext *uc_ = context->cfa;
      /* The void * cast is necessary to avoid an aliasing warning.
	 The aliasing warning is correct, but should not be a problem
	 because it does not alias anything.  */
      sc = (struct sigcontext *) (void *) &uc_->uc_mcontext;
    }
  else
    return _URC_END_OF_STACK;

  new_cfa = sc->rsp;
  fs->regs.cfa_how = CFA_REG_OFFSET;
  /* Register 7 is rsp  */
  fs->regs.cfa_reg = 7;
  fs->regs.cfa_offset = new_cfa - (long) context->cfa;

  /* The SVR4 register numbering macros aren't usable in libgcc.
     Record, for each DWARF register number, where the interrupted
     value was saved relative to the new CFA.  */
  fs->regs.reg[0].how = REG_SAVED_OFFSET;
  fs->regs.reg[0].loc.offset = (long)&sc->rax - new_cfa;
  fs->regs.reg[1].how = REG_SAVED_OFFSET;
  fs->regs.reg[1].loc.offset = (long)&sc->rdx - new_cfa;
  fs->regs.reg[2].how = REG_SAVED_OFFSET;
  fs->regs.reg[2].loc.offset = (long)&sc->rcx - new_cfa;
  fs->regs.reg[3].how = REG_SAVED_OFFSET;
  fs->regs.reg[3].loc.offset = (long)&sc->rbx - new_cfa;
  fs->regs.reg[4].how = REG_SAVED_OFFSET;
  fs->regs.reg[4].loc.offset = (long)&sc->rsi - new_cfa;
  fs->regs.reg[5].how = REG_SAVED_OFFSET;
  fs->regs.reg[5].loc.offset = (long)&sc->rdi - new_cfa;
  fs->regs.reg[6].how = REG_SAVED_OFFSET;
  fs->regs.reg[6].loc.offset = (long)&sc->rbp - new_cfa;
  fs->regs.reg[8].how = REG_SAVED_OFFSET;
  fs->regs.reg[8].loc.offset = (long)&sc->r8 - new_cfa;
  fs->regs.reg[9].how = REG_SAVED_OFFSET;
  fs->regs.reg[9].loc.offset = (long)&sc->r9 - new_cfa;
  fs->regs.reg[10].how = REG_SAVED_OFFSET;
  fs->regs.reg[10].loc.offset = (long)&sc->r10 - new_cfa;
  fs->regs.reg[11].how = REG_SAVED_OFFSET;
  fs->regs.reg[11].loc.offset = (long)&sc->r11 - new_cfa;
  fs->regs.reg[12].how = REG_SAVED_OFFSET;
  fs->regs.reg[12].loc.offset = (long)&sc->r12 - new_cfa;
  fs->regs.reg[13].how = REG_SAVED_OFFSET;
  fs->regs.reg[13].loc.offset = (long)&sc->r13 - new_cfa;
  fs->regs.reg[14].how = REG_SAVED_OFFSET;
  fs->regs.reg[14].loc.offset = (long)&sc->r14 - new_cfa;
  fs->regs.reg[15].how = REG_SAVED_OFFSET;
  fs->regs.reg[15].loc.offset = (long)&sc->r15 - new_cfa;
  fs->regs.reg[16].how = REG_SAVED_OFFSET;
  fs->regs.reg[16].loc.offset = (long)&sc->rip - new_cfa;
  /* Column 16 (rip) holds the return address.  */
  fs->retaddr_column = 16;
  fs->signal_frame = 1;
  return _URC_NO_REASON;
}
 
#else /* ifdef __x86_64__ */
 
#define MD_FALLBACK_FRAME_STATE_FOR x86_fallback_frame_state
 
/* If CONTEXT->ra points at one of the kernel's signal-return
   trampolines (sigreturn or rt_sigreturn), recover the register save
   area so the unwinder can continue through the signal frame.
   Otherwise report end-of-stack.  */
static _Unwind_Reason_Code
x86_fallback_frame_state (struct _Unwind_Context *context,
			  _Unwind_FrameState *fs)
{
  unsigned char *pc = context->ra;
  struct sigcontext *sc;
  long new_cfa;

  /* popl %eax ; movl $__NR_sigreturn,%eax ; int $0x80  */
  if (*(unsigned short *)(pc+0) == 0xb858
      && *(unsigned int *)(pc+2) == 119		/* 119 == __NR_sigreturn.  */
      && *(unsigned short *)(pc+6) == 0x80cd)
    sc = context->cfa + 4;
  /* movl $__NR_rt_sigreturn,%eax ; int $0x80  */
  else if (*(unsigned char *)(pc+0) == 0xb8
	   && *(unsigned int *)(pc+1) == 173	/* 173 == __NR_rt_sigreturn.  */
	   && *(unsigned short *)(pc+5) == 0x80cd)
    {
      /* Layout of the frame the kernel builds for an rt signal.  */
      struct rt_sigframe {
	int sig;
	siginfo_t *pinfo;
	void *puc;
	siginfo_t info;
	struct ucontext uc;
      } *rt_ = context->cfa;
      /* The void * cast is necessary to avoid an aliasing warning.
	 The aliasing warning is correct, but should not be a problem
	 because it does not alias anything.  */
      sc = (struct sigcontext *) (void *) &rt_->uc.uc_mcontext;
    }
  else
    return _URC_END_OF_STACK;

  new_cfa = sc->esp;
  fs->regs.cfa_how = CFA_REG_OFFSET;
  /* Register 4 is esp.  */
  fs->regs.cfa_reg = 4;
  fs->regs.cfa_offset = new_cfa - (long) context->cfa;

  /* The SVR4 register numbering macros aren't usable in libgcc.
     Record, for each DWARF register number, where the interrupted
     value was saved relative to the new CFA.  */
  fs->regs.reg[0].how = REG_SAVED_OFFSET;
  fs->regs.reg[0].loc.offset = (long)&sc->eax - new_cfa;
  fs->regs.reg[3].how = REG_SAVED_OFFSET;
  fs->regs.reg[3].loc.offset = (long)&sc->ebx - new_cfa;
  fs->regs.reg[1].how = REG_SAVED_OFFSET;
  fs->regs.reg[1].loc.offset = (long)&sc->ecx - new_cfa;
  fs->regs.reg[2].how = REG_SAVED_OFFSET;
  fs->regs.reg[2].loc.offset = (long)&sc->edx - new_cfa;
  fs->regs.reg[6].how = REG_SAVED_OFFSET;
  fs->regs.reg[6].loc.offset = (long)&sc->esi - new_cfa;
  fs->regs.reg[7].how = REG_SAVED_OFFSET;
  fs->regs.reg[7].loc.offset = (long)&sc->edi - new_cfa;
  fs->regs.reg[5].how = REG_SAVED_OFFSET;
  fs->regs.reg[5].loc.offset = (long)&sc->ebp - new_cfa;
  fs->regs.reg[8].how = REG_SAVED_OFFSET;
  fs->regs.reg[8].loc.offset = (long)&sc->eip - new_cfa;
  /* Column 8 (eip) holds the return address.  */
  fs->retaddr_column = 8;
  fs->signal_frame = 1;
  return _URC_NO_REASON;
}
 
#define MD_FROB_UPDATE_CONTEXT x86_frob_update_context
 
/* Fix up for kernels that have vDSO, but don't have S flag in it:
   if the return address is the rt_sigreturn trampoline, explicitly
   mark the context as a signal frame.  */

static void
x86_frob_update_context (struct _Unwind_Context *context,
			 _Unwind_FrameState *fs ATTRIBUTE_UNUSED)
{
  unsigned char *pc = context->ra;

  /* movl $__NR_rt_sigreturn,%eax ; {int $0x80 | syscall}
     (173 == __NR_rt_sigreturn on ia32).  */
  if (*(unsigned char *)(pc+0) == 0xb8
      && *(unsigned int *)(pc+1) == 173
      && (*(unsigned short *)(pc+5) == 0x80cd
	  || *(unsigned short *)(pc+5) == 0x050f))
    _Unwind_SetSignalFrame (context, 1);
}
 
#endif /* ifdef __x86_64__ */
#endif /* not glibc 2.0 */
#endif /* ifdef inhibit_libc */
/contrib/toolchain/gcc/5x/libgcc/config/i386/morestack.S
0,0 → 1,863
# x86/x86_64 support for -fsplit-stack.
# Copyright (C) 2009-2015 Free Software Foundation, Inc.
# Contributed by Ian Lance Taylor <iant@google.com>.
 
# This file is part of GCC.
 
# GCC is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3, or (at your option) any later
# version.
 
# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
 
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
 
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
 
 
# Support for allocating more stack space when using -fsplit-stack.
# When a function discovers that it needs more stack space, it will
# call __morestack with the size of the stack frame and the size of
# the parameters to copy from the old stack frame to the new one.
# The __morestack function preserves the parameter registers and
# calls __generic_morestack to actually allocate the stack space.
 
# When this is called stack space is very low, but we ensure that
# there is enough space to push the parameter registers and to call
# __generic_morestack.
 
# When calling __generic_morestack, FRAME_SIZE points to the size of
# the desired frame when the function is called, and the function
# sets it to the size of the allocated stack. OLD_STACK points to
# the parameters on the old stack and PARAM_SIZE is the number of
# bytes of parameters to copy to the new stack. These are the
# parameters of the function that called __morestack. The
# __generic_morestack function returns the new stack pointer,
# pointing to the address of the first copied parameter. The return
# value minus the returned *FRAME_SIZE will be the first address on
# the stack which we should not use.
 
# void *__generic_morestack (size_t *frame_size, void *old_stack,
# size_t param_size);
 
# The __morestack routine has to arrange for the caller to return to a
# stub on the new stack. The stub is responsible for restoring the
# old stack pointer and returning to the caller's caller. This calls
# __generic_releasestack to retrieve the old stack pointer and release
# the newly allocated stack.
 
# void *__generic_releasestack (size_t *available);
 
# We do a little dance so that the processor's call/return return
# address prediction works out. The compiler arranges for the caller
# to look like this:
# call __generic_morestack
# ret
# L:
# // carry on with function
# After we allocate more stack, we call L, which is in our caller.
# When that returns (to the predicted instruction), we release the
# stack segment and reset the stack pointer. We then return to the
# predicted instruction, namely the ret instruction immediately after
# the call to __generic_morestack. That then returns to the caller of
# the original caller.
 
 
# The amount of extra space we ask for. In general this has to be
# enough for the dynamic loader to find a symbol and for a signal
# handler to run.
#ifndef __x86_64__
#define BACKOFF (1024)
#else
#define BACKOFF (1536)
#endif
 
 
# The amount of space we ask for when calling non-split-stack code.
#define NON_SPLIT_STACK 0x100000
 
# This entry point is for split-stack code which calls non-split-stack
# code. When the linker sees this case, it converts the call to
# __morestack to call __morestack_non_split instead. We just bump the
# requested stack space by 16K.
 
	.global __morestack_non_split
	.hidden	__morestack_non_split

#ifdef __ELF__
	.type	__morestack_non_split,@function
#endif

# Entry point used when split-stack code calls non-split-stack code:
# the linker redirects such calls here (see the comment block above).
# If NON_SPLIT_STACK bytes are already available we fix up the return
# address and return directly; otherwise we bump the request and fall
# through into __morestack below.

__morestack_non_split:
	.cfi_startproc

#ifndef __x86_64__

	# See below for an extended explanation of this.
	.cfi_def_cfa %esp,16

	pushl %eax			# Save %eax in case it is a parameter.

	.cfi_adjust_cfa_offset 4	# Account for pushed register.

	movl %esp,%eax			# Current stack,
	subl 8(%esp),%eax		# less required stack frame size,
	subl $NON_SPLIT_STACK,%eax	# less space for non-split code.
	cmpl %gs:0x30,%eax		# See if we have enough space.
	jb 2f				# Get more space if we need it.

	# Here the stack is
	#	%esp + 20:	stack pointer after two returns
	#	%esp + 16:	return address of morestack caller's caller
	#	%esp + 12:	size of parameters
	#	%esp + 8:	new stack frame size
	#	%esp + 4:	return address of this function
	#	%esp:		saved %eax
	#
	# Since we aren't doing a full split stack, we don't need to
	# do anything when our caller returns.  So we return to our
	# caller rather than calling it, and let it return as usual.
	# To make that work we adjust the return address.

	# This breaks call/return address prediction for the call to
	# this function.  I can't figure out a way to make it work
	# short of copying the parameters down the stack, which will
	# probably take more clock cycles than we will lose breaking
	# call/return address prediction.  We will only break
	# prediction for this call, not for our caller.

	movl 4(%esp),%eax		# Increment the return address
	cmpb $0xc3,(%eax)		# to skip the ret instruction;
	je 1f				# see above.
	addl $2,%eax
1:	inc %eax

	# If the instruction that we return to is
	#  leal  20(%ebp),{%eax,%ecx,%edx}
	# then we have been called by a varargs function that expects
	# %ebp to hold a real value.  That can only work if we do the
	# full stack split routine.  FIXME: This is fragile.
	cmpb $0x8d,(%eax)		# leal opcode?
	jne 3f
	cmpb $0x14,2(%eax)		# disp8 == 20?
	jne 3f
	cmpb $0x45,1(%eax)		# ModRM for %eax, %ecx or %edx?
	je 2f
	cmpb $0x4d,1(%eax)
	je 2f
	cmpb $0x55,1(%eax)
	je 2f

3:
	movl %eax,4(%esp)		# Update return address.

	popl %eax			# Restore %eax and stack.

	.cfi_adjust_cfa_offset -4	# Account for popped register.

	ret $8				# Return to caller, popping args.

2:
	.cfi_adjust_cfa_offset 4	# Back to where we were.

	popl %eax			# Restore %eax and stack.

	.cfi_adjust_cfa_offset -4	# Account for popped register.

	# Increment space we request.
	addl $NON_SPLIT_STACK+0x1000+BACKOFF,4(%esp)

	# Fall through into morestack.

#else

	# See below for an extended explanation of this.
	.cfi_def_cfa %rsp,16

	pushq %rax			# Save %rax in case caller is using
					# it to preserve original %r10.
	.cfi_adjust_cfa_offset 8	# Adjust for pushed register.

	movq %rsp,%rax			# Current stack,
	subq %r10,%rax			# less required stack frame size,
	subq $NON_SPLIT_STACK,%rax	# less space for non-split code.

#ifdef __LP64__
	cmpq %fs:0x70,%rax		# See if we have enough space.
#else
	cmpl %fs:0x40,%eax
#endif

	jb 2f				# Get more space if we need it.

	# If the instruction that we return to is
	#  leaq  24(%rbp), %r11
	# then we have been called by a varargs function that expects
	# %ebp to hold a real value.  That can only work if we do the
	# full stack split routine.  FIXME: This is fragile.
	movq 8(%rsp),%rax
	incq %rax			# Skip ret instruction in caller.
	cmpl $0x185d8d4c,(%rax)		# Encoding of leaq 24(%rbp),%r11.
	je 2f

	# This breaks call/return prediction, as described above.
	incq 8(%rsp)			# Increment the return address.

	popq %rax			# Restore register.

	.cfi_adjust_cfa_offset -8	# Adjust for popped register.

	ret				# Return to caller.

2:
	popq %rax			# Restore register.

	.cfi_adjust_cfa_offset -8	# Adjust for popped register.

	# Increment space we request.
	addq $NON_SPLIT_STACK+0x1000+BACKOFF,%r10

	# Fall through into morestack.

#endif

	.cfi_endproc
#ifdef __ELF__
	.size	__morestack_non_split, . - __morestack_non_split
#endif
 
# __morestack_non_split falls through into __morestack.
 
 
# The __morestack function.
#
# Runtime support for -fsplit-stack.  A function prologue jumps here
# when its current stack segment is too small.  This routine obtains a
# larger segment from __generic_morestack, switches the stack pointer
# to it, calls back into its caller, and releases the segment again via
# __generic_releasestack when the caller returns.  Signals are blocked
# around the segment bookkeeping, and a cleanup landing pad (.L1)
# restores the stack guard if an exception unwinds through the
# .LEHB0/.LEHE0 region.

.global __morestack
.hidden __morestack

#ifdef __ELF__
.type __morestack,@function
#endif

__morestack:
.LFB1:
.cfi_startproc


#ifndef __x86_64__


# The 32-bit __morestack function.

# We use a cleanup to restore the stack guard if an exception
# is thrown through this code.
#ifndef __PIC__
.cfi_personality 0,__gcc_personality_v0
.cfi_lsda 0,.LLSDA1
#else
.cfi_personality 0x9b,DW.ref.__gcc_personality_v0
.cfi_lsda 0x1b,.LLSDA1
#endif

# We return below with a ret $8. We will return to a single
# return instruction, which will return to the caller of our
# caller. We let the unwinder skip that single return
# instruction, and just return to the real caller.

# Here CFA points just past the return address on the stack,
# e.g., on function entry it is %esp + 4. The stack looks
# like this:
# CFA + 12: stack pointer after two returns
# CFA + 8: return address of morestack caller's caller
# CFA + 4: size of parameters
# CFA: new stack frame size
# CFA - 4: return address of this function
# CFA - 8: previous value of %ebp; %ebp points here
# Setting the new CFA to be the current CFA + 12 (i.e., %esp +
# 16) will make the unwinder pick up the right return address.

.cfi_def_cfa %esp,16

pushl %ebp
.cfi_adjust_cfa_offset 4
.cfi_offset %ebp, -20
movl %esp,%ebp
.cfi_def_cfa_register %ebp

# In 32-bit mode the parameters are pushed on the stack. The
# argument size is pushed then the new stack frame size is
# pushed.

# In the body of a non-leaf function, the stack pointer will
# be aligned to a 16-byte boundary. That is CFA + 12 in the
# stack picture above: (CFA + 12) % 16 == 0. At this point we
# have %esp == CFA - 8, so %esp % 16 == 12. We need some
# space for saving registers and passing parameters, and we
# need to wind up with %esp % 16 == 0.
subl $44,%esp

# Because our cleanup code may need to clobber %ebx, we need
# to save it here so the unwinder can restore the value used
# by the caller. Note that we don't have to restore the
# register, since we don't change it, we just have to save it
# for the unwinder.
movl %ebx,-4(%ebp)
.cfi_offset %ebx, -24

# In 32-bit mode the registers %eax, %edx, and %ecx may be
# used for parameters, depending on the regparm and fastcall
# attributes.

movl %eax,-8(%ebp)
movl %edx,-12(%ebp)
movl %ecx,-16(%ebp)

call __morestack_block_signals

movl 12(%ebp),%eax # The size of the parameters.
movl %eax,8(%esp)
leal 20(%ebp),%eax # Address of caller's parameters.
movl %eax,4(%esp)
addl $BACKOFF,8(%ebp) # Ask for backoff bytes.
leal 8(%ebp),%eax # The address of the new frame size.
movl %eax,(%esp)

call __generic_morestack

movl %eax,%esp # Switch to the new stack.
subl 8(%ebp),%eax # The end of the stack space.
addl $BACKOFF,%eax # Back off 512 bytes.

.LEHB0:
# FIXME: The offset must match
# TARGET_THREAD_SPLIT_STACK_OFFSET in
# gcc/config/i386/linux.h.
movl %eax,%gs:0x30 # Save the new stack boundary.

call __morestack_unblock_signals

movl -12(%ebp),%edx # Restore registers.
movl -16(%ebp),%ecx

movl 4(%ebp),%eax # Increment the return address
cmpb $0xc3,(%eax) # to skip the ret instruction;
je 1f # see above.
addl $2,%eax
1: inc %eax

movl %eax,-12(%ebp) # Store return address in an
# unused slot.

movl -8(%ebp),%eax # Restore the last register.

call *-12(%ebp) # Call our caller!

# The caller will return here, as predicted.

# Save the registers which may hold a return value. We
# assume that __generic_releasestack does not touch any
# floating point or vector registers.
pushl %eax
pushl %edx

# Push the arguments to __generic_releasestack now so that the
# stack is at a 16-byte boundary for
# __morestack_block_signals.
pushl $0 # Where the available space is returned.
leal 0(%esp),%eax # Push its address.
push %eax

call __morestack_block_signals

call __generic_releasestack

subl 4(%esp),%eax # Subtract available space.
addl $BACKOFF,%eax # Back off 512 bytes.
.LEHE0:
movl %eax,%gs:0x30 # Save the new stack boundary.

addl $8,%esp # Remove values from stack.

# We need to restore the old stack pointer, which is in %ebp,
# before we unblock signals. We also need to restore %eax and
# %edx after we unblock signals but before we return. Do this
# by moving %eax and %edx from the current stack to the old
# stack.

popl %edx # Pop return value from current stack.
popl %eax

movl %ebp,%esp # Restore stack pointer.

# As before, we now have %esp % 16 == 12.

pushl %eax # Push return value on old stack.
pushl %edx
subl $4,%esp # Align stack to 16-byte boundary.

call __morestack_unblock_signals

addl $4,%esp
popl %edx # Restore return value.
popl %eax

.cfi_remember_state

# We never changed %ebx, so we don't have to actually restore it.
.cfi_restore %ebx

popl %ebp
.cfi_restore %ebp
.cfi_def_cfa %esp, 16
ret $8 # Return to caller, which will
# immediately return. Pop
# arguments as we go.

# This is the cleanup code called by the stack unwinder when unwinding
# through the code between .LEHB0 and .LEHE0 above.
.L1:
.cfi_restore_state
subl $16,%esp # Maintain 16 byte alignment.
movl %eax,4(%esp) # Save exception header.
movl %ebp,(%esp) # Stack pointer after resume.
call __generic_findstack
movl %ebp,%ecx # Get the stack pointer.
subl %eax,%ecx # Subtract available space.
addl $BACKOFF,%ecx # Back off 512 bytes.
movl %ecx,%gs:0x30 # Save new stack boundary.
movl 4(%esp),%eax # Function argument.
movl %eax,(%esp)
#ifdef __PIC__
call __x86.get_pc_thunk.bx # %ebx may not be set up for us.
addl $_GLOBAL_OFFSET_TABLE_, %ebx
call _Unwind_Resume@PLT # Resume unwinding.
#else
call _Unwind_Resume
#endif

#else /* defined(__x86_64__) */


# The 64-bit __morestack function.

# We use a cleanup to restore the stack guard if an exception
# is thrown through this code.
#ifndef __PIC__
.cfi_personality 0x3,__gcc_personality_v0
.cfi_lsda 0x3,.LLSDA1
#else
.cfi_personality 0x9b,DW.ref.__gcc_personality_v0
.cfi_lsda 0x1b,.LLSDA1
#endif

# We will return a single return instruction, which will
# return to the caller of our caller. Let the unwinder skip
# that single return instruction, and just return to the real
# caller.
.cfi_def_cfa %rsp,16

# Set up a normal backtrace.
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp, -24
movq %rsp, %rbp
.cfi_def_cfa_register %rbp

# In 64-bit mode the new stack frame size is passed in r10
# and the argument size is passed in r11.

addq $BACKOFF,%r10 # Ask for backoff bytes.
pushq %r10 # Save new frame size.

# In 64-bit mode the registers %rdi, %rsi, %rdx, %rcx, %r8,
# and %r9 may be used for parameters. We also preserve %rax
# which the caller may use to hold %r10.

pushq %rax
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9

pushq %r11

# We entered morestack with the stack pointer aligned to a
# 16-byte boundary (the call to morestack's caller used 8
# bytes, and the call to morestack used 8 bytes). We have now
# pushed 10 registers, so we are still aligned to a 16-byte
# boundary.

call __morestack_block_signals

leaq -8(%rbp),%rdi # Address of new frame size.
leaq 24(%rbp),%rsi # The caller's parameters.
popq %rdx # The size of the parameters.

subq $8,%rsp # Align stack.

call __generic_morestack

movq -8(%rbp),%r10 # Reload modified frame size
movq %rax,%rsp # Switch to the new stack.
subq %r10,%rax # The end of the stack space.
addq $BACKOFF,%rax # Back off 1024 bytes.

.LEHB0:
# FIXME: The offset must match
# TARGET_THREAD_SPLIT_STACK_OFFSET in
# gcc/config/i386/linux64.h.
# Macro to save the new stack boundary.
#ifdef __LP64__
#define X86_64_SAVE_NEW_STACK_BOUNDARY(reg) movq %r##reg,%fs:0x70
#else
#define X86_64_SAVE_NEW_STACK_BOUNDARY(reg) movl %e##reg,%fs:0x40
#endif
X86_64_SAVE_NEW_STACK_BOUNDARY (ax)

call __morestack_unblock_signals

movq -24(%rbp),%rdi # Restore registers.
movq -32(%rbp),%rsi
movq -40(%rbp),%rdx
movq -48(%rbp),%rcx
movq -56(%rbp),%r8
movq -64(%rbp),%r9

movq 8(%rbp),%r10 # Increment the return address
incq %r10 # to skip the ret instruction;
# see above.

movq -16(%rbp),%rax # Restore caller's %rax.

call *%r10 # Call our caller!

# The caller will return here, as predicted.

# Save the registers which may hold a return value. We
# assume that __generic_releasestack does not touch any
# floating point or vector registers.
pushq %rax
pushq %rdx

call __morestack_block_signals

pushq $0 # For alignment.
pushq $0 # Where the available space is returned.
leaq 0(%rsp),%rdi # Pass its address.

call __generic_releasestack

subq 0(%rsp),%rax # Subtract available space.
addq $BACKOFF,%rax # Back off 1024 bytes.
.LEHE0:
X86_64_SAVE_NEW_STACK_BOUNDARY (ax)

addq $16,%rsp # Remove values from stack.

# We need to restore the old stack pointer, which is in %rbp,
# before we unblock signals. We also need to restore %rax and
# %rdx after we unblock signals but before we return. Do this
# by moving %rax and %rdx from the current stack to the old
# stack.

popq %rdx # Pop return value from current stack.
popq %rax

movq %rbp,%rsp # Restore stack pointer.

# Now (%rsp % 16) == 8.

subq $8,%rsp # For alignment.
pushq %rax # Push return value on old stack.
pushq %rdx

call __morestack_unblock_signals

popq %rdx # Restore return value.
popq %rax
addq $8,%rsp

.cfi_remember_state
popq %rbp
.cfi_restore %rbp
.cfi_def_cfa %rsp, 16
ret # Return to caller, which will
# immediately return.

# This is the cleanup code called by the stack unwinder when unwinding
# through the code between .LEHB0 and .LEHE0 above.
.L1:
.cfi_restore_state
subq $16,%rsp # Maintain 16 byte alignment.
movq %rax,(%rsp) # Save exception header.
movq %rbp,%rdi # Stack pointer after resume.
call __generic_findstack
movq %rbp,%rcx # Get the stack pointer.
subq %rax,%rcx # Subtract available space.
addq $BACKOFF,%rcx # Back off 1024 bytes.
X86_64_SAVE_NEW_STACK_BOUNDARY (cx)
movq (%rsp),%rdi # Restore exception data for call.
#ifdef __PIC__
call _Unwind_Resume@PLT # Resume unwinding.
#else
call _Unwind_Resume # Resume unwinding.
#endif

#endif /* defined(__x86_64__) */

.cfi_endproc
#ifdef __ELF__
.size __morestack, . - __morestack
#endif
 
#if !defined(__x86_64__) && defined(__PIC__)
# Output the thunk to get PC into bx, since we use it above.
# It loads its own return address (the caller's PC after the call
# instruction) into %ebx; emitted in a comdat group so that multiple
# objects defining the same thunk collapse to one copy at link time.
.section .text.__x86.get_pc_thunk.bx,"axG",@progbits,__x86.get_pc_thunk.bx,comdat
.globl __x86.get_pc_thunk.bx
.hidden __x86.get_pc_thunk.bx
#ifdef __ELF__
.type __x86.get_pc_thunk.bx, @function
#endif
__x86.get_pc_thunk.bx:
.cfi_startproc
movl (%esp), %ebx
ret
.cfi_endproc
#ifdef __ELF__
.size __x86.get_pc_thunk.bx, . - __x86.get_pc_thunk.bx
#endif
#endif
 
# The exception table. This tells the personality routine to execute
# the exception handler.
#
# The single call-site entry covers the .LEHB0..LEHE0 region of
# __morestack and names .L1 as the landing pad, with action 0
# (cleanup only, no exception matching).

.section .gcc_except_table,"a",@progbits
.align 4
.LLSDA1:
.byte 0xff # @LPStart format (omit)
.byte 0xff # @TType format (omit)
.byte 0x1 # call-site format (uleb128)
.uleb128 .LLSDACSE1-.LLSDACSB1 # Call-site table length
.LLSDACSB1:
.uleb128 .LEHB0-.LFB1 # region 0 start
.uleb128 .LEHE0-.LEHB0 # length
.uleb128 .L1-.LFB1 # landing pad
.uleb128 0 # action
.LLSDACSE1:


.global __gcc_personality_v0
#ifdef __PIC__
# Build a position independent reference to the basic
# personality function.
.hidden DW.ref.__gcc_personality_v0
.weak DW.ref.__gcc_personality_v0
.section .data.DW.ref.__gcc_personality_v0,"awG",@progbits,DW.ref.__gcc_personality_v0,comdat
.type DW.ref.__gcc_personality_v0, @object
DW.ref.__gcc_personality_v0:
#ifndef __LP64__
.align 4
.size DW.ref.__gcc_personality_v0, 4
.long __gcc_personality_v0
#else
.align 8
.size DW.ref.__gcc_personality_v0, 8
.quad __gcc_personality_v0
#endif
#endif
 
#if defined __x86_64__ && defined __LP64__

# This entry point is used for the large model. With this entry point
# the upper 32 bits of %r10 hold the argument size and the lower 32
# bits hold the new stack frame size. There doesn't seem to be a way
# to know in the assembler code that we are assembling for the large
# model, and there doesn't seem to be a large model multilib anyhow.
# If one is developed, then the non-PIC code is probably OK since we
# will probably be close to the morestack code, but the PIC code
# almost certainly needs to be changed. FIXME.

.text
.global __morestack_large_model
.hidden __morestack_large_model

#ifdef __ELF__
.type __morestack_large_model,@function
#endif

__morestack_large_model:

.cfi_startproc

movq %r10, %r11 # Copy the packed operand, then
andl $0xffffffff, %r10d # keep low half (frame size) in %r10
sarq $32, %r11 # and high half (arg size) in %r11,
jmp __morestack # matching __morestack's convention.

.cfi_endproc
#ifdef __ELF__
.size __morestack_large_model, . - __morestack_large_model
#endif

#endif /* __x86_64__ && __LP64__ */
 
# Initialize the stack test value when the program starts or when a
# new thread starts. We don't know how large the main stack is, so we
# guess conservatively. We might be able to use getrlimit here.
#
# Sets the split-stack guard to 16000 bytes below the current stack
# pointer and tells the generic code about the initial stack via
# __generic_morestack_set_initial_sp.

.text
.global __stack_split_initialize
.hidden __stack_split_initialize

#ifdef __ELF__
.type __stack_split_initialize, @function
#endif

__stack_split_initialize:

#ifndef __x86_64__

leal -16000(%esp),%eax # We should have at least 16K.
movl %eax,%gs:0x30 # Store guard in the TLS slot.
subl $4,%esp # Align stack.
pushl $16000
pushl %esp
#ifdef __PIC__
call __generic_morestack_set_initial_sp@PLT
#else
call __generic_morestack_set_initial_sp
#endif
addl $12,%esp
ret

#else /* defined(__x86_64__) */

leaq -16000(%rsp),%rax # We should have at least 16K.
X86_64_SAVE_NEW_STACK_BOUNDARY (ax)
subq $8,%rsp # Align stack.
movq %rsp,%rdi
movq $16000,%rsi
#ifdef __PIC__
call __generic_morestack_set_initial_sp@PLT
#else
call __generic_morestack_set_initial_sp
#endif
addq $8,%rsp
ret

#endif /* defined(__x86_64__) */

#ifdef __ELF__
.size __stack_split_initialize, . - __stack_split_initialize
#endif
 
# Routines to get and set the guard, for __splitstack_getcontext,
# __splitstack_setcontext, and __splitstack_makecontext.

# void *__morestack_get_guard (void) returns the current stack guard.
.text
.global __morestack_get_guard
.hidden __morestack_get_guard

#ifdef __ELF__
.type __morestack_get_guard,@function
#endif

__morestack_get_guard:

#ifndef __x86_64__
movl %gs:0x30,%eax # Guard lives in the split-stack TLS slot.
#else
#ifdef __LP64__
movq %fs:0x70,%rax
#else
movl %fs:0x40,%eax # x32: 32-bit slot at a different offset.
#endif
#endif
ret

#ifdef __ELF__
.size __morestack_get_guard, . - __morestack_get_guard
#endif
 
# void __morestack_set_guard (void *) sets the stack guard.
.global __morestack_set_guard
.hidden __morestack_set_guard

#ifdef __ELF__
.type __morestack_set_guard,@function
#endif

__morestack_set_guard:

#ifndef __x86_64__
movl 4(%esp),%eax # Fetch the argument and store it
movl %eax,%gs:0x30 # into the split-stack TLS slot.
#else
X86_64_SAVE_NEW_STACK_BOUNDARY (di)
#endif
ret

#ifdef __ELF__
.size __morestack_set_guard, . - __morestack_set_guard
#endif
 
# void *__morestack_make_guard (void *, size_t) returns the stack
# guard value for a stack.
# Computed as: stack base - size + BACKOFF.
.global __morestack_make_guard
.hidden __morestack_make_guard

#ifdef __ELF__
.type __morestack_make_guard,@function
#endif

__morestack_make_guard:

#ifndef __x86_64__
movl 4(%esp),%eax # Stack base,
subl 8(%esp),%eax # less the stack size,
addl $BACKOFF,%eax # plus the backoff slack.
#else
subq %rsi,%rdi
addq $BACKOFF,%rdi
movq %rdi,%rax
#endif
ret

#ifdef __ELF__
.size __morestack_make_guard, . - __morestack_make_guard
#endif
 
# Make __stack_split_initialize a high priority constructor. FIXME:
# This is ELF specific.

.section .ctors.65535,"aw",@progbits

#ifndef __LP64__
.align 4
.long __stack_split_initialize
.long __morestack_load_mmap
#else
.align 8
.quad __stack_split_initialize
.quad __morestack_load_mmap
#endif

# Marker notes: non-executable stack, and that this object supports
# split stacks.
#ifdef __ELF__
.section .note.GNU-stack,"",@progbits
.section .note.GNU-split-stack,"",@progbits
.section .note.GNU-no-split-stack,"",@progbits
#endif
/contrib/toolchain/gcc/5x/libgcc/config/i386/sfp-exceptions.c
0,0 → 1,108
/*
* Copyright (C) 2012-2015 Free Software Foundation, Inc.
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License and
* a copy of the GCC Runtime Library Exception along with this program;
* see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
* <http://www.gnu.org/licenses/>.
*/
 
#ifndef _SOFT_FLOAT
#include "sfp-machine.h"
 
/* In-memory image of the x87 FPU environment as stored/loaded by the
   fnstenv/fldenv instructions (32-bit protected-mode layout).  Only
   __status_word is modified below; the remaining fields round-trip
   unchanged through fnstenv/fldenv.  */
struct fenv
{
unsigned short int __control_word;
unsigned short int __unused1;
unsigned short int __status_word;
unsigned short int __unused2;
unsigned short int __tags;
unsigned short int __unused3;
unsigned int __eip;
unsigned short int __cs_selector;
unsigned int __opcode:11;
unsigned int __unused4:5;
unsigned int __data_offset;
unsigned short int __data_selector;
unsigned short int __unused5;
};
 
/* Raise the hardware floating-point exceptions whose FP_EX_* bits are
   set in _FEX, so software floating-point results signal exactly like
   hardware arithmetic would.  INVALID, DIVZERO and INEXACT are raised
   by executing a real (SSE or x87) division that faults; DENORM,
   OVERFLOW and UNDERFLOW have no convenient arithmetic trigger, so
   their x87 status-word bits are set directly with fnstenv/fldenv and
   the pending exception is forced with fwait.  */
void
__sfp_handle_exceptions (int _fex)
{
  if (_fex & FP_EX_INVALID)
    {
      float f = 0.0f;            /* 0.0 / 0.0 -> invalid operation.  */
#ifdef __SSE_MATH__
      volatile float r __attribute__ ((unused));
      asm volatile ("%vdivss\t{%0, %d0|%d0, %0}" : "+x" (f));
      r = f; /* Needed to trigger exception. */
#else
      asm volatile ("fdiv\t{%y0, %0|%0, %y0}" : "+t" (f));
      /* No need for fwait, exception is triggered by emitted fstp. */
#endif
    }
  if (_fex & FP_EX_DENORM)
    {
      struct fenv temp;
      asm volatile ("fnstenv\t%0" : "=m" (temp));
      temp.__status_word |= FP_EX_DENORM;
      asm volatile ("fldenv\t%0" : : "m" (temp));
      asm volatile ("fwait");
    }
  if (_fex & FP_EX_DIVZERO)
    {
      float f = 1.0f, g = 0.0f;  /* 1.0 / 0.0 -> divide-by-zero.  */
#ifdef __SSE_MATH__
      volatile float r __attribute__ ((unused));
      asm volatile ("%vdivss\t{%1, %d0|%d0, %1}" : "+x" (f) : "xm" (g));
      r = f; /* Needed to trigger exception. */
#else
      asm volatile ("fdivs\t%1" : "+t" (f) : "m" (g));
      /* No need for fwait, exception is triggered by emitted fstp. */
#endif
    }
  if (_fex & FP_EX_OVERFLOW)
    {
      struct fenv temp;
      asm volatile ("fnstenv\t%0" : "=m" (temp));
      temp.__status_word |= FP_EX_OVERFLOW;
      asm volatile ("fldenv\t%0" : : "m" (temp));
      asm volatile ("fwait");
    }
  if (_fex & FP_EX_UNDERFLOW)
    {
      struct fenv temp;
      asm volatile ("fnstenv\t%0" : "=m" (temp));
      temp.__status_word |= FP_EX_UNDERFLOW;
      asm volatile ("fldenv\t%0" : : "m" (temp));
      asm volatile ("fwait");
    }
  if (_fex & FP_EX_INEXACT)
    {
      float f = 1.0f, g = 3.0f;  /* 1.0 / 3.0 is inexact in binary.  */
#ifdef __SSE_MATH__
      volatile float r __attribute__ ((unused));
      asm volatile ("%vdivss\t{%1, %d0|%d0, %1}" : "+x" (f) : "xm" (g));
      r = f; /* Needed to trigger exception. */
#else
      asm volatile ("fdivs\t%1" : "+t" (f) : "m" (g));
      /* No need for fwait, exception is triggered by emitted fstp. */
#endif
    }
}  /* Stray ';' after the function body removed (pedantic C violation).  */
#endif
/contrib/toolchain/gcc/5x/libgcc/config/i386/sfp-machine.h
0,0 → 1,85
#ifdef __MINGW32__
/* Make sure we are using gnu-style bitfield handling. */
#define _FP_STRUCT_LAYOUT __attribute__ ((gcc_struct))
#endif

/* The type of the result of a floating point comparison. This must
match `__libgcc_cmp_return__' in GCC for the target. */
typedef int __gcc_CMPtype __attribute__ ((mode (__libgcc_cmp_return__)));
#define CMPtype __gcc_CMPtype

/* Word-size dependent soft-fp primitives (fraction add/sub etc.).  */
#ifdef __x86_64__
#include "config/i386/64/sfp-machine.h"
#else
#include "config/i386/32/sfp-machine.h"
#endif

/* Keep NaN payload bits; QNaN bit is set, not cleared, to quiet.  */
#define _FP_KEEPNANFRACP 1
#define _FP_QNANNEGATEDP 0

/* Default NaNs on x86 are negative (sign bit set).  */
#define _FP_NANSIGN_S 1
#define _FP_NANSIGN_D 1
#define _FP_NANSIGN_E 1
#define _FP_NANSIGN_Q 1

/* Here is something Intel misdesigned: the specs don't define
the case where we have two NaNs with same mantissas, but
different sign. Different operations pick up different NaNs. */
#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
do { \
if (_FP_FRAC_GT_##wc(X, Y) \
|| (_FP_FRAC_EQ_##wc(X,Y) && (OP == '+' || OP == '*'))) \
{ \
R##_s = X##_s; \
_FP_FRAC_COPY_##wc(R,X); \
} \
else \
{ \
R##_s = Y##_s; \
_FP_FRAC_COPY_##wc(R,Y); \
} \
R##_c = FP_CLS_NAN; \
} while (0)

#ifndef _SOFT_FLOAT
/* Exception flag bits, matching the x87/SSE status-word layout so
they can be OR'ed directly into the hardware status word (see
sfp-exceptions.c).  */
#define FP_EX_INVALID 0x01
#define FP_EX_DENORM 0x02
#define FP_EX_DIVZERO 0x04
#define FP_EX_OVERFLOW 0x08
#define FP_EX_UNDERFLOW 0x10
#define FP_EX_INEXACT 0x20
#define FP_EX_ALL \
(FP_EX_INVALID | FP_EX_DENORM | FP_EX_DIVZERO | FP_EX_OVERFLOW \
| FP_EX_UNDERFLOW | FP_EX_INEXACT)

void __sfp_handle_exceptions (int);

/* Raise accumulated exceptions (_fex) via the hardware FPU.  */
#define FP_HANDLE_EXCEPTIONS \
do { \
if (__builtin_expect (_fex, 0)) \
__sfp_handle_exceptions (_fex); \
} while (0);

/* Control-word mask bits are "1 = masked", hence the negation.
FP_EX_SHIFT and FP_RND_MASK come from the 32/64 headers above.  */
#define FP_TRAPPING_EXCEPTIONS ((~_fcw >> FP_EX_SHIFT) & FP_EX_ALL)

#define FP_ROUNDMODE (_fcw & FP_RND_MASK)
#endif

#define _FP_TININESS_AFTER_ROUNDING 1

#define __LITTLE_ENDIAN 1234
#define __BIG_ENDIAN 4321

#define __BYTE_ORDER __LITTLE_ENDIAN

/* Define ALIASNAME as a strong alias for NAME. */
#if defined __MACH__
/* Mach-O doesn't support aliasing. If these functions ever return
anything but CMPtype we need to revisit this... */
#define strong_alias(name, aliasname) \
CMPtype aliasname (TFtype a, TFtype b) { return name(a, b); }
#else
# define strong_alias(name, aliasname) _strong_alias(name, aliasname)
# define _strong_alias(name, aliasname) \
extern __typeof (name) aliasname __attribute__ ((alias (#name)));
#endif
/contrib/toolchain/gcc/5x/libgcc/config/i386/sol2-c1.S
0,0 → 1,173
/* crt1.s for Solaris 2, x86
 
Copyright (C) 1993-2015 Free Software Foundation, Inc.
Written By Fred Fish, Nov 1992
 
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
 
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
 
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
 
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
 
 
/* This file takes control of the process from the kernel, as specified
in section 3 of the System V Application Binary Interface, Intel386
Processor Supplement. It has been constructed from information obtained
from the ABI, information obtained from single stepping existing
Solaris executables through their startup code with gdb, and from
information obtained by single stepping executables on other i386 SVR4
implementations. This file is the first thing linked into any
executable. */
 
/* Process entry point for Solaris 2/x86: builds the initial stack
   frame, registers cleanup/termination handlers with atexit(),
   computes _environ, and calls _init, __fpstart and main; the value
   returned by main is passed to exit.  */
#ifndef GCRT1
.ident "GNU C crt1.s"
#define CLEANUP _cleanup
#else
/* This is a modified crt1.s by J.W.Hawtin <oolon@ankh.org> 15/8/96,
to allow program profiling, by calling monstartup on entry and _mcleanup
on exit. */
.ident "GNU C gcrt1.s"
#define CLEANUP _mcleanup
#endif
.weak _cleanup
.weak _DYNAMIC
.text

/* Start creating the initial frame by pushing a NULL value for the return
address of the initial frame, and mark the end of the stack frame chain
(the innermost stack frame) with a NULL value, per page 3-32 of the ABI.
Initialize the first stack frame pointer in %ebp (the contents of which
are unspecified at process initialization). */

.globl _start
_start:
pushl $0x0
pushl $0x0
movl %esp,%ebp

/* As specified per page 3-32 of the ABI, %edx contains a function
pointer that should be registered with atexit(), for proper
shared object termination. Just push it onto the stack for now
to preserve it. We want to register _cleanup() first. */

pushl %edx

/* Check to see if there is an _cleanup() function linked in, and if
so, register it with atexit() as the last thing to be run by
atexit(). */

movl $CLEANUP,%eax
testl %eax,%eax
je .L1
pushl $CLEANUP
call atexit
addl $0x4,%esp
.L1:

/* Now check to see if we have an _DYNAMIC table, and if so then
we need to register the function pointer previously in %edx, but
now conveniently saved on the stack as the argument to pass to
atexit(). */

movl $_DYNAMIC,%eax
testl %eax,%eax
je .L2
call atexit
.L2:

/* Register _fini() with atexit(). We will take care of calling _init()
directly. */

pushl $_fini
call atexit

#ifdef GCRT1
/* Start profiling. */

pushl %ebp
movl %esp,%ebp
pushl $_etext
pushl $_start
call monstartup
addl $8,%esp
popl %ebp
#endif

/* Compute the address of the environment vector on the stack and load
it into the global variable _environ. Currently argc is at 8 off
the frame pointer. Fetch the argument count into %eax, scale by the
size of each arg (4 bytes) and compute the address of the environment
vector which is 16 bytes (the two zero words we pushed, plus argc,
plus the null word terminating the arg vector) further up the stack,
off the frame pointer (whew!). */

movl 8(%ebp),%eax
leal 16(%ebp,%eax,4),%edx
movl %edx,_environ

/* Push the environment vector pointer, the argument vector pointer,
and the argument count on to the stack to set up the arguments
for _init(), _fpstart(), and main(). Note that the environment
vector pointer and the arg count were previously loaded into
%edx and %eax respectively. The only new value we need to compute
is the argument vector pointer, which is at a fixed address off
the initial frame pointer. */

/* Make sure the stack is properly aligned. */
andl $0xfffffff0,%esp
subl $4,%esp

pushl %edx
leal 12(%ebp),%edx
pushl %edx
pushl %eax

/* Call _init(argc, argv, environ), _fpstart(argc, argv, environ), and
main(argc, argv, environ). */

call _init
call __fpstart
call main

/* Pop the argc, argv, and environ arguments off the stack, push the
value returned from main(), and call exit(). */

addl $12,%esp
pushl %eax
call exit

/* An inline equivalent of _exit, as specified in Figure 3-26 of the ABI. */

pushl $0x0
movl $0x1,%eax
lcall $7,$0

/* If all else fails, just try a halt! */

hlt
.type _start,@function
.size _start,.-_start

#ifndef GCRT1
/* A dummy profiling support routine for non-profiling executables,
in case we link in some objects that have been compiled for profiling. */

.weak _mcount
_mcount:
ret
.type _mcount,@function
.size _mcount,.-_mcount
#endif
/contrib/toolchain/gcc/5x/libgcc/config/i386/sol2-unwind.h
0,0 → 1,244
/* DWARF2 EH unwinding support for AMD x86-64 and x86.
Copyright (C) 2009-2015 Free Software Foundation, Inc.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
 
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
 
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
 
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
 
/* Do code reading to identify a signal frame, and set the frame
state data appropriately. See unwind-dw2.c for the structs. */
 
#include <ucontext.h>
#include <sys/frame.h>
 
#ifdef __x86_64__
 
#define MD_FALLBACK_FRAME_STATE_FOR x86_64_fallback_frame_state
 
/* Recognize the Solaris/amd64 __sighndlr signal trampoline by matching
   its literal code bytes around the saved PC, then walk up three frame
   pointers to the kernel-built ucontext_t and describe the interrupted
   register set to the unwinder.  Returns _URC_END_OF_STACK when PC is
   not inside a recognized trampoline.  */
static _Unwind_Reason_Code
x86_64_fallback_frame_state (struct _Unwind_Context *context,
_Unwind_FrameState *fs)
{
unsigned char *pc = context->ra;
mcontext_t *mctx;
long new_cfa;

if (/* Solaris 10+
------------
<__sighndlr+0>: push %rbp
<__sighndlr+1>: mov %rsp,%rbp
<__sighndlr+4>: callq *%rcx
<__sighndlr+6>: leaveq <--- PC
<__sighndlr+7>: retq */
*(unsigned long *)(pc - 6) == 0xc3c9d1ffe5894855)

/* We need to move up three frames:

<signal handler> <-- context->cfa
__sighndlr
call_user_handler
sigacthandler
<kernel>

context->cfa points into the frame after the saved frame pointer and
saved pc (struct frame).

The ucontext_t structure is in the kernel frame after the signal
number and a siginfo_t *. Since the frame sizes vary even within
Solaris 10 updates, we need to walk the stack to get there. */
{
struct frame *fp = (struct frame *) context->cfa - 1;
struct handler_args {
int signo;
siginfo_t *sip;
ucontext_t ucontext;
} *handler_args;
ucontext_t *ucp;

/* Next frame: __sighndlr frame pointer. */
fp = (struct frame *) fp->fr_savfp;
/* call_user_handler frame pointer. */
fp = (struct frame *) fp->fr_savfp;
/* sigacthandler frame pointer. */
fp = (struct frame *) fp->fr_savfp;

/* The argument area precedes the struct frame. */
handler_args = (struct handler_args *) (fp + 1);
ucp = &handler_args->ucontext;
mctx = &ucp->uc_mcontext;
}
else
return _URC_END_OF_STACK;

new_cfa = mctx->gregs[REG_RSP];

/* CFA is the interrupted %rsp; DWARF column 7 is %rsp. */
fs->regs.cfa_how = CFA_REG_OFFSET;
fs->regs.cfa_reg = 7;
fs->regs.cfa_offset = new_cfa - (long) context->cfa;

/* The SVR4 register numbering macros aren't usable in libgcc.
Column 7 (%rsp) is intentionally skipped: it is the CFA itself. */
fs->regs.reg[0].how = REG_SAVED_OFFSET;
fs->regs.reg[0].loc.offset = (long)&mctx->gregs[REG_RAX] - new_cfa;
fs->regs.reg[1].how = REG_SAVED_OFFSET;
fs->regs.reg[1].loc.offset = (long)&mctx->gregs[REG_RDX] - new_cfa;
fs->regs.reg[2].how = REG_SAVED_OFFSET;
fs->regs.reg[2].loc.offset = (long)&mctx->gregs[REG_RCX] - new_cfa;
fs->regs.reg[3].how = REG_SAVED_OFFSET;
fs->regs.reg[3].loc.offset = (long)&mctx->gregs[REG_RBX] - new_cfa;
fs->regs.reg[4].how = REG_SAVED_OFFSET;
fs->regs.reg[4].loc.offset = (long)&mctx->gregs[REG_RSI] - new_cfa;
fs->regs.reg[5].how = REG_SAVED_OFFSET;
fs->regs.reg[5].loc.offset = (long)&mctx->gregs[REG_RDI] - new_cfa;
fs->regs.reg[6].how = REG_SAVED_OFFSET;
fs->regs.reg[6].loc.offset = (long)&mctx->gregs[REG_RBP] - new_cfa;
fs->regs.reg[8].how = REG_SAVED_OFFSET;
fs->regs.reg[8].loc.offset = (long)&mctx->gregs[REG_R8] - new_cfa;
fs->regs.reg[9].how = REG_SAVED_OFFSET;
fs->regs.reg[9].loc.offset = (long)&mctx->gregs[REG_R9] - new_cfa;
fs->regs.reg[10].how = REG_SAVED_OFFSET;
fs->regs.reg[10].loc.offset = (long)&mctx->gregs[REG_R10] - new_cfa;
fs->regs.reg[11].how = REG_SAVED_OFFSET;
fs->regs.reg[11].loc.offset = (long)&mctx->gregs[REG_R11] - new_cfa;
fs->regs.reg[12].how = REG_SAVED_OFFSET;
fs->regs.reg[12].loc.offset = (long)&mctx->gregs[REG_R12] - new_cfa;
fs->regs.reg[13].how = REG_SAVED_OFFSET;
fs->regs.reg[13].loc.offset = (long)&mctx->gregs[REG_R13] - new_cfa;
fs->regs.reg[14].how = REG_SAVED_OFFSET;
fs->regs.reg[14].loc.offset = (long)&mctx->gregs[REG_R14] - new_cfa;
fs->regs.reg[15].how = REG_SAVED_OFFSET;
fs->regs.reg[15].loc.offset = (long)&mctx->gregs[REG_R15] - new_cfa;
fs->regs.reg[16].how = REG_SAVED_OFFSET;
fs->regs.reg[16].loc.offset = (long)&mctx->gregs[REG_RIP] - new_cfa;
fs->retaddr_column = 16;
fs->signal_frame = 1;

return _URC_NO_REASON;
}
 
#else
 
#define MD_FALLBACK_FRAME_STATE_FOR x86_fallback_frame_state
 
/* 32-bit analogue of the amd64 routine above: match one of three known
   __sighndlr code sequences (Solaris 10, Solaris 11 pre/post snv_125)
   around the saved PC, then recover the interrupted register set from
   the ucontext_t pointer in the trampoline's argument area.  */
static _Unwind_Reason_Code
x86_fallback_frame_state (struct _Unwind_Context *context,
_Unwind_FrameState *fs)
{
unsigned char *pc = context->ra;
mcontext_t *mctx;
long new_cfa;

if (/* Solaris 10
-----------
<__sighndlr+0>: push %ebp
<__sighndlr+1>: mov %esp,%ebp
<__sighndlr+3>: pushl 0x10(%ebp)
<__sighndlr+6>: pushl 0xc(%ebp)
<__sighndlr+9>: pushl 0x8(%ebp)
<__sighndlr+12>: call *0x14(%ebp)
<__sighndlr+15>: add $0xc,%esp <--- PC
<__sighndlr+18>: leave
<__sighndlr+19>: ret */
(*(unsigned long *)(pc - 15) == 0xffec8b55
&& *(unsigned long *)(pc - 11) == 0x75ff1075
&& *(unsigned long *)(pc - 7) == 0x0875ff0c
&& *(unsigned long *)(pc - 3) == 0x831455ff
&& *(unsigned long *)(pc + 1) == 0xc3c90cc4)

|| /* Solaris 11 before snv_125
--------------------------
<__sighndlr+0> push %ebp
<__sighndlr+1> mov %esp,%ebp
<__sighndlr+4> pushl 0x10(%ebp)
<__sighndlr+6> pushl 0xc(%ebp)
<__sighndlr+9> pushl 0x8(%ebp)
<__sighndlr+12> call *0x14(%ebp)
<__sighndlr+15> add $0xc,%esp
<__sighndlr+18> leave <--- PC
<__sighndlr+19> ret */
(*(unsigned long *)(pc - 18) == 0xffec8b55
&& *(unsigned long *)(pc - 14) == 0x7fff107f
&& *(unsigned long *)(pc - 10) == 0x0875ff0c
&& *(unsigned long *)(pc - 6) == 0x83145fff
&& *(unsigned long *)(pc - 1) == 0xc3c90cc4)

|| /* Solaris 11 since snv_125
-------------------------
<__sighndlr+0> push %ebp
<__sighndlr+1> mov %esp,%ebp
<__sighndlr+3> and $0xfffffff0,%esp
<__sighndlr+6> sub $0x4,%esp
<__sighndlr+9> pushl 0x10(%ebp)
<__sighndlr+12> pushl 0xc(%ebp)
<__sighndlr+15> pushl 0x8(%ebp)
<__sighndlr+18> call *0x14(%ebp)
<__sighndlr+21> leave <--- PC
<__sighndlr+22> ret */
(*(unsigned long *)(pc - 21) == 0x83ec8b55
&& *(unsigned long *)(pc - 17) == 0xec83f0e4
&& *(unsigned long *)(pc - 13) == 0x1075ff04
&& *(unsigned long *)(pc - 9) == 0xff0c75ff
&& *(unsigned long *)(pc - 5) == 0x55ff0875
&& (*(unsigned long *)(pc - 1) & 0x00ffffff) == 0x00c3c914))
{
/* Unlike amd64, the ucontext_t is reached directly through the
pointer in the handler's argument area at context->cfa.  */
struct handler_args {
int signo;
siginfo_t *sip;
ucontext_t *ucontext;
} *handler_args = context->cfa;
mctx = &handler_args->ucontext->uc_mcontext;
}
else
return _URC_END_OF_STACK;

new_cfa = mctx->gregs[UESP];

/* CFA is the interrupted %esp; DWARF column 4 is %esp. */
fs->regs.cfa_how = CFA_REG_OFFSET;
fs->regs.cfa_reg = 4;
fs->regs.cfa_offset = new_cfa - (long) context->cfa;

/* The SVR4 register numbering macros aren't usable in libgcc. */
fs->regs.reg[0].how = REG_SAVED_OFFSET;
fs->regs.reg[0].loc.offset = (long)&mctx->gregs[EAX] - new_cfa;
fs->regs.reg[3].how = REG_SAVED_OFFSET;
fs->regs.reg[3].loc.offset = (long)&mctx->gregs[EBX] - new_cfa;
fs->regs.reg[1].how = REG_SAVED_OFFSET;
fs->regs.reg[1].loc.offset = (long)&mctx->gregs[ECX] - new_cfa;
fs->regs.reg[2].how = REG_SAVED_OFFSET;
fs->regs.reg[2].loc.offset = (long)&mctx->gregs[EDX] - new_cfa;
fs->regs.reg[6].how = REG_SAVED_OFFSET;
fs->regs.reg[6].loc.offset = (long)&mctx->gregs[ESI] - new_cfa;
fs->regs.reg[7].how = REG_SAVED_OFFSET;
fs->regs.reg[7].loc.offset = (long)&mctx->gregs[EDI] - new_cfa;
fs->regs.reg[5].how = REG_SAVED_OFFSET;
fs->regs.reg[5].loc.offset = (long)&mctx->gregs[EBP] - new_cfa;
fs->regs.reg[8].how = REG_SAVED_OFFSET;
fs->regs.reg[8].loc.offset = (long)&mctx->gregs[EIP] - new_cfa;
fs->retaddr_column = 8;

/* SIGFPE for IEEE-754 exceptions is delivered after the faulting insn
rather than before it, so don't set fs->signal_frame in that case.
We test whether the ES field of the Status Register is zero. */
if ((mctx->fpregs.fp_reg_set.fpchip_state.status & 0x80) == 0)
fs->signal_frame = 1;

return _URC_NO_REASON;
}
 
#endif
/contrib/toolchain/gcc/5x/libgcc/config/i386/value-unwind.h
0,0 → 1,25
/* Store register values as _Unwind_Word type in DWARF2 EH unwind context.
Copyright (C) 2011-2015 Free Software Foundation, Inc.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 3, or (at your
option) any later version.
 
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for more details.
 
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
 
/* Define this macro if the target stores register values as _Unwind_Word
type in unwind context. Only enable it for x32. */
#if defined __x86_64 && !defined __LP64__
# define REG_VALUE_IN_UNWIND_CONTEXT
#endif
/contrib/toolchain/gcc/5x/libgcc/config/i386/w32-unwind.h
0,0 → 1,207
/* Definitions for Dwarf2 EH unwind support for Windows32 targets
Copyright (C) 2007-2015 Free Software Foundation, Inc.
Contributed by Pascal Obry <obry@adacore.com>
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
 
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
 
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
 
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
 
 
/* This file implements the md_fallback_frame_state_for routine for
Windows, triggered when the GCC table based unwinding process hits a
frame for which no unwind info has been registered. This typically
occurs when raising an exception from a signal handler, because the
handler is actually called from the OS kernel.
 
The basic idea is to detect that we are indeed trying to unwind past a
signal handler and to fill out the GCC internal unwinding structures for
the OS kernel frame as if it had been directly called from the
interrupted context.
 
This is all assuming that the code to set the handler asked the kernel
to pass a pointer to such context information.
 
   There are three main parts.
 
1) The first thing to do is to check if we are in a signal context. If
not we can just return as there is nothing to do. We are probably on
some foreign code for which no unwind frame can be found. If this is
a call from the Windows signal handler, then:
 
2) We must get the signal context information.
 
* With the standard exception filter:
 
This is on Windows pointed to by an EXCEPTION_POINTERS. We know that
   the signal handler will call an UnhandledExceptionFilter with this
parameter. The spec for this routine is:
 
LONG WINAPI UnhandledExceptionFilter(struct _EXCEPTION_POINTERS*);
 
So the pointer to struct _EXCEPTION_POINTERS must be somewhere on the
stack.
 
This was found experimentally to always be at offset 0 of the context
frame in all cases handled by this implementation.
 
* With the SEH exception handler:
 
In this case the signal context is directly on the stack as the SEH
exception handler has the following prototype:
 
DWORD
SEH_error_handler (PEXCEPTION_RECORD ExceptionRecord,
PVOID EstablisherFrame,
PCONTEXT ContextRecord,
PVOID DispatcherContext)
 
This was found experimentally to always be at offset 56 of the
context frame in all cases handled by this implementation.
 
3) When we have the signal context we just have to save some registers
and set the return address based on the program counter (Eip).
 
Note that this implementation follows closely the same principles as the
GNU/Linux and OSF ones. */
 
#ifndef __MINGW64__

#define WIN32_MEAN_AND_LEAN
#include <windows.h>

/* Byte-pattern predicates evaluated at pc_ (the return address local in
   i386_w32_fallback_frame_state) to recognize known Windows
   signal-delivery code sequences.  All offsets and bytes were found
   experimentally — see the file header comment.  */

/* In a standard exception filter */

#define SIG_PAT1 \
  (pc_[-2] == 0xff && pc_[-1] == 0xd0 /* call %eax */ \
   && pc_[0] == 0x83 && pc_[1] == 0xf8) /* cmp 0xdepl,%eax */

#define SIG_PAT2 \
  (pc_[-5] == 0xe8 && pc_[-4] == 0x68 /* call (depl16) */ \
   && pc_[0] == 0xc3) /* ret */

/* In a Win32 SEH handler */

#define SIG_SEH1 \
  (pc_[-5] == 0xe8 /* call addr */ \
   && pc_[0] == 0x83 && pc_[1] == 0xc4 /* add 0xval,%esp */ \
   && pc_[3] == 0xb8) /* mov 0xval,%eax */

#define SIG_SEH2 \
  (pc_[-5] == 0x8b && pc_[-4] == 0x4d /* mov depl(%ebp),%ecx */ \
   && pc_[0] == 0x64 && pc_[1] == 0x8b) /* mov %fs:(0),<reg> */ \
 
/* In the GCC alloca (stack probing) */

#define SIG_ALLOCA \
  (pc_[-1] == 0x83 /* orl $0x0,(%ecx) */ \
   && pc_[0] == 0x9 && pc_[1] == 0 \
   && pc_[2] == 0x2d && pc_[3] == 0 /* subl $0x1000,%eax */ \
   && pc_[4] == 0x10 && pc_[5] == 0)

/* Hook this file's handler into the generic unwinder fallback path.  */
#define MD_FALLBACK_FRAME_STATE_FOR i386_w32_fallback_frame_state
 
/* Fallback frame-state builder for 32-bit Windows: called when the
   unwinder hits a frame with no unwind info.  If the return address
   matches one of the SIG_* byte patterns above, reconstruct the frame
   from the Windows CONTEXT record (signal case) or synthesize the
   _alloca stack-probe frame; otherwise report end of stack.  */
static _Unwind_Reason_Code
i386_w32_fallback_frame_state (struct _Unwind_Context *context,
                               _Unwind_FrameState *fs)

{
  void * ctx_ra_ = (void *)(context->ra); /* return address */
  void * ctx_cfa_ = (void *)(context->cfa); /* context frame address */
  unsigned char * pc_ = (unsigned char *) ctx_ra_;

  /* In the test below we look for specific patterns found
     experimentally to be in the Windows signal handler.  */
  if (SIG_PAT1 || SIG_PAT2 || SIG_SEH1 || SIG_SEH2)
    {
      PEXCEPTION_POINTERS weinfo_;
      PCONTEXT proc_ctx_;
      long new_cfa_;

      /* Locate the CONTEXT record.  The stack offsets (56 for SEH1,
         8 for SEH2, 0 for the exception-filter case) were determined
         experimentally — see the file header comment.  */
      if (SIG_SEH1)
        proc_ctx_ = (PCONTEXT) (*(int*)(ctx_cfa_ + 56));
      else if (SIG_SEH2)
        proc_ctx_ = (PCONTEXT) (*(int*)(ctx_cfa_ + 8));
      else
        {
          weinfo_ = (PEXCEPTION_POINTERS) (*(int*)ctx_cfa_);
          proc_ctx_ = weinfo_->ContextRecord;
        }

      /* The new context frame address is the stack pointer.  */
      new_cfa_ = proc_ctx_->Esp;
      fs->regs.cfa_how = CFA_REG_OFFSET;
      fs->regs.cfa_reg = __builtin_dwarf_sp_column();
      fs->regs.cfa_offset = new_cfa_ - (long) ctx_cfa_;

      /* Restore registers: expose each saved register in the CONTEXT
         record at its DWARF column (0=%eax 1=%ecx 2=%edx 3=%ebx
         5=%ebp 6=%esi 7=%edi 8=%eip), as an offset from the CFA.  */
      fs->regs.reg[0].how = REG_SAVED_OFFSET;
      fs->regs.reg[0].loc.offset = (long)&proc_ctx_->Eax - new_cfa_;
      fs->regs.reg[3].how = REG_SAVED_OFFSET;
      fs->regs.reg[3].loc.offset = (long)&proc_ctx_->Ebx - new_cfa_;
      fs->regs.reg[1].how = REG_SAVED_OFFSET;
      fs->regs.reg[1].loc.offset = (long)&proc_ctx_->Ecx - new_cfa_;
      fs->regs.reg[2].how = REG_SAVED_OFFSET;
      fs->regs.reg[2].loc.offset = (long)&proc_ctx_->Edx - new_cfa_;
      fs->regs.reg[6].how = REG_SAVED_OFFSET;
      fs->regs.reg[6].loc.offset = (long)&proc_ctx_->Esi - new_cfa_;
      fs->regs.reg[7].how = REG_SAVED_OFFSET;
      fs->regs.reg[7].loc.offset = (long)&proc_ctx_->Edi - new_cfa_;
      fs->regs.reg[5].how = REG_SAVED_OFFSET;
      fs->regs.reg[5].loc.offset = (long)&proc_ctx_->Ebp - new_cfa_;
      fs->regs.reg[8].how = REG_SAVED_OFFSET;
      fs->regs.reg[8].loc.offset = (long)&proc_ctx_->Eip - new_cfa_;
      fs->retaddr_column = 8;
      fs->signal_frame = 1;

      return _URC_NO_REASON;
    }

  /* Unwinding through _alloca, propagating from a trap triggered by
     one of its probes prior to the real SP adjustment.  The only
     operation of interest performed is "pushl %ecx", followed by
     ecx clobbering.  */
  else if (SIG_ALLOCA)
    {
      /* Only one push between entry in _alloca and the probe trap.  */
      long new_cfa_ = (long) ctx_cfa_ + 4;

      fs->regs.cfa_how = CFA_REG_OFFSET;
      fs->regs.cfa_reg = __builtin_dwarf_sp_column();
      fs->regs.cfa_offset = new_cfa_ - (long) ctx_cfa_;

      /* The saved value of %ecx is at CFA - 4 */
      fs->regs.reg[1].how = REG_SAVED_OFFSET;
      fs->regs.reg[1].loc.offset = -4;

      /* and what is stored at the CFA is the return address.  */
      fs->retaddr_column = 8;
      fs->regs.reg[8].how = REG_SAVED_OFFSET;
      fs->regs.reg[8].loc.offset = 0;
      fs->signal_frame = 1;

      return _URC_NO_REASON;
    }
  else
    return _URC_END_OF_STACK;
}
 
#endif /* !__MINGW64__ */