/* ==== drivers/include/uapi/asm/e820.h (new file, 70 lines) ==== */
#ifndef _UAPI_ASM_X86_E820_H
#define _UAPI_ASM_X86_E820_H

/* Offset within the zeropage where the BIOS E820 memory map is stored. */
#define E820MAP 0x2d0 /* our map */
#define E820MAX 128 /* number of entries in E820MAP */

/*
 * Legacy E820 BIOS limits us to 128 (E820MAX) nodes due to the
 * constrained space in the zeropage. If we have more nodes than
 * that, and if we've booted off EFI firmware, then the EFI tables
 * passed us from the EFI firmware can list more nodes. Size our
 * internal memory map tables to have room for these additional
 * nodes, based on up to three entries per node for which the
 * kernel was built: MAX_NUMNODES == (1 << CONFIG_NODES_SHIFT),
 * plus E820MAX, allowing space for the possible duplicate E820
 * entries that might need room in the same arrays, prior to the
 * call to sanitize_e820_map() to remove duplicates. The allowance
 * of three memory map entries per node is "enough" entries for
 * the initial hardware platform motivating this mechanism to make
 * use of additional EFI map entries. Future platforms may want
 * to allow more than three entries per node or otherwise refine
 * this size.
 */
#ifndef __KERNEL__
#define E820_X_MAX E820MAX
#endif
/*
 * NOTE(review): when __KERNEL__ is defined, E820_X_MAX (used by
 * struct e820map below) is not defined in this header -- it must be
 * supplied by the kernel-side e820 header before this point; confirm.
 */

/* Offset within the zeropage of the entry count for E820MAP. */
#define E820NR 0x1e8 /* # entries in E820MAP */

/* Memory range types as reported by the BIOS e820 call. */
#define E820_RAM 1
#define E820_RESERVED 2
#define E820_ACPI 3
#define E820_NVS 4
#define E820_UNUSABLE 5

/*
 * reserved RAM used by kernel itself
 * if CONFIG_INTEL_TXT is enabled, memory of this type will be
 * included in the S3 integrity calculation and so should not include
 * any memory that BIOS might alter over the S3 transition
 */
#define E820_RESERVED_KERN 128

#ifndef __ASSEMBLY__
#include <linux/types.h>

/* One e820 map entry; packed so it matches the 20-byte BIOS layout. */
struct e820entry {
	__u64 addr; /* start of memory segment */
	__u64 size; /* size of memory segment */
	__u32 type; /* type of memory segment */
} __attribute__((packed));

/* In-memory copy of the firmware memory map. */
struct e820map {
	__u32 nr_map; /* number of valid entries in map[] */
	struct e820entry map[E820_X_MAX];
};

/* Legacy ISA hole (VGA/option ROMs) physical address range. */
#define ISA_START_ADDRESS 0xa0000
#define ISA_END_ADDRESS 0x100000

#define BIOS_BEGIN 0x000a0000
#define BIOS_END 0x00100000

/* Top-of-4G BIOS ROM window. */
#define BIOS_ROM_BASE 0xffe00000
#define BIOS_ROM_END 0xffffffff

#endif /* __ASSEMBLY__ */
#endif /* _UAPI_ASM_X86_E820_H */
/* ==== drivers/include/uapi/asm/errno.h (new file, 1 line) ==== */
#include <asm-generic/errno.h> |
/* ==== drivers/include/uapi/asm/ioctl.h (new file, 1 line) ==== */
#include <asm-generic/ioctl.h> |
/* ==== drivers/include/uapi/asm/msr-index.h (new file, 624 lines) ==== */
#ifndef _ASM_X86_MSR_INDEX_H
#define _ASM_X86_MSR_INDEX_H

/* CPU model specific register (MSR) numbers */

/* x86-64 specific MSRs (0xc0000080 range: AMD-defined, long mode) */
#define MSR_EFER		0xc0000080 /* extended feature register */
#define MSR_STAR		0xc0000081 /* legacy mode SYSCALL target */
#define MSR_LSTAR		0xc0000082 /* long mode SYSCALL target */
#define MSR_CSTAR		0xc0000083 /* compat mode SYSCALL target */
#define MSR_SYSCALL_MASK	0xc0000084 /* EFLAGS mask for syscall */
#define MSR_FS_BASE		0xc0000100 /* 64bit FS base */
#define MSR_GS_BASE		0xc0000101 /* 64bit GS base */
#define MSR_KERNEL_GS_BASE	0xc0000102 /* SwapGS GS shadow */
#define MSR_TSC_AUX		0xc0000103 /* Auxiliary TSC */
/* EFER bits: */ |
#define _EFER_SCE 0 /* SYSCALL/SYSRET */ |
#define _EFER_LME 8 /* Long mode enable */ |
#define _EFER_LMA 10 /* Long mode active (read-only) */ |
#define _EFER_NX 11 /* No execute enable */ |
#define _EFER_SVME 12 /* Enable virtualization */ |
#define _EFER_LMSLE 13 /* Long Mode Segment Limit Enable */ |
#define _EFER_FFXSR 14 /* Enable Fast FXSAVE/FXRSTOR */ |
#define EFER_SCE (1<<_EFER_SCE) |
#define EFER_LME (1<<_EFER_LME) |
#define EFER_LMA (1<<_EFER_LMA) |
#define EFER_NX (1<<_EFER_NX) |
#define EFER_SVME (1<<_EFER_SVME) |
#define EFER_LMSLE (1<<_EFER_LMSLE) |
#define EFER_FFXSR (1<<_EFER_FFXSR) |
/* Intel MSRs. Some also available on other CPUs */ |
#define MSR_IA32_PERFCTR0 0x000000c1 |
#define MSR_IA32_PERFCTR1 0x000000c2 |
#define MSR_FSB_FREQ 0x000000cd |
#define MSR_NHM_PLATFORM_INFO 0x000000ce |
#define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2 |
#define NHM_C3_AUTO_DEMOTE (1UL << 25) |
#define NHM_C1_AUTO_DEMOTE (1UL << 26) |
#define ATM_LNC_C6_AUTO_DEMOTE (1UL << 25) |
#define SNB_C1_AUTO_UNDEMOTE (1UL << 27) |
#define SNB_C3_AUTO_UNDEMOTE (1UL << 28) |
#define MSR_PLATFORM_INFO 0x000000ce |
#define MSR_MTRRcap 0x000000fe |
#define MSR_IA32_BBL_CR_CTL 0x00000119 |
#define MSR_IA32_BBL_CR_CTL3 0x0000011e |
#define MSR_IA32_SYSENTER_CS 0x00000174 |
#define MSR_IA32_SYSENTER_ESP 0x00000175 |
#define MSR_IA32_SYSENTER_EIP 0x00000176 |
#define MSR_IA32_MCG_CAP 0x00000179 |
#define MSR_IA32_MCG_STATUS 0x0000017a |
#define MSR_IA32_MCG_CTL 0x0000017b |
#define MSR_OFFCORE_RSP_0 0x000001a6 |
#define MSR_OFFCORE_RSP_1 0x000001a7 |
#define MSR_NHM_TURBO_RATIO_LIMIT 0x000001ad |
#define MSR_IVT_TURBO_RATIO_LIMIT 0x000001ae |
#define MSR_LBR_SELECT 0x000001c8 |
#define MSR_LBR_TOS 0x000001c9 |
#define MSR_LBR_NHM_FROM 0x00000680 |
#define MSR_LBR_NHM_TO 0x000006c0 |
#define MSR_LBR_CORE_FROM 0x00000040 |
#define MSR_LBR_CORE_TO 0x00000060 |
#define MSR_IA32_PEBS_ENABLE 0x000003f1 |
#define MSR_IA32_DS_AREA 0x00000600 |
#define MSR_IA32_PERF_CAPABILITIES 0x00000345 |
#define MSR_PEBS_LD_LAT_THRESHOLD 0x000003f6 |
#define MSR_MTRRfix64K_00000 0x00000250 |
#define MSR_MTRRfix16K_80000 0x00000258 |
#define MSR_MTRRfix16K_A0000 0x00000259 |
#define MSR_MTRRfix4K_C0000 0x00000268 |
#define MSR_MTRRfix4K_C8000 0x00000269 |
#define MSR_MTRRfix4K_D0000 0x0000026a |
#define MSR_MTRRfix4K_D8000 0x0000026b |
#define MSR_MTRRfix4K_E0000 0x0000026c |
#define MSR_MTRRfix4K_E8000 0x0000026d |
#define MSR_MTRRfix4K_F0000 0x0000026e |
#define MSR_MTRRfix4K_F8000 0x0000026f |
#define MSR_MTRRdefType 0x000002ff |
#define MSR_IA32_CR_PAT 0x00000277 |
#define MSR_IA32_DEBUGCTLMSR 0x000001d9 |
#define MSR_IA32_LASTBRANCHFROMIP 0x000001db |
#define MSR_IA32_LASTBRANCHTOIP 0x000001dc |
#define MSR_IA32_LASTINTFROMIP 0x000001dd |
#define MSR_IA32_LASTINTTOIP 0x000001de |
/* DEBUGCTLMSR bits (others vary by model): */ |
#define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */ |
#define DEBUGCTLMSR_BTF (1UL << 1) /* single-step on branches */ |
#define DEBUGCTLMSR_TR (1UL << 6) |
#define DEBUGCTLMSR_BTS (1UL << 7) |
#define DEBUGCTLMSR_BTINT (1UL << 8) |
#define DEBUGCTLMSR_BTS_OFF_OS (1UL << 9) |
#define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10) |
#define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11) |
#define MSR_IA32_POWER_CTL 0x000001fc |
#define MSR_IA32_MC0_CTL 0x00000400 |
#define MSR_IA32_MC0_STATUS 0x00000401 |
#define MSR_IA32_MC0_ADDR 0x00000402 |
#define MSR_IA32_MC0_MISC 0x00000403 |
/* C-state Residency Counters */ |
#define MSR_PKG_C3_RESIDENCY 0x000003f8 |
#define MSR_PKG_C6_RESIDENCY 0x000003f9 |
#define MSR_PKG_C7_RESIDENCY 0x000003fa |
#define MSR_CORE_C3_RESIDENCY 0x000003fc |
#define MSR_CORE_C6_RESIDENCY 0x000003fd |
#define MSR_CORE_C7_RESIDENCY 0x000003fe |
#define MSR_PKG_C2_RESIDENCY 0x0000060d |
#define MSR_PKG_C8_RESIDENCY 0x00000630 |
#define MSR_PKG_C9_RESIDENCY 0x00000631 |
#define MSR_PKG_C10_RESIDENCY 0x00000632 |
/* Run Time Average Power Limiting (RAPL) Interface */ |
#define MSR_RAPL_POWER_UNIT 0x00000606 |
#define MSR_PKG_POWER_LIMIT 0x00000610 |
#define MSR_PKG_ENERGY_STATUS 0x00000611 |
#define MSR_PKG_PERF_STATUS 0x00000613 |
#define MSR_PKG_POWER_INFO 0x00000614 |
#define MSR_DRAM_POWER_LIMIT 0x00000618 |
#define MSR_DRAM_ENERGY_STATUS 0x00000619 |
#define MSR_DRAM_PERF_STATUS 0x0000061b |
#define MSR_DRAM_POWER_INFO 0x0000061c |
#define MSR_PP0_POWER_LIMIT 0x00000638 |
#define MSR_PP0_ENERGY_STATUS 0x00000639 |
#define MSR_PP0_POLICY 0x0000063a |
#define MSR_PP0_PERF_STATUS 0x0000063b |
#define MSR_PP1_POWER_LIMIT 0x00000640 |
#define MSR_PP1_ENERGY_STATUS 0x00000641 |
#define MSR_PP1_POLICY 0x00000642 |
#define MSR_CORE_C1_RES 0x00000660 |
#define MSR_CC6_DEMOTION_POLICY_CONFIG 0x00000668 |
#define MSR_MC6_DEMOTION_POLICY_CONFIG 0x00000669 |
/* Hardware P state interface */ |
#define MSR_PPERF 0x0000064e |
#define MSR_PERF_LIMIT_REASONS 0x0000064f |
#define MSR_PM_ENABLE 0x00000770 |
#define MSR_HWP_CAPABILITIES 0x00000771 |
#define MSR_HWP_REQUEST_PKG 0x00000772 |
#define MSR_HWP_INTERRUPT 0x00000773 |
#define MSR_HWP_REQUEST 0x00000774 |
#define MSR_HWP_STATUS 0x00000777 |
/* CPUID.6.EAX */ |
#define HWP_BASE_BIT (1<<7) |
#define HWP_NOTIFICATIONS_BIT (1<<8) |
#define HWP_ACTIVITY_WINDOW_BIT (1<<9) |
#define HWP_ENERGY_PERF_PREFERENCE_BIT (1<<10) |
#define HWP_PACKAGE_LEVEL_REQUEST_BIT (1<<11) |
/* IA32_HWP_CAPABILITIES */ |
#define HWP_HIGHEST_PERF(x) (x & 0xff) |
#define HWP_GUARANTEED_PERF(x) ((x & (0xff << 8)) >>8) |
#define HWP_MOSTEFFICIENT_PERF(x) ((x & (0xff << 16)) >>16) |
#define HWP_LOWEST_PERF(x) ((x & (0xff << 24)) >>24) |
/*
 * IA32_HWP_REQUEST field encoders: mask each value and shift it into
 * its MSR bit position.  The fields at bit 32 and above are computed
 * in unsigned long long, because shifting a 32-bit int by >= 32 bits
 * is undefined behavior in C.  Arguments are parenthesized so that
 * expression arguments (e.g. a | b) are masked as a whole.
 */
#define HWP_MIN_PERF(x)			((x) & 0xff)
#define HWP_MAX_PERF(x)			(((x) & 0xff) << 8)
#define HWP_DESIRED_PERF(x)		(((x) & 0xff) << 16)
#define HWP_ENERGY_PERF_PREFERENCE(x)	(((unsigned long long)(x) & 0xff) << 24)
/*
 * NOTE(review): 0xff3 mask kept from the original; the activity-window
 * field is the 10 bits 41:32 -- confirm the mask against the Intel SDM.
 */
#define HWP_ACTIVITY_WINDOW(x)		(((unsigned long long)(x) & 0xff3) << 32)
#define HWP_PACKAGE_CONTROL(x)		(((unsigned long long)(x) & 0x1) << 42)
/* IA32_HWP_STATUS */ |
#define HWP_GUARANTEED_CHANGE(x) (x & 0x1) |
#define HWP_EXCURSION_TO_MINIMUM(x) (x & 0x4) |
/* IA32_HWP_INTERRUPT */ |
#define HWP_CHANGE_TO_GUARANTEED_INT(x) (x & 0x1) |
#define HWP_EXCURSION_TO_MINIMUM_INT(x) (x & 0x2) |
#define MSR_AMD64_MC0_MASK 0xc0010044 |
#define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x)) |
#define MSR_IA32_MCx_STATUS(x) (MSR_IA32_MC0_STATUS + 4*(x)) |
#define MSR_IA32_MCx_ADDR(x) (MSR_IA32_MC0_ADDR + 4*(x)) |
#define MSR_IA32_MCx_MISC(x) (MSR_IA32_MC0_MISC + 4*(x)) |
#define MSR_AMD64_MCx_MASK(x) (MSR_AMD64_MC0_MASK + (x)) |
/* These are consecutive and not in the normal 4er MCE bank block */ |
#define MSR_IA32_MC0_CTL2 0x00000280 |
#define MSR_IA32_MCx_CTL2(x) (MSR_IA32_MC0_CTL2 + (x)) |
#define MSR_P6_PERFCTR0 0x000000c1 |
#define MSR_P6_PERFCTR1 0x000000c2 |
#define MSR_P6_EVNTSEL0 0x00000186 |
#define MSR_P6_EVNTSEL1 0x00000187 |
#define MSR_KNC_PERFCTR0 0x00000020 |
#define MSR_KNC_PERFCTR1 0x00000021 |
#define MSR_KNC_EVNTSEL0 0x00000028 |
#define MSR_KNC_EVNTSEL1 0x00000029 |
/* Alternative perfctr range with full access. */ |
#define MSR_IA32_PMC0 0x000004c1 |
/* AMD64 MSRs. Not complete. See the architecture manual for a more |
complete list. */ |
#define MSR_AMD64_PATCH_LEVEL 0x0000008b |
#define MSR_AMD64_TSC_RATIO 0xc0000104 |
#define MSR_AMD64_NB_CFG 0xc001001f |
#define MSR_AMD64_PATCH_LOADER 0xc0010020 |
#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140 |
#define MSR_AMD64_OSVW_STATUS 0xc0010141 |
#define MSR_AMD64_LS_CFG 0xc0011020 |
#define MSR_AMD64_DC_CFG 0xc0011022 |
#define MSR_AMD64_BU_CFG2 0xc001102a |
#define MSR_AMD64_IBSFETCHCTL 0xc0011030 |
#define MSR_AMD64_IBSFETCHLINAD 0xc0011031 |
#define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032 |
#define MSR_AMD64_IBSFETCH_REG_COUNT 3 |
#define MSR_AMD64_IBSFETCH_REG_MASK ((1UL<<MSR_AMD64_IBSFETCH_REG_COUNT)-1) |
#define MSR_AMD64_IBSOPCTL 0xc0011033 |
#define MSR_AMD64_IBSOPRIP 0xc0011034 |
#define MSR_AMD64_IBSOPDATA 0xc0011035 |
#define MSR_AMD64_IBSOPDATA2 0xc0011036 |
#define MSR_AMD64_IBSOPDATA3 0xc0011037 |
#define MSR_AMD64_IBSDCLINAD 0xc0011038 |
#define MSR_AMD64_IBSDCPHYSAD 0xc0011039 |
#define MSR_AMD64_IBSOP_REG_COUNT 7 |
#define MSR_AMD64_IBSOP_REG_MASK ((1UL<<MSR_AMD64_IBSOP_REG_COUNT)-1) |
#define MSR_AMD64_IBSCTL 0xc001103a |
#define MSR_AMD64_IBSBRTARGET 0xc001103b |
#define MSR_AMD64_IBSOPDATA4 0xc001103d |
#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */ |
/* Fam 16h MSRs */ |
#define MSR_F16H_L2I_PERF_CTL 0xc0010230 |
#define MSR_F16H_L2I_PERF_CTR 0xc0010231 |
/* Fam 15h MSRs */ |
#define MSR_F15H_PERF_CTL 0xc0010200 |
#define MSR_F15H_PERF_CTR 0xc0010201 |
#define MSR_F15H_NB_PERF_CTL 0xc0010240 |
#define MSR_F15H_NB_PERF_CTR 0xc0010241 |
/* Fam 10h MSRs */ |
#define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058 |
#define FAM10H_MMIO_CONF_ENABLE (1<<0) |
#define FAM10H_MMIO_CONF_BUSRANGE_MASK 0xf |
#define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2 |
#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL |
#define FAM10H_MMIO_CONF_BASE_SHIFT 20 |
#define MSR_FAM10H_NODE_ID 0xc001100c |
/* K8 MSRs */ |
#define MSR_K8_TOP_MEM1 0xc001001a |
#define MSR_K8_TOP_MEM2 0xc001001d |
#define MSR_K8_SYSCFG 0xc0010010 |
#define MSR_K8_INT_PENDING_MSG 0xc0010055 |
/* C1E active bits in int pending message */ |
#define K8_INTP_C1E_ACTIVE_MASK 0x18000000 |
#define MSR_K8_TSEG_ADDR 0xc0010112 |
#define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */ |
#define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */ |
#define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */ |
/* K7 MSRs */ |
#define MSR_K7_EVNTSEL0 0xc0010000 |
#define MSR_K7_PERFCTR0 0xc0010004 |
#define MSR_K7_EVNTSEL1 0xc0010001 |
#define MSR_K7_PERFCTR1 0xc0010005 |
#define MSR_K7_EVNTSEL2 0xc0010002 |
#define MSR_K7_PERFCTR2 0xc0010006 |
#define MSR_K7_EVNTSEL3 0xc0010003 |
#define MSR_K7_PERFCTR3 0xc0010007 |
#define MSR_K7_CLK_CTL 0xc001001b |
#define MSR_K7_HWCR 0xc0010015 |
#define MSR_K7_FID_VID_CTL 0xc0010041 |
#define MSR_K7_FID_VID_STATUS 0xc0010042 |
/* K6 MSRs */ |
#define MSR_K6_WHCR 0xc0000082 |
#define MSR_K6_UWCCR 0xc0000085 |
#define MSR_K6_EPMR 0xc0000086 |
#define MSR_K6_PSOR 0xc0000087 |
#define MSR_K6_PFIR 0xc0000088 |
/* Centaur-Hauls/IDT defined MSRs. */ |
#define MSR_IDT_FCR1 0x00000107 |
#define MSR_IDT_FCR2 0x00000108 |
#define MSR_IDT_FCR3 0x00000109 |
#define MSR_IDT_FCR4 0x0000010a |
#define MSR_IDT_MCR0 0x00000110 |
#define MSR_IDT_MCR1 0x00000111 |
#define MSR_IDT_MCR2 0x00000112 |
#define MSR_IDT_MCR3 0x00000113 |
#define MSR_IDT_MCR4 0x00000114 |
#define MSR_IDT_MCR5 0x00000115 |
#define MSR_IDT_MCR6 0x00000116 |
#define MSR_IDT_MCR7 0x00000117 |
#define MSR_IDT_MCR_CTRL 0x00000120 |
/* VIA Cyrix defined MSRs*/ |
#define MSR_VIA_FCR 0x00001107 |
#define MSR_VIA_LONGHAUL 0x0000110a |
#define MSR_VIA_RNG 0x0000110b |
#define MSR_VIA_BCR2 0x00001147 |
/* Transmeta defined MSRs */ |
#define MSR_TMTA_LONGRUN_CTRL 0x80868010 |
#define MSR_TMTA_LONGRUN_FLAGS 0x80868011 |
#define MSR_TMTA_LRTI_READOUT 0x80868018 |
#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a |
/* Intel defined MSRs. */ |
#define MSR_IA32_P5_MC_ADDR 0x00000000 |
#define MSR_IA32_P5_MC_TYPE 0x00000001 |
#define MSR_IA32_TSC 0x00000010 |
#define MSR_IA32_PLATFORM_ID 0x00000017 |
#define MSR_IA32_EBL_CR_POWERON 0x0000002a |
#define MSR_EBC_FREQUENCY_ID 0x0000002c |
#define MSR_SMI_COUNT 0x00000034 |
#define MSR_IA32_FEATURE_CONTROL 0x0000003a |
#define MSR_IA32_TSC_ADJUST 0x0000003b |
#define MSR_IA32_BNDCFGS 0x00000d90 |
#define MSR_IA32_XSS 0x00000da0 |
#define FEATURE_CONTROL_LOCKED (1<<0) |
#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1<<1) |
#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2) |
#define MSR_IA32_APICBASE 0x0000001b |
#define MSR_IA32_APICBASE_BSP (1<<8) |
#define MSR_IA32_APICBASE_ENABLE (1<<11) |
#define MSR_IA32_APICBASE_BASE (0xfffff<<12) |
#define MSR_IA32_TSCDEADLINE 0x000006e0 |
#define MSR_IA32_UCODE_WRITE 0x00000079 |
#define MSR_IA32_UCODE_REV 0x0000008b |
#define MSR_IA32_PERF_STATUS 0x00000198 |
#define MSR_IA32_PERF_CTL 0x00000199 |
#define MSR_AMD_PSTATE_DEF_BASE 0xc0010064 |
#define MSR_AMD_PERF_STATUS 0xc0010063 |
#define MSR_AMD_PERF_CTL 0xc0010062 |
#define MSR_IA32_MPERF 0x000000e7 |
#define MSR_IA32_APERF 0x000000e8 |
#define MSR_IA32_THERM_CONTROL 0x0000019a |
#define MSR_IA32_THERM_INTERRUPT 0x0000019b |
#define THERM_INT_HIGH_ENABLE (1 << 0) |
#define THERM_INT_LOW_ENABLE (1 << 1) |
#define THERM_INT_PLN_ENABLE (1 << 24) |
#define MSR_IA32_THERM_STATUS 0x0000019c |
#define THERM_STATUS_PROCHOT (1 << 0) |
#define THERM_STATUS_POWER_LIMIT (1 << 10) |
#define MSR_THERM2_CTL 0x0000019d |
#define MSR_THERM2_CTL_TM_SELECT (1ULL << 16) |
#define MSR_IA32_MISC_ENABLE 0x000001a0 |
#define MSR_IA32_TEMPERATURE_TARGET 0x000001a2 |
#define MSR_MISC_PWR_MGMT 0x000001aa |
#define MSR_IA32_ENERGY_PERF_BIAS 0x000001b0 |
#define ENERGY_PERF_BIAS_PERFORMANCE 0 |
#define ENERGY_PERF_BIAS_NORMAL 6 |
#define ENERGY_PERF_BIAS_POWERSAVE 15 |
#define MSR_IA32_PACKAGE_THERM_STATUS 0x000001b1 |
#define PACKAGE_THERM_STATUS_PROCHOT (1 << 0) |
#define PACKAGE_THERM_STATUS_POWER_LIMIT (1 << 10) |
#define MSR_IA32_PACKAGE_THERM_INTERRUPT 0x000001b2 |
#define PACKAGE_THERM_INT_HIGH_ENABLE (1 << 0) |
#define PACKAGE_THERM_INT_LOW_ENABLE (1 << 1) |
#define PACKAGE_THERM_INT_PLN_ENABLE (1 << 24) |
/* Thermal Thresholds Support */ |
#define THERM_INT_THRESHOLD0_ENABLE (1 << 15) |
#define THERM_SHIFT_THRESHOLD0 8 |
#define THERM_MASK_THRESHOLD0 (0x7f << THERM_SHIFT_THRESHOLD0) |
#define THERM_INT_THRESHOLD1_ENABLE (1 << 23) |
#define THERM_SHIFT_THRESHOLD1 16 |
#define THERM_MASK_THRESHOLD1 (0x7f << THERM_SHIFT_THRESHOLD1) |
#define THERM_STATUS_THRESHOLD0 (1 << 6) |
#define THERM_LOG_THRESHOLD0 (1 << 7) |
#define THERM_STATUS_THRESHOLD1 (1 << 8) |
#define THERM_LOG_THRESHOLD1 (1 << 9) |
/* MISC_ENABLE bits: architectural */ |
#define MSR_IA32_MISC_ENABLE_FAST_STRING_BIT 0 |
#define MSR_IA32_MISC_ENABLE_FAST_STRING (1ULL << MSR_IA32_MISC_ENABLE_FAST_STRING_BIT) |
#define MSR_IA32_MISC_ENABLE_TCC_BIT 1 |
#define MSR_IA32_MISC_ENABLE_TCC (1ULL << MSR_IA32_MISC_ENABLE_TCC_BIT) |
#define MSR_IA32_MISC_ENABLE_EMON_BIT 7 |
#define MSR_IA32_MISC_ENABLE_EMON (1ULL << MSR_IA32_MISC_ENABLE_EMON_BIT) |
#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL_BIT 11 |
#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL (1ULL << MSR_IA32_MISC_ENABLE_BTS_UNAVAIL_BIT) |
#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL_BIT 12 |
#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL (1ULL << MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL_BIT) |
#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP_BIT 16 |
#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP (1ULL << MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP_BIT) |
#define MSR_IA32_MISC_ENABLE_MWAIT_BIT 18 |
#define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << MSR_IA32_MISC_ENABLE_MWAIT_BIT) |
#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT 22 |
#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID (1ULL << MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) |
#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT 23 |
#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT) |
#define MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT 34 |
#define MSR_IA32_MISC_ENABLE_XD_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT) |
/* MISC_ENABLE bits: model-specific, meaning may vary from core to core */ |
#define MSR_IA32_MISC_ENABLE_X87_COMPAT_BIT 2 |
#define MSR_IA32_MISC_ENABLE_X87_COMPAT (1ULL << MSR_IA32_MISC_ENABLE_X87_COMPAT_BIT) |
#define MSR_IA32_MISC_ENABLE_TM1_BIT 3 |
#define MSR_IA32_MISC_ENABLE_TM1 (1ULL << MSR_IA32_MISC_ENABLE_TM1_BIT) |
#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE_BIT 4 |
#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE_BIT) |
#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE_BIT 6 |
#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE_BIT) |
#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK_BIT 8 |
#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK (1ULL << MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK_BIT) |
#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT 9 |
#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) |
#define MSR_IA32_MISC_ENABLE_FERR_BIT 10 |
#define MSR_IA32_MISC_ENABLE_FERR (1ULL << MSR_IA32_MISC_ENABLE_FERR_BIT) |
#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX_BIT 10 |
#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX (1ULL << MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX_BIT) |
#define MSR_IA32_MISC_ENABLE_TM2_BIT 13 |
#define MSR_IA32_MISC_ENABLE_TM2 (1ULL << MSR_IA32_MISC_ENABLE_TM2_BIT) |
#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE_BIT 19 |
#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE_BIT) |
#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK_BIT 20 |
#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK (1ULL << MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK_BIT) |
#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT_BIT 24 |
#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT (1ULL << MSR_IA32_MISC_ENABLE_L1D_CONTEXT_BIT) |
#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE_BIT 37 |
#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE_BIT) |
#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT 38 |
#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT) |
#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT 39 |
#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT) |
#define MSR_IA32_TSC_DEADLINE 0x000006E0 |
/* P4/Xeon+ specific */ |
#define MSR_IA32_MCG_EAX 0x00000180 |
#define MSR_IA32_MCG_EBX 0x00000181 |
#define MSR_IA32_MCG_ECX 0x00000182 |
#define MSR_IA32_MCG_EDX 0x00000183 |
#define MSR_IA32_MCG_ESI 0x00000184 |
#define MSR_IA32_MCG_EDI 0x00000185 |
#define MSR_IA32_MCG_EBP 0x00000186 |
#define MSR_IA32_MCG_ESP 0x00000187 |
#define MSR_IA32_MCG_EFLAGS 0x00000188 |
#define MSR_IA32_MCG_EIP 0x00000189 |
#define MSR_IA32_MCG_RESERVED 0x0000018a |
/* Pentium IV performance counter MSRs */ |
#define MSR_P4_BPU_PERFCTR0 0x00000300 |
#define MSR_P4_BPU_PERFCTR1 0x00000301 |
#define MSR_P4_BPU_PERFCTR2 0x00000302 |
#define MSR_P4_BPU_PERFCTR3 0x00000303 |
#define MSR_P4_MS_PERFCTR0 0x00000304 |
#define MSR_P4_MS_PERFCTR1 0x00000305 |
#define MSR_P4_MS_PERFCTR2 0x00000306 |
#define MSR_P4_MS_PERFCTR3 0x00000307 |
#define MSR_P4_FLAME_PERFCTR0 0x00000308 |
#define MSR_P4_FLAME_PERFCTR1 0x00000309 |
#define MSR_P4_FLAME_PERFCTR2 0x0000030a |
#define MSR_P4_FLAME_PERFCTR3 0x0000030b |
#define MSR_P4_IQ_PERFCTR0 0x0000030c |
#define MSR_P4_IQ_PERFCTR1 0x0000030d |
#define MSR_P4_IQ_PERFCTR2 0x0000030e |
#define MSR_P4_IQ_PERFCTR3 0x0000030f |
#define MSR_P4_IQ_PERFCTR4 0x00000310 |
#define MSR_P4_IQ_PERFCTR5 0x00000311 |
#define MSR_P4_BPU_CCCR0 0x00000360 |
#define MSR_P4_BPU_CCCR1 0x00000361 |
#define MSR_P4_BPU_CCCR2 0x00000362 |
#define MSR_P4_BPU_CCCR3 0x00000363 |
#define MSR_P4_MS_CCCR0 0x00000364 |
#define MSR_P4_MS_CCCR1 0x00000365 |
#define MSR_P4_MS_CCCR2 0x00000366 |
#define MSR_P4_MS_CCCR3 0x00000367 |
#define MSR_P4_FLAME_CCCR0 0x00000368 |
#define MSR_P4_FLAME_CCCR1 0x00000369 |
#define MSR_P4_FLAME_CCCR2 0x0000036a |
#define MSR_P4_FLAME_CCCR3 0x0000036b |
#define MSR_P4_IQ_CCCR0 0x0000036c |
#define MSR_P4_IQ_CCCR1 0x0000036d |
#define MSR_P4_IQ_CCCR2 0x0000036e |
#define MSR_P4_IQ_CCCR3 0x0000036f |
#define MSR_P4_IQ_CCCR4 0x00000370 |
#define MSR_P4_IQ_CCCR5 0x00000371 |
#define MSR_P4_ALF_ESCR0 0x000003ca |
#define MSR_P4_ALF_ESCR1 0x000003cb |
#define MSR_P4_BPU_ESCR0 0x000003b2 |
#define MSR_P4_BPU_ESCR1 0x000003b3 |
#define MSR_P4_BSU_ESCR0 0x000003a0 |
#define MSR_P4_BSU_ESCR1 0x000003a1 |
#define MSR_P4_CRU_ESCR0 0x000003b8 |
#define MSR_P4_CRU_ESCR1 0x000003b9 |
#define MSR_P4_CRU_ESCR2 0x000003cc |
#define MSR_P4_CRU_ESCR3 0x000003cd |
#define MSR_P4_CRU_ESCR4 0x000003e0 |
#define MSR_P4_CRU_ESCR5 0x000003e1 |
#define MSR_P4_DAC_ESCR0 0x000003a8 |
#define MSR_P4_DAC_ESCR1 0x000003a9 |
#define MSR_P4_FIRM_ESCR0 0x000003a4 |
#define MSR_P4_FIRM_ESCR1 0x000003a5 |
#define MSR_P4_FLAME_ESCR0 0x000003a6 |
#define MSR_P4_FLAME_ESCR1 0x000003a7 |
#define MSR_P4_FSB_ESCR0 0x000003a2 |
#define MSR_P4_FSB_ESCR1 0x000003a3 |
#define MSR_P4_IQ_ESCR0 0x000003ba |
#define MSR_P4_IQ_ESCR1 0x000003bb |
#define MSR_P4_IS_ESCR0 0x000003b4 |
#define MSR_P4_IS_ESCR1 0x000003b5 |
#define MSR_P4_ITLB_ESCR0 0x000003b6 |
#define MSR_P4_ITLB_ESCR1 0x000003b7 |
#define MSR_P4_IX_ESCR0 0x000003c8 |
#define MSR_P4_IX_ESCR1 0x000003c9 |
#define MSR_P4_MOB_ESCR0 0x000003aa |
#define MSR_P4_MOB_ESCR1 0x000003ab |
#define MSR_P4_MS_ESCR0 0x000003c0 |
#define MSR_P4_MS_ESCR1 0x000003c1 |
#define MSR_P4_PMH_ESCR0 0x000003ac |
#define MSR_P4_PMH_ESCR1 0x000003ad |
#define MSR_P4_RAT_ESCR0 0x000003bc |
#define MSR_P4_RAT_ESCR1 0x000003bd |
#define MSR_P4_SAAT_ESCR0 0x000003ae |
#define MSR_P4_SAAT_ESCR1 0x000003af |
#define MSR_P4_SSU_ESCR0 0x000003be |
#define MSR_P4_SSU_ESCR1 0x000003bf /* guess: not in manual */ |
#define MSR_P4_TBPU_ESCR0 0x000003c2 |
#define MSR_P4_TBPU_ESCR1 0x000003c3 |
#define MSR_P4_TC_ESCR0 0x000003c4 |
#define MSR_P4_TC_ESCR1 0x000003c5 |
#define MSR_P4_U2L_ESCR0 0x000003b0 |
#define MSR_P4_U2L_ESCR1 0x000003b1 |
#define MSR_P4_PEBS_MATRIX_VERT 0x000003f2 |
/* Intel Core-based CPU performance counters */ |
#define MSR_CORE_PERF_FIXED_CTR0 0x00000309 |
#define MSR_CORE_PERF_FIXED_CTR1 0x0000030a |
#define MSR_CORE_PERF_FIXED_CTR2 0x0000030b |
#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x0000038d |
#define MSR_CORE_PERF_GLOBAL_STATUS 0x0000038e |
#define MSR_CORE_PERF_GLOBAL_CTRL 0x0000038f |
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x00000390 |
/* Geode defined MSRs */ |
#define MSR_GEODE_BUSCONT_CONF0 0x00001900 |
/* Intel VT MSRs */ |
#define MSR_IA32_VMX_BASIC 0x00000480 |
#define MSR_IA32_VMX_PINBASED_CTLS 0x00000481 |
#define MSR_IA32_VMX_PROCBASED_CTLS 0x00000482 |
#define MSR_IA32_VMX_EXIT_CTLS 0x00000483 |
#define MSR_IA32_VMX_ENTRY_CTLS 0x00000484 |
#define MSR_IA32_VMX_MISC 0x00000485 |
#define MSR_IA32_VMX_CR0_FIXED0 0x00000486 |
#define MSR_IA32_VMX_CR0_FIXED1 0x00000487 |
#define MSR_IA32_VMX_CR4_FIXED0 0x00000488 |
#define MSR_IA32_VMX_CR4_FIXED1 0x00000489 |
#define MSR_IA32_VMX_VMCS_ENUM 0x0000048a |
#define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b |
#define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c |
#define MSR_IA32_VMX_TRUE_PINBASED_CTLS 0x0000048d |
#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x0000048e |
#define MSR_IA32_VMX_TRUE_EXIT_CTLS 0x0000048f |
#define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x00000490 |
#define MSR_IA32_VMX_VMFUNC 0x00000491 |
/* VMX_BASIC bits and bitmasks */ |
#define VMX_BASIC_VMCS_SIZE_SHIFT 32 |
#define VMX_BASIC_TRUE_CTLS (1ULL << 55) |
#define VMX_BASIC_64 0x0001000000000000LLU |
#define VMX_BASIC_MEM_TYPE_SHIFT 50 |
#define VMX_BASIC_MEM_TYPE_MASK 0x003c000000000000LLU |
#define VMX_BASIC_MEM_TYPE_WB 6LLU |
#define VMX_BASIC_INOUT 0x0040000000000000LLU |
/* MSR_IA32_VMX_MISC bits */ |
#define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29) |
#define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE 0x1F |
/* AMD-V MSRs */ |
#define MSR_VM_CR 0xc0010114 |
#define MSR_VM_IGNNE 0xc0010115 |
#define MSR_VM_HSAVE_PA 0xc0010117 |
#endif /* _ASM_X86_MSR_INDEX_H */ |
/* ==== drivers/include/uapi/asm/msr.h (new file, 15 lines) ==== */
#ifndef _UAPI_ASM_X86_MSR_H
#define _UAPI_ASM_X86_MSR_H

#include <asm/msr-index.h>

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/ioctl.h>

/*
 * ioctl numbers for reading/writing an MSR with an explicit register
 * set; the argument is an array of eight 32-bit register values.
 * NOTE(review): exact register ordering in the __u32[8] array is
 * defined by the msr driver, not visible here -- confirm with caller.
 */
#define X86_IOC_RDMSR_REGS	_IOWR('c', 0xA0, __u32[8])
#define X86_IOC_WRMSR_REGS	_IOWR('c', 0xA1, __u32[8])

#endif /* __ASSEMBLY__ */
#endif /* _UAPI_ASM_X86_MSR_H */
/* ==== drivers/include/uapi/asm/page_32_types.h (new file, 58 lines) ==== */
#ifndef _ASM_X86_PAGE_32_DEFS_H
#define _ASM_X86_PAGE_32_DEFS_H

#include <linux/const.h>

/*
 * This handles the memory map.
 *
 * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
 * a virtual address space of one gigabyte, which limits the
 * amount of physical memory you can use to about 950MB.
 *
 * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
 * and CONFIG_HIGHMEM64G options in the kernel configuration.
 */
#define __PAGE_OFFSET		_AC(CONFIG_PAGE_OFFSET, UL)

#define __START_KERNEL_map	__PAGE_OFFSET

/* Kernel stack size: 2^1 = 2 pages. */
#define THREAD_SIZE_ORDER	1
#define THREAD_SIZE		(PAGE_SIZE << THREAD_SIZE_ORDER)

/*
 * Exception stack indices; only the doublefault stack exists on
 * 32-bit, so the other indices are zero and N_EXCEPTION_STACKS is 1.
 */
#define DOUBLEFAULT_STACK 1
#define NMI_STACK 0
#define DEBUG_STACK 0
#define MCE_STACK 0
#define N_EXCEPTION_STACKS 1

#ifdef CONFIG_X86_PAE
/* 44=32+12, the limit we can fit into an unsigned long pfn */
#define __PHYSICAL_MASK_SHIFT	44
#define __VIRTUAL_MASK_SHIFT	32
#else /* !CONFIG_X86_PAE */
#define __PHYSICAL_MASK_SHIFT	32
#define __VIRTUAL_MASK_SHIFT	32
#endif /* CONFIG_X86_PAE */

/*
 * Kernel image size is limited to 512 MB (see in arch/x86/kernel/head_32.S)
 */
#define KERNEL_IMAGE_SIZE	(512 * 1024 * 1024)

#ifndef __ASSEMBLY__

/*
 * This much address space is reserved for vmalloc() and iomap()
 * as well as fixmap mappings.
 */
extern unsigned int __VMALLOC_RESERVE;
extern int sysctl_legacy_va_layout;

extern void find_low_pfn_range(void);
extern void setup_bootmem_allocator(void);

#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PAGE_32_DEFS_H */
/* ==== drivers/include/uapi/asm/page_types.h (new file, 68 lines) ==== */
#ifndef _ASM_X86_PAGE_DEFS_H
#define _ASM_X86_PAGE_DEFS_H

#include <linux/const.h>
#include <linux/types.h>

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE-1))

#define __PHYSICAL_MASK		((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1))
#define __VIRTUAL_MASK		((1UL << __VIRTUAL_MASK_SHIFT) - 1)

/* Cast PAGE_MASK to a signed type so that it is sign-extended if
   virtual addresses are 32-bits but physical addresses are larger
   (ie, 32-bit PAE). */
#define PHYSICAL_PAGE_MASK	(((signed long)PAGE_MASK) & __PHYSICAL_MASK)

#define PMD_PAGE_SIZE		(_AC(1, UL) << PMD_SHIFT)
#define PMD_PAGE_MASK		(~(PMD_PAGE_SIZE-1))

/* Huge pages are PMD-sized (HPAGE_SHIFT aliases PMD_SHIFT). */
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE 2

#define PAGE_OFFSET		((unsigned long)__PAGE_OFFSET)

#define VM_DATA_DEFAULT_FLAGS \
	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
	 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

/* Kernel load address, rounded up to the configured alignment. */
#define __PHYSICAL_START	ALIGN(CONFIG_PHYSICAL_START, \
				      CONFIG_PHYSICAL_ALIGN)

#define __START_KERNEL		(__START_KERNEL_map + __PHYSICAL_START)

#ifdef CONFIG_X86_64
#include <asm/page_64_types.h>
#else
#include <asm/page_32_types.h>
#endif	/* CONFIG_X86_64 */

#ifndef __ASSEMBLY__

extern int devmem_is_allowed(unsigned long pagenr);

extern unsigned long max_low_pfn_mapped;
extern unsigned long max_pfn_mapped;

/*
 * Byte address just above the highest mapped page frame:
 * max_pfn_mapped converted from a pfn to a physical address.
 */
static inline phys_addr_t get_max_mapped(void)
{
	return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT;
}

bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn);

extern unsigned long init_memory_mapping(unsigned long start,
					 unsigned long end);

extern void initmem_init(void);

#endif	/* !__ASSEMBLY__ */
#endif	/* _ASM_X86_PAGE_DEFS_H */
/* ==== drivers/include/uapi/asm/posix_types.h (new file, 5 lines) ==== */
# ifdef CONFIG_X86_32 |
# include <asm/posix_types_32.h> |
# else |
# include <asm/posix_types_64.h> |
# endif |
/* ==== drivers/include/uapi/asm/processor-flags.h (new file, 153 lines) ==== */
#ifndef _UAPI_ASM_X86_PROCESSOR_FLAGS_H
#define _UAPI_ASM_X86_PROCESSOR_FLAGS_H

/* Various flags defined: can be included from assembler. */
#include <linux/const.h>

/*
 * EFLAGS bits
 * (_BITUL() comes from <linux/const.h> and is assembler-safe.)
 */
#define X86_EFLAGS_CF_BIT	0 /* Carry Flag */
#define X86_EFLAGS_CF		_BITUL(X86_EFLAGS_CF_BIT)
#define X86_EFLAGS_FIXED_BIT	1 /* Bit 1 - always on */
#define X86_EFLAGS_FIXED	_BITUL(X86_EFLAGS_FIXED_BIT)
#define X86_EFLAGS_PF_BIT	2 /* Parity Flag */
#define X86_EFLAGS_PF		_BITUL(X86_EFLAGS_PF_BIT)
#define X86_EFLAGS_AF_BIT	4 /* Auxiliary carry Flag */
#define X86_EFLAGS_AF		_BITUL(X86_EFLAGS_AF_BIT)
#define X86_EFLAGS_ZF_BIT	6 /* Zero Flag */
#define X86_EFLAGS_ZF		_BITUL(X86_EFLAGS_ZF_BIT)
#define X86_EFLAGS_SF_BIT	7 /* Sign Flag */
#define X86_EFLAGS_SF		_BITUL(X86_EFLAGS_SF_BIT)
#define X86_EFLAGS_TF_BIT	8 /* Trap Flag */
#define X86_EFLAGS_TF		_BITUL(X86_EFLAGS_TF_BIT)
#define X86_EFLAGS_IF_BIT	9 /* Interrupt Flag */
#define X86_EFLAGS_IF		_BITUL(X86_EFLAGS_IF_BIT)
#define X86_EFLAGS_DF_BIT	10 /* Direction Flag */
#define X86_EFLAGS_DF		_BITUL(X86_EFLAGS_DF_BIT)
#define X86_EFLAGS_OF_BIT	11 /* Overflow Flag */
#define X86_EFLAGS_OF		_BITUL(X86_EFLAGS_OF_BIT)
/* IOPL is a 2-bit field, hence the 3-mask instead of _BITUL(). */
#define X86_EFLAGS_IOPL_BIT	12 /* I/O Privilege Level (2 bits) */
#define X86_EFLAGS_IOPL		(_AC(3,UL) << X86_EFLAGS_IOPL_BIT)
#define X86_EFLAGS_NT_BIT	14 /* Nested Task */
#define X86_EFLAGS_NT		_BITUL(X86_EFLAGS_NT_BIT)
#define X86_EFLAGS_RF_BIT	16 /* Resume Flag */
#define X86_EFLAGS_RF		_BITUL(X86_EFLAGS_RF_BIT)
#define X86_EFLAGS_VM_BIT	17 /* Virtual Mode */
#define X86_EFLAGS_VM		_BITUL(X86_EFLAGS_VM_BIT)
/*
 * A second, byte-identical copy of the two AC definitions followed
 * immediately below; the duplicate pair has been removed (identical
 * redefinitions are silently accepted by the preprocessor, but they
 * are redundant and invite divergence on future edits).
 */
#define X86_EFLAGS_AC_BIT	18 /* Alignment Check/Access Control */
#define X86_EFLAGS_AC		_BITUL(X86_EFLAGS_AC_BIT)
#define X86_EFLAGS_VIF_BIT	19 /* Virtual Interrupt Flag */
#define X86_EFLAGS_VIF		_BITUL(X86_EFLAGS_VIF_BIT)
#define X86_EFLAGS_VIP_BIT	20 /* Virtual Interrupt Pending */
#define X86_EFLAGS_VIP		_BITUL(X86_EFLAGS_VIP_BIT)
#define X86_EFLAGS_ID_BIT	21 /* CPUID detection */
#define X86_EFLAGS_ID		_BITUL(X86_EFLAGS_ID_BIT)
/*
 * Basic CPU control in CR0
 *
 * Each X86_CR0_<flag>_BIT below is a bit position; the companion
 * X86_CR0_<flag> macro is the matching one-bit mask built with
 * _BITUL(), usable from both C and assembler.
 */
#define X86_CR0_PE_BIT 0 /* Protection Enable */
#define X86_CR0_PE _BITUL(X86_CR0_PE_BIT)
#define X86_CR0_MP_BIT 1 /* Monitor Coprocessor */
#define X86_CR0_MP _BITUL(X86_CR0_MP_BIT)
#define X86_CR0_EM_BIT 2 /* Emulation */
#define X86_CR0_EM _BITUL(X86_CR0_EM_BIT)
#define X86_CR0_TS_BIT 3 /* Task Switched */
#define X86_CR0_TS _BITUL(X86_CR0_TS_BIT)
#define X86_CR0_ET_BIT 4 /* Extension Type */
#define X86_CR0_ET _BITUL(X86_CR0_ET_BIT)
#define X86_CR0_NE_BIT 5 /* Numeric Error */
#define X86_CR0_NE _BITUL(X86_CR0_NE_BIT)
#define X86_CR0_WP_BIT 16 /* Write Protect */
#define X86_CR0_WP _BITUL(X86_CR0_WP_BIT)
#define X86_CR0_AM_BIT 18 /* Alignment Mask */
#define X86_CR0_AM _BITUL(X86_CR0_AM_BIT)
#define X86_CR0_NW_BIT 29 /* Not Write-through */
#define X86_CR0_NW _BITUL(X86_CR0_NW_BIT)
#define X86_CR0_CD_BIT 30 /* Cache Disable */
#define X86_CR0_CD _BITUL(X86_CR0_CD_BIT)
#define X86_CR0_PG_BIT 31 /* Paging */
#define X86_CR0_PG _BITUL(X86_CR0_PG_BIT)
/*
 * Paging options in CR3
 *
 * PWT/PCD are single bits; the PCID is a 12-bit field in the low
 * bits of CR3, hence a mask rather than a _BITUL() definition.
 */
#define X86_CR3_PWT_BIT 3 /* Page Write Through */
#define X86_CR3_PWT _BITUL(X86_CR3_PWT_BIT)
#define X86_CR3_PCD_BIT 4 /* Page Cache Disable */
#define X86_CR3_PCD _BITUL(X86_CR3_PCD_BIT)
#define X86_CR3_PCID_MASK _AC(0x00000fff,UL) /* PCID Mask */
/*
 * Intel CPU features in CR4
 *
 * Same convention as the CR0 block: <name>_BIT is the bit position,
 * <name> is the _BITUL() mask.
 */
#define X86_CR4_VME_BIT 0 /* enable vm86 extensions */
#define X86_CR4_VME _BITUL(X86_CR4_VME_BIT)
#define X86_CR4_PVI_BIT 1 /* virtual interrupts flag enable */
#define X86_CR4_PVI _BITUL(X86_CR4_PVI_BIT)
#define X86_CR4_TSD_BIT 2 /* disable time stamp at ipl 3 */
#define X86_CR4_TSD _BITUL(X86_CR4_TSD_BIT)
#define X86_CR4_DE_BIT 3 /* enable debugging extensions */
#define X86_CR4_DE _BITUL(X86_CR4_DE_BIT)
#define X86_CR4_PSE_BIT 4 /* enable page size extensions */
#define X86_CR4_PSE _BITUL(X86_CR4_PSE_BIT)
#define X86_CR4_PAE_BIT 5 /* enable physical address extensions */
#define X86_CR4_PAE _BITUL(X86_CR4_PAE_BIT)
#define X86_CR4_MCE_BIT 6 /* Machine check enable */
#define X86_CR4_MCE _BITUL(X86_CR4_MCE_BIT)
#define X86_CR4_PGE_BIT 7 /* enable global pages */
#define X86_CR4_PGE _BITUL(X86_CR4_PGE_BIT)
#define X86_CR4_PCE_BIT 8 /* enable performance counters at ipl 3 */
#define X86_CR4_PCE _BITUL(X86_CR4_PCE_BIT)
#define X86_CR4_OSFXSR_BIT 9 /* enable fast FPU save and restore */
#define X86_CR4_OSFXSR _BITUL(X86_CR4_OSFXSR_BIT)
#define X86_CR4_OSXMMEXCPT_BIT 10 /* enable unmasked SSE exceptions */
#define X86_CR4_OSXMMEXCPT _BITUL(X86_CR4_OSXMMEXCPT_BIT)
#define X86_CR4_VMXE_BIT 13 /* enable VMX virtualization */
#define X86_CR4_VMXE _BITUL(X86_CR4_VMXE_BIT)
#define X86_CR4_SMXE_BIT 14 /* enable safer mode (TXT) */
#define X86_CR4_SMXE _BITUL(X86_CR4_SMXE_BIT)
#define X86_CR4_FSGSBASE_BIT 16 /* enable RDWRFSGS support */
#define X86_CR4_FSGSBASE _BITUL(X86_CR4_FSGSBASE_BIT)
#define X86_CR4_PCIDE_BIT 17 /* enable PCID support */
#define X86_CR4_PCIDE _BITUL(X86_CR4_PCIDE_BIT)
#define X86_CR4_OSXSAVE_BIT 18 /* enable xsave and xrestore */
#define X86_CR4_OSXSAVE _BITUL(X86_CR4_OSXSAVE_BIT)
#define X86_CR4_SMEP_BIT 20 /* enable SMEP support */
#define X86_CR4_SMEP _BITUL(X86_CR4_SMEP_BIT)
#define X86_CR4_SMAP_BIT 21 /* enable SMAP support */
#define X86_CR4_SMAP _BITUL(X86_CR4_SMAP_BIT)
/*
 * x86-64 Task Priority Register, CR8
 *
 * TPR is a 4-bit field, so this is a mask rather than a single bit.
 */
#define X86_CR8_TPR _AC(0x0000000f,UL) /* task priority register */
/*
 * AMD and Transmeta use MSRs for configuration; see <asm/msr-index.h>
 */
/*
 * NSC/Cyrix CPU configuration register indexes
 *
 * These are register *index* values, not bit masks.  NOTE(review):
 * the access mechanism (presumably an index/data port pair handled by
 * the Cyrix support code) is not visible in this header -- confirm
 * against the code that consumes these constants.
 */
#define CX86_PCR0 0x20
#define CX86_GCR 0xb8
#define CX86_CCR0 0xc0
#define CX86_CCR1 0xc1
#define CX86_CCR2 0xc2
#define CX86_CCR3 0xc3
#define CX86_CCR4 0xe8
#define CX86_CCR5 0xe9
#define CX86_CCR6 0xea
#define CX86_CCR7 0xeb
#define CX86_PCR1 0xf0
#define CX86_DIR0 0xfe
#define CX86_DIR1 0xff
/* _BASE suffix suggests the first index of a register bank -- verify. */
#define CX86_ARR_BASE 0xc4
#define CX86_RCR_BASE 0xdc
#endif /* _UAPI_ASM_X86_PROCESSOR_FLAGS_H */ |
/drivers/include/uapi/asm/ptrace.h |
---|
0,0 → 1,262 |
#ifndef _ASM_X86_PTRACE_H |
#define _ASM_X86_PTRACE_H |
#include <asm/segment.h> |
#include <asm/page_types.h> |
#include <uapi/asm/ptrace.h> |
#ifndef __ASSEMBLY__ |
#ifdef __i386__ |
/*
 * Saved register frame for 32-bit kernels.
 *
 * NOTE(review): the field order looks like it mirrors the asm
 * entry-path save sequence and the ptrace register-offset ABI
 * (regs_get_register() indexes into this struct by byte offset) --
 * confirm against the entry code before reordering any member.
 */
struct pt_regs {
	unsigned long bx;
	unsigned long cx;
	unsigned long dx;
	unsigned long si;
	unsigned long di;
	unsigned long bp;
	unsigned long ax;
	unsigned long ds;
	unsigned long es;
	unsigned long fs;
	unsigned long gs;
	unsigned long orig_ax;
	unsigned long ip;
	unsigned long cs;
	unsigned long flags;
	unsigned long sp;
	unsigned long ss;
};
#else /* __i386__ */ |
/*
 * Saved register frame for 64-bit kernels.
 *
 * NOTE(review): field order is ABI -- it appears to match the asm
 * save sequence and the ptrace byte offsets used by
 * regs_get_register(); confirm against the entry code before
 * reordering.  The existing comments mark which registers the
 * fast syscall path actually saves.
 */
struct pt_regs {
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
	unsigned long bp;
	unsigned long bx;
/* arguments: non interrupts/non tracing syscalls only save up to here*/
	unsigned long r11;
	unsigned long r10;
	unsigned long r9;
	unsigned long r8;
	unsigned long ax;
	unsigned long cx;
	unsigned long dx;
	unsigned long si;
	unsigned long di;
	unsigned long orig_ax;
/* end of arguments */
/* cpu exception frame or undefined */
	unsigned long ip;
	unsigned long cs;
	unsigned long flags;
	unsigned long sp;
	unsigned long ss;
/* top of stack page */
};
#endif /* !__i386__ */ |
#ifdef CONFIG_PARAVIRT |
#include <asm/paravirt_types.h> |
#endif |
struct cpuinfo_x86; |
struct task_struct; |
extern unsigned long profile_pc(struct pt_regs *regs); |
#define profile_pc profile_pc |
extern unsigned long |
convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs); |
extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, |
int error_code, int si_code); |
extern unsigned long syscall_trace_enter_phase1(struct pt_regs *, u32 arch); |
extern long syscall_trace_enter_phase2(struct pt_regs *, u32 arch, |
unsigned long phase1_result); |
extern long syscall_trace_enter(struct pt_regs *); |
extern void syscall_trace_leave(struct pt_regs *); |
static inline unsigned long regs_return_value(struct pt_regs *regs) |
{ |
return regs->ax; |
} |
/* |
* user_mode_vm(regs) determines whether a register set came from user mode. |
* This is true if V8086 mode was enabled OR if the register set was from |
* protected mode with RPL-3 CS value. This tricky test checks that with |
* one comparison. Many places in the kernel can bypass this full check |
* if they have already ruled out V8086 mode, so user_mode(regs) can be used. |
*/ |
/*
 * user_mode() - did @regs come from CPL 3 (user space)?
 *
 * 32-bit: compares the saved CS selector's RPL bits to USER_RPL.
 * This deliberately ignores vm86 mode -- callers that have not ruled
 * out v8086 must use user_mode_vm() instead (see comment above).
 * 64-bit: tests the low two CS bits directly (no vm86 in long mode).
 */
static inline int user_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
#else
	return !!(regs->cs & 3);
#endif
}
/*
 * user_mode_vm() - did @regs come from user space OR vm86 mode?
 *
 * 32-bit: ORs the CS RPL with the EFLAGS VM bit so that a single
 * >= USER_RPL comparison is true when either the RPL is 3 or
 * X86_VM_MASK is set (the trick described in the comment above
 * user_mode()).  64-bit has no vm86 mode, so this is just
 * user_mode().
 */
static inline int user_mode_vm(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
		USER_RPL;
#else
	return user_mode(regs);
#endif
}
/*
 * v8086_mode() - did @regs come from virtual-8086 mode?
 *
 * 32-bit: nonzero iff the VM bit is set in the saved EFLAGS.
 * 64-bit: always 0; long mode has no v8086 support.
 */
static inline int v8086_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (regs->flags & X86_VM_MASK);
#else
	return 0;	/* No V86 mode support in long mode */
#endif
}
#ifdef CONFIG_X86_64 |
static inline bool user_64bit_mode(struct pt_regs *regs) |
{ |
#ifndef CONFIG_PARAVIRT |
/* |
* On non-paravirt systems, this is the only long mode CPL 3 |
* selector. We do not allow long mode selectors in the LDT. |
*/ |
return regs->cs == __USER_CS; |
#else |
/* Headers are too twisted for this to go in paravirt.h. */ |
return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs; |
#endif |
} |
#define current_user_stack_pointer() this_cpu_read(old_rsp) |
/* ia32 vs. x32 difference */ |
#define compat_user_stack_pointer() \ |
(test_thread_flag(TIF_IA32) \ |
? current_pt_regs()->sp \ |
: this_cpu_read(old_rsp)) |
#endif |
#ifdef CONFIG_X86_32
/*
 * 32-bit: traps from the kernel do not save sp/ss in the frame
 * (see regs_get_register() below), so an out-of-line helper has to
 * reconstruct the stack pointer.
 */
extern unsigned long kernel_stack_pointer(struct pt_regs *regs);
#else
/* 64-bit: sp is always part of the saved frame; just read it. */
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
	return regs->sp;
}
#endif
#define GET_IP(regs) ((regs)->ip) |
#define GET_FP(regs) ((regs)->bp) |
#define GET_USP(regs) ((regs)->sp) |
#include <asm-generic/ptrace.h> |
/* Query offset/name of register from its name/offset */ |
extern int regs_query_register_offset(const char *name); |
extern const char *regs_query_register_name(unsigned int offset); |
#define MAX_REG_OFFSET (offsetof(struct pt_regs, ss)) |
/** |
* regs_get_register() - get register value from its offset |
* @regs: pt_regs from which register value is gotten. |
* @offset: offset number of the register. |
* |
* regs_get_register returns the value of a register. The @offset is the |
* offset of the register in struct pt_regs address which specified by @regs. |
* If @offset is bigger than MAX_REG_OFFSET, this returns 0. |
*/ |
static inline unsigned long regs_get_register(struct pt_regs *regs,
					      unsigned int offset)
{
	/* Out-of-range offsets read as 0, per the kernel-doc above. */
	if (unlikely(offset > MAX_REG_OFFSET))
		return 0;
#ifdef CONFIG_X86_32
	/*
	 * Traps from the kernel do not save sp and ss.
	 * Use the helper function to retrieve sp.
	 */
	if (offset == offsetof(struct pt_regs, sp) &&
	    regs->cs == __KERNEL_CS)
		return kernel_stack_pointer(regs);
#endif
	/*
	 * Every pt_regs member is an unsigned long, so a byte offset
	 * into the struct lands exactly on one saved register.
	 */
	return *(unsigned long *)((unsigned long)regs + offset);
}
/** |
* regs_within_kernel_stack() - check the address in the stack |
* @regs: pt_regs which contains kernel stack pointer. |
* @addr: address which is checked. |
* |
* regs_within_kernel_stack() checks @addr is within the kernel stack page(s). |
* If @addr is within the kernel stack, it returns true. If not, returns false. |
*/ |
static inline int regs_within_kernel_stack(struct pt_regs *regs, |
unsigned long addr) |
{ |
return ((addr & ~(THREAD_SIZE - 1)) == |
(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))); |
} |
/** |
* regs_get_kernel_stack_nth() - get Nth entry of the stack |
* @regs: pt_regs which contains kernel stack pointer. |
* @n: stack entry number. |
* |
* regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which |
* is specified by @regs. If the @n th entry is NOT in the kernel stack, |
* this returns 0. |
*/ |
/*
 * regs_get_kernel_stack_nth() - read the @n-th word on the kernel
 * stack described by @regs.
 *
 * Returns 0 when the computed slot falls outside the kernel stack
 * (checked via regs_within_kernel_stack()).
 */
static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
						      unsigned int n)
{
	unsigned long *slot;

	slot = (unsigned long *)kernel_stack_pointer(regs) + n;
	if (!regs_within_kernel_stack(regs, (unsigned long)slot))
		return 0;

	return *slot;
}
#define arch_has_single_step() (1) |
#ifdef CONFIG_X86_DEBUGCTLMSR |
#define arch_has_block_step() (1) |
#else |
#define arch_has_block_step() (boot_cpu_data.x86 >= 6) |
#endif |
#define ARCH_HAS_USER_SINGLE_STEP_INFO |
/* |
* When hitting ptrace_stop(), we cannot return using SYSRET because |
* that does not restore the full CPU state, only a minimal set. The |
* ptracer can change arbitrary register values, which is usually okay |
* because the usual ptrace stops run off the signal delivery path which |
* forces IRET; however, ptrace_event() stops happen in arbitrary places |
* in the kernel and don't force IRET path. |
* |
* So force IRET path after a ptrace stop. |
*/ |
#define arch_ptrace_stop_needed(code, info) \ |
({ \ |
set_thread_flag(TIF_NOTIFY_RESUME); \ |
false; \ |
}) |
struct user_desc; |
extern int do_get_thread_area(struct task_struct *p, int idx, |
struct user_desc __user *info); |
extern int do_set_thread_area(struct task_struct *p, int idx, |
struct user_desc __user *info, int can_allocate); |
#endif /* !__ASSEMBLY__ */ |
#endif /* _ASM_X86_PTRACE_H */ |
/drivers/include/uapi/asm/segment.h |
---|
0,0 → 1,265 |
#ifndef _ASM_X86_SEGMENT_H |
#define _ASM_X86_SEGMENT_H |
#include <linux/const.h> |
/* Constructor for a conventional segment GDT (or LDT) entry */
/* This is a macro so it can be used in initializers */
/*
 * Assembles the scattered x86 descriptor encoding:
 *   base[31:24]  -> descriptor bits 63:56
 *   flags[15:12] and flags[7:0] -> bits 55:52 and 47:40
 *                  (the 0x0000f0ff mask skips the limit nibble)
 *   limit[19:16] -> bits 51:48
 *   base[23:0]   -> bits 39:16
 *   limit[15:0]  -> bits 15:0
 */
#define GDT_ENTRY(flags, base, limit) \
((((base) & _AC(0xff000000,ULL)) << (56-24)) | \
(((flags) & _AC(0x0000f0ff,ULL)) << 40) | \
(((limit) & _AC(0x000f0000,ULL)) << (48-16)) | \
(((base) & _AC(0x00ffffff,ULL)) << 16) | \
(((limit) & _AC(0x0000ffff,ULL))))
/* Simple and small GDT entries for booting only */ |
#define GDT_ENTRY_BOOT_CS 2 |
#define __BOOT_CS (GDT_ENTRY_BOOT_CS * 8) |
#define GDT_ENTRY_BOOT_DS (GDT_ENTRY_BOOT_CS + 1) |
#define __BOOT_DS (GDT_ENTRY_BOOT_DS * 8) |
#define GDT_ENTRY_BOOT_TSS (GDT_ENTRY_BOOT_CS + 2) |
#define __BOOT_TSS (GDT_ENTRY_BOOT_TSS * 8) |
#define SEGMENT_RPL_MASK 0x3 /* |
* Bottom two bits of selector give the ring |
* privilege level |
*/ |
#define SEGMENT_TI_MASK 0x4 /* Bit 2 is table indicator (LDT/GDT) */ |
#define USER_RPL 0x3 /* User mode is privilege level 3 */ |
#define SEGMENT_LDT 0x4 /* LDT segment has TI set... */ |
#define SEGMENT_GDT 0x0 /* ... GDT has it cleared */ |
#ifdef CONFIG_X86_32 |
/* |
* The layout of the per-CPU GDT under Linux: |
* |
* 0 - null |
* 1 - reserved |
* 2 - reserved |
* 3 - reserved |
* |
* 4 - unused <==== new cacheline |
* 5 - unused |
* |
* ------- start of TLS (Thread-Local Storage) segments: |
* |
* 6 - TLS segment #1 [ glibc's TLS segment ] |
* 7 - TLS segment #2 [ Wine's %fs Win32 segment ] |
* 8 - TLS segment #3 |
* 9 - reserved |
* 10 - reserved |
* 11 - reserved |
* |
* ------- start of kernel segments: |
* |
* 12 - kernel code segment <==== new cacheline |
* 13 - kernel data segment |
* 14 - default user CS |
* 15 - default user DS |
* 16 - TSS |
* 17 - LDT |
* 18 - PNPBIOS support (16->32 gate) |
* 19 - PNPBIOS support |
* 20 - PNPBIOS support |
* 21 - PNPBIOS support |
* 22 - PNPBIOS support |
* 23 - APM BIOS support |
* 24 - APM BIOS support |
* 25 - APM BIOS support |
* |
* 26 - ESPFIX small SS |
* 27 - per-cpu [ offset to per-cpu data area ] |
* 28 - stack_canary-20 [ for stack protector ] |
* 29 - unused |
* 30 - unused |
* 31 - TSS for double fault handler |
*/ |
#define GDT_ENTRY_TLS_MIN 6 |
#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1) |
#define GDT_ENTRY_DEFAULT_USER_CS 14 |
#define GDT_ENTRY_DEFAULT_USER_DS 15 |
#define GDT_ENTRY_KERNEL_BASE (12) |
#define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0) |
#define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1) |
#define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4) |
#define GDT_ENTRY_LDT (GDT_ENTRY_KERNEL_BASE+5) |
#define GDT_ENTRY_PNPBIOS_BASE (GDT_ENTRY_KERNEL_BASE+6) |
#define GDT_ENTRY_APMBIOS_BASE (GDT_ENTRY_KERNEL_BASE+11) |
#define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE+14) |
#define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS*8) |
#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE+15) |
#ifdef CONFIG_SMP |
#define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8) |
#else |
#define __KERNEL_PERCPU 0 |
#endif |
#define GDT_ENTRY_STACK_CANARY (GDT_ENTRY_KERNEL_BASE+16) |
#ifdef CONFIG_CC_STACKPROTECTOR |
#define __KERNEL_STACK_CANARY (GDT_ENTRY_STACK_CANARY*8) |
#else |
#define __KERNEL_STACK_CANARY 0 |
#endif |
#define GDT_ENTRY_DOUBLEFAULT_TSS 31 |
/* |
* The GDT has 32 entries |
*/ |
#define GDT_ENTRIES 32 |
/* The PnP BIOS entries in the GDT */ |
#define GDT_ENTRY_PNPBIOS_CS32 (GDT_ENTRY_PNPBIOS_BASE + 0) |
#define GDT_ENTRY_PNPBIOS_CS16 (GDT_ENTRY_PNPBIOS_BASE + 1) |
#define GDT_ENTRY_PNPBIOS_DS (GDT_ENTRY_PNPBIOS_BASE + 2) |
#define GDT_ENTRY_PNPBIOS_TS1 (GDT_ENTRY_PNPBIOS_BASE + 3) |
#define GDT_ENTRY_PNPBIOS_TS2 (GDT_ENTRY_PNPBIOS_BASE + 4) |
/* The PnP BIOS selectors */ |
#define PNP_CS32 (GDT_ENTRY_PNPBIOS_CS32 * 8) /* segment for calling fn */ |
#define PNP_CS16 (GDT_ENTRY_PNPBIOS_CS16 * 8) /* code segment for BIOS */ |
#define PNP_DS (GDT_ENTRY_PNPBIOS_DS * 8) /* data segment for BIOS */ |
#define PNP_TS1 (GDT_ENTRY_PNPBIOS_TS1 * 8) /* transfer data segment */ |
#define PNP_TS2 (GDT_ENTRY_PNPBIOS_TS2 * 8) /* another data segment */ |
/* |
* Matching rules for certain types of segments. |
*/ |
/* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */ |
#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8) |
#else |
#include <asm/cache.h> |
#define GDT_ENTRY_KERNEL32_CS 1 |
#define GDT_ENTRY_KERNEL_CS 2 |
#define GDT_ENTRY_KERNEL_DS 3 |
#define __KERNEL32_CS (GDT_ENTRY_KERNEL32_CS * 8) |
/* |
* we cannot use the same code segment descriptor for user and kernel |
* -- not even in the long flat mode, because of different DPL /kkeil |
* The segment offset needs to contain a RPL. Grr. -AK |
* GDT layout to get 64bit syscall right (sysret hardcodes gdt offsets) |
*/ |
#define GDT_ENTRY_DEFAULT_USER32_CS 4 |
#define GDT_ENTRY_DEFAULT_USER_DS 5 |
#define GDT_ENTRY_DEFAULT_USER_CS 6 |
#define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3) |
#define __USER32_DS __USER_DS |
#define GDT_ENTRY_TSS 8 /* needs two entries */ |
#define GDT_ENTRY_LDT 10 /* needs two entries */ |
#define GDT_ENTRY_TLS_MIN 12 |
#define GDT_ENTRY_TLS_MAX 14 |
#define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */ |
#define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3) |
/* TLS indexes for 64bit - hardcoded in arch_prctl */ |
#define FS_TLS 0 |
#define GS_TLS 1 |
#define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3) |
#define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3) |
#define GDT_ENTRIES 16 |
#endif |
#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8) |
#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8) |
#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3) |
#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3) |
#ifndef CONFIG_PARAVIRT |
#define get_kernel_rpl() 0 |
#endif |
#define IDT_ENTRIES 256 |
#define NUM_EXCEPTION_VECTORS 32 |
/* Bitmask of exception vectors which push an error code on the stack */ |
#define EXCEPTION_ERRCODE_MASK 0x00027d00 |
#define GDT_SIZE (GDT_ENTRIES * 8) |
#define GDT_ENTRY_TLS_ENTRIES 3 |
#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8) |
#ifdef __KERNEL__ |
#ifndef __ASSEMBLY__ |
extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][2+2+5]; |
#ifdef CONFIG_TRACING |
#define trace_early_idt_handlers early_idt_handlers |
#endif |
/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 *
 * The movl at label 1 can fault on a bad selector; the exception
 * table entry (_ASM_EXTABLE) redirects the fault to the .fixup code
 * at label 2, which zeroes the register and jumps back, so the
 * segment ends up loaded with the null selector instead of oopsing.
 */
#define loadsegment(seg, value) \
do { \
unsigned short __val = (value); \
\
asm volatile(" \n" \
"1: movl %k0,%%" #seg " \n" \
\
".section .fixup,\"ax\" \n" \
"2: xorl %k0,%k0 \n" \
" jmp 1b \n" \
".previous \n" \
\
_ASM_EXTABLE(1b, 2b) \
\
: "+r" (__val) : : "memory"); \
} while (0)
/*
 * Save a segment register away
 *
 * Plain selector read into @value; unlike loadsegment() no fixup
 * section is attached here.
 */
#define savesegment(seg, value) \
asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
/* |
* x86_32 user gs accessors. |
*/ |
#ifdef CONFIG_X86_32 |
#ifdef CONFIG_X86_32_LAZY_GS |
#define get_user_gs(regs) (u16)({unsigned long v; savesegment(gs, v); v;}) |
#define set_user_gs(regs, v) loadsegment(gs, (unsigned long)(v)) |
#define task_user_gs(tsk) ((tsk)->thread.gs) |
#define lazy_save_gs(v) savesegment(gs, (v)) |
#define lazy_load_gs(v) loadsegment(gs, (v)) |
#else /* X86_32_LAZY_GS */ |
#define get_user_gs(regs) (u16)((regs)->gs) |
#define set_user_gs(regs, v) do { (regs)->gs = (v); } while (0) |
#define task_user_gs(tsk) (task_pt_regs(tsk)->gs) |
#define lazy_save_gs(v) do { } while (0) |
#define lazy_load_gs(v) do { } while (0) |
#endif /* X86_32_LAZY_GS */ |
#endif /* X86_32 */ |
/*
 * get_limit() - return the size in bytes of the segment selected by
 * @segment, i.e. the hardware segment limit plus one.
 *
 * NOTE(review): per the LSL instruction semantics, lsll only writes
 * its destination when the selector is valid; for an invalid selector
 * __limit would be read uninitialized here.  Callers appear to be
 * expected to pass only valid selectors -- confirm.
 */
static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
	return __limit + 1;
}
#endif /* !__ASSEMBLY__ */ |
#endif /* __KERNEL__ */ |
#endif /* _ASM_X86_SEGMENT_H */ |
/drivers/include/uapi/asm/sigcontext.h |
---|
0,0 → 1,221 |
#ifndef _UAPI_ASM_X86_SIGCONTEXT_H |
#define _UAPI_ASM_X86_SIGCONTEXT_H |
#include <linux/compiler.h> |
#include <linux/types.h> |
#define FP_XSTATE_MAGIC1 0x46505853U |
#define FP_XSTATE_MAGIC2 0x46505845U |
#define FP_XSTATE_MAGIC2_SIZE sizeof(FP_XSTATE_MAGIC2) |
/* |
* bytes 464..511 in the current 512byte layout of fxsave/fxrstor frame |
* are reserved for SW usage. On cpu's supporting xsave/xrstor, these bytes |
* are used to extended the fpstate pointer in the sigcontext, which now |
* includes the extended state information along with fpstate information. |
* |
* Presence of FP_XSTATE_MAGIC1 at the beginning of this SW reserved |
* area and FP_XSTATE_MAGIC2 at the end of memory layout |
* (extended_size - FP_XSTATE_MAGIC2_SIZE) indicates the presence of the |
* extended state information in the memory layout pointed by the fpstate |
* pointer in sigcontext. |
*/ |
struct _fpx_sw_bytes { |
__u32 magic1; /* FP_XSTATE_MAGIC1 */ |
__u32 extended_size; /* total size of the layout referred by |
* fpstate pointer in the sigcontext. |
*/ |
__u64 xstate_bv; |
/* feature bit mask (including fp/sse/extended |
* state) that is present in the memory |
* layout. |
*/ |
__u32 xstate_size; /* actual xsave state size, based on the |
* features saved in the layout. |
* 'extended_size' will be greater than |
* 'xstate_size'. |
*/ |
__u32 padding[7]; /* for future use. */ |
}; |
#ifdef __i386__ |
/* |
* As documented in the iBCS2 standard.. |
* |
* The first part of "struct _fpstate" is just the normal i387 |
* hardware setup, the extra "status" word is used to save the |
* coprocessor status word before entering the handler. |
* |
* Pentium III FXSR, SSE support |
* Gareth Hughes <gareth@valinux.com>, May 2000 |
* |
* The FPU state data structure has had to grow to accommodate the |
* extended FPU state required by the Streaming SIMD Extensions. |
* There is no documented standard to accomplish this at the moment. |
*/ |
struct _fpreg { |
unsigned short significand[4]; |
unsigned short exponent; |
}; |
struct _fpxreg { |
unsigned short significand[4]; |
unsigned short exponent; |
unsigned short padding[3]; |
}; |
struct _xmmreg { |
unsigned long element[4]; |
}; |
struct _fpstate { |
/* Regular FPU environment */ |
unsigned long cw; |
unsigned long sw; |
unsigned long tag; |
unsigned long ipoff; |
unsigned long cssel; |
unsigned long dataoff; |
unsigned long datasel; |
struct _fpreg _st[8]; |
unsigned short status; |
unsigned short magic; /* 0xffff = regular FPU data only */ |
/* FXSR FPU environment */ |
unsigned long _fxsr_env[6]; /* FXSR FPU env is ignored */ |
unsigned long mxcsr; |
unsigned long reserved; |
struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */ |
struct _xmmreg _xmm[8]; |
unsigned long padding1[44]; |
union { |
unsigned long padding2[12]; |
struct _fpx_sw_bytes sw_reserved; /* represents the extended |
* state info */ |
}; |
}; |
#define X86_FXSR_MAGIC 0x0000 |
#ifndef __KERNEL__ |
/* |
* User-space might still rely on the old definition: |
*/ |
struct sigcontext { |
unsigned short gs, __gsh; |
unsigned short fs, __fsh; |
unsigned short es, __esh; |
unsigned short ds, __dsh; |
unsigned long edi; |
unsigned long esi; |
unsigned long ebp; |
unsigned long esp; |
unsigned long ebx; |
unsigned long edx; |
unsigned long ecx; |
unsigned long eax; |
unsigned long trapno; |
unsigned long err; |
unsigned long eip; |
unsigned short cs, __csh; |
unsigned long eflags; |
unsigned long esp_at_signal; |
unsigned short ss, __ssh; |
struct _fpstate __user *fpstate; |
unsigned long oldmask; |
unsigned long cr2; |
}; |
#endif /* !__KERNEL__ */ |
#else /* __i386__ */ |
/* FXSAVE frame */ |
/* Note: reserved1/2 may someday contain valuable data. Always save/restore |
them when you change signal frames. */ |
struct _fpstate { |
__u16 cwd; |
__u16 swd; |
__u16 twd; /* Note this is not the same as the |
32bit/x87/FSAVE twd */ |
__u16 fop; |
__u64 rip; |
__u64 rdp; |
__u32 mxcsr; |
__u32 mxcsr_mask; |
__u32 st_space[32]; /* 8*16 bytes for each FP-reg */ |
__u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg */ |
__u32 reserved2[12]; |
union { |
__u32 reserved3[12]; |
struct _fpx_sw_bytes sw_reserved; /* represents the extended |
* state information */ |
}; |
}; |
#ifndef __KERNEL__ |
/* |
* User-space might still rely on the old definition: |
*/ |
struct sigcontext { |
__u64 r8; |
__u64 r9; |
__u64 r10; |
__u64 r11; |
__u64 r12; |
__u64 r13; |
__u64 r14; |
__u64 r15; |
__u64 rdi; |
__u64 rsi; |
__u64 rbp; |
__u64 rbx; |
__u64 rdx; |
__u64 rax; |
__u64 rcx; |
__u64 rsp; |
__u64 rip; |
__u64 eflags; /* RFLAGS */ |
__u16 cs; |
__u16 gs; |
__u16 fs; |
__u16 __pad0; |
__u64 err; |
__u64 trapno; |
__u64 oldmask; |
__u64 cr2; |
struct _fpstate __user *fpstate; /* zero when no FPU context */ |
#ifdef __ILP32__ |
__u32 __fpstate_pad; |
#endif |
__u64 reserved1[8]; |
}; |
#endif /* !__KERNEL__ */ |
#endif /* !__i386__ */ |
struct _xsave_hdr { |
__u64 xstate_bv; |
__u64 reserved1[2]; |
__u64 reserved2[5]; |
}; |
struct _ymmh_state { |
/* 16 * 16 bytes for each YMMH-reg */ |
__u32 ymmh_space[64]; |
}; |
/* |
* Extended state pointed by the fpstate pointer in the sigcontext. |
* In addition to the fpstate, information encoded in the xstate_hdr |
* indicates the presence of other extended state information |
* supported by the processor and OS. |
*/ |
struct _xstate { |
struct _fpstate fpstate; |
struct _xsave_hdr xstate_hdr; |
struct _ymmh_state ymmh; |
/* new processor state extensions go here */ |
}; |
#endif /* _UAPI_ASM_X86_SIGCONTEXT_H */ |
/drivers/include/uapi/asm/vm86.h |
---|
0,0 → 1,129 |
#ifndef _UAPI_ASM_X86_VM86_H |
#define _UAPI_ASM_X86_VM86_H |
/* |
* I'm guessing at the VIF/VIP flag usage, but hope that this is how |
* the Pentium uses them. Linux will return from vm86 mode when both |
* VIF and VIP is set. |
* |
* On a Pentium, we could probably optimize the virtual flags directly |
* in the eflags register instead of doing it "by hand" in vflags... |
* |
* Linus |
*/ |
#include <asm/processor-flags.h> |
#define BIOSSEG 0x0f000 |
#define CPU_086 0 |
#define CPU_186 1 |
#define CPU_286 2 |
#define CPU_386 3 |
#define CPU_486 4 |
#define CPU_586 5 |
/* |
* Return values for the 'vm86()' system call |
*/ |
#define VM86_TYPE(retval) ((retval) & 0xff) |
#define VM86_ARG(retval) ((retval) >> 8) |
#define VM86_SIGNAL 0 /* return due to signal */ |
#define VM86_UNKNOWN 1 /* unhandled GP fault |
- IO-instruction or similar */ |
#define VM86_INTx 2 /* int3/int x instruction (ARG = x) */ |
#define VM86_STI 3 /* sti/popf/iret instruction enabled |
virtual interrupts */ |
/* |
* Additional return values when invoking new vm86() |
*/ |
#define VM86_PICRETURN 4 /* return due to pending PIC request */ |
#define VM86_TRAP 6 /* return due to DOS-debugger request */ |
/* |
* function codes when invoking new vm86() |
*/ |
#define VM86_PLUS_INSTALL_CHECK 0 |
#define VM86_ENTER 1 |
#define VM86_ENTER_NO_BYPASS 2 |
#define VM86_REQUEST_IRQ 3 |
#define VM86_FREE_IRQ 4 |
#define VM86_GET_IRQ_BITS 5 |
#define VM86_GET_AND_RESET_IRQ 6 |
/* |
* This is the stack-layout seen by the user space program when we have |
* done a translation of "SAVE_ALL" from vm86 mode. The real kernel layout |
* is 'kernel_vm86_regs' (see below). |
*/ |
/*
 * Register image exchanged with user space by vm86(); see the layout
 * comment above.  NOTE(review): the __null_* slots presumably keep
 * the first part aligned with the 32-bit SAVE_ALL frame while the
 * real vm86 segment values live in the trailing es/ds/fs/gs fields --
 * confirm against the vm86 entry code.
 */
struct vm86_regs {
/*
 * normal regs, with special meaning for the segment descriptors..
 */
	long ebx;
	long ecx;
	long edx;
	long esi;
	long edi;
	long ebp;
	long eax;
	long __null_ds;
	long __null_es;
	long __null_fs;
	long __null_gs;
	long orig_eax;
	long eip;
	unsigned short cs, __csh;
	long eflags;
	long esp;
	unsigned short ss, __ssh;
/*
 * these are specific to v86 mode:
 */
	unsigned short es, __esh;
	unsigned short ds, __dsh;
	unsigned short fs, __fsh;
	unsigned short gs, __gsh;
};
struct revectored_struct { |
unsigned long __map[8]; /* 256 bits */ |
}; |
struct vm86_struct { |
struct vm86_regs regs; |
unsigned long flags; |
unsigned long screen_bitmap; |
unsigned long cpu_type; |
struct revectored_struct int_revectored; |
struct revectored_struct int21_revectored; |
}; |
/* |
* flags masks |
*/ |
#define VM86_SCREEN_BITMAP 0x0001 |
struct vm86plus_info_struct { |
unsigned long force_return_for_pic:1; |
unsigned long vm86dbg_active:1; /* for debugger */ |
unsigned long vm86dbg_TFpendig:1; /* for debugger */ |
unsigned long unused:28; |
unsigned long is_vm86pus:1; /* for vm86 internal use */ |
unsigned char vm86dbg_intxxtab[32]; /* for debugger */ |
}; |
struct vm86plus_struct { |
struct vm86_regs regs; |
unsigned long flags; |
unsigned long screen_bitmap; |
unsigned long cpu_type; |
struct revectored_struct int_revectored; |
struct revectored_struct int21_revectored; |
struct vm86plus_info_struct vm86plus; |
}; |
#endif /* _UAPI_ASM_X86_VM86_H */ |