/* ==== file: drivers/include/uapi/asm/e820.h (new file, 70 lines) ==== */
#ifndef _UAPI_ASM_X86_E820_H |
#define _UAPI_ASM_X86_E820_H |
#define E820MAP 0x2d0 /* our map */ |
#define E820MAX 128 /* number of entries in E820MAP */ |
/* |
* Legacy E820 BIOS limits us to 128 (E820MAX) nodes due to the |
* constrained space in the zeropage. If we have more nodes than |
* that, and if we've booted off EFI firmware, then the EFI tables |
* passed us from the EFI firmware can list more nodes. Size our |
* internal memory map tables to have room for these additional |
* nodes, based on up to three entries per node for which the |
* kernel was built: MAX_NUMNODES == (1 << CONFIG_NODES_SHIFT), |
* plus E820MAX, allowing space for the possible duplicate E820 |
* entries that might need room in the same arrays, prior to the |
* call to sanitize_e820_map() to remove duplicates. The allowance |
* of three memory map entries per node is "enough" entries for |
* the initial hardware platform motivating this mechanism to make |
* use of additional EFI map entries. Future platforms may want |
* to allow more than three entries per node or otherwise refine |
* this size. |
*/ |
#ifndef __KERNEL__ |
#define E820_X_MAX E820MAX |
#endif |
#define E820NR 0x1e8 /* # entries in E820MAP */ |
#define E820_RAM 1 |
#define E820_RESERVED 2 |
#define E820_ACPI 3 |
#define E820_NVS 4 |
#define E820_UNUSABLE 5 |
/* |
* reserved RAM used by kernel itself |
* if CONFIG_INTEL_TXT is enabled, memory of this type will be |
* included in the S3 integrity calculation and so should not include |
* any memory that BIOS might alter over the S3 transition |
*/ |
#define E820_RESERVED_KERN 128 |
#ifndef __ASSEMBLY__ |
#include <linux/types.h> |
/*
 * One BIOS E820 memory-map entry: a physical address range plus its usage
 * type (E820_RAM, E820_RESERVED, ... defined above).  Packed so the struct
 * matches the 20-byte on-disk/zeropage layout handed over by the firmware.
 */
struct e820entry {
	__u64 addr;	/* start of memory segment */
	__u64 size;	/* size of memory segment */
	__u32 type;	/* type of memory segment (one of the E820_* values) */
} __attribute__((packed));

/*
 * In-memory copy of the firmware memory map: the first nr_map entries of
 * map[] are valid.  Sized by E820_X_MAX (== E820MAX outside the kernel,
 * see the comment near the top of this header).
 */
struct e820map {
	__u32 nr_map;
	struct e820entry map[E820_X_MAX];
};
#define ISA_START_ADDRESS 0xa0000 |
#define ISA_END_ADDRESS 0x100000 |
#define BIOS_BEGIN 0x000a0000 |
#define BIOS_END 0x00100000 |
#define BIOS_ROM_BASE 0xffe00000 |
#define BIOS_ROM_END 0xffffffff |
#endif /* __ASSEMBLY__ */ |
#endif /* _UAPI_ASM_X86_E820_H */ |
/* ==== file: drivers/include/uapi/asm/errno.h (new file, 1 line) ==== */
#include <asm-generic/errno.h> |
/* ==== file: drivers/include/uapi/asm/ioctl.h (new file, 1 line) ==== */
#include <asm-generic/ioctl.h> |
/* ==== file: drivers/include/uapi/asm/msr-index.h (new file, 624 lines) ==== */
#ifndef _ASM_X86_MSR_INDEX_H |
#define _ASM_X86_MSR_INDEX_H |
/* CPU model specific register (MSR) numbers */ |
/* x86-64 specific MSRs */ |
#define MSR_EFER 0xc0000080 /* extended feature register */ |
#define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target */ |
#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */ |
#define MSR_CSTAR 0xc0000083 /* compat mode SYSCALL target */ |
#define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */ |
#define MSR_FS_BASE 0xc0000100 /* 64bit FS base */ |
#define MSR_GS_BASE 0xc0000101 /* 64bit GS base */ |
#define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow */ |
#define MSR_TSC_AUX 0xc0000103 /* Auxiliary TSC */ |
/* EFER bits: */ |
#define _EFER_SCE 0 /* SYSCALL/SYSRET */ |
#define _EFER_LME 8 /* Long mode enable */ |
#define _EFER_LMA 10 /* Long mode active (read-only) */ |
#define _EFER_NX 11 /* No execute enable */ |
#define _EFER_SVME 12 /* Enable virtualization */ |
#define _EFER_LMSLE 13 /* Long Mode Segment Limit Enable */ |
#define _EFER_FFXSR 14 /* Enable Fast FXSAVE/FXRSTOR */ |
#define EFER_SCE (1<<_EFER_SCE) |
#define EFER_LME (1<<_EFER_LME) |
#define EFER_LMA (1<<_EFER_LMA) |
#define EFER_NX (1<<_EFER_NX) |
#define EFER_SVME (1<<_EFER_SVME) |
#define EFER_LMSLE (1<<_EFER_LMSLE) |
#define EFER_FFXSR (1<<_EFER_FFXSR) |
/* Intel MSRs. Some also available on other CPUs */ |
#define MSR_IA32_PERFCTR0 0x000000c1 |
#define MSR_IA32_PERFCTR1 0x000000c2 |
#define MSR_FSB_FREQ 0x000000cd |
#define MSR_NHM_PLATFORM_INFO 0x000000ce |
#define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2 |
#define NHM_C3_AUTO_DEMOTE (1UL << 25) |
#define NHM_C1_AUTO_DEMOTE (1UL << 26) |
#define ATM_LNC_C6_AUTO_DEMOTE (1UL << 25) |
#define SNB_C1_AUTO_UNDEMOTE (1UL << 27) |
#define SNB_C3_AUTO_UNDEMOTE (1UL << 28) |
#define MSR_PLATFORM_INFO 0x000000ce |
#define MSR_MTRRcap 0x000000fe |
#define MSR_IA32_BBL_CR_CTL 0x00000119 |
#define MSR_IA32_BBL_CR_CTL3 0x0000011e |
#define MSR_IA32_SYSENTER_CS 0x00000174 |
#define MSR_IA32_SYSENTER_ESP 0x00000175 |
#define MSR_IA32_SYSENTER_EIP 0x00000176 |
#define MSR_IA32_MCG_CAP 0x00000179 |
#define MSR_IA32_MCG_STATUS 0x0000017a |
#define MSR_IA32_MCG_CTL 0x0000017b |
#define MSR_OFFCORE_RSP_0 0x000001a6 |
#define MSR_OFFCORE_RSP_1 0x000001a7 |
#define MSR_NHM_TURBO_RATIO_LIMIT 0x000001ad |
#define MSR_IVT_TURBO_RATIO_LIMIT 0x000001ae |
#define MSR_LBR_SELECT 0x000001c8 |
#define MSR_LBR_TOS 0x000001c9 |
#define MSR_LBR_NHM_FROM 0x00000680 |
#define MSR_LBR_NHM_TO 0x000006c0 |
#define MSR_LBR_CORE_FROM 0x00000040 |
#define MSR_LBR_CORE_TO 0x00000060 |
#define MSR_IA32_PEBS_ENABLE 0x000003f1 |
#define MSR_IA32_DS_AREA 0x00000600 |
#define MSR_IA32_PERF_CAPABILITIES 0x00000345 |
#define MSR_PEBS_LD_LAT_THRESHOLD 0x000003f6 |
#define MSR_MTRRfix64K_00000 0x00000250 |
#define MSR_MTRRfix16K_80000 0x00000258 |
#define MSR_MTRRfix16K_A0000 0x00000259 |
#define MSR_MTRRfix4K_C0000 0x00000268 |
#define MSR_MTRRfix4K_C8000 0x00000269 |
#define MSR_MTRRfix4K_D0000 0x0000026a |
#define MSR_MTRRfix4K_D8000 0x0000026b |
#define MSR_MTRRfix4K_E0000 0x0000026c |
#define MSR_MTRRfix4K_E8000 0x0000026d |
#define MSR_MTRRfix4K_F0000 0x0000026e |
#define MSR_MTRRfix4K_F8000 0x0000026f |
#define MSR_MTRRdefType 0x000002ff |
#define MSR_IA32_CR_PAT 0x00000277 |
#define MSR_IA32_DEBUGCTLMSR 0x000001d9 |
#define MSR_IA32_LASTBRANCHFROMIP 0x000001db |
#define MSR_IA32_LASTBRANCHTOIP 0x000001dc |
#define MSR_IA32_LASTINTFROMIP 0x000001dd |
#define MSR_IA32_LASTINTTOIP 0x000001de |
/* DEBUGCTLMSR bits (others vary by model): */ |
#define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */ |
#define DEBUGCTLMSR_BTF (1UL << 1) /* single-step on branches */ |
#define DEBUGCTLMSR_TR (1UL << 6) |
#define DEBUGCTLMSR_BTS (1UL << 7) |
#define DEBUGCTLMSR_BTINT (1UL << 8) |
#define DEBUGCTLMSR_BTS_OFF_OS (1UL << 9) |
#define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10) |
#define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11) |
#define MSR_IA32_POWER_CTL 0x000001fc |
#define MSR_IA32_MC0_CTL 0x00000400 |
#define MSR_IA32_MC0_STATUS 0x00000401 |
#define MSR_IA32_MC0_ADDR 0x00000402 |
#define MSR_IA32_MC0_MISC 0x00000403 |
/* C-state Residency Counters */ |
#define MSR_PKG_C3_RESIDENCY 0x000003f8 |
#define MSR_PKG_C6_RESIDENCY 0x000003f9 |
#define MSR_PKG_C7_RESIDENCY 0x000003fa |
#define MSR_CORE_C3_RESIDENCY 0x000003fc |
#define MSR_CORE_C6_RESIDENCY 0x000003fd |
#define MSR_CORE_C7_RESIDENCY 0x000003fe |
#define MSR_PKG_C2_RESIDENCY 0x0000060d |
#define MSR_PKG_C8_RESIDENCY 0x00000630 |
#define MSR_PKG_C9_RESIDENCY 0x00000631 |
#define MSR_PKG_C10_RESIDENCY 0x00000632 |
/* Run Time Average Power Limiting (RAPL) Interface */ |
#define MSR_RAPL_POWER_UNIT 0x00000606 |
#define MSR_PKG_POWER_LIMIT 0x00000610 |
#define MSR_PKG_ENERGY_STATUS 0x00000611 |
#define MSR_PKG_PERF_STATUS 0x00000613 |
#define MSR_PKG_POWER_INFO 0x00000614 |
#define MSR_DRAM_POWER_LIMIT 0x00000618 |
#define MSR_DRAM_ENERGY_STATUS 0x00000619 |
#define MSR_DRAM_PERF_STATUS 0x0000061b |
#define MSR_DRAM_POWER_INFO 0x0000061c |
#define MSR_PP0_POWER_LIMIT 0x00000638 |
#define MSR_PP0_ENERGY_STATUS 0x00000639 |
#define MSR_PP0_POLICY 0x0000063a |
#define MSR_PP0_PERF_STATUS 0x0000063b |
#define MSR_PP1_POWER_LIMIT 0x00000640 |
#define MSR_PP1_ENERGY_STATUS 0x00000641 |
#define MSR_PP1_POLICY 0x00000642 |
#define MSR_CORE_C1_RES 0x00000660 |
#define MSR_CC6_DEMOTION_POLICY_CONFIG 0x00000668 |
#define MSR_MC6_DEMOTION_POLICY_CONFIG 0x00000669 |
/* Hardware P state interface */ |
#define MSR_PPERF 0x0000064e |
#define MSR_PERF_LIMIT_REASONS 0x0000064f |
#define MSR_PM_ENABLE 0x00000770 |
#define MSR_HWP_CAPABILITIES 0x00000771 |
#define MSR_HWP_REQUEST_PKG 0x00000772 |
#define MSR_HWP_INTERRUPT 0x00000773 |
#define MSR_HWP_REQUEST 0x00000774 |
#define MSR_HWP_STATUS 0x00000777 |
/* CPUID.6.EAX */ |
#define HWP_BASE_BIT (1<<7) |
#define HWP_NOTIFICATIONS_BIT (1<<8) |
#define HWP_ACTIVITY_WINDOW_BIT (1<<9) |
#define HWP_ENERGY_PERF_PREFERENCE_BIT (1<<10) |
#define HWP_PACKAGE_LEVEL_REQUEST_BIT (1<<11) |
/* IA32_HWP_CAPABILITIES */ |
#define HWP_HIGHEST_PERF(x) (x & 0xff) |
#define HWP_GUARANTEED_PERF(x) ((x & (0xff << 8)) >>8) |
#define HWP_MOSTEFFICIENT_PERF(x) ((x & (0xff << 16)) >>16) |
#define HWP_LOWEST_PERF(x) ((x & (0xff << 24)) >>24) |
/* IA32_HWP_REQUEST field builders (value already shifted into place) */
#define HWP_MIN_PERF(x) (x & 0xff)
#define HWP_MAX_PERF(x) ((x & 0xff) << 8)
#define HWP_DESIRED_PERF(x) ((x & 0xff) << 16)
#define HWP_ENERGY_PERF_PREFERENCE(x) ((x & 0xff) << 24)
/*
 * Activity_Window is the 10-bit field at bits 41:32, so the mask is 0x3ff
 * (the previous 0xff3 was a typo).  The operand must be widened to
 * unsigned long long BEFORE the shift: left-shifting an int by >= 32 bits
 * is undefined behavior in C.  Same for the bit-42 Package_Control flag.
 */
#define HWP_ACTIVITY_WINDOW(x) ((unsigned long long)(x & 0x3ff) << 32)
#define HWP_PACKAGE_CONTROL(x) ((unsigned long long)(x & 0x1) << 42)
/* IA32_HWP_STATUS */ |
#define HWP_GUARANTEED_CHANGE(x) (x & 0x1) |
#define HWP_EXCURSION_TO_MINIMUM(x) (x & 0x4) |
/* IA32_HWP_INTERRUPT */ |
#define HWP_CHANGE_TO_GUARANTEED_INT(x) (x & 0x1) |
#define HWP_EXCURSION_TO_MINIMUM_INT(x) (x & 0x2) |
#define MSR_AMD64_MC0_MASK 0xc0010044 |
#define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x)) |
#define MSR_IA32_MCx_STATUS(x) (MSR_IA32_MC0_STATUS + 4*(x)) |
#define MSR_IA32_MCx_ADDR(x) (MSR_IA32_MC0_ADDR + 4*(x)) |
#define MSR_IA32_MCx_MISC(x) (MSR_IA32_MC0_MISC + 4*(x)) |
#define MSR_AMD64_MCx_MASK(x) (MSR_AMD64_MC0_MASK + (x)) |
/* These are consecutive and not in the normal 4er MCE bank block */ |
#define MSR_IA32_MC0_CTL2 0x00000280 |
#define MSR_IA32_MCx_CTL2(x) (MSR_IA32_MC0_CTL2 + (x)) |
#define MSR_P6_PERFCTR0 0x000000c1 |
#define MSR_P6_PERFCTR1 0x000000c2 |
#define MSR_P6_EVNTSEL0 0x00000186 |
#define MSR_P6_EVNTSEL1 0x00000187 |
#define MSR_KNC_PERFCTR0 0x00000020 |
#define MSR_KNC_PERFCTR1 0x00000021 |
#define MSR_KNC_EVNTSEL0 0x00000028 |
#define MSR_KNC_EVNTSEL1 0x00000029 |
/* Alternative perfctr range with full access. */ |
#define MSR_IA32_PMC0 0x000004c1 |
/* AMD64 MSRs. Not complete. See the architecture manual for a more |
complete list. */ |
#define MSR_AMD64_PATCH_LEVEL 0x0000008b |
#define MSR_AMD64_TSC_RATIO 0xc0000104 |
#define MSR_AMD64_NB_CFG 0xc001001f |
#define MSR_AMD64_PATCH_LOADER 0xc0010020 |
#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140 |
#define MSR_AMD64_OSVW_STATUS 0xc0010141 |
#define MSR_AMD64_LS_CFG 0xc0011020 |
#define MSR_AMD64_DC_CFG 0xc0011022 |
#define MSR_AMD64_BU_CFG2 0xc001102a |
#define MSR_AMD64_IBSFETCHCTL 0xc0011030 |
#define MSR_AMD64_IBSFETCHLINAD 0xc0011031 |
#define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032 |
#define MSR_AMD64_IBSFETCH_REG_COUNT 3 |
#define MSR_AMD64_IBSFETCH_REG_MASK ((1UL<<MSR_AMD64_IBSFETCH_REG_COUNT)-1) |
#define MSR_AMD64_IBSOPCTL 0xc0011033 |
#define MSR_AMD64_IBSOPRIP 0xc0011034 |
#define MSR_AMD64_IBSOPDATA 0xc0011035 |
#define MSR_AMD64_IBSOPDATA2 0xc0011036 |
#define MSR_AMD64_IBSOPDATA3 0xc0011037 |
#define MSR_AMD64_IBSDCLINAD 0xc0011038 |
#define MSR_AMD64_IBSDCPHYSAD 0xc0011039 |
#define MSR_AMD64_IBSOP_REG_COUNT 7 |
#define MSR_AMD64_IBSOP_REG_MASK ((1UL<<MSR_AMD64_IBSOP_REG_COUNT)-1) |
#define MSR_AMD64_IBSCTL 0xc001103a |
#define MSR_AMD64_IBSBRTARGET 0xc001103b |
#define MSR_AMD64_IBSOPDATA4 0xc001103d |
#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */ |
/* Fam 16h MSRs */ |
#define MSR_F16H_L2I_PERF_CTL 0xc0010230 |
#define MSR_F16H_L2I_PERF_CTR 0xc0010231 |
/* Fam 15h MSRs */ |
#define MSR_F15H_PERF_CTL 0xc0010200 |
#define MSR_F15H_PERF_CTR 0xc0010201 |
#define MSR_F15H_NB_PERF_CTL 0xc0010240 |
#define MSR_F15H_NB_PERF_CTR 0xc0010241 |
/* Fam 10h MSRs */ |
#define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058 |
#define FAM10H_MMIO_CONF_ENABLE (1<<0) |
#define FAM10H_MMIO_CONF_BUSRANGE_MASK 0xf |
#define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2 |
#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL |
#define FAM10H_MMIO_CONF_BASE_SHIFT 20 |
#define MSR_FAM10H_NODE_ID 0xc001100c |
/* K8 MSRs */ |
#define MSR_K8_TOP_MEM1 0xc001001a |
#define MSR_K8_TOP_MEM2 0xc001001d |
#define MSR_K8_SYSCFG 0xc0010010 |
#define MSR_K8_INT_PENDING_MSG 0xc0010055 |
/* C1E active bits in int pending message */ |
#define K8_INTP_C1E_ACTIVE_MASK 0x18000000 |
#define MSR_K8_TSEG_ADDR 0xc0010112 |
#define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */ |
#define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */ |
#define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */ |
/* K7 MSRs */ |
#define MSR_K7_EVNTSEL0 0xc0010000 |
#define MSR_K7_PERFCTR0 0xc0010004 |
#define MSR_K7_EVNTSEL1 0xc0010001 |
#define MSR_K7_PERFCTR1 0xc0010005 |
#define MSR_K7_EVNTSEL2 0xc0010002 |
#define MSR_K7_PERFCTR2 0xc0010006 |
#define MSR_K7_EVNTSEL3 0xc0010003 |
#define MSR_K7_PERFCTR3 0xc0010007 |
#define MSR_K7_CLK_CTL 0xc001001b |
#define MSR_K7_HWCR 0xc0010015 |
#define MSR_K7_FID_VID_CTL 0xc0010041 |
#define MSR_K7_FID_VID_STATUS 0xc0010042 |
/* K6 MSRs */ |
#define MSR_K6_WHCR 0xc0000082 |
#define MSR_K6_UWCCR 0xc0000085 |
#define MSR_K6_EPMR 0xc0000086 |
#define MSR_K6_PSOR 0xc0000087 |
#define MSR_K6_PFIR 0xc0000088 |
/* Centaur-Hauls/IDT defined MSRs. */ |
#define MSR_IDT_FCR1 0x00000107 |
#define MSR_IDT_FCR2 0x00000108 |
#define MSR_IDT_FCR3 0x00000109 |
#define MSR_IDT_FCR4 0x0000010a |
#define MSR_IDT_MCR0 0x00000110 |
#define MSR_IDT_MCR1 0x00000111 |
#define MSR_IDT_MCR2 0x00000112 |
#define MSR_IDT_MCR3 0x00000113 |
#define MSR_IDT_MCR4 0x00000114 |
#define MSR_IDT_MCR5 0x00000115 |
#define MSR_IDT_MCR6 0x00000116 |
#define MSR_IDT_MCR7 0x00000117 |
#define MSR_IDT_MCR_CTRL 0x00000120 |
/* VIA Cyrix defined MSRs*/ |
#define MSR_VIA_FCR 0x00001107 |
#define MSR_VIA_LONGHAUL 0x0000110a |
#define MSR_VIA_RNG 0x0000110b |
#define MSR_VIA_BCR2 0x00001147 |
/* Transmeta defined MSRs */ |
#define MSR_TMTA_LONGRUN_CTRL 0x80868010 |
#define MSR_TMTA_LONGRUN_FLAGS 0x80868011 |
#define MSR_TMTA_LRTI_READOUT 0x80868018 |
#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a |
/* Intel defined MSRs. */ |
#define MSR_IA32_P5_MC_ADDR 0x00000000 |
#define MSR_IA32_P5_MC_TYPE 0x00000001 |
#define MSR_IA32_TSC 0x00000010 |
#define MSR_IA32_PLATFORM_ID 0x00000017 |
#define MSR_IA32_EBL_CR_POWERON 0x0000002a |
#define MSR_EBC_FREQUENCY_ID 0x0000002c |
#define MSR_SMI_COUNT 0x00000034 |
#define MSR_IA32_FEATURE_CONTROL 0x0000003a |
#define MSR_IA32_TSC_ADJUST 0x0000003b |
#define MSR_IA32_BNDCFGS 0x00000d90 |
#define MSR_IA32_XSS 0x00000da0 |
#define FEATURE_CONTROL_LOCKED (1<<0) |
#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1<<1) |
#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2) |
#define MSR_IA32_APICBASE 0x0000001b |
#define MSR_IA32_APICBASE_BSP (1<<8) |
#define MSR_IA32_APICBASE_ENABLE (1<<11) |
#define MSR_IA32_APICBASE_BASE (0xfffff<<12) |
#define MSR_IA32_TSCDEADLINE 0x000006e0 |
#define MSR_IA32_UCODE_WRITE 0x00000079 |
#define MSR_IA32_UCODE_REV 0x0000008b |
#define MSR_IA32_PERF_STATUS 0x00000198 |
#define MSR_IA32_PERF_CTL 0x00000199 |
#define MSR_AMD_PSTATE_DEF_BASE 0xc0010064 |
#define MSR_AMD_PERF_STATUS 0xc0010063 |
#define MSR_AMD_PERF_CTL 0xc0010062 |
#define MSR_IA32_MPERF 0x000000e7 |
#define MSR_IA32_APERF 0x000000e8 |
#define MSR_IA32_THERM_CONTROL 0x0000019a |
#define MSR_IA32_THERM_INTERRUPT 0x0000019b |
#define THERM_INT_HIGH_ENABLE (1 << 0) |
#define THERM_INT_LOW_ENABLE (1 << 1) |
#define THERM_INT_PLN_ENABLE (1 << 24) |
#define MSR_IA32_THERM_STATUS 0x0000019c |
#define THERM_STATUS_PROCHOT (1 << 0) |
#define THERM_STATUS_POWER_LIMIT (1 << 10) |
#define MSR_THERM2_CTL 0x0000019d |
#define MSR_THERM2_CTL_TM_SELECT (1ULL << 16) |
#define MSR_IA32_MISC_ENABLE 0x000001a0 |
#define MSR_IA32_TEMPERATURE_TARGET 0x000001a2 |
#define MSR_MISC_PWR_MGMT 0x000001aa |
#define MSR_IA32_ENERGY_PERF_BIAS 0x000001b0 |
#define ENERGY_PERF_BIAS_PERFORMANCE 0 |
#define ENERGY_PERF_BIAS_NORMAL 6 |
#define ENERGY_PERF_BIAS_POWERSAVE 15 |
#define MSR_IA32_PACKAGE_THERM_STATUS 0x000001b1 |
#define PACKAGE_THERM_STATUS_PROCHOT (1 << 0) |
#define PACKAGE_THERM_STATUS_POWER_LIMIT (1 << 10) |
#define MSR_IA32_PACKAGE_THERM_INTERRUPT 0x000001b2 |
#define PACKAGE_THERM_INT_HIGH_ENABLE (1 << 0) |
#define PACKAGE_THERM_INT_LOW_ENABLE (1 << 1) |
#define PACKAGE_THERM_INT_PLN_ENABLE (1 << 24) |
/* Thermal Thresholds Support */ |
#define THERM_INT_THRESHOLD0_ENABLE (1 << 15) |
#define THERM_SHIFT_THRESHOLD0 8 |
#define THERM_MASK_THRESHOLD0 (0x7f << THERM_SHIFT_THRESHOLD0) |
#define THERM_INT_THRESHOLD1_ENABLE (1 << 23) |
#define THERM_SHIFT_THRESHOLD1 16 |
#define THERM_MASK_THRESHOLD1 (0x7f << THERM_SHIFT_THRESHOLD1) |
#define THERM_STATUS_THRESHOLD0 (1 << 6) |
#define THERM_LOG_THRESHOLD0 (1 << 7) |
#define THERM_STATUS_THRESHOLD1 (1 << 8) |
#define THERM_LOG_THRESHOLD1 (1 << 9) |
/* MISC_ENABLE bits: architectural */ |
#define MSR_IA32_MISC_ENABLE_FAST_STRING_BIT 0 |
#define MSR_IA32_MISC_ENABLE_FAST_STRING (1ULL << MSR_IA32_MISC_ENABLE_FAST_STRING_BIT) |
#define MSR_IA32_MISC_ENABLE_TCC_BIT 1 |
#define MSR_IA32_MISC_ENABLE_TCC (1ULL << MSR_IA32_MISC_ENABLE_TCC_BIT) |
#define MSR_IA32_MISC_ENABLE_EMON_BIT 7 |
#define MSR_IA32_MISC_ENABLE_EMON (1ULL << MSR_IA32_MISC_ENABLE_EMON_BIT) |
#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL_BIT 11 |
#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL (1ULL << MSR_IA32_MISC_ENABLE_BTS_UNAVAIL_BIT) |
#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL_BIT 12 |
#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL (1ULL << MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL_BIT) |
#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP_BIT 16 |
#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP (1ULL << MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP_BIT) |
#define MSR_IA32_MISC_ENABLE_MWAIT_BIT 18 |
#define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << MSR_IA32_MISC_ENABLE_MWAIT_BIT) |
#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT 22 |
#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID (1ULL << MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) |
#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT 23 |
#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT) |
#define MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT 34 |
#define MSR_IA32_MISC_ENABLE_XD_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT) |
/* MISC_ENABLE bits: model-specific, meaning may vary from core to core */ |
#define MSR_IA32_MISC_ENABLE_X87_COMPAT_BIT 2 |
#define MSR_IA32_MISC_ENABLE_X87_COMPAT (1ULL << MSR_IA32_MISC_ENABLE_X87_COMPAT_BIT) |
#define MSR_IA32_MISC_ENABLE_TM1_BIT 3 |
#define MSR_IA32_MISC_ENABLE_TM1 (1ULL << MSR_IA32_MISC_ENABLE_TM1_BIT) |
#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE_BIT 4 |
#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE_BIT) |
#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE_BIT 6 |
#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE_BIT) |
#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK_BIT 8 |
#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK (1ULL << MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK_BIT) |
#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT 9 |
#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) |
#define MSR_IA32_MISC_ENABLE_FERR_BIT 10 |
#define MSR_IA32_MISC_ENABLE_FERR (1ULL << MSR_IA32_MISC_ENABLE_FERR_BIT) |
#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX_BIT 10 |
#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX (1ULL << MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX_BIT) |
#define MSR_IA32_MISC_ENABLE_TM2_BIT 13 |
#define MSR_IA32_MISC_ENABLE_TM2 (1ULL << MSR_IA32_MISC_ENABLE_TM2_BIT) |
#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE_BIT 19 |
#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE_BIT) |
#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK_BIT 20 |
#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK (1ULL << MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK_BIT) |
#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT_BIT 24 |
#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT (1ULL << MSR_IA32_MISC_ENABLE_L1D_CONTEXT_BIT) |
#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE_BIT 37 |
#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE_BIT) |
#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT 38 |
#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT) |
#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT 39 |
#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT) |
#define MSR_IA32_TSC_DEADLINE 0x000006E0 |
/* P4/Xeon+ specific */ |
#define MSR_IA32_MCG_EAX 0x00000180 |
#define MSR_IA32_MCG_EBX 0x00000181 |
#define MSR_IA32_MCG_ECX 0x00000182 |
#define MSR_IA32_MCG_EDX 0x00000183 |
#define MSR_IA32_MCG_ESI 0x00000184 |
#define MSR_IA32_MCG_EDI 0x00000185 |
#define MSR_IA32_MCG_EBP 0x00000186 |
#define MSR_IA32_MCG_ESP 0x00000187 |
#define MSR_IA32_MCG_EFLAGS 0x00000188 |
#define MSR_IA32_MCG_EIP 0x00000189 |
#define MSR_IA32_MCG_RESERVED 0x0000018a |
/* Pentium IV performance counter MSRs */ |
#define MSR_P4_BPU_PERFCTR0 0x00000300 |
#define MSR_P4_BPU_PERFCTR1 0x00000301 |
#define MSR_P4_BPU_PERFCTR2 0x00000302 |
#define MSR_P4_BPU_PERFCTR3 0x00000303 |
#define MSR_P4_MS_PERFCTR0 0x00000304 |
#define MSR_P4_MS_PERFCTR1 0x00000305 |
#define MSR_P4_MS_PERFCTR2 0x00000306 |
#define MSR_P4_MS_PERFCTR3 0x00000307 |
#define MSR_P4_FLAME_PERFCTR0 0x00000308 |
#define MSR_P4_FLAME_PERFCTR1 0x00000309 |
#define MSR_P4_FLAME_PERFCTR2 0x0000030a |
#define MSR_P4_FLAME_PERFCTR3 0x0000030b |
#define MSR_P4_IQ_PERFCTR0 0x0000030c |
#define MSR_P4_IQ_PERFCTR1 0x0000030d |
#define MSR_P4_IQ_PERFCTR2 0x0000030e |
#define MSR_P4_IQ_PERFCTR3 0x0000030f |
#define MSR_P4_IQ_PERFCTR4 0x00000310 |
#define MSR_P4_IQ_PERFCTR5 0x00000311 |
#define MSR_P4_BPU_CCCR0 0x00000360 |
#define MSR_P4_BPU_CCCR1 0x00000361 |
#define MSR_P4_BPU_CCCR2 0x00000362 |
#define MSR_P4_BPU_CCCR3 0x00000363 |
#define MSR_P4_MS_CCCR0 0x00000364 |
#define MSR_P4_MS_CCCR1 0x00000365 |
#define MSR_P4_MS_CCCR2 0x00000366 |
#define MSR_P4_MS_CCCR3 0x00000367 |
#define MSR_P4_FLAME_CCCR0 0x00000368 |
#define MSR_P4_FLAME_CCCR1 0x00000369 |
#define MSR_P4_FLAME_CCCR2 0x0000036a |
#define MSR_P4_FLAME_CCCR3 0x0000036b |
#define MSR_P4_IQ_CCCR0 0x0000036c |
#define MSR_P4_IQ_CCCR1 0x0000036d |
#define MSR_P4_IQ_CCCR2 0x0000036e |
#define MSR_P4_IQ_CCCR3 0x0000036f |
#define MSR_P4_IQ_CCCR4 0x00000370 |
#define MSR_P4_IQ_CCCR5 0x00000371 |
#define MSR_P4_ALF_ESCR0 0x000003ca |
#define MSR_P4_ALF_ESCR1 0x000003cb |
#define MSR_P4_BPU_ESCR0 0x000003b2 |
#define MSR_P4_BPU_ESCR1 0x000003b3 |
#define MSR_P4_BSU_ESCR0 0x000003a0 |
#define MSR_P4_BSU_ESCR1 0x000003a1 |
#define MSR_P4_CRU_ESCR0 0x000003b8 |
#define MSR_P4_CRU_ESCR1 0x000003b9 |
#define MSR_P4_CRU_ESCR2 0x000003cc |
#define MSR_P4_CRU_ESCR3 0x000003cd |
#define MSR_P4_CRU_ESCR4 0x000003e0 |
#define MSR_P4_CRU_ESCR5 0x000003e1 |
#define MSR_P4_DAC_ESCR0 0x000003a8 |
#define MSR_P4_DAC_ESCR1 0x000003a9 |
#define MSR_P4_FIRM_ESCR0 0x000003a4 |
#define MSR_P4_FIRM_ESCR1 0x000003a5 |
#define MSR_P4_FLAME_ESCR0 0x000003a6 |
#define MSR_P4_FLAME_ESCR1 0x000003a7 |
#define MSR_P4_FSB_ESCR0 0x000003a2 |
#define MSR_P4_FSB_ESCR1 0x000003a3 |
#define MSR_P4_IQ_ESCR0 0x000003ba |
#define MSR_P4_IQ_ESCR1 0x000003bb |
#define MSR_P4_IS_ESCR0 0x000003b4 |
#define MSR_P4_IS_ESCR1 0x000003b5 |
#define MSR_P4_ITLB_ESCR0 0x000003b6 |
#define MSR_P4_ITLB_ESCR1 0x000003b7 |
#define MSR_P4_IX_ESCR0 0x000003c8 |
#define MSR_P4_IX_ESCR1 0x000003c9 |
#define MSR_P4_MOB_ESCR0 0x000003aa |
#define MSR_P4_MOB_ESCR1 0x000003ab |
#define MSR_P4_MS_ESCR0 0x000003c0 |
#define MSR_P4_MS_ESCR1 0x000003c1 |
#define MSR_P4_PMH_ESCR0 0x000003ac |
#define MSR_P4_PMH_ESCR1 0x000003ad |
#define MSR_P4_RAT_ESCR0 0x000003bc |
#define MSR_P4_RAT_ESCR1 0x000003bd |
#define MSR_P4_SAAT_ESCR0 0x000003ae |
#define MSR_P4_SAAT_ESCR1 0x000003af |
#define MSR_P4_SSU_ESCR0 0x000003be |
#define MSR_P4_SSU_ESCR1 0x000003bf /* guess: not in manual */ |
#define MSR_P4_TBPU_ESCR0 0x000003c2 |
#define MSR_P4_TBPU_ESCR1 0x000003c3 |
#define MSR_P4_TC_ESCR0 0x000003c4 |
#define MSR_P4_TC_ESCR1 0x000003c5 |
#define MSR_P4_U2L_ESCR0 0x000003b0 |
#define MSR_P4_U2L_ESCR1 0x000003b1 |
#define MSR_P4_PEBS_MATRIX_VERT 0x000003f2 |
/* Intel Core-based CPU performance counters */ |
#define MSR_CORE_PERF_FIXED_CTR0 0x00000309 |
#define MSR_CORE_PERF_FIXED_CTR1 0x0000030a |
#define MSR_CORE_PERF_FIXED_CTR2 0x0000030b |
#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x0000038d |
#define MSR_CORE_PERF_GLOBAL_STATUS 0x0000038e |
#define MSR_CORE_PERF_GLOBAL_CTRL 0x0000038f |
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x00000390 |
/* Geode defined MSRs */ |
#define MSR_GEODE_BUSCONT_CONF0 0x00001900 |
/* Intel VT MSRs */ |
#define MSR_IA32_VMX_BASIC 0x00000480 |
#define MSR_IA32_VMX_PINBASED_CTLS 0x00000481 |
#define MSR_IA32_VMX_PROCBASED_CTLS 0x00000482 |
#define MSR_IA32_VMX_EXIT_CTLS 0x00000483 |
#define MSR_IA32_VMX_ENTRY_CTLS 0x00000484 |
#define MSR_IA32_VMX_MISC 0x00000485 |
#define MSR_IA32_VMX_CR0_FIXED0 0x00000486 |
#define MSR_IA32_VMX_CR0_FIXED1 0x00000487 |
#define MSR_IA32_VMX_CR4_FIXED0 0x00000488 |
#define MSR_IA32_VMX_CR4_FIXED1 0x00000489 |
#define MSR_IA32_VMX_VMCS_ENUM 0x0000048a |
#define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b |
#define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c |
#define MSR_IA32_VMX_TRUE_PINBASED_CTLS 0x0000048d |
#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x0000048e |
#define MSR_IA32_VMX_TRUE_EXIT_CTLS 0x0000048f |
#define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x00000490 |
#define MSR_IA32_VMX_VMFUNC 0x00000491 |
/* VMX_BASIC bits and bitmasks */ |
#define VMX_BASIC_VMCS_SIZE_SHIFT 32 |
#define VMX_BASIC_TRUE_CTLS (1ULL << 55) |
#define VMX_BASIC_64 0x0001000000000000LLU |
#define VMX_BASIC_MEM_TYPE_SHIFT 50 |
#define VMX_BASIC_MEM_TYPE_MASK 0x003c000000000000LLU |
#define VMX_BASIC_MEM_TYPE_WB 6LLU |
#define VMX_BASIC_INOUT 0x0040000000000000LLU |
/* MSR_IA32_VMX_MISC bits */ |
#define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29) |
#define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE 0x1F |
/* AMD-V MSRs */ |
#define MSR_VM_CR 0xc0010114 |
#define MSR_VM_IGNNE 0xc0010115 |
#define MSR_VM_HSAVE_PA 0xc0010117 |
#endif /* _ASM_X86_MSR_INDEX_H */ |
/* ==== file: drivers/include/uapi/asm/msr.h (new file, 15 lines) ==== */
#ifndef _UAPI_ASM_X86_MSR_H |
#define _UAPI_ASM_X86_MSR_H |
#include <asm/msr-index.h> |
#ifndef __ASSEMBLY__ |
#include <linux/types.h> |
#include <linux/ioctl.h> |
#define X86_IOC_RDMSR_REGS _IOWR('c', 0xA0, __u32[8]) |
#define X86_IOC_WRMSR_REGS _IOWR('c', 0xA1, __u32[8]) |
#endif /* __ASSEMBLY__ */ |
#endif /* _UAPI_ASM_X86_MSR_H */ |
/* ==== file: drivers/include/uapi/asm/page_32_types.h (new file, 58 lines) ==== */
#ifndef _ASM_X86_PAGE_32_DEFS_H |
#define _ASM_X86_PAGE_32_DEFS_H |
#include <linux/const.h> |
/* |
* This handles the memory map. |
* |
* A __PAGE_OFFSET of 0xC0000000 means that the kernel has |
* a virtual address space of one gigabyte, which limits the |
* amount of physical memory you can use to about 950MB. |
* |
* If you want more physical memory than this then see the CONFIG_HIGHMEM4G |
* and CONFIG_HIGHMEM64G options in the kernel configuration. |
*/ |
#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) |
#define __START_KERNEL_map __PAGE_OFFSET |
#define THREAD_SIZE_ORDER 1 |
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) |
#define DOUBLEFAULT_STACK 1 |
#define NMI_STACK 0 |
#define DEBUG_STACK 0 |
#define MCE_STACK 0 |
#define N_EXCEPTION_STACKS 1 |
#ifdef CONFIG_X86_PAE |
/* 44=32+12, the limit we can fit into an unsigned long pfn */ |
#define __PHYSICAL_MASK_SHIFT 44 |
#define __VIRTUAL_MASK_SHIFT 32 |
#else /* !CONFIG_X86_PAE */ |
#define __PHYSICAL_MASK_SHIFT 32 |
#define __VIRTUAL_MASK_SHIFT 32 |
#endif /* CONFIG_X86_PAE */ |
/* |
* Kernel image size is limited to 512 MB (see in arch/x86/kernel/head_32.S) |
*/ |
#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024) |
#ifndef __ASSEMBLY__ |
/* |
* This much address space is reserved for vmalloc() and iomap() |
* as well as fixmap mappings. |
*/ |
extern unsigned int __VMALLOC_RESERVE; |
extern int sysctl_legacy_va_layout; |
extern void find_low_pfn_range(void); |
extern void setup_bootmem_allocator(void); |
#endif /* !__ASSEMBLY__ */ |
#endif /* _ASM_X86_PAGE_32_DEFS_H */ |
/* ==== file: drivers/include/uapi/asm/page_types.h (new file, 68 lines) ==== */
#ifndef _ASM_X86_PAGE_DEFS_H |
#define _ASM_X86_PAGE_DEFS_H |
#include <linux/const.h> |
#include <linux/types.h> |
/* PAGE_SHIFT determines the page size */ |
#define PAGE_SHIFT 12 |
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) |
#define PAGE_MASK (~(PAGE_SIZE-1)) |
#define __PHYSICAL_MASK ((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1)) |
#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1) |
/* Cast PAGE_MASK to a signed type so that it is sign-extended if |
virtual addresses are 32-bits but physical addresses are larger |
(ie, 32-bit PAE). */ |
#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK) |
#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT) |
#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1)) |
#define HPAGE_SHIFT PMD_SHIFT |
#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT) |
#define HPAGE_MASK (~(HPAGE_SIZE - 1)) |
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) |
#define HUGE_MAX_HSTATE 2 |
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) |
#define VM_DATA_DEFAULT_FLAGS \ |
(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \ |
VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) |
#define __PHYSICAL_START ALIGN(CONFIG_PHYSICAL_START, \ |
CONFIG_PHYSICAL_ALIGN) |
#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START) |
#ifdef CONFIG_X86_64 |
#include <asm/page_64_types.h> |
#else |
#include <asm/page_32_types.h> |
#endif /* CONFIG_X86_64 */ |
#ifndef __ASSEMBLY__ |
extern int devmem_is_allowed(unsigned long pagenr); |
extern unsigned long max_low_pfn_mapped; |
extern unsigned long max_pfn_mapped; |
static inline phys_addr_t get_max_mapped(void) |
{ |
return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT; |
} |
bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn); |
extern unsigned long init_memory_mapping(unsigned long start, |
unsigned long end); |
extern void initmem_init(void); |
#endif /* !__ASSEMBLY__ */ |
#endif /* _ASM_X86_PAGE_DEFS_H */ |
/* ==== file: drivers/include/uapi/asm/posix_types.h (new file, 5 lines) ==== */
# ifdef CONFIG_X86_32 |
# include <asm/posix_types_32.h> |
# else |
# include <asm/posix_types_64.h> |
# endif |
/* ==== file: drivers/include/uapi/asm/processor-flags.h (new file, 153 lines) ==== */
#ifndef _UAPI_ASM_X86_PROCESSOR_FLAGS_H |
#define _UAPI_ASM_X86_PROCESSOR_FLAGS_H |
/* Various flags defined: can be included from assembler. */ |
#include <linux/const.h> |
/* |
* EFLAGS bits |
*/ |
#define X86_EFLAGS_CF_BIT 0 /* Carry Flag */ |
#define X86_EFLAGS_CF _BITUL(X86_EFLAGS_CF_BIT) |
#define X86_EFLAGS_FIXED_BIT 1 /* Bit 1 - always on */ |
#define X86_EFLAGS_FIXED _BITUL(X86_EFLAGS_FIXED_BIT) |
#define X86_EFLAGS_PF_BIT 2 /* Parity Flag */ |
#define X86_EFLAGS_PF _BITUL(X86_EFLAGS_PF_BIT) |
#define X86_EFLAGS_AF_BIT 4 /* Auxiliary carry Flag */ |
#define X86_EFLAGS_AF _BITUL(X86_EFLAGS_AF_BIT) |
#define X86_EFLAGS_ZF_BIT 6 /* Zero Flag */ |
#define X86_EFLAGS_ZF _BITUL(X86_EFLAGS_ZF_BIT) |
#define X86_EFLAGS_SF_BIT 7 /* Sign Flag */ |
#define X86_EFLAGS_SF _BITUL(X86_EFLAGS_SF_BIT) |
#define X86_EFLAGS_TF_BIT 8 /* Trap Flag */ |
#define X86_EFLAGS_TF _BITUL(X86_EFLAGS_TF_BIT) |
#define X86_EFLAGS_IF_BIT 9 /* Interrupt Flag */ |
#define X86_EFLAGS_IF _BITUL(X86_EFLAGS_IF_BIT) |
#define X86_EFLAGS_DF_BIT 10 /* Direction Flag */ |
#define X86_EFLAGS_DF _BITUL(X86_EFLAGS_DF_BIT) |
#define X86_EFLAGS_OF_BIT 11 /* Overflow Flag */ |
#define X86_EFLAGS_OF _BITUL(X86_EFLAGS_OF_BIT) |
#define X86_EFLAGS_IOPL_BIT 12 /* I/O Privilege Level (2 bits) */ |
#define X86_EFLAGS_IOPL (_AC(3,UL) << X86_EFLAGS_IOPL_BIT) |
#define X86_EFLAGS_NT_BIT 14 /* Nested Task */ |
#define X86_EFLAGS_NT _BITUL(X86_EFLAGS_NT_BIT) |
#define X86_EFLAGS_RF_BIT 16 /* Resume Flag */ |
#define X86_EFLAGS_RF _BITUL(X86_EFLAGS_RF_BIT) |
#define X86_EFLAGS_VM_BIT 17 /* Virtual Mode */ |
#define X86_EFLAGS_VM _BITUL(X86_EFLAGS_VM_BIT) |
/* Note: AC was previously defined twice here; the duplicate (benign
 * macro redefinition) has been removed. */
#define X86_EFLAGS_AC_BIT 18 /* Alignment Check/Access Control */
#define X86_EFLAGS_AC _BITUL(X86_EFLAGS_AC_BIT)
#define X86_EFLAGS_VIF_BIT 19 /* Virtual Interrupt Flag */
#define X86_EFLAGS_VIF _BITUL(X86_EFLAGS_VIF_BIT)
#define X86_EFLAGS_VIP_BIT 20 /* Virtual Interrupt Pending */
#define X86_EFLAGS_VIP _BITUL(X86_EFLAGS_VIP_BIT)
#define X86_EFLAGS_ID_BIT 21 /* CPUID detection */
#define X86_EFLAGS_ID _BITUL(X86_EFLAGS_ID_BIT)
/* |
* Basic CPU control in CR0 |
*/ |
#define X86_CR0_PE_BIT 0 /* Protection Enable */ |
#define X86_CR0_PE _BITUL(X86_CR0_PE_BIT) |
#define X86_CR0_MP_BIT 1 /* Monitor Coprocessor */ |
#define X86_CR0_MP _BITUL(X86_CR0_MP_BIT) |
#define X86_CR0_EM_BIT 2 /* Emulation */ |
#define X86_CR0_EM _BITUL(X86_CR0_EM_BIT) |
#define X86_CR0_TS_BIT 3 /* Task Switched */ |
#define X86_CR0_TS _BITUL(X86_CR0_TS_BIT) |
#define X86_CR0_ET_BIT 4 /* Extension Type */ |
#define X86_CR0_ET _BITUL(X86_CR0_ET_BIT) |
#define X86_CR0_NE_BIT 5 /* Numeric Error */ |
#define X86_CR0_NE _BITUL(X86_CR0_NE_BIT) |
#define X86_CR0_WP_BIT 16 /* Write Protect */ |
#define X86_CR0_WP _BITUL(X86_CR0_WP_BIT) |
#define X86_CR0_AM_BIT 18 /* Alignment Mask */ |
#define X86_CR0_AM _BITUL(X86_CR0_AM_BIT) |
#define X86_CR0_NW_BIT 29 /* Not Write-through */ |
#define X86_CR0_NW _BITUL(X86_CR0_NW_BIT) |
#define X86_CR0_CD_BIT 30 /* Cache Disable */ |
#define X86_CR0_CD _BITUL(X86_CR0_CD_BIT) |
#define X86_CR0_PG_BIT 31 /* Paging */ |
#define X86_CR0_PG _BITUL(X86_CR0_PG_BIT) |
/* |
* Paging options in CR3 |
*/ |
#define X86_CR3_PWT_BIT 3 /* Page Write Through */ |
#define X86_CR3_PWT _BITUL(X86_CR3_PWT_BIT) |
#define X86_CR3_PCD_BIT 4 /* Page Cache Disable */ |
#define X86_CR3_PCD _BITUL(X86_CR3_PCD_BIT) |
#define X86_CR3_PCID_MASK _AC(0x00000fff,UL) /* PCID Mask */ |
/* |
* Intel CPU features in CR4 |
*/ |
#define X86_CR4_VME_BIT 0 /* enable vm86 extensions */ |
#define X86_CR4_VME _BITUL(X86_CR4_VME_BIT) |
#define X86_CR4_PVI_BIT 1 /* virtual interrupts flag enable */ |
#define X86_CR4_PVI _BITUL(X86_CR4_PVI_BIT) |
#define X86_CR4_TSD_BIT 2 /* disable time stamp at ipl 3 */ |
#define X86_CR4_TSD _BITUL(X86_CR4_TSD_BIT) |
#define X86_CR4_DE_BIT 3 /* enable debugging extensions */ |
#define X86_CR4_DE _BITUL(X86_CR4_DE_BIT) |
#define X86_CR4_PSE_BIT 4 /* enable page size extensions */ |
#define X86_CR4_PSE _BITUL(X86_CR4_PSE_BIT) |
#define X86_CR4_PAE_BIT 5 /* enable physical address extensions */ |
#define X86_CR4_PAE _BITUL(X86_CR4_PAE_BIT) |
#define X86_CR4_MCE_BIT 6 /* Machine check enable */ |
#define X86_CR4_MCE _BITUL(X86_CR4_MCE_BIT) |
#define X86_CR4_PGE_BIT 7 /* enable global pages */ |
#define X86_CR4_PGE _BITUL(X86_CR4_PGE_BIT) |
#define X86_CR4_PCE_BIT 8 /* enable performance counters at ipl 3 */ |
#define X86_CR4_PCE _BITUL(X86_CR4_PCE_BIT) |
#define X86_CR4_OSFXSR_BIT 9 /* enable fast FPU save and restore */ |
#define X86_CR4_OSFXSR _BITUL(X86_CR4_OSFXSR_BIT) |
#define X86_CR4_OSXMMEXCPT_BIT 10 /* enable unmasked SSE exceptions */ |
#define X86_CR4_OSXMMEXCPT _BITUL(X86_CR4_OSXMMEXCPT_BIT) |
#define X86_CR4_VMXE_BIT 13 /* enable VMX virtualization */ |
#define X86_CR4_VMXE _BITUL(X86_CR4_VMXE_BIT) |
#define X86_CR4_SMXE_BIT 14 /* enable safer mode (TXT) */ |
#define X86_CR4_SMXE _BITUL(X86_CR4_SMXE_BIT) |
#define X86_CR4_FSGSBASE_BIT 16 /* enable RDWRFSGS support */ |
#define X86_CR4_FSGSBASE _BITUL(X86_CR4_FSGSBASE_BIT) |
#define X86_CR4_PCIDE_BIT 17 /* enable PCID support */ |
#define X86_CR4_PCIDE _BITUL(X86_CR4_PCIDE_BIT) |
#define X86_CR4_OSXSAVE_BIT 18 /* enable xsave and xrestore */ |
#define X86_CR4_OSXSAVE _BITUL(X86_CR4_OSXSAVE_BIT) |
#define X86_CR4_SMEP_BIT 20 /* enable SMEP support */ |
#define X86_CR4_SMEP _BITUL(X86_CR4_SMEP_BIT) |
#define X86_CR4_SMAP_BIT 21 /* enable SMAP support */ |
#define X86_CR4_SMAP _BITUL(X86_CR4_SMAP_BIT) |
/* |
* x86-64 Task Priority Register, CR8 |
*/ |
#define X86_CR8_TPR _AC(0x0000000f,UL) /* task priority register */ |
/* |
* AMD and Transmeta use MSRs for configuration; see <asm/msr-index.h> |
*/ |
/* |
* NSC/Cyrix CPU configuration register indexes |
*/ |
#define CX86_PCR0 0x20 |
#define CX86_GCR 0xb8 |
#define CX86_CCR0 0xc0 |
#define CX86_CCR1 0xc1 |
#define CX86_CCR2 0xc2 |
#define CX86_CCR3 0xc3 |
#define CX86_CCR4 0xe8 |
#define CX86_CCR5 0xe9 |
#define CX86_CCR6 0xea |
#define CX86_CCR7 0xeb |
#define CX86_PCR1 0xf0 |
#define CX86_DIR0 0xfe |
#define CX86_DIR1 0xff |
#define CX86_ARR_BASE 0xc4 |
#define CX86_RCR_BASE 0xdc |
#endif /* _UAPI_ASM_X86_PROCESSOR_FLAGS_H */ |
/drivers/include/uapi/asm/ptrace.h |
---|
0,0 → 1,262 |
#ifndef _ASM_X86_PTRACE_H |
#define _ASM_X86_PTRACE_H |
#include <asm/segment.h> |
#include <asm/page_types.h> |
#include <uapi/asm/ptrace.h> |
#ifndef __ASSEMBLY__ |
#ifdef __i386__ |
struct pt_regs {
/*
 * Register save layout on the 32-bit kernel stack; field order is
 * the push order of the entry code, lowest stack address first.
 */
unsigned long bx;
unsigned long cx;
unsigned long dx;
unsigned long si;
unsigned long di;
unsigned long bp;
unsigned long ax;
unsigned long ds;
unsigned long es;
unsigned long fs;
unsigned long gs;
/* NOTE(review): orig_ax presumably holds the pre-entry ax value
 * (e.g. the syscall number) -- confirm against the entry code. */
unsigned long orig_ax;
unsigned long ip;
unsigned long cs;
unsigned long flags;
/* sp/ss: see kernel_stack_pointer() below -- traps from kernel mode
 * on 32-bit do not save these two fields. */
unsigned long sp;
unsigned long ss;
};
#else /* __i386__ */ |
struct pt_regs {
/*
 * x86-64 register save layout.  The inline comments below mark how
 * far the syscall fast path saves, and where the CPU exception
 * frame begins.
 */
unsigned long r15;
unsigned long r14;
unsigned long r13;
unsigned long r12;
unsigned long bp;
unsigned long bx;
/* arguments: non interrupts/non tracing syscalls only save up to here*/
unsigned long r11;
unsigned long r10;
unsigned long r9;
unsigned long r8;
unsigned long ax;
unsigned long cx;
unsigned long dx;
unsigned long si;
unsigned long di;
unsigned long orig_ax;
/* end of arguments */
/* cpu exception frame or undefined */
unsigned long ip;
unsigned long cs;
unsigned long flags;
unsigned long sp;
unsigned long ss;
/* top of stack page */
};
#endif /* !__i386__ */ |
#ifdef CONFIG_PARAVIRT |
#include <asm/paravirt_types.h> |
#endif |
struct cpuinfo_x86; |
struct task_struct; |
extern unsigned long profile_pc(struct pt_regs *regs); |
#define profile_pc profile_pc |
extern unsigned long |
convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs); |
extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, |
int error_code, int si_code); |
extern unsigned long syscall_trace_enter_phase1(struct pt_regs *, u32 arch); |
extern long syscall_trace_enter_phase2(struct pt_regs *, u32 arch, |
unsigned long phase1_result); |
extern long syscall_trace_enter(struct pt_regs *); |
extern void syscall_trace_leave(struct pt_regs *); |
/* Return value of the interrupted call: ax (eax/rax) in both the
 * 32- and 64-bit pt_regs layouts above. */
static inline unsigned long regs_return_value(struct pt_regs *regs)
{
return regs->ax;
}
/* |
* user_mode_vm(regs) determines whether a register set came from user mode. |
* This is true if V8086 mode was enabled OR if the register set was from |
* protected mode with RPL-3 CS value. This tricky test checks that with |
* one comparison. Many places in the kernel can bypass this full check |
* if they have already ruled out V8086 mode, so user_mode(regs) can be used. |
*/ |
/*
 * Fast user-mode check: true when the saved CS selector has RPL 3.
 * On 32-bit this does NOT account for V8086 mode (see the comment
 * above) -- callers that have not ruled out V8086 must use
 * user_mode_vm() instead.
 */
static inline int user_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
#else
return !!(regs->cs & 3);
#endif
}
/*
 * Full user-mode check: true for protected-mode RPL-3 CS *or* V8086
 * mode.  ORing the VM flag into the RPL bits lets one >= USER_RPL
 * comparison cover both cases (the "tricky test" described in the
 * comment above).  On 64-bit there is no V86, so this is user_mode().
 */
static inline int user_mode_vm(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
USER_RPL;
#else
return user_mode(regs);
#endif
}
/* True when the register set was saved while running in virtual-8086
 * mode (EFLAGS.VM set).  Long mode has no V86 support, so this is
 * constant 0 on 64-bit. */
static inline int v8086_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
return (regs->flags & X86_VM_MASK);
#else
return 0; /* No V86 mode support in long mode */
#endif
}
#ifdef CONFIG_X86_64 |
/*
 * True when a user-mode register set came from a 64-bit (long mode)
 * code segment rather than a compat/32-bit one, decided purely by
 * the saved CS selector.
 */
static inline bool user_64bit_mode(struct pt_regs *regs)
{
#ifndef CONFIG_PARAVIRT
/*
 * On non-paravirt systems, this is the only long mode CPL 3
 * selector. We do not allow long mode selectors in the LDT.
 */
return regs->cs == __USER_CS;
#else
/* Headers are too twisted for this to go in paravirt.h. */
return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
#endif
}
#define current_user_stack_pointer() this_cpu_read(old_rsp) |
/* ia32 vs. x32 difference */ |
#define compat_user_stack_pointer() \ |
(test_thread_flag(TIF_IA32) \ |
? current_pt_regs()->sp \ |
: this_cpu_read(old_rsp)) |
#endif |
#ifdef CONFIG_X86_32 |
extern unsigned long kernel_stack_pointer(struct pt_regs *regs); |
#else |
/* On x86-64 the saved sp field is always valid (unlike 32-bit, which
 * needs the out-of-line helper declared above for traps from kernel
 * mode), so just return it. */
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
return regs->sp;
}
#endif |
#define GET_IP(regs) ((regs)->ip) |
#define GET_FP(regs) ((regs)->bp) |
#define GET_USP(regs) ((regs)->sp) |
#include <asm-generic/ptrace.h> |
/* Query offset/name of register from its name/offset */ |
extern int regs_query_register_offset(const char *name); |
extern const char *regs_query_register_name(unsigned int offset); |
#define MAX_REG_OFFSET (offsetof(struct pt_regs, ss)) |
/** |
* regs_get_register() - get register value from its offset |
* @regs: pt_regs from which register value is gotten. |
* @offset: offset number of the register. |
* |
* regs_get_register returns the value of a register. The @offset is the |
* offset of the register in struct pt_regs address which specified by @regs. |
* If @offset is bigger than MAX_REG_OFFSET, this returns 0. |
*/ |
static inline unsigned long regs_get_register(struct pt_regs *regs,
unsigned int offset)
{
/* Fail soft on a bogus offset rather than reading past pt_regs. */
if (unlikely(offset > MAX_REG_OFFSET))
return 0;
#ifdef CONFIG_X86_32
/*
 * Traps from the kernel do not save sp and ss.
 * Use the helper function to retrieve sp.
 */
if (offset == offsetof(struct pt_regs, sp) &&
regs->cs == __KERNEL_CS)
return kernel_stack_pointer(regs);
#endif
/* pt_regs is a flat sequence of unsigned longs, so the byte offset
 * indexes the register slot directly. */
return *(unsigned long *)((unsigned long)regs + offset);
}
/** |
* regs_within_kernel_stack() - check the address in the stack |
* @regs: pt_regs which contains kernel stack pointer. |
* @addr: address which is checked. |
* |
* regs_within_kernel_stack() checks @addr is within the kernel stack page(s). |
* If @addr is within the kernel stack, it returns true. If not, returns false. |
*/ |
/* Compare THREAD_SIZE-aligned bases: @addr is "within" the kernel
 * stack iff it falls in the same THREAD_SIZE-sized, THREAD_SIZE-
 * aligned region as the saved kernel stack pointer. */
static inline int regs_within_kernel_stack(struct pt_regs *regs,
unsigned long addr)
{
return ((addr & ~(THREAD_SIZE - 1)) ==
(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
}
/** |
* regs_get_kernel_stack_nth() - get Nth entry of the stack |
* @regs: pt_regs which contains kernel stack pointer. |
* @n: stack entry number. |
* |
* regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which |
* is specified by @regs. If the @n th entry is NOT in the kernel stack, |
* this returns 0. |
*/ |
static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
						      unsigned int n)
{
	unsigned long *slot = (unsigned long *)kernel_stack_pointer(regs);

	slot += n;
	/* Bounds-check the computed slot before dereferencing it. */
	if (!regs_within_kernel_stack(regs, (unsigned long)slot))
		return 0;
	return *slot;
}
#define arch_has_single_step() (1) |
#ifdef CONFIG_X86_DEBUGCTLMSR |
#define arch_has_block_step() (1) |
#else |
#define arch_has_block_step() (boot_cpu_data.x86 >= 6) |
#endif |
#define ARCH_HAS_USER_SINGLE_STEP_INFO |
/* |
* When hitting ptrace_stop(), we cannot return using SYSRET because |
* that does not restore the full CPU state, only a minimal set. The |
* ptracer can change arbitrary register values, which is usually okay |
* because the usual ptrace stops run off the signal delivery path which |
* forces IRET; however, ptrace_event() stops happen in arbitrary places |
* in the kernel and don't force IRET path. |
* |
* So force IRET path after a ptrace stop. |
*/ |
#define arch_ptrace_stop_needed(code, info) \ |
({ \ |
set_thread_flag(TIF_NOTIFY_RESUME); \ |
false; \ |
}) |
struct user_desc; |
extern int do_get_thread_area(struct task_struct *p, int idx, |
struct user_desc __user *info); |
extern int do_set_thread_area(struct task_struct *p, int idx, |
struct user_desc __user *info, int can_allocate); |
#endif /* !__ASSEMBLY__ */ |
#endif /* _ASM_X86_PTRACE_H */ |
/drivers/include/uapi/asm/segment.h |
---|
0,0 → 1,265 |
#ifndef _ASM_X86_SEGMENT_H |
#define _ASM_X86_SEGMENT_H |
#include <linux/const.h> |
/* Constructor for a conventional segment GDT (or LDT) entry */ |
/* This is a macro so it can be used in initializers */ |
/*
 * Descriptor bit placement performed below:
 *   base[31:24]                     -> descriptor bits 63:56
 *   flags & 0xf0ff (access byte in bits 7:0, AVL/L/D/G nibble in
 *                   bits 15:12)     -> descriptor bits 47:40, 55:52
 *   limit[19:16]                    -> descriptor bits 51:48
 *   base[23:0]                      -> descriptor bits 39:16
 *   limit[15:0]                     -> descriptor bits 15:0
 */
#define GDT_ENTRY(flags, base, limit) \
((((base) & _AC(0xff000000,ULL)) << (56-24)) | \
(((flags) & _AC(0x0000f0ff,ULL)) << 40) | \
(((limit) & _AC(0x000f0000,ULL)) << (48-16)) | \
(((base) & _AC(0x00ffffff,ULL)) << 16) | \
(((limit) & _AC(0x0000ffff,ULL))))
/* Simple and small GDT entries for booting only */ |
#define GDT_ENTRY_BOOT_CS 2 |
#define __BOOT_CS (GDT_ENTRY_BOOT_CS * 8) |
#define GDT_ENTRY_BOOT_DS (GDT_ENTRY_BOOT_CS + 1) |
#define __BOOT_DS (GDT_ENTRY_BOOT_DS * 8) |
#define GDT_ENTRY_BOOT_TSS (GDT_ENTRY_BOOT_CS + 2) |
#define __BOOT_TSS (GDT_ENTRY_BOOT_TSS * 8) |
#define SEGMENT_RPL_MASK 0x3 /* |
* Bottom two bits of selector give the ring |
* privilege level |
*/ |
#define SEGMENT_TI_MASK 0x4 /* Bit 2 is table indicator (LDT/GDT) */ |
#define USER_RPL 0x3 /* User mode is privilege level 3 */ |
#define SEGMENT_LDT 0x4 /* LDT segment has TI set... */ |
#define SEGMENT_GDT 0x0 /* ... GDT has it cleared */ |
#ifdef CONFIG_X86_32 |
/* |
* The layout of the per-CPU GDT under Linux: |
* |
* 0 - null |
* 1 - reserved |
* 2 - reserved |
* 3 - reserved |
* |
* 4 - unused <==== new cacheline |
* 5 - unused |
* |
* ------- start of TLS (Thread-Local Storage) segments: |
* |
* 6 - TLS segment #1 [ glibc's TLS segment ] |
* 7 - TLS segment #2 [ Wine's %fs Win32 segment ] |
* 8 - TLS segment #3 |
* 9 - reserved |
* 10 - reserved |
* 11 - reserved |
* |
* ------- start of kernel segments: |
* |
* 12 - kernel code segment <==== new cacheline |
* 13 - kernel data segment |
* 14 - default user CS |
* 15 - default user DS |
* 16 - TSS |
* 17 - LDT |
* 18 - PNPBIOS support (16->32 gate) |
* 19 - PNPBIOS support |
* 20 - PNPBIOS support |
* 21 - PNPBIOS support |
* 22 - PNPBIOS support |
* 23 - APM BIOS support |
* 24 - APM BIOS support |
* 25 - APM BIOS support |
* |
* 26 - ESPFIX small SS |
* 27 - per-cpu [ offset to per-cpu data area ] |
* 28 - stack_canary-20 [ for stack protector ] |
* 29 - unused |
* 30 - unused |
* 31 - TSS for double fault handler |
*/ |
#define GDT_ENTRY_TLS_MIN 6 |
#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1) |
#define GDT_ENTRY_DEFAULT_USER_CS 14 |
#define GDT_ENTRY_DEFAULT_USER_DS 15 |
#define GDT_ENTRY_KERNEL_BASE (12) |
#define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0) |
#define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1) |
#define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4) |
#define GDT_ENTRY_LDT (GDT_ENTRY_KERNEL_BASE+5) |
#define GDT_ENTRY_PNPBIOS_BASE (GDT_ENTRY_KERNEL_BASE+6) |
#define GDT_ENTRY_APMBIOS_BASE (GDT_ENTRY_KERNEL_BASE+11) |
#define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE+14) |
#define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS*8) |
#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE+15) |
#ifdef CONFIG_SMP |
#define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8) |
#else |
#define __KERNEL_PERCPU 0 |
#endif |
#define GDT_ENTRY_STACK_CANARY (GDT_ENTRY_KERNEL_BASE+16) |
#ifdef CONFIG_CC_STACKPROTECTOR |
#define __KERNEL_STACK_CANARY (GDT_ENTRY_STACK_CANARY*8) |
#else |
#define __KERNEL_STACK_CANARY 0 |
#endif |
#define GDT_ENTRY_DOUBLEFAULT_TSS 31 |
/* |
* The GDT has 32 entries |
*/ |
#define GDT_ENTRIES 32 |
/* The PnP BIOS entries in the GDT */ |
#define GDT_ENTRY_PNPBIOS_CS32 (GDT_ENTRY_PNPBIOS_BASE + 0) |
#define GDT_ENTRY_PNPBIOS_CS16 (GDT_ENTRY_PNPBIOS_BASE + 1) |
#define GDT_ENTRY_PNPBIOS_DS (GDT_ENTRY_PNPBIOS_BASE + 2) |
#define GDT_ENTRY_PNPBIOS_TS1 (GDT_ENTRY_PNPBIOS_BASE + 3) |
#define GDT_ENTRY_PNPBIOS_TS2 (GDT_ENTRY_PNPBIOS_BASE + 4) |
/* The PnP BIOS selectors */ |
#define PNP_CS32 (GDT_ENTRY_PNPBIOS_CS32 * 8) /* segment for calling fn */ |
#define PNP_CS16 (GDT_ENTRY_PNPBIOS_CS16 * 8) /* code segment for BIOS */ |
#define PNP_DS (GDT_ENTRY_PNPBIOS_DS * 8) /* data segment for BIOS */ |
#define PNP_TS1 (GDT_ENTRY_PNPBIOS_TS1 * 8) /* transfer data segment */ |
#define PNP_TS2 (GDT_ENTRY_PNPBIOS_TS2 * 8) /* another data segment */ |
/* |
* Matching rules for certain types of segments. |
*/ |
/* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */ |
#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8) |
#else |
#include <asm/cache.h> |
#define GDT_ENTRY_KERNEL32_CS 1 |
#define GDT_ENTRY_KERNEL_CS 2 |
#define GDT_ENTRY_KERNEL_DS 3 |
#define __KERNEL32_CS (GDT_ENTRY_KERNEL32_CS * 8) |
/* |
* we cannot use the same code segment descriptor for user and kernel |
* -- not even in the long flat mode, because of different DPL /kkeil |
* The segment offset needs to contain a RPL. Grr. -AK |
* GDT layout to get 64bit syscall right (sysret hardcodes gdt offsets) |
*/ |
#define GDT_ENTRY_DEFAULT_USER32_CS 4 |
#define GDT_ENTRY_DEFAULT_USER_DS 5 |
#define GDT_ENTRY_DEFAULT_USER_CS 6 |
#define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3) |
#define __USER32_DS __USER_DS |
#define GDT_ENTRY_TSS 8 /* needs two entries */ |
#define GDT_ENTRY_LDT 10 /* needs two entries */ |
#define GDT_ENTRY_TLS_MIN 12 |
#define GDT_ENTRY_TLS_MAX 14 |
#define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */ |
#define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3) |
/* TLS indexes for 64bit - hardcoded in arch_prctl */ |
#define FS_TLS 0 |
#define GS_TLS 1 |
#define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3) |
#define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3) |
#define GDT_ENTRIES 16 |
#endif |
#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8) |
#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8) |
#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3) |
#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3) |
#ifndef CONFIG_PARAVIRT |
#define get_kernel_rpl() 0 |
#endif |
#define IDT_ENTRIES 256 |
#define NUM_EXCEPTION_VECTORS 32 |
/* Bitmask of exception vectors which push an error code on the stack */ |
#define EXCEPTION_ERRCODE_MASK 0x00027d00 |
#define GDT_SIZE (GDT_ENTRIES * 8) |
#define GDT_ENTRY_TLS_ENTRIES 3 |
#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8) |
#ifdef __KERNEL__ |
#ifndef __ASSEMBLY__ |
extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][2+2+5]; |
#ifdef CONFIG_TRACING |
#define trace_early_idt_handlers early_idt_handlers |
#endif |
/* |
* Load a segment. Fall back on loading the zero |
* segment if something goes wrong.. |
*/ |
/*
 * The mov at label 1 attempts the segment load.  If it faults, the
 * exception table entry (_ASM_EXTABLE(1b, 2b)) redirects execution to
 * label 2 in the .fixup section, which zeroes the value and jumps back
 * to retry -- i.e. falls back to loading the null selector.
 * (Comments cannot live inside the macro body itself: a non-continued
 * line would end the #define.)
 */
#define loadsegment(seg, value) \
do { \
unsigned short __val = (value); \
\
asm volatile(" \n" \
"1: movl %k0,%%" #seg " \n" \
\
".section .fixup,\"ax\" \n" \
"2: xorl %k0,%k0 \n" \
" jmp 1b \n" \
".previous \n" \
\
_ASM_EXTABLE(1b, 2b) \
\
: "+r" (__val) : : "memory"); \
} while (0)
/* |
* Save a segment register away |
*/ |
#define savesegment(seg, value) \ |
asm("mov %%" #seg ",%0":"=r" (value) : : "memory") |
/* |
* x86_32 user gs accessors. |
*/ |
#ifdef CONFIG_X86_32 |
#ifdef CONFIG_X86_32_LAZY_GS |
#define get_user_gs(regs) (u16)({unsigned long v; savesegment(gs, v); v;}) |
#define set_user_gs(regs, v) loadsegment(gs, (unsigned long)(v)) |
#define task_user_gs(tsk) ((tsk)->thread.gs) |
#define lazy_save_gs(v) savesegment(gs, (v)) |
#define lazy_load_gs(v) loadsegment(gs, (v)) |
#else /* X86_32_LAZY_GS */ |
#define get_user_gs(regs) (u16)((regs)->gs) |
#define set_user_gs(regs, v) do { (regs)->gs = (v); } while (0) |
#define task_user_gs(tsk) (task_pt_regs(tsk)->gs) |
#define lazy_save_gs(v) do { } while (0) |
#define lazy_load_gs(v) do { } while (0) |
#endif /* X86_32_LAZY_GS */ |
#endif /* X86_32 */ |
/*
 * Return the size in bytes of @segment via LSL (load segment limit):
 * the limit is the highest addressable offset, hence the +1.
 * NOTE(review): if LSL does not accept the selector, ZF is clear and
 * __limit is left unmodified/uninitialized -- callers are presumably
 * expected to pass only valid selectors; confirm.
 */
static inline unsigned long get_limit(unsigned long segment)
{
unsigned long __limit;
asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
return __limit + 1;
}
#endif /* !__ASSEMBLY__ */ |
#endif /* __KERNEL__ */ |
#endif /* _ASM_X86_SEGMENT_H */ |
/drivers/include/uapi/asm/sigcontext.h |
---|
0,0 → 1,221 |
#ifndef _UAPI_ASM_X86_SIGCONTEXT_H |
#define _UAPI_ASM_X86_SIGCONTEXT_H |
#include <linux/compiler.h> |
#include <linux/types.h> |
#define FP_XSTATE_MAGIC1 0x46505853U |
#define FP_XSTATE_MAGIC2 0x46505845U |
#define FP_XSTATE_MAGIC2_SIZE sizeof(FP_XSTATE_MAGIC2) |
/* |
* bytes 464..511 in the current 512byte layout of fxsave/fxrstor frame |
* are reserved for SW usage. On cpu's supporting xsave/xrstor, these bytes |
* are used to extended the fpstate pointer in the sigcontext, which now |
* includes the extended state information along with fpstate information. |
* |
* Presence of FP_XSTATE_MAGIC1 at the beginning of this SW reserved |
* area and FP_XSTATE_MAGIC2 at the end of memory layout |
* (extended_size - FP_XSTATE_MAGIC2_SIZE) indicates the presence of the |
* extended state information in the memory layout pointed by the fpstate |
* pointer in sigcontext. |
*/ |
struct _fpx_sw_bytes { |
__u32 magic1; /* FP_XSTATE_MAGIC1 */ |
__u32 extended_size; /* total size of the layout referred by |
* fpstate pointer in the sigcontext. |
*/ |
__u64 xstate_bv; |
/* feature bit mask (including fp/sse/extended |
* state) that is present in the memory |
* layout. |
*/ |
__u32 xstate_size; /* actual xsave state size, based on the |
* features saved in the layout. |
* 'extended_size' will be greater than |
* 'xstate_size'. |
*/ |
__u32 padding[7]; /* for future use. */ |
}; |
#ifdef __i386__ |
/* |
* As documented in the iBCS2 standard.. |
* |
* The first part of "struct _fpstate" is just the normal i387 |
* hardware setup, the extra "status" word is used to save the |
* coprocessor status word before entering the handler. |
* |
* Pentium III FXSR, SSE support |
* Gareth Hughes <gareth@valinux.com>, May 2000 |
* |
* The FPU state data structure has had to grow to accommodate the |
* extended FPU state required by the Streaming SIMD Extensions. |
* There is no documented standard to accomplish this at the moment. |
*/ |
struct _fpreg { |
unsigned short significand[4]; |
unsigned short exponent; |
}; |
struct _fpxreg { |
unsigned short significand[4]; |
unsigned short exponent; |
unsigned short padding[3]; |
}; |
struct _xmmreg { |
unsigned long element[4]; |
}; |
struct _fpstate { |
/* Regular FPU environment */ |
unsigned long cw; |
unsigned long sw; |
unsigned long tag; |
unsigned long ipoff; |
unsigned long cssel; |
unsigned long dataoff; |
unsigned long datasel; |
struct _fpreg _st[8]; |
unsigned short status; |
unsigned short magic; /* 0xffff = regular FPU data only */ |
/* FXSR FPU environment */ |
unsigned long _fxsr_env[6]; /* FXSR FPU env is ignored */ |
unsigned long mxcsr; |
unsigned long reserved; |
struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */ |
struct _xmmreg _xmm[8]; |
unsigned long padding1[44]; |
union { |
unsigned long padding2[12]; |
struct _fpx_sw_bytes sw_reserved; /* represents the extended |
* state info */ |
}; |
}; |
#define X86_FXSR_MAGIC 0x0000 |
#ifndef __KERNEL__ |
/* |
* User-space might still rely on the old definition: |
*/ |
struct sigcontext { |
unsigned short gs, __gsh; |
unsigned short fs, __fsh; |
unsigned short es, __esh; |
unsigned short ds, __dsh; |
unsigned long edi; |
unsigned long esi; |
unsigned long ebp; |
unsigned long esp; |
unsigned long ebx; |
unsigned long edx; |
unsigned long ecx; |
unsigned long eax; |
unsigned long trapno; |
unsigned long err; |
unsigned long eip; |
unsigned short cs, __csh; |
unsigned long eflags; |
unsigned long esp_at_signal; |
unsigned short ss, __ssh; |
struct _fpstate __user *fpstate; |
unsigned long oldmask; |
unsigned long cr2; |
}; |
#endif /* !__KERNEL__ */ |
#else /* __i386__ */ |
/* FXSAVE frame */ |
/* Note: reserved1/2 may someday contain valuable data. Always save/restore |
them when you change signal frames. */ |
struct _fpstate { |
__u16 cwd; |
__u16 swd; |
__u16 twd; /* Note this is not the same as the |
32bit/x87/FSAVE twd */ |
__u16 fop; |
__u64 rip; |
__u64 rdp; |
__u32 mxcsr; |
__u32 mxcsr_mask; |
__u32 st_space[32]; /* 8*16 bytes for each FP-reg */ |
__u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg */ |
__u32 reserved2[12]; |
union { |
__u32 reserved3[12]; |
struct _fpx_sw_bytes sw_reserved; /* represents the extended |
* state information */ |
}; |
}; |
#ifndef __KERNEL__ |
/* |
* User-space might still rely on the old definition: |
*/ |
struct sigcontext { |
__u64 r8; |
__u64 r9; |
__u64 r10; |
__u64 r11; |
__u64 r12; |
__u64 r13; |
__u64 r14; |
__u64 r15; |
__u64 rdi; |
__u64 rsi; |
__u64 rbp; |
__u64 rbx; |
__u64 rdx; |
__u64 rax; |
__u64 rcx; |
__u64 rsp; |
__u64 rip; |
__u64 eflags; /* RFLAGS */ |
__u16 cs; |
__u16 gs; |
__u16 fs; |
__u16 __pad0; |
__u64 err; |
__u64 trapno; |
__u64 oldmask; |
__u64 cr2; |
struct _fpstate __user *fpstate; /* zero when no FPU context */ |
#ifdef __ILP32__ |
__u32 __fpstate_pad; |
#endif |
__u64 reserved1[8]; |
}; |
#endif /* !__KERNEL__ */ |
#endif /* !__i386__ */ |
struct _xsave_hdr { |
__u64 xstate_bv; |
__u64 reserved1[2]; |
__u64 reserved2[5]; |
}; |
struct _ymmh_state { |
/* 16 * 16 bytes for each YMMH-reg */ |
__u32 ymmh_space[64]; |
}; |
/* |
* Extended state pointed by the fpstate pointer in the sigcontext. |
* In addition to the fpstate, information encoded in the xstate_hdr |
* indicates the presence of other extended state information |
* supported by the processor and OS. |
*/ |
struct _xstate { |
struct _fpstate fpstate; |
struct _xsave_hdr xstate_hdr; |
struct _ymmh_state ymmh; |
/* new processor state extensions go here */ |
}; |
#endif /* _UAPI_ASM_X86_SIGCONTEXT_H */ |
/drivers/include/uapi/asm/vm86.h |
---|
0,0 → 1,129 |
#ifndef _UAPI_ASM_X86_VM86_H |
#define _UAPI_ASM_X86_VM86_H |
/* |
* I'm guessing at the VIF/VIP flag usage, but hope that this is how |
* the Pentium uses them. Linux will return from vm86 mode when both |
* VIF and VIP is set. |
* |
* On a Pentium, we could probably optimize the virtual flags directly |
* in the eflags register instead of doing it "by hand" in vflags... |
* |
* Linus |
*/ |
#include <asm/processor-flags.h> |
#define BIOSSEG 0x0f000 |
#define CPU_086 0 |
#define CPU_186 1 |
#define CPU_286 2 |
#define CPU_386 3 |
#define CPU_486 4 |
#define CPU_586 5 |
/* |
* Return values for the 'vm86()' system call |
*/ |
#define VM86_TYPE(retval) ((retval) & 0xff) |
#define VM86_ARG(retval) ((retval) >> 8) |
#define VM86_SIGNAL 0 /* return due to signal */ |
#define VM86_UNKNOWN 1 /* unhandled GP fault |
- IO-instruction or similar */ |
#define VM86_INTx 2 /* int3/int x instruction (ARG = x) */ |
#define VM86_STI 3 /* sti/popf/iret instruction enabled |
virtual interrupts */ |
/* |
* Additional return values when invoking new vm86() |
*/ |
#define VM86_PICRETURN 4 /* return due to pending PIC request */ |
#define VM86_TRAP 6 /* return due to DOS-debugger request */ |
/* |
* function codes when invoking new vm86() |
*/ |
#define VM86_PLUS_INSTALL_CHECK 0 |
#define VM86_ENTER 1 |
#define VM86_ENTER_NO_BYPASS 2 |
#define VM86_REQUEST_IRQ 3 |
#define VM86_FREE_IRQ 4 |
#define VM86_GET_IRQ_BITS 5 |
#define VM86_GET_AND_RESET_IRQ 6 |
/* |
* This is the stack-layout seen by the user space program when we have |
* done a translation of "SAVE_ALL" from vm86 mode. The real kernel layout |
* is 'kernel_vm86_regs' (see below). |
*/ |
struct vm86_regs {
/*
 * normal regs, with special meaning for the segment descriptors..
 */
long ebx;
long ecx;
long edx;
long esi;
long edi;
long ebp;
long eax;
/* NOTE(review): the __null_* slots presumably hold null selectors
 * while in v86 mode (the real v86 segments are at the end of the
 * struct) -- confirm against the vm86 entry code. */
long __null_ds;
long __null_es;
long __null_fs;
long __null_gs;
long orig_eax;
long eip;
/* 16-bit selectors are padded to 32 bits by the __*sh halves. */
unsigned short cs, __csh;
long eflags;
long esp;
unsigned short ss, __ssh;
/*
 * these are specific to v86 mode:
 */
unsigned short es, __esh;
unsigned short ds, __dsh;
unsigned short fs, __fsh;
unsigned short gs, __gsh;
};
struct revectored_struct { |
unsigned long __map[8]; /* 256 bits */ |
}; |
struct vm86_struct { |
struct vm86_regs regs; |
unsigned long flags; |
unsigned long screen_bitmap; |
unsigned long cpu_type; |
struct revectored_struct int_revectored; |
struct revectored_struct int21_revectored; |
}; |
/* |
* flags masks |
*/ |
#define VM86_SCREEN_BITMAP 0x0001 |
struct vm86plus_info_struct { |
unsigned long force_return_for_pic:1; |
unsigned long vm86dbg_active:1; /* for debugger */ |
unsigned long vm86dbg_TFpendig:1; /* for debugger */ |
unsigned long unused:28; |
unsigned long is_vm86pus:1; /* for vm86 internal use */ |
unsigned char vm86dbg_intxxtab[32]; /* for debugger */ |
}; |
struct vm86plus_struct { |
struct vm86_regs regs; |
unsigned long flags; |
unsigned long screen_bitmap; |
unsigned long cpu_type; |
struct revectored_struct int_revectored; |
struct revectored_struct int21_revectored; |
struct vm86plus_info_struct vm86plus; |
}; |
#endif /* _UAPI_ASM_X86_VM86_H */ |
/drivers/include/uapi/asm-generic/bitsperlong.h |
---|
0,0 → 1,15 |
#ifndef _UAPI__ASM_GENERIC_BITS_PER_LONG |
#define _UAPI__ASM_GENERIC_BITS_PER_LONG |
/* |
* There seems to be no way of detecting this automatically from user |
* space, so 64 bit architectures should override this in their |
* bitsperlong.h. In particular, an architecture that supports |
* both 32 and 64 bit user space must not rely on CONFIG_64BIT |
* to decide it, but rather check a compiler provided macro. |
*/ |
#ifndef __BITS_PER_LONG |
#define __BITS_PER_LONG 32 |
#endif |
#endif /* _UAPI__ASM_GENERIC_BITS_PER_LONG */ |
/drivers/include/uapi/asm-generic/errno-base.h |
---|
0,0 → 1,39 |
#ifndef _ASM_GENERIC_ERRNO_BASE_H |
#define _ASM_GENERIC_ERRNO_BASE_H |
#define EPERM 1 /* Operation not permitted */ |
#define ENOENT 2 /* No such file or directory */ |
#define ESRCH 3 /* No such process */ |
#define EINTR 4 /* Interrupted system call */ |
#define EIO 5 /* I/O error */ |
#define ENXIO 6 /* No such device or address */ |
#define E2BIG 7 /* Argument list too long */ |
#define ENOEXEC 8 /* Exec format error */ |
#define EBADF 9 /* Bad file number */ |
#define ECHILD 10 /* No child processes */ |
#define EAGAIN 11 /* Try again */ |
#define ENOMEM 12 /* Out of memory */ |
#define EACCES 13 /* Permission denied */ |
#define EFAULT 14 /* Bad address */ |
#define ENOTBLK 15 /* Block device required */ |
#define EBUSY 16 /* Device or resource busy */ |
#define EEXIST 17 /* File exists */ |
#define EXDEV 18 /* Cross-device link */ |
#define ENODEV 19 /* No such device */ |
#define ENOTDIR 20 /* Not a directory */ |
#define EISDIR 21 /* Is a directory */ |
#define EINVAL 22 /* Invalid argument */ |
#define ENFILE 23 /* File table overflow */ |
#define EMFILE 24 /* Too many open files */ |
#define ENOTTY 25 /* Not a typewriter */ |
#define ETXTBSY 26 /* Text file busy */ |
#define EFBIG 27 /* File too large */ |
#define ENOSPC 28 /* No space left on device */ |
#define ESPIPE 29 /* Illegal seek */ |
#define EROFS 30 /* Read-only file system */ |
#define EMLINK 31 /* Too many links */ |
#define EPIPE 32 /* Broken pipe */ |
#define EDOM 33 /* Math argument out of domain of func */ |
#define ERANGE 34 /* Math result not representable */ |
#endif |
/drivers/include/uapi/asm-generic/errno.h |
---|
0,0 → 1,113 |
#ifndef _ASM_GENERIC_ERRNO_H |
#define _ASM_GENERIC_ERRNO_H |
#include <asm-generic/errno-base.h> |
#define EDEADLK 35 /* Resource deadlock would occur */ |
#define ENAMETOOLONG 36 /* File name too long */ |
#define ENOLCK 37 /* No record locks available */ |
#define ENOSYS 38 /* Function not implemented */ |
#define ENOTEMPTY 39 /* Directory not empty */ |
#define ELOOP 40 /* Too many symbolic links encountered */ |
#define EWOULDBLOCK EAGAIN /* Operation would block */ |
#define ENOMSG 42 /* No message of desired type */ |
#define EIDRM 43 /* Identifier removed */ |
#define ECHRNG 44 /* Channel number out of range */ |
#define EL2NSYNC 45 /* Level 2 not synchronized */ |
#define EL3HLT 46 /* Level 3 halted */ |
#define EL3RST 47 /* Level 3 reset */ |
#define ELNRNG 48 /* Link number out of range */ |
#define EUNATCH 49 /* Protocol driver not attached */ |
#define ENOCSI 50 /* No CSI structure available */ |
#define EL2HLT 51 /* Level 2 halted */ |
#define EBADE 52 /* Invalid exchange */ |
#define EBADR 53 /* Invalid request descriptor */ |
#define EXFULL 54 /* Exchange full */ |
#define ENOANO 55 /* No anode */ |
#define EBADRQC 56 /* Invalid request code */ |
#define EBADSLT 57 /* Invalid slot */ |
#define EDEADLOCK EDEADLK |
#define EBFONT 59 /* Bad font file format */ |
#define ENOSTR 60 /* Device not a stream */ |
#define ENODATA 61 /* No data available */ |
#define ETIME 62 /* Timer expired */ |
#define ENOSR 63 /* Out of streams resources */ |
#define ENONET 64 /* Machine is not on the network */ |
#define ENOPKG 65 /* Package not installed */ |
#define EREMOTE 66 /* Object is remote */ |
#define ENOLINK 67 /* Link has been severed */ |
#define EADV 68 /* Advertise error */ |
#define ESRMNT 69 /* Srmount error */ |
#define ECOMM 70 /* Communication error on send */ |
#define EPROTO 71 /* Protocol error */ |
#define EMULTIHOP 72 /* Multihop attempted */ |
#define EDOTDOT 73 /* RFS specific error */ |
#define EBADMSG 74 /* Not a data message */ |
#define EOVERFLOW 75 /* Value too large for defined data type */ |
#define ENOTUNIQ 76 /* Name not unique on network */ |
#define EBADFD 77 /* File descriptor in bad state */ |
#define EREMCHG 78 /* Remote address changed */ |
#define ELIBACC 79 /* Can not access a needed shared library */ |
#define ELIBBAD 80 /* Accessing a corrupted shared library */ |
#define ELIBSCN 81 /* .lib section in a.out corrupted */ |
#define ELIBMAX 82 /* Attempting to link in too many shared libraries */ |
#define ELIBEXEC 83 /* Cannot exec a shared library directly */ |
#define EILSEQ 84 /* Illegal byte sequence */ |
#define ERESTART 85 /* Interrupted system call should be restarted */ |
#define ESTRPIPE 86 /* Streams pipe error */ |
#define EUSERS 87 /* Too many users */ |
#define ENOTSOCK 88 /* Socket operation on non-socket */ |
#define EDESTADDRREQ 89 /* Destination address required */ |
#define EMSGSIZE 90 /* Message too long */ |
#define EPROTOTYPE 91 /* Protocol wrong type for socket */ |
#define ENOPROTOOPT 92 /* Protocol not available */ |
#define EPROTONOSUPPORT 93 /* Protocol not supported */ |
#define ESOCKTNOSUPPORT 94 /* Socket type not supported */ |
#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */ |
#define EPFNOSUPPORT 96 /* Protocol family not supported */ |
#define EAFNOSUPPORT 97 /* Address family not supported by protocol */ |
#define EADDRINUSE 98 /* Address already in use */ |
#define EADDRNOTAVAIL 99 /* Cannot assign requested address */ |
#define ENETDOWN 100 /* Network is down */ |
#define ENETUNREACH 101 /* Network is unreachable */ |
#define ENETRESET 102 /* Network dropped connection because of reset */ |
#define ECONNABORTED 103 /* Software caused connection abort */ |
#define ECONNRESET 104 /* Connection reset by peer */ |
#define ENOBUFS 105 /* No buffer space available */ |
#define EISCONN 106 /* Transport endpoint is already connected */ |
#define ENOTCONN 107 /* Transport endpoint is not connected */ |
#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */ |
#define ETOOMANYREFS 109 /* Too many references: cannot splice */ |
#define ETIMEDOUT 110 /* Connection timed out */ |
#define ECONNREFUSED 111 /* Connection refused */ |
#define EHOSTDOWN 112 /* Host is down */ |
#define EHOSTUNREACH 113 /* No route to host */ |
#define EALREADY 114 /* Operation already in progress */ |
#define EINPROGRESS 115 /* Operation now in progress */ |
#define ESTALE 116 /* Stale file handle */ |
#define EUCLEAN 117 /* Structure needs cleaning */ |
#define ENOTNAM 118 /* Not a XENIX named type file */ |
#define ENAVAIL 119 /* No XENIX semaphores available */ |
#define EISNAM 120 /* Is a named type file */ |
#define EREMOTEIO 121 /* Remote I/O error */ |
#define EDQUOT 122 /* Quota exceeded */ |
#define ENOMEDIUM 123 /* No medium found */ |
#define EMEDIUMTYPE 124 /* Wrong medium type */ |
#define ECANCELED 125 /* Operation Canceled */ |
#define ENOKEY 126 /* Required key not available */ |
#define EKEYEXPIRED 127 /* Key has expired */ |
#define EKEYREVOKED 128 /* Key has been revoked */ |
#define EKEYREJECTED 129 /* Key was rejected by service */ |
/* for robust mutexes */ |
#define EOWNERDEAD 130 /* Owner died */ |
#define ENOTRECOVERABLE 131 /* State not recoverable */ |
#define ERFKILL 132 /* Operation not possible due to RF-kill */ |
#define EHWPOISON 133 /* Memory page has hardware error */ |
#endif |
/* ==== drivers/include/uapi/asm-generic/int-l64.h (new file, 34 lines) ==== */
/* |
* asm-generic/int-l64.h |
* |
* Integer declarations for architectures which use "long" |
* for 64-bit types. |
*/ |
#ifndef _UAPI_ASM_GENERIC_INT_L64_H |
#define _UAPI_ASM_GENERIC_INT_L64_H |
#include <asm/bitsperlong.h> |
#ifndef __ASSEMBLY__ |
/* |
* __xx is ok: it doesn't pollute the POSIX namespace. Use these in the |
* header files exported to user space |
*/ |
typedef __signed__ char __s8; |
typedef unsigned char __u8; |
typedef __signed__ short __s16; |
typedef unsigned short __u16; |
typedef __signed__ int __s32; |
typedef unsigned int __u32; |
typedef __signed__ long __s64; |
typedef unsigned long __u64; |
#endif /* __ASSEMBLY__ */ |
#endif /* _UAPI_ASM_GENERIC_INT_L64_H */ |
/* ==== drivers/include/uapi/asm-generic/int-ll64.h (new file, 39 lines) ==== */
/* |
* asm-generic/int-ll64.h |
* |
* Integer declarations for architectures which use "long long" |
* for 64-bit types. |
*/ |
#ifndef _UAPI_ASM_GENERIC_INT_LL64_H |
#define _UAPI_ASM_GENERIC_INT_LL64_H |
#include <asm/bitsperlong.h> |
#ifndef __ASSEMBLY__ |
/* |
* __xx is ok: it doesn't pollute the POSIX namespace. Use these in the |
* header files exported to user space |
*/ |
typedef __signed__ char __s8; |
typedef unsigned char __u8; |
typedef __signed__ short __s16; |
typedef unsigned short __u16; |
typedef __signed__ int __s32; |
typedef unsigned int __u32; |
#ifdef __GNUC__ |
__extension__ typedef __signed__ long long __s64; |
__extension__ typedef unsigned long long __u64; |
#else |
typedef __signed__ long long __s64; |
typedef unsigned long long __u64; |
#endif |
#endif /* __ASSEMBLY__ */ |
#endif /* _UAPI_ASM_GENERIC_INT_LL64_H */ |
/* ==== drivers/include/uapi/asm-generic/ioctl.h (new file, 98 lines) ==== */
#ifndef _UAPI_ASM_GENERIC_IOCTL_H |
#define _UAPI_ASM_GENERIC_IOCTL_H |
/* ioctl command encoding: 32 bits total, command in lower 16 bits, |
* size of the parameter structure in the lower 14 bits of the |
* upper 16 bits. |
* Encoding the size of the parameter structure in the ioctl request |
* is useful for catching programs compiled with old versions |
* and to avoid overwriting user space outside the user buffer area. |
* The highest 2 bits are reserved for indicating the ``access mode''. |
* NOTE: This limits the max parameter size to 16kB -1 ! |
*/ |
/* |
* The following is for compatibility across the various Linux |
* platforms. The generic ioctl numbering scheme doesn't really enforce |
* a type field. De facto, however, the top 8 bits of the lower 16 |
* bits are indeed used as a type field, so we might just as well make |
* this explicit here. Please be sure to use the decoding macros |
* below from now on. |
*/ |
#define _IOC_NRBITS 8 |
#define _IOC_TYPEBITS 8 |
/* |
* Let any architecture override either of the following before |
* including this file. |
*/ |
#ifndef _IOC_SIZEBITS |
# define _IOC_SIZEBITS 14 |
#endif |
#ifndef _IOC_DIRBITS |
# define _IOC_DIRBITS 2 |
#endif |
#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1) |
#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1) |
#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1) |
#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1) |
#define _IOC_NRSHIFT 0 |
#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS) |
#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS) |
#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS) |
/* |
* Direction bits, which any architecture can choose to override |
* before including this file. |
*/ |
#ifndef _IOC_NONE |
# define _IOC_NONE 0U |
#endif |
#ifndef _IOC_WRITE |
# define _IOC_WRITE 1U |
#endif |
#ifndef _IOC_READ |
# define _IOC_READ 2U |
#endif |
#define _IOC(dir,type,nr,size) \ |
(((dir) << _IOC_DIRSHIFT) | \ |
((type) << _IOC_TYPESHIFT) | \ |
((nr) << _IOC_NRSHIFT) | \ |
((size) << _IOC_SIZESHIFT)) |
#ifndef __KERNEL__ |
#define _IOC_TYPECHECK(t) (sizeof(t)) |
#endif |
/* used to create numbers */ |
#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0) |
#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),(_IOC_TYPECHECK(size))) |
#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size))) |
#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size))) |
#define _IOR_BAD(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size)) |
#define _IOW_BAD(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size)) |
#define _IOWR_BAD(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size)) |
/* used to decode ioctl numbers.. */ |
#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK) |
#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK) |
#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK) |
#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK) |
/* ...and for the drivers/sound files... */ |
#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT) |
#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT) |
#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT) |
#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT) |
#define IOCSIZE_SHIFT (_IOC_SIZESHIFT) |
#endif /* _UAPI_ASM_GENERIC_IOCTL_H */ |
/* ==== drivers/include/uapi/asm-generic/posix_types.h (new file, 96 lines) ==== */
#ifndef __ASM_GENERIC_POSIX_TYPES_H |
#define __ASM_GENERIC_POSIX_TYPES_H |
#include <asm/bitsperlong.h> |
/* |
* This file is generally used by user-level software, so you need to |
* be a little careful about namespace pollution etc. |
* |
* First the types that are often defined in different ways across |
* architectures, so that you can override them. |
*/ |
#ifndef __kernel_long_t |
typedef long __kernel_long_t; |
typedef unsigned long __kernel_ulong_t; |
#endif |
#ifndef __kernel_ino_t |
typedef __kernel_ulong_t __kernel_ino_t; |
#endif |
#ifndef __kernel_mode_t |
typedef unsigned int __kernel_mode_t; |
#endif |
#ifndef __kernel_pid_t |
typedef int __kernel_pid_t; |
#endif |
#ifndef __kernel_ipc_pid_t |
typedef int __kernel_ipc_pid_t; |
#endif |
#ifndef __kernel_uid_t |
typedef unsigned int __kernel_uid_t; |
typedef unsigned int __kernel_gid_t; |
#endif |
#ifndef __kernel_suseconds_t |
typedef __kernel_long_t __kernel_suseconds_t; |
#endif |
#ifndef __kernel_daddr_t |
typedef int __kernel_daddr_t; |
#endif |
#ifndef __kernel_uid32_t |
typedef unsigned int __kernel_uid32_t; |
typedef unsigned int __kernel_gid32_t; |
#endif |
#ifndef __kernel_old_uid_t |
typedef __kernel_uid_t __kernel_old_uid_t; |
typedef __kernel_gid_t __kernel_old_gid_t; |
#endif |
#ifndef __kernel_old_dev_t |
typedef unsigned int __kernel_old_dev_t; |
#endif |
/* |
* Most 32 bit architectures use "unsigned int" size_t, |
* and all 64 bit architectures use "unsigned long" size_t. |
*/ |
#ifndef __kernel_size_t |
#if __BITS_PER_LONG != 64 |
typedef unsigned int __kernel_size_t; |
typedef int __kernel_ssize_t; |
typedef int __kernel_ptrdiff_t; |
#else |
typedef __kernel_ulong_t __kernel_size_t; |
typedef __kernel_long_t __kernel_ssize_t; |
typedef __kernel_long_t __kernel_ptrdiff_t; |
#endif |
#endif |
#ifndef __kernel_fsid_t |
typedef struct { |
int val[2]; |
} __kernel_fsid_t; |
#endif |
/* |
* anything below here should be completely generic |
*/ |
typedef __kernel_long_t __kernel_off_t; |
typedef long long __kernel_loff_t; |
typedef __kernel_long_t __kernel_time_t; |
typedef __kernel_long_t __kernel_clock_t; |
typedef int __kernel_timer_t; |
typedef int __kernel_clockid_t; |
typedef char * __kernel_caddr_t; |
typedef unsigned short __kernel_uid16_t; |
typedef unsigned short __kernel_gid16_t; |
#endif /* __ASM_GENERIC_POSIX_TYPES_H */ |
/* ==== drivers/include/uapi/asm-generic/types.h (new file, 8 lines) ==== */
#ifndef _ASM_GENERIC_TYPES_H |
#define _ASM_GENERIC_TYPES_H |
/* |
* int-ll64 is used everywhere now. |
*/ |
#include <asm-generic/int-ll64.h> |
#endif /* _ASM_GENERIC_TYPES_H */ |
/* ==== drivers/include/uapi/drm/drm.h (new file, 866 lines) ==== */
/** |
* \file drm.h |
* Header for the Direct Rendering Manager |
* |
* \author Rickard E. (Rik) Faith <faith@valinux.com> |
* |
* \par Acknowledgments: |
* Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg. |
*/ |
/* |
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. |
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. |
* All rights reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
* OTHER DEALINGS IN THE SOFTWARE. |
*/ |
#ifndef _DRM_H_ |
#define _DRM_H_ |
#if defined(__KERNEL__) || defined(__linux__) |
#include <linux/types.h> |
//#include <asm/ioctl.h> |
typedef unsigned int drm_handle_t; |
#else /* One of the BSDs */ |
#include <sys/ioccom.h> |
#include <sys/types.h> |
typedef int8_t __s8; |
typedef uint8_t __u8; |
typedef int16_t __s16; |
typedef uint16_t __u16; |
typedef int32_t __s32; |
typedef uint32_t __u32; |
typedef int64_t __s64; |
typedef uint64_t __u64; |
typedef unsigned long drm_handle_t; |
#endif |
#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */ |
#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */ |
#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */ |
#define DRM_RAM_PERCENT 10 /**< How much system ram can we lock? */ |
#define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */ |
#define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */ |
#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD) |
#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT) |
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT)) |
typedef unsigned int drm_context_t; |
typedef unsigned int drm_drawable_t; |
typedef unsigned int drm_magic_t; |
/** |
* Cliprect. |
* |
* \warning: If you change this structure, make sure you change |
* XF86DRIClipRectRec in the server as well |
* |
* \note KW: Actually it's illegal to change either for |
* backwards-compatibility reasons. |
*/ |
struct drm_clip_rect { |
unsigned short x1; |
unsigned short y1; |
unsigned short x2; |
unsigned short y2; |
}; |
/** |
* Drawable information. |
*/ |
struct drm_drawable_info { |
unsigned int num_rects; |
struct drm_clip_rect *rects; |
}; |
/** |
* Texture region, |
*/ |
struct drm_tex_region { |
unsigned char next; |
unsigned char prev; |
unsigned char in_use; |
unsigned char padding; |
unsigned int age; |
}; |
/** |
* Hardware lock. |
* |
* The lock structure is a simple cache-line aligned integer. To avoid |
* processor bus contention on a multiprocessor system, there should not be any |
* other data stored in the same cache line. |
*/ |
struct drm_hw_lock { |
__volatile__ unsigned int lock; /**< lock variable */ |
char padding[60]; /**< Pad to cache line */ |
}; |
/** |
* DRM_IOCTL_VERSION ioctl argument type. |
* |
* \sa drmGetVersion(). |
*/ |
struct drm_version { |
int version_major; /**< Major version */ |
int version_minor; /**< Minor version */ |
int version_patchlevel; /**< Patch level */ |
size_t name_len; /**< Length of name buffer */ |
char __user *name; /**< Name of driver */ |
size_t date_len; /**< Length of date buffer */ |
char __user *date; /**< User-space buffer to hold date */ |
size_t desc_len; /**< Length of desc buffer */ |
char __user *desc; /**< User-space buffer to hold desc */ |
}; |
/** |
* DRM_IOCTL_GET_UNIQUE ioctl argument type. |
* |
* \sa drmGetBusid() and drmSetBusId(). |
*/ |
struct drm_unique { |
size_t unique_len; /**< Length of unique */ |
char __user *unique; /**< Unique name for driver instantiation */ |
}; |
struct drm_list { |
int count; /**< Length of user-space structures */ |
struct drm_version __user *version; |
}; |
struct drm_block { |
int unused; |
}; |
/** |
* DRM_IOCTL_CONTROL ioctl argument type. |
* |
* \sa drmCtlInstHandler() and drmCtlUninstHandler(). |
*/ |
struct drm_control { |
enum { |
DRM_ADD_COMMAND, |
DRM_RM_COMMAND, |
DRM_INST_HANDLER, |
DRM_UNINST_HANDLER |
} func; |
int irq; |
}; |
/** |
* Type of memory to map. |
*/ |
enum drm_map_type { |
_DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */ |
_DRM_REGISTERS = 1, /**< no caching, no core dump */ |
_DRM_SHM = 2, /**< shared, cached */ |
_DRM_AGP = 3, /**< AGP/GART */ |
_DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */ |
_DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */ |
}; |
/** |
* Memory mapping flags. |
*/ |
enum drm_map_flags { |
_DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */ |
_DRM_READ_ONLY = 0x02, |
_DRM_LOCKED = 0x04, /**< shared, cached, locked */ |
_DRM_KERNEL = 0x08, /**< kernel requires access */ |
_DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */ |
_DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */ |
_DRM_REMOVABLE = 0x40, /**< Removable mapping */ |
_DRM_DRIVER = 0x80 /**< Managed by driver */ |
}; |
struct drm_ctx_priv_map { |
unsigned int ctx_id; /**< Context requesting private mapping */ |
void *handle; /**< Handle of map */ |
}; |
/** |
* DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls |
* argument type. |
* |
* \sa drmAddMap(). |
*/ |
struct drm_map { |
unsigned long offset; /**< Requested physical address (0 for SAREA)*/ |
unsigned long size; /**< Requested physical size (bytes) */ |
enum drm_map_type type; /**< Type of memory to map */ |
enum drm_map_flags flags; /**< Flags */ |
void *handle; /**< User-space: "Handle" to pass to mmap() */ |
/**< Kernel-space: kernel-virtual address */ |
int mtrr; /**< MTRR slot used */ |
/* Private data */ |
}; |
/** |
* DRM_IOCTL_GET_CLIENT ioctl argument type. |
*/ |
struct drm_client { |
int idx; /**< Which client desired? */ |
int auth; /**< Is client authenticated? */ |
unsigned long pid; /**< Process ID */ |
unsigned long uid; /**< User ID */ |
unsigned long magic; /**< Magic */ |
unsigned long iocs; /**< Ioctl count */ |
}; |
enum drm_stat_type { |
_DRM_STAT_LOCK, |
_DRM_STAT_OPENS, |
_DRM_STAT_CLOSES, |
_DRM_STAT_IOCTLS, |
_DRM_STAT_LOCKS, |
_DRM_STAT_UNLOCKS, |
_DRM_STAT_VALUE, /**< Generic value */ |
_DRM_STAT_BYTE, /**< Generic byte counter (1024bytes/K) */ |
_DRM_STAT_COUNT, /**< Generic non-byte counter (1000/k) */ |
_DRM_STAT_IRQ, /**< IRQ */ |
_DRM_STAT_PRIMARY, /**< Primary DMA bytes */ |
_DRM_STAT_SECONDARY, /**< Secondary DMA bytes */ |
_DRM_STAT_DMA, /**< DMA */ |
_DRM_STAT_SPECIAL, /**< Special DMA (e.g., priority or polled) */ |
_DRM_STAT_MISSED /**< Missed DMA opportunity */ |
/* Add to the *END* of the list */ |
}; |
/** |
* DRM_IOCTL_GET_STATS ioctl argument type. |
*/ |
struct drm_stats { |
unsigned long count; |
struct { |
unsigned long value; |
enum drm_stat_type type; |
} data[15]; |
}; |
/** |
* Hardware locking flags. |
*/ |
enum drm_lock_flags { |
_DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */ |
_DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */ |
_DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */ |
_DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */ |
/* These *HALT* flags aren't supported yet |
-- they will be used to support the |
full-screen DGA-like mode. */ |
_DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */ |
_DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */ |
}; |
/** |
* DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type. |
* |
* \sa drmGetLock() and drmUnlock(). |
*/ |
struct drm_lock { |
int context; |
enum drm_lock_flags flags; |
}; |
/** |
* DMA flags |
* |
* \warning |
* These values \e must match xf86drm.h. |
* |
* \sa drm_dma. |
*/ |
enum drm_dma_flags { |
/* Flags for DMA buffer dispatch */ |
_DRM_DMA_BLOCK = 0x01, /**< |
* Block until buffer dispatched. |
* |
* \note The buffer may not yet have |
* been processed by the hardware -- |
* getting a hardware lock with the |
* hardware quiescent will ensure |
* that the buffer has been |
* processed. |
*/ |
_DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */ |
_DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */ |
/* Flags for DMA buffer request */ |
_DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */ |
_DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */ |
_DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */ |
}; |
/** |
* DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type. |
* |
* \sa drmAddBufs(). |
*/ |
struct drm_buf_desc { |
int count; /**< Number of buffers of this size */ |
int size; /**< Size in bytes */ |
int low_mark; /**< Low water mark */ |
int high_mark; /**< High water mark */ |
enum { |
_DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */ |
_DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */ |
_DRM_SG_BUFFER = 0x04, /**< Scatter/gather memory buffer */ |
_DRM_FB_BUFFER = 0x08, /**< Buffer is in frame buffer */ |
_DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */ |
} flags; |
unsigned long agp_start; /**< |
* Start address of where the AGP buffers are |
* in the AGP aperture |
*/ |
}; |
/** |
* DRM_IOCTL_INFO_BUFS ioctl argument type. |
*/ |
struct drm_buf_info { |
int count; /**< Entries in list */ |
struct drm_buf_desc __user *list; |
}; |
/** |
* DRM_IOCTL_FREE_BUFS ioctl argument type. |
*/ |
struct drm_buf_free { |
int count; |
int __user *list; |
}; |
/** |
* Buffer information |
* |
* \sa drm_buf_map. |
*/ |
struct drm_buf_pub { |
int idx; /**< Index into the master buffer list */ |
int total; /**< Buffer size */ |
int used; /**< Amount of buffer in use (for DMA) */ |
void __user *address; /**< Address of buffer */ |
}; |
/** |
* DRM_IOCTL_MAP_BUFS ioctl argument type. |
*/ |
struct drm_buf_map { |
int count; /**< Length of the buffer list */ |
void __user *virtual; /**< Mmap'd area in user-virtual */ |
struct drm_buf_pub __user *list; /**< Buffer information */ |
}; |
/** |
* DRM_IOCTL_DMA ioctl argument type. |
* |
* Indices here refer to the offset into the buffer list in drm_buf_get. |
* |
* \sa drmDMA(). |
*/ |
struct drm_dma { |
int context; /**< Context handle */ |
int send_count; /**< Number of buffers to send */ |
int __user *send_indices; /**< List of handles to buffers */ |
int __user *send_sizes; /**< Lengths of data to send */ |
enum drm_dma_flags flags; /**< Flags */ |
int request_count; /**< Number of buffers requested */ |
int request_size; /**< Desired size for buffers */ |
int __user *request_indices; /**< Buffer information */ |
int __user *request_sizes; |
int granted_count; /**< Number of buffers granted */ |
}; |
enum drm_ctx_flags { |
_DRM_CONTEXT_PRESERVED = 0x01, |
_DRM_CONTEXT_2DONLY = 0x02 |
}; |
/** |
* DRM_IOCTL_ADD_CTX ioctl argument type. |
* |
* \sa drmCreateContext() and drmDestroyContext(). |
*/ |
struct drm_ctx { |
drm_context_t handle; |
enum drm_ctx_flags flags; |
}; |
/** |
* DRM_IOCTL_RES_CTX ioctl argument type. |
*/ |
struct drm_ctx_res { |
int count; |
struct drm_ctx __user *contexts; |
}; |
/** |
* DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type. |
*/ |
struct drm_draw { |
drm_drawable_t handle; |
}; |
/** |
* DRM_IOCTL_UPDATE_DRAW ioctl argument type. |
*/ |
typedef enum { |
DRM_DRAWABLE_CLIPRECTS, |
} drm_drawable_info_type_t; |
struct drm_update_draw { |
drm_drawable_t handle; |
unsigned int type; |
unsigned int num; |
unsigned long long data; |
}; |
/** |
* DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type. |
*/ |
struct drm_auth { |
drm_magic_t magic; |
}; |
/** |
* DRM_IOCTL_IRQ_BUSID ioctl argument type. |
* |
* \sa drmGetInterruptFromBusID(). |
*/ |
struct drm_irq_busid { |
int irq; /**< IRQ number */ |
int busnum; /**< bus number */ |
int devnum; /**< device number */ |
int funcnum; /**< function number */ |
}; |
enum drm_vblank_seq_type { |
_DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */ |
_DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */ |
/* bits 1-6 are reserved for high crtcs */ |
_DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e, |
_DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */ |
_DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */ |
_DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */ |
_DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */ |
_DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking, unsupported */ |
}; |
#define _DRM_VBLANK_HIGH_CRTC_SHIFT 1 |
#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE) |
#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \ |
_DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS) |
struct drm_wait_vblank_request { |
enum drm_vblank_seq_type type; |
unsigned int sequence; |
unsigned long signal; |
}; |
struct drm_wait_vblank_reply { |
enum drm_vblank_seq_type type; |
unsigned int sequence; |
long tval_sec; |
long tval_usec; |
}; |
/** |
* DRM_IOCTL_WAIT_VBLANK ioctl argument type. |
* |
* \sa drmWaitVBlank(). |
*/ |
union drm_wait_vblank { |
struct drm_wait_vblank_request request; |
struct drm_wait_vblank_reply reply; |
}; |
#define _DRM_PRE_MODESET 1 |
#define _DRM_POST_MODESET 2 |
/** |
* DRM_IOCTL_MODESET_CTL ioctl argument type |
* |
* \sa drmModesetCtl(). |
*/ |
struct drm_modeset_ctl { |
__u32 crtc; |
__u32 cmd; |
}; |
/** |
* DRM_IOCTL_AGP_ENABLE ioctl argument type. |
* |
* \sa drmAgpEnable(). |
*/ |
struct drm_agp_mode { |
unsigned long mode; /**< AGP mode */ |
}; |
/** |
* DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type. |
* |
* \sa drmAgpAlloc() and drmAgpFree(). |
*/ |
struct drm_agp_buffer { |
unsigned long size; /**< In bytes -- will round to page boundary */ |
unsigned long handle; /**< Used for binding / unbinding */ |
unsigned long type; /**< Type of memory to allocate */ |
unsigned long physical; /**< Physical used by i810 */ |
}; |
/** |
* DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type. |
* |
* \sa drmAgpBind() and drmAgpUnbind(). |
*/ |
struct drm_agp_binding { |
unsigned long handle; /**< From drm_agp_buffer */ |
unsigned long offset; /**< In bytes -- will round to page boundary */ |
}; |
/** |
* DRM_IOCTL_AGP_INFO ioctl argument type. |
* |
* \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(), |
* drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(), |
* drmAgpVendorId() and drmAgpDeviceId(). |
*/ |
struct drm_agp_info { |
int agp_version_major; |
int agp_version_minor; |
unsigned long mode; |
unsigned long aperture_base; /* physical address */ |
unsigned long aperture_size; /* bytes */ |
unsigned long memory_allowed; /* bytes */ |
unsigned long memory_used; |
/* PCI information */ |
unsigned short id_vendor; |
unsigned short id_device; |
}; |
/** |
* DRM_IOCTL_SG_ALLOC ioctl argument type. |
*/ |
struct drm_scatter_gather { |
unsigned long size; /**< In bytes -- will round to page boundary */ |
unsigned long handle; /**< Used for mapping / unmapping */ |
}; |
/** |
* DRM_IOCTL_SET_VERSION ioctl argument type. |
*/ |
struct drm_set_version { |
int drm_di_major; |
int drm_di_minor; |
int drm_dd_major; |
int drm_dd_minor; |
}; |
/** DRM_IOCTL_GEM_CLOSE ioctl argument type */ |
struct drm_gem_close { |
/** Handle of the object to be closed. */ |
__u32 handle; |
__u32 pad; |
}; |
/** DRM_IOCTL_GEM_FLINK ioctl argument type */ |
struct drm_gem_flink { |
/** Handle for the object being named */ |
__u32 handle; |
/** Returned global name */ |
__u32 name; |
}; |
/** DRM_IOCTL_GEM_OPEN ioctl argument type */ |
struct drm_gem_open { |
/** Name of object being opened */ |
__u32 name; |
/** Returned handle for the object */ |
__u32 handle; |
/** Returned size of the object */ |
__u64 size; |
}; |
#define DRM_CAP_DUMB_BUFFER 0x1 |
#define DRM_CAP_VBLANK_HIGH_CRTC 0x2 |
#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3 |
#define DRM_CAP_DUMB_PREFER_SHADOW 0x4 |
#define DRM_CAP_PRIME 0x5 |
#define DRM_PRIME_CAP_IMPORT 0x1 |
#define DRM_PRIME_CAP_EXPORT 0x2 |
#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6 |
#define DRM_CAP_ASYNC_PAGE_FLIP 0x7 |
/* |
* The CURSOR_WIDTH and CURSOR_HEIGHT capabilities return a valid widthxheight |
* combination for the hardware cursor. The intention is that a hardware |
* agnostic userspace can query a cursor plane size to use. |
* |
* Note that the cross-driver contract is to merely return a valid size; |
* drivers are free to attach another meaning on top, eg. i915 returns the |
* maximum plane size. |
*/ |
#define DRM_CAP_CURSOR_WIDTH 0x8 |
#define DRM_CAP_CURSOR_HEIGHT 0x9 |
/** DRM_IOCTL_GET_CAP ioctl argument type */ |
struct drm_get_cap { |
__u64 capability; |
__u64 value; |
}; |
/** |
* DRM_CLIENT_CAP_STEREO_3D |
* |
* if set to 1, the DRM core will expose the stereo 3D capabilities of the |
* monitor by advertising the supported 3D layouts in the flags of struct |
* drm_mode_modeinfo. |
*/ |
#define DRM_CLIENT_CAP_STEREO_3D 1 |
/** |
* DRM_CLIENT_CAP_UNIVERSAL_PLANES |
* |
* If set to 1, the DRM core will expose all planes (overlay, primary, and |
* cursor) to userspace. |
*/ |
#define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2 |
/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */ |
struct drm_set_client_cap { |
__u64 capability; |
__u64 value; |
}; |
#define DRM_CLOEXEC O_CLOEXEC |
struct drm_prime_handle { |
__u32 handle; |
/** Flags.. only applicable for handle->fd */ |
__u32 flags; |
/** Returned dmabuf file descriptor */ |
__s32 fd; |
}; |
#include <drm/drm_mode.h> |
/* All DRM ioctls share the 'd' ioctl type; direction-specific wrappers below */
#define DRM_IOCTL_BASE 'd'
#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type)
#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type)
/* Core/query ioctls */
#define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version)
#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, struct drm_unique)
#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, struct drm_auth)
#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, struct drm_irq_busid)
#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, struct drm_map)
#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client)
#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
/* GEM object management */
#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close)
#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
#define DRM_IOCTL_GET_CAP DRM_IOWR(0x0c, struct drm_get_cap)
#define DRM_IOCTL_SET_CLIENT_CAP DRM_IOW( 0x0d, struct drm_set_client_cap)
/* Legacy map/buffer/context management */
#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, struct drm_block)
#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, struct drm_block)
#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, struct drm_control)
#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, struct drm_map)
#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, struct drm_buf_desc)
#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, struct drm_buf_desc)
#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, struct drm_buf_info)
#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, struct drm_buf_map)
#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, struct drm_buf_free)
#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, struct drm_map)
#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map)
#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map)
#define DRM_IOCTL_SET_MASTER DRM_IO(0x1e)
#define DRM_IOCTL_DROP_MASTER DRM_IO(0x1f)
#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx)
#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx)
#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, struct drm_ctx)
#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, struct drm_ctx)
#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, struct drm_ctx)
#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, struct drm_ctx)
#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, struct drm_ctx_res)
#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, struct drm_draw)
#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, struct drm_draw)
#define DRM_IOCTL_DMA DRM_IOWR(0x29, struct drm_dma)
#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, struct drm_lock)
#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock)
#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock)
/* PRIME (dma-buf) sharing */
#define DRM_IOCTL_PRIME_HANDLE_TO_FD DRM_IOWR(0x2d, struct drm_prime_handle)
#define DRM_IOCTL_PRIME_FD_TO_HANDLE DRM_IOWR(0x2e, struct drm_prime_handle)
/* AGP / scatter-gather memory management */
#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, struct drm_agp_mode)
#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, struct drm_agp_info)
#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, struct drm_agp_binding)
#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, struct drm_agp_binding)
#define DRM_IOCTL_SG_ALLOC DRM_IOWR(0x38, struct drm_scatter_gather)
#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, struct drm_scatter_gather)
#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank)
#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
/* KMS (modesetting) ioctls, 0xA0 and up */
#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res)
#define DRM_IOCTL_MODE_GETCRTC DRM_IOWR(0xA1, struct drm_mode_crtc)
#define DRM_IOCTL_MODE_SETCRTC DRM_IOWR(0xA2, struct drm_mode_crtc)
#define DRM_IOCTL_MODE_CURSOR DRM_IOWR(0xA3, struct drm_mode_cursor)
#define DRM_IOCTL_MODE_GETGAMMA DRM_IOWR(0xA4, struct drm_mode_crtc_lut)
#define DRM_IOCTL_MODE_SETGAMMA DRM_IOWR(0xA5, struct drm_mode_crtc_lut)
#define DRM_IOCTL_MODE_GETENCODER DRM_IOWR(0xA6, struct drm_mode_get_encoder)
#define DRM_IOCTL_MODE_GETCONNECTOR DRM_IOWR(0xA7, struct drm_mode_get_connector)
#define DRM_IOCTL_MODE_ATTACHMODE DRM_IOWR(0xA8, struct drm_mode_mode_cmd) /* deprecated (never worked) */
#define DRM_IOCTL_MODE_DETACHMODE DRM_IOWR(0xA9, struct drm_mode_mode_cmd) /* deprecated (never worked) */
#define DRM_IOCTL_MODE_GETPROPERTY DRM_IOWR(0xAA, struct drm_mode_get_property)
#define DRM_IOCTL_MODE_SETPROPERTY DRM_IOWR(0xAB, struct drm_mode_connector_set_property)
#define DRM_IOCTL_MODE_GETPROPBLOB DRM_IOWR(0xAC, struct drm_mode_get_blob)
#define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int)
#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
#define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
#define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb)
#define DRM_IOCTL_MODE_MAP_DUMB DRM_IOWR(0xB3, struct drm_mode_map_dumb)
#define DRM_IOCTL_MODE_DESTROY_DUMB DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
#define DRM_IOCTL_MODE_GETPLANERESOURCES DRM_IOWR(0xB5, struct drm_mode_get_plane_res)
#define DRM_IOCTL_MODE_GETPLANE DRM_IOWR(0xB6, struct drm_mode_get_plane)
#define DRM_IOCTL_MODE_SETPLANE DRM_IOWR(0xB7, struct drm_mode_set_plane)
#define DRM_IOCTL_MODE_ADDFB2 DRM_IOWR(0xB8, struct drm_mode_fb_cmd2)
#define DRM_IOCTL_MODE_OBJ_GETPROPERTIES DRM_IOWR(0xB9, struct drm_mode_obj_get_properties)
#define DRM_IOCTL_MODE_OBJ_SETPROPERTY DRM_IOWR(0xBA, struct drm_mode_obj_set_property)
#define DRM_IOCTL_MODE_CURSOR2 DRM_IOWR(0xBB, struct drm_mode_cursor2)
/**
 * Device specific ioctls should only be in their respective headers
 * The device specific ioctl range is from 0x40 to 0x9f.
 * Generic IOCTLS restart at 0xA0.
 *
 * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
 * drmCommandReadWrite().
 */
#define DRM_COMMAND_BASE 0x40
#define DRM_COMMAND_END 0xA0
/**
 * Header for events written back to userspace on the drm fd. The
 * type defines the type of event, the length specifies the total
 * length of the event (including the header), and user_data is
 * typically a 64 bit value passed with the ioctl that triggered the
 * event. A read on the drm fd will always only return complete
 * events, that is, if for example the read buffer is 100 bytes, and
 * there are two 64 byte events pending, only one will be returned.
 *
 * Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
 * up are chipset specific.
 */
struct drm_event {
__u32 type; /* DRM_EVENT_* code (or chipset-specific >= 0x80000000) */
__u32 length; /* total event size in bytes, including this header */
};
#define DRM_EVENT_VBLANK 0x01
#define DRM_EVENT_FLIP_COMPLETE 0x02
/** Event body for DRM_EVENT_VBLANK and DRM_EVENT_FLIP_COMPLETE */
struct drm_event_vblank {
struct drm_event base;
__u64 user_data; /* value supplied by userspace in the triggering ioctl */
__u32 tv_sec; /* timestamp of the vblank/flip */
__u32 tv_usec;
__u32 sequence; /* vblank counter value */
__u32 reserved;
};
/* typedef area */
/* Legacy typedef aliases for userspace (libdrm) compatibility only;
 * kernel code uses the struct/enum/union tags directly. */
#ifndef __KERNEL__
typedef struct drm_clip_rect drm_clip_rect_t;
typedef struct drm_drawable_info drm_drawable_info_t;
typedef struct drm_tex_region drm_tex_region_t;
typedef struct drm_hw_lock drm_hw_lock_t;
typedef struct drm_version drm_version_t;
typedef struct drm_unique drm_unique_t;
typedef struct drm_list drm_list_t;
typedef struct drm_block drm_block_t;
typedef struct drm_control drm_control_t;
typedef enum drm_map_type drm_map_type_t;
typedef enum drm_map_flags drm_map_flags_t;
typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
typedef struct drm_map drm_map_t;
typedef struct drm_client drm_client_t;
typedef enum drm_stat_type drm_stat_type_t;
typedef struct drm_stats drm_stats_t;
typedef enum drm_lock_flags drm_lock_flags_t;
typedef struct drm_lock drm_lock_t;
typedef enum drm_dma_flags drm_dma_flags_t;
typedef struct drm_buf_desc drm_buf_desc_t;
typedef struct drm_buf_info drm_buf_info_t;
typedef struct drm_buf_free drm_buf_free_t;
typedef struct drm_buf_pub drm_buf_pub_t;
typedef struct drm_buf_map drm_buf_map_t;
typedef struct drm_dma drm_dma_t;
typedef union drm_wait_vblank drm_wait_vblank_t;
typedef struct drm_agp_mode drm_agp_mode_t;
typedef enum drm_ctx_flags drm_ctx_flags_t;
typedef struct drm_ctx drm_ctx_t;
typedef struct drm_ctx_res drm_ctx_res_t;
typedef struct drm_draw drm_draw_t;
typedef struct drm_update_draw drm_update_draw_t;
typedef struct drm_auth drm_auth_t;
typedef struct drm_irq_busid drm_irq_busid_t;
typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;
typedef struct drm_agp_buffer drm_agp_buffer_t;
typedef struct drm_agp_binding drm_agp_binding_t;
typedef struct drm_agp_info drm_agp_info_t;
typedef struct drm_scatter_gather drm_scatter_gather_t;
typedef struct drm_set_version drm_set_version_t;
#endif
#endif |
/drivers/include/uapi/drm/drm_fourcc.h |
---|
0,0 → 1,135 |
/* |
* Copyright 2011 Intel Corporation |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
* OTHER DEALINGS IN THE SOFTWARE. |
*/ |
#ifndef DRM_FOURCC_H |
#define DRM_FOURCC_H |
#include <linux/types.h> |
/*
 * Build a little-endian fourcc code from four characters, e.g.
 * fourcc_code('X', 'R', '2', '4') == 0x34325258 (DRM_FORMAT_XRGB8888).
 * Every argument and the whole expansion are parenthesized/cast to __u32
 * so mixed-type operands promote safely.
 */
#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
((__u32)(c) << 16) | ((__u32)(d) << 24))
/*
 * 1U, not 1: left-shifting a signed 1 into the sign bit (1<<31) is
 * undefined behavior in C. The unsigned form yields the intended
 * 0x80000000 mask with defined semantics (matches upstream drm_fourcc.h).
 */
#define DRM_FORMAT_BIG_ENDIAN (1U<<31) /* format is big endian instead of little endian */
/* fourcc pixel-format codes; all component orders below are byte order
 * within the little-endian packed value unless noted otherwise */
/* color index */
#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
/* 8 bpp RGB */
#define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */
#define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */
/* 16 bpp RGB */
#define DRM_FORMAT_XRGB4444 fourcc_code('X', 'R', '1', '2') /* [15:0] x:R:G:B 4:4:4:4 little endian */
#define DRM_FORMAT_XBGR4444 fourcc_code('X', 'B', '1', '2') /* [15:0] x:B:G:R 4:4:4:4 little endian */
#define DRM_FORMAT_RGBX4444 fourcc_code('R', 'X', '1', '2') /* [15:0] R:G:B:x 4:4:4:4 little endian */
#define DRM_FORMAT_BGRX4444 fourcc_code('B', 'X', '1', '2') /* [15:0] B:G:R:x 4:4:4:4 little endian */
#define DRM_FORMAT_ARGB4444 fourcc_code('A', 'R', '1', '2') /* [15:0] A:R:G:B 4:4:4:4 little endian */
#define DRM_FORMAT_ABGR4444 fourcc_code('A', 'B', '1', '2') /* [15:0] A:B:G:R 4:4:4:4 little endian */
#define DRM_FORMAT_RGBA4444 fourcc_code('R', 'A', '1', '2') /* [15:0] R:G:B:A 4:4:4:4 little endian */
#define DRM_FORMAT_BGRA4444 fourcc_code('B', 'A', '1', '2') /* [15:0] B:G:R:A 4:4:4:4 little endian */
#define DRM_FORMAT_XRGB1555 fourcc_code('X', 'R', '1', '5') /* [15:0] x:R:G:B 1:5:5:5 little endian */
#define DRM_FORMAT_XBGR1555 fourcc_code('X', 'B', '1', '5') /* [15:0] x:B:G:R 1:5:5:5 little endian */
#define DRM_FORMAT_RGBX5551 fourcc_code('R', 'X', '1', '5') /* [15:0] R:G:B:x 5:5:5:1 little endian */
#define DRM_FORMAT_BGRX5551 fourcc_code('B', 'X', '1', '5') /* [15:0] B:G:R:x 5:5:5:1 little endian */
#define DRM_FORMAT_ARGB1555 fourcc_code('A', 'R', '1', '5') /* [15:0] A:R:G:B 1:5:5:5 little endian */
#define DRM_FORMAT_ABGR1555 fourcc_code('A', 'B', '1', '5') /* [15:0] A:B:G:R 1:5:5:5 little endian */
#define DRM_FORMAT_RGBA5551 fourcc_code('R', 'A', '1', '5') /* [15:0] R:G:B:A 5:5:5:1 little endian */
#define DRM_FORMAT_BGRA5551 fourcc_code('B', 'A', '1', '5') /* [15:0] B:G:R:A 5:5:5:1 little endian */
#define DRM_FORMAT_RGB565 fourcc_code('R', 'G', '1', '6') /* [15:0] R:G:B 5:6:5 little endian */
#define DRM_FORMAT_BGR565 fourcc_code('B', 'G', '1', '6') /* [15:0] B:G:R 5:6:5 little endian */
/* 24 bpp RGB */
#define DRM_FORMAT_RGB888 fourcc_code('R', 'G', '2', '4') /* [23:0] R:G:B little endian */
#define DRM_FORMAT_BGR888 fourcc_code('B', 'G', '2', '4') /* [23:0] B:G:R little endian */
/* 32 bpp RGB */
#define DRM_FORMAT_XRGB8888 fourcc_code('X', 'R', '2', '4') /* [31:0] x:R:G:B 8:8:8:8 little endian */
#define DRM_FORMAT_XBGR8888 fourcc_code('X', 'B', '2', '4') /* [31:0] x:B:G:R 8:8:8:8 little endian */
#define DRM_FORMAT_RGBX8888 fourcc_code('R', 'X', '2', '4') /* [31:0] R:G:B:x 8:8:8:8 little endian */
#define DRM_FORMAT_BGRX8888 fourcc_code('B', 'X', '2', '4') /* [31:0] B:G:R:x 8:8:8:8 little endian */
#define DRM_FORMAT_ARGB8888 fourcc_code('A', 'R', '2', '4') /* [31:0] A:R:G:B 8:8:8:8 little endian */
#define DRM_FORMAT_ABGR8888 fourcc_code('A', 'B', '2', '4') /* [31:0] A:B:G:R 8:8:8:8 little endian */
#define DRM_FORMAT_RGBA8888 fourcc_code('R', 'A', '2', '4') /* [31:0] R:G:B:A 8:8:8:8 little endian */
#define DRM_FORMAT_BGRA8888 fourcc_code('B', 'A', '2', '4') /* [31:0] B:G:R:A 8:8:8:8 little endian */
#define DRM_FORMAT_XRGB2101010 fourcc_code('X', 'R', '3', '0') /* [31:0] x:R:G:B 2:10:10:10 little endian */
#define DRM_FORMAT_XBGR2101010 fourcc_code('X', 'B', '3', '0') /* [31:0] x:B:G:R 2:10:10:10 little endian */
#define DRM_FORMAT_RGBX1010102 fourcc_code('R', 'X', '3', '0') /* [31:0] R:G:B:x 10:10:10:2 little endian */
#define DRM_FORMAT_BGRX1010102 fourcc_code('B', 'X', '3', '0') /* [31:0] B:G:R:x 10:10:10:2 little endian */
#define DRM_FORMAT_ARGB2101010 fourcc_code('A', 'R', '3', '0') /* [31:0] A:R:G:B 2:10:10:10 little endian */
#define DRM_FORMAT_ABGR2101010 fourcc_code('A', 'B', '3', '0') /* [31:0] A:B:G:R 2:10:10:10 little endian */
#define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
/* packed YCbCr */
#define DRM_FORMAT_YUYV fourcc_code('Y', 'U', 'Y', 'V') /* [31:0] Cr0:Y1:Cb0:Y0 8:8:8:8 little endian */
#define DRM_FORMAT_YVYU fourcc_code('Y', 'V', 'Y', 'U') /* [31:0] Cb0:Y1:Cr0:Y0 8:8:8:8 little endian */
#define DRM_FORMAT_UYVY fourcc_code('U', 'Y', 'V', 'Y') /* [31:0] Y1:Cr0:Y0:Cb0 8:8:8:8 little endian */
#define DRM_FORMAT_VYUY fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */
#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
/*
 * 2 plane YCbCr
 * index 0 = Y plane, [7:0] Y
 * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
 * or
 * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
 */
#define DRM_FORMAT_NV12 fourcc_code('N', 'V', '1', '2') /* 2x2 subsampled Cr:Cb plane */
#define DRM_FORMAT_NV21 fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */
#define DRM_FORMAT_NV16 fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */
#define DRM_FORMAT_NV61 fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
#define DRM_FORMAT_NV24 fourcc_code('N', 'V', '2', '4') /* non-subsampled Cr:Cb plane */
#define DRM_FORMAT_NV42 fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */
/* special NV12 tiled format */
#define DRM_FORMAT_NV12MT fourcc_code('T', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane 64x32 macroblocks */
/*
 * 3 plane YCbCr
 * index 0: Y plane, [7:0] Y
 * index 1: Cb plane, [7:0] Cb
 * index 2: Cr plane, [7:0] Cr
 * or
 * index 1: Cr plane, [7:0] Cr
 * index 2: Cb plane, [7:0] Cb
 */
#define DRM_FORMAT_YUV410 fourcc_code('Y', 'U', 'V', '9') /* 4x4 subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU410 fourcc_code('Y', 'V', 'U', '9') /* 4x4 subsampled Cr (1) and Cb (2) planes */
#define DRM_FORMAT_YUV411 fourcc_code('Y', 'U', '1', '1') /* 4x1 subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU411 fourcc_code('Y', 'V', '1', '1') /* 4x1 subsampled Cr (1) and Cb (2) planes */
#define DRM_FORMAT_YUV420 fourcc_code('Y', 'U', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU420 fourcc_code('Y', 'V', '1', '2') /* 2x2 subsampled Cr (1) and Cb (2) planes */
#define DRM_FORMAT_YUV422 fourcc_code('Y', 'U', '1', '6') /* 2x1 subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU422 fourcc_code('Y', 'V', '1', '6') /* 2x1 subsampled Cr (1) and Cb (2) planes */
#define DRM_FORMAT_YUV444 fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU444 fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */
#endif /* DRM_FOURCC_H */ |
/drivers/include/uapi/drm/drm_mode.h |
---|
0,0 → 1,522 |
/* |
* Copyright (c) 2007 Dave Airlie <airlied@linux.ie> |
* Copyright (c) 2007 Jakob Bornecrantz <wallbraker@gmail.com> |
* Copyright (c) 2008 Red Hat Inc. |
* Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA |
* Copyright (c) 2007-2008 Intel Corporation |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice shall be included in |
* all copies or substantial portions of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
* IN THE SOFTWARE. |
*/ |
#ifndef _DRM_MODE_H |
#define _DRM_MODE_H |
#include <linux/types.h> |
/* Fixed string-buffer sizes used by the KMS structs below */
#define DRM_DISPLAY_INFO_LEN 32
#define DRM_CONNECTOR_NAME_LEN 32
#define DRM_DISPLAY_MODE_LEN 32
#define DRM_PROP_NAME_LEN 32
/* Mode type flags for drm_mode_modeinfo.type */
#define DRM_MODE_TYPE_BUILTIN (1<<0)
#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN)
#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN)
#define DRM_MODE_TYPE_PREFERRED (1<<3)
#define DRM_MODE_TYPE_DEFAULT (1<<4)
#define DRM_MODE_TYPE_USERDEF (1<<5)
#define DRM_MODE_TYPE_DRIVER (1<<6)
/* Video mode flags */
/* bit compatible with the xorg definitions. */
#define DRM_MODE_FLAG_PHSYNC (1<<0)
#define DRM_MODE_FLAG_NHSYNC (1<<1)
#define DRM_MODE_FLAG_PVSYNC (1<<2)
#define DRM_MODE_FLAG_NVSYNC (1<<3)
#define DRM_MODE_FLAG_INTERLACE (1<<4)
#define DRM_MODE_FLAG_DBLSCAN (1<<5)
#define DRM_MODE_FLAG_CSYNC (1<<6)
#define DRM_MODE_FLAG_PCSYNC (1<<7)
#define DRM_MODE_FLAG_NCSYNC (1<<8)
#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */
#define DRM_MODE_FLAG_BCAST (1<<10)
#define DRM_MODE_FLAG_PIXMUX (1<<11)
#define DRM_MODE_FLAG_DBLCLK (1<<12)
#define DRM_MODE_FLAG_CLKDIV2 (1<<13)
/*
 * When adding a new stereo mode don't forget to adjust DRM_MODE_FLAGS_3D_MAX
 * (define not exposed to user space).
 */
/* Stereo-3D layout: a 5-bit field at bits 18:14 of the mode flags */
#define DRM_MODE_FLAG_3D_MASK (0x1f<<14)
#define DRM_MODE_FLAG_3D_NONE (0<<14)
#define DRM_MODE_FLAG_3D_FRAME_PACKING (1<<14)
#define DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE (2<<14)
#define DRM_MODE_FLAG_3D_LINE_ALTERNATIVE (3<<14)
#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL (4<<14)
#define DRM_MODE_FLAG_3D_L_DEPTH (5<<14)
#define DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH (6<<14)
#define DRM_MODE_FLAG_3D_TOP_AND_BOTTOM (7<<14)
#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF (8<<14)
/* DPMS flags */
/* bit compatible with the xorg definitions. */
#define DRM_MODE_DPMS_ON 0
#define DRM_MODE_DPMS_STANDBY 1
#define DRM_MODE_DPMS_SUSPEND 2
#define DRM_MODE_DPMS_OFF 3
/* Scaling mode options */
#define DRM_MODE_SCALE_NONE 0 /* Unmodified timing (display or
software can still scale) */
#define DRM_MODE_SCALE_FULLSCREEN 1 /* Full screen, ignore aspect */
#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */
#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */
/* Picture aspect ratio options */
#define DRM_MODE_PICTURE_ASPECT_NONE 0
#define DRM_MODE_PICTURE_ASPECT_4_3 1
#define DRM_MODE_PICTURE_ASPECT_16_9 2
/* Dithering mode options */
#define DRM_MODE_DITHERING_OFF 0
#define DRM_MODE_DITHERING_ON 1
#define DRM_MODE_DITHERING_AUTO 2
/* Dirty info options */
#define DRM_MODE_DIRTY_OFF 0
#define DRM_MODE_DIRTY_ON 1
#define DRM_MODE_DIRTY_ANNOTATE 2
/** Display mode timing description (horizontal/vertical timings in pixels/lines) */
struct drm_mode_modeinfo {
__u32 clock; /* pixel clock */
__u16 hdisplay, hsync_start, hsync_end, htotal, hskew;
__u16 vdisplay, vsync_start, vsync_end, vtotal, vscan;
__u32 vrefresh;
__u32 flags; /* DRM_MODE_FLAG_* */
__u32 type; /* DRM_MODE_TYPE_* */
char name[DRM_DISPLAY_MODE_LEN];
};
/** DRM_IOCTL_MODE_GETRESOURCES argument: userspace-supplied arrays + counts */
struct drm_mode_card_res {
__u64 fb_id_ptr; /* pointers to userspace __u32 id arrays */
__u64 crtc_id_ptr;
__u64 connector_id_ptr;
__u64 encoder_id_ptr;
__u32 count_fbs;
__u32 count_crtcs;
__u32 count_connectors;
__u32 count_encoders;
__u32 min_width, max_width;
__u32 min_height, max_height;
};
/** DRM_IOCTL_MODE_GETCRTC / SETCRTC argument */
struct drm_mode_crtc {
__u64 set_connectors_ptr; /* pointer to array of connector ids to drive */
__u32 count_connectors;
__u32 crtc_id; /**< Id */
__u32 fb_id; /**< Id of framebuffer */
__u32 x, y; /**< Position on the framebuffer */
__u32 gamma_size;
__u32 mode_valid; /* nonzero if 'mode' below is to be used */
struct drm_mode_modeinfo mode;
};
#define DRM_MODE_PRESENT_TOP_FIELD (1<<0)
#define DRM_MODE_PRESENT_BOTTOM_FIELD (1<<1)
/* Planes blend with or override other bits on the CRTC */
struct drm_mode_set_plane {
__u32 plane_id;
__u32 crtc_id;
__u32 fb_id; /* fb object contains surface format type */
__u32 flags; /* see above flags */
/* Signed dest location allows it to be partially off screen */
__s32 crtc_x, crtc_y;
__u32 crtc_w, crtc_h;
/* Source values are 16.16 fixed point */
__u32 src_x, src_y;
__u32 src_h, src_w;
};
/** DRM_IOCTL_MODE_GETPLANE argument */
struct drm_mode_get_plane {
__u32 plane_id;
__u32 crtc_id;
__u32 fb_id;
__u32 possible_crtcs; /* bitmask of CRTCs this plane can be bound to */
__u32 gamma_size;
__u32 count_format_types;
__u64 format_type_ptr; /* pointer to userspace array of fourcc codes */
};
/** DRM_IOCTL_MODE_GETPLANERESOURCES argument */
struct drm_mode_get_plane_res {
__u64 plane_id_ptr;
__u32 count_planes;
};
/* Encoder types for drm_mode_get_encoder.encoder_type */
#define DRM_MODE_ENCODER_NONE 0
#define DRM_MODE_ENCODER_DAC 1
#define DRM_MODE_ENCODER_TMDS 2
#define DRM_MODE_ENCODER_LVDS 3
#define DRM_MODE_ENCODER_TVDAC 4
#define DRM_MODE_ENCODER_VIRTUAL 5
#define DRM_MODE_ENCODER_DSI 6
#define DRM_MODE_ENCODER_DPMST 7
/** DRM_IOCTL_MODE_GETENCODER argument */
struct drm_mode_get_encoder {
__u32 encoder_id;
__u32 encoder_type; /* DRM_MODE_ENCODER_* */
__u32 crtc_id; /**< Id of crtc */
__u32 possible_crtcs; /* bitmask of CRTCs this encoder can drive */
__u32 possible_clones;
};
/* This is for connectors with multiple signal types. */
/* Try to match DRM_MODE_CONNECTOR_X as closely as possible. */
#define DRM_MODE_SUBCONNECTOR_Automatic 0
#define DRM_MODE_SUBCONNECTOR_Unknown 0
#define DRM_MODE_SUBCONNECTOR_DVID 3
#define DRM_MODE_SUBCONNECTOR_DVIA 4
#define DRM_MODE_SUBCONNECTOR_Composite 5
#define DRM_MODE_SUBCONNECTOR_SVIDEO 6
#define DRM_MODE_SUBCONNECTOR_Component 8
#define DRM_MODE_SUBCONNECTOR_SCART 9
/* Connector types for drm_mode_get_connector.connector_type */
#define DRM_MODE_CONNECTOR_Unknown 0
#define DRM_MODE_CONNECTOR_VGA 1
#define DRM_MODE_CONNECTOR_DVII 2
#define DRM_MODE_CONNECTOR_DVID 3
#define DRM_MODE_CONNECTOR_DVIA 4
#define DRM_MODE_CONNECTOR_Composite 5
#define DRM_MODE_CONNECTOR_SVIDEO 6
#define DRM_MODE_CONNECTOR_LVDS 7
#define DRM_MODE_CONNECTOR_Component 8
#define DRM_MODE_CONNECTOR_9PinDIN 9
#define DRM_MODE_CONNECTOR_DisplayPort 10
#define DRM_MODE_CONNECTOR_HDMIA 11
#define DRM_MODE_CONNECTOR_HDMIB 12
#define DRM_MODE_CONNECTOR_TV 13
#define DRM_MODE_CONNECTOR_eDP 14
#define DRM_MODE_CONNECTOR_VIRTUAL 15
#define DRM_MODE_CONNECTOR_DSI 16
/** DRM_IOCTL_MODE_GETCONNECTOR argument */
struct drm_mode_get_connector {
__u64 encoders_ptr; /* userspace array pointers + counts below */
__u64 modes_ptr;
__u64 props_ptr;
__u64 prop_values_ptr;
__u32 count_modes;
__u32 count_props;
__u32 count_encoders;
__u32 encoder_id; /**< Current Encoder */
__u32 connector_id; /**< Id */
__u32 connector_type; /* DRM_MODE_CONNECTOR_* */
__u32 connector_type_id;
__u32 connection;
__u32 mm_width, mm_height; /**< WxH in millimeters */
__u32 subpixel;
__u32 pad;
};
/* Property flags for drm_mode_get_property.flags */
#define DRM_MODE_PROP_PENDING (1<<0)
#define DRM_MODE_PROP_RANGE (1<<1)
#define DRM_MODE_PROP_IMMUTABLE (1<<2)
#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */
#define DRM_MODE_PROP_BLOB (1<<4)
#define DRM_MODE_PROP_BITMASK (1<<5) /* bitmask of enumerated types */
/* non-extended types: legacy bitmask, one bit per type: */
#define DRM_MODE_PROP_LEGACY_TYPE ( \
DRM_MODE_PROP_RANGE | \
DRM_MODE_PROP_ENUM | \
DRM_MODE_PROP_BLOB | \
DRM_MODE_PROP_BITMASK)
/* extended-types: rather than continue to consume a bit per type,
 * grab a chunk of the bits to use as integer type id.
 */
#define DRM_MODE_PROP_EXTENDED_TYPE 0x0000ffc0
#define DRM_MODE_PROP_TYPE(n) ((n) << 6)
#define DRM_MODE_PROP_OBJECT DRM_MODE_PROP_TYPE(1)
#define DRM_MODE_PROP_SIGNED_RANGE DRM_MODE_PROP_TYPE(2)
/** One named value of an enum/bitmask property */
struct drm_mode_property_enum {
__u64 value;
char name[DRM_PROP_NAME_LEN];
};
/** DRM_IOCTL_MODE_GETPROPERTY argument */
struct drm_mode_get_property {
__u64 values_ptr; /* values and blob lengths */
__u64 enum_blob_ptr; /* enum and blob id ptrs */
__u32 prop_id;
__u32 flags; /* DRM_MODE_PROP_* */
char name[DRM_PROP_NAME_LEN];
__u32 count_values;
/* This is only used to count enum values, not blobs. The _blobs is
 * simply because of a historical reason, i.e. backwards compat. */
__u32 count_enum_blobs;
};
/** DRM_IOCTL_MODE_SETPROPERTY argument */
struct drm_mode_connector_set_property {
__u64 value;
__u32 prop_id;
__u32 connector_id;
};
/** DRM_IOCTL_MODE_OBJ_GETPROPERTIES argument */
struct drm_mode_obj_get_properties {
__u64 props_ptr;
__u64 prop_values_ptr;
__u32 count_props;
__u32 obj_id;
__u32 obj_type;
};
/** DRM_IOCTL_MODE_OBJ_SETPROPERTY argument */
struct drm_mode_obj_set_property {
__u64 value;
__u32 prop_id;
__u32 obj_id;
__u32 obj_type;
};
/** DRM_IOCTL_MODE_GETPROPBLOB argument */
struct drm_mode_get_blob {
__u32 blob_id;
__u32 length; /* in: buffer size, out: blob length */
__u64 data; /* pointer to userspace buffer */
};
/** Legacy (single-plane) framebuffer description for ADDFB/GETFB */
struct drm_mode_fb_cmd {
__u32 fb_id;
__u32 width, height;
__u32 pitch;
__u32 bpp;
__u32 depth;
/* driver specific handle */
__u32 handle;
};
#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
/** Multi-plane framebuffer description for DRM_IOCTL_MODE_ADDFB2 */
struct drm_mode_fb_cmd2 {
__u32 fb_id;
__u32 width, height;
__u32 pixel_format; /* fourcc code from drm_fourcc.h */
__u32 flags; /* see above flags */
/*
 * In case of planar formats, this ioctl allows up to 4
 * buffer objects with offsets and pitches per plane.
 * The pitch and offset order is dictated by the fourcc,
 * e.g. NV12 (http://fourcc.org/yuv.php#NV12) is described as:
 *
 * YUV 4:2:0 image with a plane of 8 bit Y samples
 * followed by an interleaved U/V plane containing
 * 8 bit 2x2 subsampled colour difference samples.
 *
 * So it would consist of Y as offsets[0] and UV as
 * offsets[1]. Note that offsets[0] will generally
 * be 0.
 */
__u32 handles[4];
__u32 pitches[4]; /* pitch for each plane */
__u32 offsets[4]; /* offset of each plane */
};
#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
#define DRM_MODE_FB_DIRTY_FLAGS 0x03
#define DRM_MODE_FB_DIRTY_MAX_CLIPS 256
/*
 * Mark a region of a framebuffer as dirty.
 *
 * Some hardware does not automatically update display contents
 * as a hardware or software draw to a framebuffer. This ioctl
 * allows userspace to tell the kernel and the hardware what
 * regions of the framebuffer have changed.
 *
 * The kernel or hardware is free to update more then just the
 * region specified by the clip rects. The kernel or hardware
 * may also delay and/or coalesce several calls to dirty into a
 * single update.
 *
 * Userspace may annotate the updates, the annotates are a
 * promise made by the caller that the change is either a copy
 * of pixels or a fill of a single color in the region specified.
 *
 * If the DRM_MODE_FB_DIRTY_ANNOTATE_COPY flag is given then
 * the number of updated regions are half of num_clips given,
 * where the clip rects are paired in src and dst. The width and
 * height of each one of the pairs must match.
 *
 * If the DRM_MODE_FB_DIRTY_ANNOTATE_FILL flag is given the caller
 * promises that the region specified of the clip rects is filled
 * completely with a single color as given in the color argument.
 */
struct drm_mode_fb_dirty_cmd {
__u32 fb_id;
__u32 flags; /* DRM_MODE_FB_DIRTY_* */
__u32 color; /* fill color when ANNOTATE_FILL is set */
__u32 num_clips;
__u64 clips_ptr; /* pointer to userspace array of clip rects */
};
/** DRM_IOCTL_MODE_ATTACHMODE/DETACHMODE argument (deprecated, never worked) */
struct drm_mode_mode_cmd {
__u32 connector_id;
struct drm_mode_modeinfo mode;
};
#define DRM_MODE_CURSOR_BO 0x01
#define DRM_MODE_CURSOR_MOVE 0x02
#define DRM_MODE_CURSOR_FLAGS 0x03
/*
 * depending on the value in flags different members are used.
 *
 * CURSOR_BO uses
 *    crtc_id
 *    width
 *    height
 *    handle - if 0 turns the cursor off
 *
 * CURSOR_MOVE uses
 *    crtc_id
 *    x
 *    y
 */
struct drm_mode_cursor {
__u32 flags; /* DRM_MODE_CURSOR_BO or DRM_MODE_CURSOR_MOVE */
__u32 crtc_id;
__s32 x;
__s32 y;
__u32 width;
__u32 height;
/* driver specific handle */
__u32 handle;
};
/** Like drm_mode_cursor, plus a hotspot (DRM_IOCTL_MODE_CURSOR2) */
struct drm_mode_cursor2 {
__u32 flags;
__u32 crtc_id;
__s32 x;
__s32 y;
__u32 width;
__u32 height;
/* driver specific handle */
__u32 handle;
__s32 hot_x; /* cursor hotspot within the image */
__s32 hot_y;
};
/** Gamma LUT for DRM_IOCTL_MODE_GETGAMMA/SETGAMMA */
struct drm_mode_crtc_lut {
__u32 crtc_id;
__u32 gamma_size; /* number of entries in each array */
/* pointers to arrays */
__u64 red;
__u64 green;
__u64 blue;
};
#define DRM_MODE_PAGE_FLIP_EVENT 0x01
#define DRM_MODE_PAGE_FLIP_ASYNC 0x02
#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT|DRM_MODE_PAGE_FLIP_ASYNC)
/*
 * Request a page flip on the specified crtc.
 *
 * This ioctl will ask KMS to schedule a page flip for the specified
 * crtc. Once any pending rendering targeting the specified fb (as of
 * ioctl time) has completed, the crtc will be reprogrammed to display
 * that fb after the next vertical refresh. The ioctl returns
 * immediately, but subsequent rendering to the current fb will block
 * in the execbuffer ioctl until the page flip happens. If a page
 * flip is already pending as the ioctl is called, EBUSY will be
 * returned.
 *
 * Flag DRM_MODE_PAGE_FLIP_EVENT requests that drm sends back a vblank
 * event (see drm.h: struct drm_event_vblank) when the page flip is
 * done. The user_data field passed in with this ioctl will be
 * returned as the user_data field in the vblank event struct.
 *
 * Flag DRM_MODE_PAGE_FLIP_ASYNC requests that the flip happen
 * 'as soon as possible', meaning that it not delay waiting for vblank.
 * This may cause tearing on the screen.
 *
 * The reserved field must be zero until we figure out something
 * clever to use it for.
 */
struct drm_mode_crtc_page_flip {
__u32 crtc_id;
__u32 fb_id;
__u32 flags; /* DRM_MODE_PAGE_FLIP_* */
__u32 reserved; /* must be zero */
__u64 user_data; /* echoed back in the completion event */
};
/* create a dumb scanout buffer */
/* NOTE(review): uses uint32_t/uint64_t rather than the __u32/__u64 used by
 * every other struct in these headers (same layout; upstream later converted
 * these to __u32/__u64 for consistency) — consider aligning. */
struct drm_mode_create_dumb {
uint32_t height;
uint32_t width;
uint32_t bpp; /* bits per pixel */
uint32_t flags;
/* handle, pitch, size will be returned */
uint32_t handle;
uint32_t pitch;
uint64_t size;
};
/* set up for mmap of a dumb scanout buffer */
struct drm_mode_map_dumb {
/** Handle for the object being mapped. */
__u32 handle;
__u32 pad;
/**
 * Fake offset to use for subsequent mmap call
 *
 * This is a fixed-size type for 32/64 compatibility.
 */
__u64 offset;
};
/** DRM_IOCTL_MODE_DESTROY_DUMB argument: handle of the buffer to destroy */
struct drm_mode_destroy_dumb {
uint32_t handle;
};
#endif |
/drivers/include/uapi/drm/drm_sarea.h |
---|
0,0 → 1,86 |
/** |
* \file drm_sarea.h |
* \brief SAREA definitions |
* |
* \author Michel Dänzer <michel@daenzer.net> |
*/ |
/* |
* Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas. |
* All Rights Reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
* OTHER DEALINGS IN THE SOFTWARE. |
*/ |
#ifndef _DRM_SAREA_H_ |
#define _DRM_SAREA_H_ |
#include <drm/drm.h> |
/* SAREA area needs to be at least a page */ |
#if defined(__alpha__) |
#define SAREA_MAX 0x2000U |
#elif defined(__mips__) |
#define SAREA_MAX 0x4000U |
#elif defined(__ia64__) |
#define SAREA_MAX 0x10000U /* 64kB */ |
#else |
/* Intel 830M driver needs at least 8k SAREA */ |
#define SAREA_MAX 0x2000U |
#endif |
/** Maximum number of drawables in the SAREA */ |
#define SAREA_MAX_DRAWABLES 256 |
#define SAREA_DRAWABLE_CLAIMED_ENTRY 0x80000000 |
/** SAREA drawable: one slot in drm_sarea::drawableTable */
struct drm_sarea_drawable {
	unsigned int stamp;	/* NOTE(review): presumably a change counter — confirm */
	unsigned int flags;	/* e.g. SAREA_DRAWABLE_CLAIMED_ENTRY when the slot is taken */
};
/** SAREA frame: position and size of the current frame */
struct drm_sarea_frame {
	unsigned int x;
	unsigned int y;
	unsigned int width;
	unsigned int height;
	unsigned int fullscreen;	/* NOTE(review): presumably nonzero when fullscreen — confirm */
};
/** SAREA: shared-memory area header common to all DRM drivers */
struct drm_sarea {
	/** first thing is always the DRM locking structure */
	struct drm_hw_lock lock;
	/** \todo Use readers/writer lock for drm_sarea::drawable_lock */
	struct drm_hw_lock drawable_lock;
	struct drm_sarea_drawable drawableTable[SAREA_MAX_DRAWABLES];	/**< drawables */
	struct drm_sarea_frame frame;	/**< frame */
	drm_context_t dummy_context;
};
#ifndef __KERNEL__
/* Legacy typedef names kept for old userspace; kernel code uses the
 * struct tags directly. */
typedef struct drm_sarea_drawable drm_sarea_drawable_t;
typedef struct drm_sarea_frame drm_sarea_frame_t;
typedef struct drm_sarea drm_sarea_t;
#endif
#endif /* _DRM_SAREA_H_ */ |
/drivers/include/uapi/drm/i915_drm.h |
---|
0,0 → 1,1106 |
/* |
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. |
* All Rights Reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the |
* "Software"), to deal in the Software without restriction, including |
* without limitation the rights to use, copy, modify, merge, publish, |
* distribute, sub license, and/or sell copies of the Software, and to |
* permit persons to whom the Software is furnished to do so, subject to |
* the following conditions: |
* |
* The above copyright notice and this permission notice (including the |
* next paragraph) shall be included in all copies or substantial portions |
* of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS |
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. |
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR |
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, |
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE |
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
* |
*/ |
#ifndef _UAPI_I915_DRM_H_ |
#define _UAPI_I915_DRM_H_ |
#include <drm/drm.h> |
/* Please note that modifications to all structs defined here are |
* subject to backwards-compatibility constraints. |
*/ |
/** |
* DOC: uevents generated by i915 on it's device node |
* |
* I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch |
* event from the gpu l3 cache. Additional information supplied is ROW, |
* BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep |
* track of these events and if a specific cache-line seems to have a |
* persistent error remap it with the l3 remapping tool supplied in |
* intel-gpu-tools. The value supplied with the event is always 1. |
* |
* I915_ERROR_UEVENT - Generated upon error detection, currently only via |
* hangcheck. The error detection event is a good indicator of when things |
* began to go badly. The value supplied with the event is a 1 upon error |
* detection, and a 0 upon reset completion, signifying no more error |
* exists. NOTE: Disabling hangcheck or reset via module parameter will |
* cause the related events to not be seen. |
* |
* I915_RESET_UEVENT - Event is generated just before an attempt to reset the |
* the GPU. The value supplied with the event is always 1. NOTE: Disable |
* reset via module parameter will cause this event to not be seen. |
*/ |
#define I915_L3_PARITY_UEVENT "L3_PARITY_ERROR" |
#define I915_ERROR_UEVENT "ERROR" |
#define I915_RESET_UEVENT "RESET" |
/* Each region is a minimum of 16k, and there are at most 255 of them. |
*/ |
#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use |
* of chars for next/prev indices */ |
#define I915_LOG_MIN_TEX_REGION_SIZE 14 |
/* Argument for the legacy DMA init/cleanup/resume ioctl
 * (DRM_IOCTL_I915_INIT); describes the ring buffer and the
 * front/back/depth buffers. */
typedef struct _drm_i915_init {
	enum {
		I915_INIT_DMA = 0x01,
		I915_CLEANUP_DMA = 0x02,
		I915_RESUME_DMA = 0x03
	} func;
	unsigned int mmio_offset;
	int sarea_priv_offset;
	unsigned int ring_start;
	unsigned int ring_end;
	unsigned int ring_size;
	unsigned int front_offset;
	unsigned int back_offset;
	unsigned int depth_offset;
	unsigned int w;
	unsigned int h;
	unsigned int pitch;
	unsigned int pitch_bits;
	unsigned int back_pitch;
	unsigned int depth_pitch;
	unsigned int cpp;	/* NOTE(review): presumably bytes per pixel — confirm */
	unsigned int chipset;
} drm_i915_init_t;
/* i915 driver-private SAREA contents.  Layout is userspace ABI (see the
 * backwards-compatibility note at the top of this header) — do not
 * reorder or resize fields. */
typedef struct _drm_i915_sarea {
	struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
	int last_upload;	/* last time texture was uploaded */
	int last_enqueue;	/* last time a buffer was enqueued */
	int last_dispatch;	/* age of the most recently dispatched buffer */
	int ctxOwner;		/* last context to upload state */
	int texAge;
	int pf_enabled;		/* is pageflipping allowed? */
	int pf_active;
	int pf_current_page;	/* which buffer is being displayed? */
	int perf_boxes;		/* performance boxes to be displayed */
	int width, height;	/* screen size in pixels */
	drm_handle_t front_handle;
	int front_offset;
	int front_size;
	drm_handle_t back_handle;
	int back_offset;
	int back_size;
	drm_handle_t depth_handle;
	int depth_offset;
	int depth_size;
	drm_handle_t tex_handle;
	int tex_offset;
	int tex_size;
	int log_tex_granularity;
	int pitch;
	int rotation;		/* 0, 90, 180 or 270 */
	int rotated_offset;
	int rotated_size;
	int rotated_pitch;
	int virtualX, virtualY;
	unsigned int front_tiled;
	unsigned int back_tiled;
	unsigned int depth_tiled;
	unsigned int rotated_tiled;
	unsigned int rotated2_tiled;
	int pipeA_x;
	int pipeA_y;
	int pipeA_w;
	int pipeA_h;
	int pipeB_x;
	int pipeB_y;
	int pipeB_w;
	int pipeB_h;
	/* fill out some space for old userspace triple buffer */
	drm_handle_t unused_handle;
	__u32 unused1, unused2, unused3;
	/* buffer object handles for static buffers. May change
	 * over the lifetime of the client.
	 */
	__u32 front_bo_handle;
	__u32 back_bo_handle;
	__u32 unused_bo_handle;
	__u32 depth_bo_handle;
} drm_i915_sarea_t;
/* due to userspace building against these headers we need some compat here */ |
#define planeA_x pipeA_x |
#define planeA_y pipeA_y |
#define planeA_w pipeA_w |
#define planeA_h pipeA_h |
#define planeB_x pipeB_x |
#define planeB_y pipeB_y |
#define planeB_w pipeB_w |
#define planeB_h pipeB_h |
/* Flags for perf_boxes |
*/ |
#define I915_BOX_RING_EMPTY 0x1 |
#define I915_BOX_FLIP 0x2 |
#define I915_BOX_WAIT 0x4 |
#define I915_BOX_TEXTURE_LOAD 0x8 |
#define I915_BOX_LOST_CONTEXT 0x10 |
/* I915 specific ioctls |
* The device specific ioctl range is 0x40 to 0x79. |
*/ |
#define DRM_I915_INIT 0x00 |
#define DRM_I915_FLUSH 0x01 |
#define DRM_I915_FLIP 0x02 |
#define DRM_I915_BATCHBUFFER 0x03 |
#define DRM_I915_IRQ_EMIT 0x04 |
#define DRM_I915_IRQ_WAIT 0x05 |
#define DRM_I915_GETPARAM 0x06 |
#define DRM_I915_SETPARAM 0x07 |
#define DRM_I915_ALLOC 0x08 |
#define DRM_I915_FREE 0x09 |
#define DRM_I915_INIT_HEAP 0x0a |
#define DRM_I915_CMDBUFFER 0x0b |
#define DRM_I915_DESTROY_HEAP 0x0c |
#define DRM_I915_SET_VBLANK_PIPE 0x0d |
#define DRM_I915_GET_VBLANK_PIPE 0x0e |
#define DRM_I915_VBLANK_SWAP 0x0f |
#define DRM_I915_HWS_ADDR 0x11 |
#define DRM_I915_GEM_INIT 0x13 |
#define DRM_I915_GEM_EXECBUFFER 0x14 |
#define DRM_I915_GEM_PIN 0x15 |
#define DRM_I915_GEM_UNPIN 0x16 |
#define DRM_I915_GEM_BUSY 0x17 |
#define DRM_I915_GEM_THROTTLE 0x18 |
#define DRM_I915_GEM_ENTERVT 0x19 |
#define DRM_I915_GEM_LEAVEVT 0x1a |
#define DRM_I915_GEM_CREATE 0x1b |
#define DRM_I915_GEM_PREAD 0x1c |
#define DRM_I915_GEM_PWRITE 0x1d |
#define DRM_I915_GEM_MMAP 0x1e |
#define DRM_I915_GEM_SET_DOMAIN 0x1f |
#define DRM_I915_GEM_SW_FINISH 0x20 |
#define DRM_I915_GEM_SET_TILING 0x21 |
#define DRM_I915_GEM_GET_TILING 0x22 |
#define DRM_I915_GEM_GET_APERTURE 0x23 |
#define DRM_I915_GEM_MMAP_GTT 0x24 |
#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25 |
#define DRM_I915_GEM_MADVISE 0x26 |
#define DRM_I915_OVERLAY_PUT_IMAGE 0x27 |
#define DRM_I915_OVERLAY_ATTRS 0x28 |
#define DRM_I915_GEM_EXECBUFFER2 0x29 |
#define DRM_I915_GET_SPRITE_COLORKEY 0x2a |
#define DRM_I915_SET_SPRITE_COLORKEY 0x2b |
#define DRM_I915_GEM_WAIT 0x2c |
#define DRM_I915_GEM_CONTEXT_CREATE 0x2d |
#define DRM_I915_GEM_CONTEXT_DESTROY 0x2e |
#define DRM_I915_GEM_SET_CACHING 0x2f |
#define DRM_I915_GEM_GET_CACHING 0x30 |
#define DRM_I915_REG_READ 0x31 |
#define DRM_I915_GET_RESET_STATS 0x32 |
#define DRM_I915_GEM_USERPTR 0x33 |
/* Ioctl request/argument bindings.  These numbers are userspace ABI.
 * Note DRM_IOCTL_I915_HWS_ADDR historically reuses struct
 * drm_i915_gem_init as its argument type. */
#define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
#define DRM_IOCTL_I915_FLIP		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
#define DRM_IOCTL_I915_BATCHBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
#define DRM_IOCTL_I915_IRQ_EMIT         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
#define DRM_IOCTL_I915_IRQ_WAIT         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
#define DRM_IOCTL_I915_GETPARAM         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
#define DRM_IOCTL_I915_SETPARAM         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
#define DRM_IOCTL_I915_ALLOC            DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
#define DRM_IOCTL_I915_FREE             DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
#define DRM_IOCTL_I915_INIT_HEAP        DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
#define DRM_IOCTL_I915_CMDBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
#define DRM_IOCTL_I915_DESTROY_HEAP	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
#define DRM_IOCTL_I915_SET_VBLANK_PIPE	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_GET_VBLANK_PIPE	DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
#define DRM_IOCTL_I915_HWS_ADDR		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
#define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
#define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
#define DRM_IOCTL_I915_GEM_SET_CACHING		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_GET_CACHING		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_THROTTLE	DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
#define DRM_IOCTL_I915_GEM_ENTERVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
#define DRM_IOCTL_I915_GEM_PREAD	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
#define DRM_IOCTL_I915_GEM_MMAP_GTT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
#define DRM_IOCTL_I915_GEM_SET_DOMAIN	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
#define DRM_IOCTL_I915_GEM_SW_FINISH	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
#define DRM_IOCTL_I915_GEM_SET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
#define DRM_IOCTL_I915_GEM_GET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
#define DRM_IOCTL_I915_GEM_GET_APERTURE	DRM_IOR  (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
#define DRM_IOCTL_I915_GEM_MADVISE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
#define DRM_IOCTL_I915_OVERLAY_ATTRS	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
/* Fixed to use the GET request number; this previously aliased the SET
 * ioctl number (same fix was applied upstream). */
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GEM_WAIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
#define DRM_IOCTL_I915_REG_READ			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
#define DRM_IOCTL_I915_GET_RESET_STATS		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
#define DRM_IOCTL_I915_GEM_USERPTR			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
/* Allow drivers to submit batchbuffers directly to hardware, relying
 * on the security mechanisms provided by hardware.
 */
typedef struct drm_i915_batchbuffer {
	int start;		/* agp offset */
	int used;		/* nr bytes in use */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
} drm_i915_batchbuffer_t;
/* As above, but pass a pointer to userspace buffer which can be
 * validated by the kernel prior to sending to hardware.
 */
typedef struct _drm_i915_cmdbuffer {
	char __user *buf;	/* pointer to userspace command buffer */
	int sz;			/* nr bytes in buf */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
} drm_i915_cmdbuffer_t;
/* Userspace can request & wait on irq's:
 */
typedef struct drm_i915_irq_emit {
	int __user *irq_seq;	/* written back (ioctl is DRM_IOWR) with the sequence number */
} drm_i915_irq_emit_t;
typedef struct drm_i915_irq_wait {
	int irq_seq;		/* sequence number to wait for */
} drm_i915_irq_wait_t;
/* Ioctl to query kernel params: |
*/ |
#define I915_PARAM_IRQ_ACTIVE 1 |
#define I915_PARAM_ALLOW_BATCHBUFFER 2 |
#define I915_PARAM_LAST_DISPATCH 3 |
#define I915_PARAM_CHIPSET_ID 4 |
#define I915_PARAM_HAS_GEM 5 |
#define I915_PARAM_NUM_FENCES_AVAIL 6 |
#define I915_PARAM_HAS_OVERLAY 7 |
#define I915_PARAM_HAS_PAGEFLIPPING 8 |
#define I915_PARAM_HAS_EXECBUF2 9 |
#define I915_PARAM_HAS_BSD 10 |
#define I915_PARAM_HAS_BLT 11 |
#define I915_PARAM_HAS_RELAXED_FENCING 12 |
#define I915_PARAM_HAS_COHERENT_RINGS 13 |
#define I915_PARAM_HAS_EXEC_CONSTANTS 14 |
#define I915_PARAM_HAS_RELAXED_DELTA 15 |
#define I915_PARAM_HAS_GEN7_SOL_RESET 16 |
#define I915_PARAM_HAS_LLC 17 |
#define I915_PARAM_HAS_ALIASING_PPGTT 18 |
#define I915_PARAM_HAS_WAIT_TIMEOUT 19 |
#define I915_PARAM_HAS_SEMAPHORES 20 |
#define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21 |
#define I915_PARAM_HAS_VEBOX 22 |
#define I915_PARAM_HAS_SECURE_BATCHES 23 |
#define I915_PARAM_HAS_PINNED_BATCHES 24 |
#define I915_PARAM_HAS_EXEC_NO_RELOC 25 |
#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26 |
#define I915_PARAM_HAS_WT 27 |
#define I915_PARAM_CMD_PARSER_VERSION 28 |
#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29 |
/* Argument for DRM_IOCTL_I915_GETPARAM: *value receives the queried
 * I915_PARAM_* result. */
typedef struct drm_i915_getparam {
	int param;		/* one of the I915_PARAM_* values above */
	int __user *value;	/* out: result written here */
} drm_i915_getparam_t;
/* Ioctl to set kernel params: |
*/ |
#define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1 |
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2 |
#define I915_SETPARAM_ALLOW_BATCHBUFFER 3 |
#define I915_SETPARAM_NUM_USED_FENCES 4 |
/* Argument for DRM_IOCTL_I915_SETPARAM. */
typedef struct drm_i915_setparam {
	int param;	/* one of the I915_SETPARAM_* values above */
	int value;
} drm_i915_setparam_t;
/* A memory manager for regions of shared memory:
 */
#define I915_MEM_REGION_AGP 1
typedef struct drm_i915_mem_alloc {
	int region;		/* which region, e.g. I915_MEM_REGION_AGP */
	int alignment;
	int size;
	int __user *region_offset;	/* offset from start of fb or agp */
} drm_i915_mem_alloc_t;
typedef struct drm_i915_mem_free {
	int region;
	int region_offset;	/* offset previously returned by the alloc ioctl */
} drm_i915_mem_free_t;
typedef struct drm_i915_mem_init_heap {
	int region;
	int size;
	int start;
} drm_i915_mem_init_heap_t;
/* Allow memory manager to be torn down and re-initialized (eg on
 * rotate):
 */
typedef struct drm_i915_mem_destroy_heap {
	int region;
} drm_i915_mem_destroy_heap_t;
/* Allow X server to configure which pipes to monitor for vblank signals
 */
#define DRM_I915_VBLANK_PIPE_A	1
#define DRM_I915_VBLANK_PIPE_B	2
typedef struct drm_i915_vblank_pipe {
	int pipe;	/* NOTE(review): presumably a DRM_I915_VBLANK_PIPE_* bitmask — confirm */
} drm_i915_vblank_pipe_t;
/* Schedule buffer swap at given vertical blank:
 */
typedef struct drm_i915_vblank_swap {
	drm_drawable_t drawable;
	enum drm_vblank_seq_type seqtype;
	unsigned int sequence;
} drm_i915_vblank_swap_t;
/* Argument for DRM_IOCTL_I915_HWS_ADDR (hardware status page address);
 * note that ioctl is declared with struct drm_i915_gem_init instead. */
typedef struct drm_i915_hws_addr {
	__u64 addr;
} drm_i915_hws_addr_t;
/* Argument for DRM_IOCTL_I915_GEM_INIT: the GTT range handed to GEM. */
struct drm_i915_gem_init {
	/**
	 * Beginning offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	__u64 gtt_start;
	/**
	 * Ending offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	__u64 gtt_end;
};
/* Argument for DRM_IOCTL_I915_GEM_CREATE: allocate a new GEM object. */
struct drm_i915_gem_create {
	/**
	 * Requested size for the object.
	 *
	 * The (page-aligned) allocated size for the object will be returned.
	 */
	__u64 size;
	/**
	 * Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;
	__u32 pad;
};
/* Argument for DRM_IOCTL_I915_GEM_PREAD: copy object contents out. */
struct drm_i915_gem_pread {
	/** Handle for the object being read. */
	__u32 handle;
	__u32 pad;
	/** Offset into the object to read from */
	__u64 offset;
	/** Length of data to read */
	__u64 size;
	/**
	 * Pointer to write the data into.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 data_ptr;
};
/* Argument for DRM_IOCTL_I915_GEM_PWRITE: copy user data into the object. */
struct drm_i915_gem_pwrite {
	/** Handle for the object being written to. */
	__u32 handle;
	__u32 pad;
	/** Offset into the object to write to */
	__u64 offset;
	/** Length of data to write */
	__u64 size;
	/**
	 * Pointer to read the data from.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 data_ptr;
};
/* Argument for DRM_IOCTL_I915_GEM_MMAP: map object memory into the
 * caller's address space. */
struct drm_i915_gem_mmap {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 pad;
	/** Offset in the object to map. */
	__u64 offset;
	/**
	 * Length of data to map.
	 *
	 * The value will be page-aligned.
	 */
	__u64 size;
	/**
	 * Returned pointer the data was mapped at.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 addr_ptr;
};
/* Argument for DRM_IOCTL_I915_GEM_MMAP_GTT: obtain the fake offset to
 * pass to mmap(2) for a GTT mapping of the object. */
struct drm_i915_gem_mmap_gtt {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 pad;
	/**
	 * Fake offset to use for subsequent mmap call
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 offset;
};
/* Argument for DRM_IOCTL_I915_GEM_SET_DOMAIN: move an object into the
 * given read/write domains (I915_GEM_DOMAIN_* flags). */
struct drm_i915_gem_set_domain {
	/** Handle for the object */
	__u32 handle;
	/** New read domains (mask of I915_GEM_DOMAIN_* flags) */
	__u32 read_domains;
	/** New write domain (only one domain may be written at a time) */
	__u32 write_domain;
};
/* Argument for DRM_IOCTL_I915_GEM_SW_FINISH. */
struct drm_i915_gem_sw_finish {
	/** Handle for the object */
	__u32 handle;
};
/* One relocation to be applied during execbuffer: writes the target
 * buffer's GTT offset (plus delta) into this buffer at `offset`. */
struct drm_i915_gem_relocation_entry {
	/**
	 * Handle of the buffer being pointed to by this relocation entry.
	 *
	 * It's appealing to make this be an index into the mm_validate_entry
	 * list to refer to the buffer, but this allows the driver to create
	 * a relocation list for state buffers and not re-write it per
	 * exec using the buffer.
	 */
	__u32 target_handle;
	/**
	 * Value to be added to the offset of the target buffer to make up
	 * the relocation entry.
	 */
	__u32 delta;
	/** Offset in the buffer the relocation entry will be written into */
	__u64 offset;
	/**
	 * Offset value of the target buffer that the relocation entry was last
	 * written as.
	 *
	 * If the buffer has the same offset as last time, we can skip syncing
	 * and writing the relocation. This value is written back out by
	 * the execbuffer ioctl when the relocation is written.
	 */
	__u64 presumed_offset;
	/**
	 * Target memory domains read by this operation.
	 */
	__u32 read_domains;
	/**
	 * Target memory domains written by this operation.
	 *
	 * Note that only one domain may be written by the whole
	 * execbuffer operation, so that where there are conflicts,
	 * the application will get -EINVAL back.
	 */
	__u32 write_domain;
};
/** @{ |
* Intel memory domains |
* |
* Most of these just align with the various caches in |
* the system and are used to flush and invalidate as |
* objects end up cached in different domains. |
*/ |
/** CPU cache */ |
#define I915_GEM_DOMAIN_CPU 0x00000001 |
/** Render cache, used by 2D and 3D drawing */ |
#define I915_GEM_DOMAIN_RENDER 0x00000002 |
/** Sampler cache, used by texture engine */ |
#define I915_GEM_DOMAIN_SAMPLER 0x00000004 |
/** Command queue, used to load batch buffers */ |
#define I915_GEM_DOMAIN_COMMAND 0x00000008 |
/** Instruction cache, used by shader programs */ |
#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010 |
/** Vertex address cache */ |
#define I915_GEM_DOMAIN_VERTEX 0x00000020 |
/** GTT domain - aperture and scanout */ |
#define I915_GEM_DOMAIN_GTT 0x00000040 |
/** @} */ |
/* One entry in the legacy execbuffer (v1) buffer list. */
struct drm_i915_gem_exec_object {
	/**
	 * User's handle for a buffer to be bound into the GTT for this
	 * operation.
	 */
	__u32 handle;
	/** Number of relocations to be performed on this buffer */
	__u32 relocation_count;
	/**
	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
	 * the relocations to be performed in this buffer.
	 */
	__u64 relocs_ptr;
	/** Required alignment in graphics aperture */
	__u64 alignment;
	/**
	 * Returned value of the updated offset of the object, for future
	 * presumed_offset writes.
	 */
	__u64 offset;
};
/* Argument for the legacy DRM_IOCTL_I915_GEM_EXECBUFFER (v1) ioctl. */
struct drm_i915_gem_execbuffer {
	/**
	 * List of buffers to be validated with their relocations to be
	 * performed on them.
	 *
	 * This is a pointer to an array of struct drm_i915_gem_validate_entry.
	 *
	 * These buffers must be listed in an order such that all relocations
	 * a buffer is performing refer to buffers that have already appeared
	 * in the validate list.
	 */
	__u64 buffers_ptr;
	__u32 buffer_count;
	/** Offset in the batchbuffer to start execution from. */
	__u32 batch_start_offset;
	/** Bytes used in batchbuffer from batch_start_offset */
	__u32 batch_len;
	__u32 DR1;
	__u32 DR4;
	__u32 num_cliprects;
	/** This is a struct drm_clip_rect *cliprects */
	__u64 cliprects_ptr;
};
/* One entry in the execbuffer2 buffer list; extends v1 with per-object
 * flags and reserved fields. */
struct drm_i915_gem_exec_object2 {
	/**
	 * User's handle for a buffer to be bound into the GTT for this
	 * operation.
	 */
	__u32 handle;
	/** Number of relocations to be performed on this buffer */
	__u32 relocation_count;
	/**
	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
	 * the relocations to be performed in this buffer.
	 */
	__u64 relocs_ptr;
	/** Required alignment in graphics aperture */
	__u64 alignment;
	/**
	 * Returned value of the updated offset of the object, for future
	 * presumed_offset writes.
	 */
	__u64 offset;
/* Per-object flags; __EXEC_OBJECT_UNKNOWN_FLAGS masks everything above
 * the defined bits. */
#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
#define EXEC_OBJECT_NEEDS_GTT	(1<<1)
#define EXEC_OBJECT_WRITE	(1<<2)
#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_WRITE<<1)
	__u64 flags;
	__u64 rsvd1;
	__u64 rsvd2;
};
/* Argument for DRM_IOCTL_I915_GEM_EXECBUFFER2. */
struct drm_i915_gem_execbuffer2 {
	/**
	 * List of gem_exec_object2 structs
	 */
	__u64 buffers_ptr;
	__u32 buffer_count;
	/** Offset in the batchbuffer to start execution from. */
	__u32 batch_start_offset;
	/** Bytes used in batchbuffer from batch_start_offset */
	__u32 batch_len;
	__u32 DR1;
	__u32 DR4;
	__u32 num_cliprects;
	/** This is a struct drm_clip_rect *cliprects */
	__u64 cliprects_ptr;
/* Bits 0-2 of flags select the target engine/ring: */
#define I915_EXEC_RING_MASK              (7<<0)
#define I915_EXEC_DEFAULT                (0<<0)
#define I915_EXEC_RENDER                 (1<<0)
#define I915_EXEC_BSD                    (2<<0)
#define I915_EXEC_BLT                    (3<<0)
#define I915_EXEC_VEBOX                  (4<<0)
/* Used for switching the constants addressing mode on gen4+ RENDER ring.
 * Gen6+ only supports relative addressing to dynamic state (default) and
 * absolute addressing.
 *
 * These flags are ignored for the BSD and BLT rings.
 */
#define I915_EXEC_CONSTANTS_MASK 	(3<<6)
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
#define I915_EXEC_CONSTANTS_ABSOLUTE 	(1<<6)
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
	__u64 flags;
	__u64 rsvd1; /* now used for context info */
	__u64 rsvd2;
};
/** Resets the SO write offset registers for transform feedback on gen7. */ |
#define I915_EXEC_GEN7_SOL_RESET (1<<8) |
/** Request a privileged ("secure") batch buffer. Note only available for |
* DRM_ROOT_ONLY | DRM_MASTER processes. |
*/ |
#define I915_EXEC_SECURE (1<<9) |
/** Inform the kernel that the batch is and will always be pinned. This |
* negates the requirement for a workaround to be performed to avoid |
* an incoherent CS (such as can be found on 830/845). If this flag is |
* not passed, the kernel will endeavour to make sure the batch is |
* coherent with the CS before execution. If this flag is passed, |
* userspace assumes the responsibility for ensuring the same. |
*/ |
#define I915_EXEC_IS_PINNED (1<<10) |
/** Provide a hint to the kernel that the command stream and auxiliary |
* state buffers already holds the correct presumed addresses and so the |
* relocation process may be skipped if no buffers need to be moved in |
* preparation for the execbuffer. |
*/ |
#define I915_EXEC_NO_RELOC (1<<11) |
/** Use the reloc.handle as an index into the exec object array rather |
* than as the per-file handle. |
*/ |
#define I915_EXEC_HANDLE_LUT (1<<12) |
#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_HANDLE_LUT<<1) |
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) |
#define i915_execbuffer2_set_context_id(eb2, context) \ |
(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK |
#define i915_execbuffer2_get_context_id(eb2) \ |
((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK) |
/* Argument for DRM_IOCTL_I915_GEM_PIN: pin an object in the GTT. */
struct drm_i915_gem_pin {
	/** Handle of the buffer to be pinned. */
	__u32 handle;
	__u32 pad;
	/** alignment required within the aperture */
	__u64 alignment;
	/** Returned GTT offset of the buffer. */
	__u64 offset;
};
/* Argument for DRM_IOCTL_I915_GEM_UNPIN. */
struct drm_i915_gem_unpin {
	/** Handle of the buffer to be unpinned. */
	__u32 handle;
	__u32 pad;
};
/* Argument for DRM_IOCTL_I915_GEM_BUSY: query GPU activity on an object. */
struct drm_i915_gem_busy {
	/** Handle of the buffer to check for busy */
	__u32 handle;
	/** Return busy status (1 if busy, 0 if idle).
	 * The high word is used to indicate on which rings the object
	 * currently resides:
	 *  16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc)
	 */
	__u32 busy;
};
/** |
* I915_CACHING_NONE |
* |
* GPU access is not coherent with cpu caches. Default for machines without an |
* LLC. |
*/ |
#define I915_CACHING_NONE 0 |
/** |
* I915_CACHING_CACHED |
* |
* GPU access is coherent with cpu caches and furthermore the data is cached in |
* last-level caches shared between cpu cores and the gpu GT. Default on |
* machines with HAS_LLC. |
*/ |
#define I915_CACHING_CACHED 1 |
/** |
* I915_CACHING_DISPLAY |
* |
* Special GPU caching mode which is coherent with the scanout engines. |
* Transparently falls back to I915_CACHING_NONE on platforms where no special |
* cache mode (like write-through or gfdt flushing) is available. The kernel |
* automatically sets this mode when using a buffer as a scanout target. |
* Userspace can manually set this mode to avoid a costly stall and clflush in |
* the hotpath of drawing the first frame. |
*/ |
#define I915_CACHING_DISPLAY 2 |
/* Argument for DRM_IOCTL_I915_GEM_SET_CACHING / _GET_CACHING. */
struct drm_i915_gem_caching {
	/**
	 * Handle of the buffer to set/get the caching level of. */
	__u32 handle;
	/**
	 * Caching level to apply or return value
	 *
	 * bits0-15 are for generic caching control (i.e. the above defined
	 * values). bits16-31 are reserved for platform-specific variations
	 * (e.g. l3$ caching on gen7). */
	__u32 caching;
};
#define I915_TILING_NONE 0 |
#define I915_TILING_X 1 |
#define I915_TILING_Y 2 |
#define I915_BIT_6_SWIZZLE_NONE 0 |
#define I915_BIT_6_SWIZZLE_9 1 |
#define I915_BIT_6_SWIZZLE_9_10 2 |
#define I915_BIT_6_SWIZZLE_9_11 3 |
#define I915_BIT_6_SWIZZLE_9_10_11 4 |
/* Not seen by userland */ |
#define I915_BIT_6_SWIZZLE_UNKNOWN 5 |
/* Seen by userland. */ |
#define I915_BIT_6_SWIZZLE_9_17 6 |
#define I915_BIT_6_SWIZZLE_9_10_17 7 |
/* Argument for DRM_IOCTL_I915_GEM_SET_TILING. */
struct drm_i915_gem_set_tiling {
	/** Handle of the buffer to have its tiling state updated */
	__u32 handle;
	/**
	 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
	 * I915_TILING_Y).
	 *
	 * This value is to be set on request, and will be updated by the
	 * kernel on successful return with the actual chosen tiling layout.
	 *
	 * The tiling mode may be demoted to I915_TILING_NONE when the system
	 * has bit 6 swizzling that can't be managed correctly by GEM.
	 *
	 * Buffer contents become undefined when changing tiling_mode.
	 */
	__u32 tiling_mode;
	/**
	 * Stride in bytes for the object when in I915_TILING_X or
	 * I915_TILING_Y.
	 */
	__u32 stride;
	/**
	 * Returned address bit 6 swizzling required for CPU access through
	 * mmap mapping.
	 */
	__u32 swizzle_mode;
};
/* Argument for DRM_IOCTL_I915_GEM_GET_TILING. */
struct drm_i915_gem_get_tiling {
	/** Handle of the buffer to get tiling state for. */
	__u32 handle;
	/**
	 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
	 * I915_TILING_Y).
	 */
	__u32 tiling_mode;
	/**
	 * Returned address bit 6 swizzling required for CPU access through
	 * mmap mapping.
	 */
	__u32 swizzle_mode;
	/**
	 * Returned address bit 6 swizzling required for CPU access through
	 * mmap mapping whilst bound.
	 */
	__u32 phys_swizzle_mode;
};
/* Argument for DRM_IOCTL_I915_GEM_GET_APERTURE. */
struct drm_i915_gem_get_aperture {
	/** Total size of the aperture used by i915_gem_execbuffer, in bytes */
	__u64 aper_size;
	/**
	 * Available space in the aperture used by i915_gem_execbuffer, in
	 * bytes
	 */
	__u64 aper_available_size;
};
/* Argument for DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID. */
struct drm_i915_get_pipe_from_crtc_id {
	/** ID of CRTC being requested **/
	__u32 crtc_id;
	/** pipe of requested CRTC **/
	__u32 pipe;
};
#define I915_MADV_WILLNEED 0 |
#define I915_MADV_DONTNEED 1 |
#define __I915_MADV_PURGED 2 /* internal state */ |
/* Argument for DRM_IOCTL_I915_GEM_MADVISE. */
struct drm_i915_gem_madvise {
	/** Handle of the buffer to change the backing store advice */
	__u32 handle;
	/* Advice: either the buffer will be needed again in the near future,
	 * or won't be and could be discarded under memory pressure.
	 * (I915_MADV_WILLNEED / I915_MADV_DONTNEED)
	 */
	__u32 madv;
	/** Whether the backing store still exists. */
	__u32 retained;
};
/* flags */ |
#define I915_OVERLAY_TYPE_MASK 0xff |
#define I915_OVERLAY_YUV_PLANAR 0x01 |
#define I915_OVERLAY_YUV_PACKED 0x02 |
#define I915_OVERLAY_RGB 0x03 |
#define I915_OVERLAY_DEPTH_MASK 0xff00 |
#define I915_OVERLAY_RGB24 0x1000 |
#define I915_OVERLAY_RGB16 0x2000 |
#define I915_OVERLAY_RGB15 0x3000 |
#define I915_OVERLAY_YUV422 0x0100 |
#define I915_OVERLAY_YUV411 0x0200 |
#define I915_OVERLAY_YUV420 0x0300 |
#define I915_OVERLAY_YUV410 0x0400 |
#define I915_OVERLAY_SWAP_MASK 0xff0000 |
#define I915_OVERLAY_NO_SWAP 0x000000 |
#define I915_OVERLAY_UV_SWAP 0x010000 |
#define I915_OVERLAY_Y_SWAP 0x020000 |
#define I915_OVERLAY_Y_AND_UV_SWAP 0x030000 |
#define I915_OVERLAY_FLAGS_MASK 0xff000000 |
#define I915_OVERLAY_ENABLE 0x01000000 |
/* Argument for DRM_IOCTL_I915_OVERLAY_PUT_IMAGE: displays one source image
 * (described by the I915_OVERLAY_* flags above) on the hardware overlay of
 * the given CRTC, with optional scaling to the destination rectangle. */
struct drm_intel_overlay_put_image {
	/* various flags and src format description */
	__u32 flags;
	/* source picture description */
	__u32 bo_handle;
	/* stride values and offsets are in bytes, buffer relative */
	__u16 stride_Y; /* stride for packed formats */
	__u16 stride_UV;
	__u32 offset_Y; /* offset for packed formats */
	__u32 offset_U;
	__u32 offset_V;
	/* in pixels */
	__u16 src_width;
	__u16 src_height;
	/* to compensate the scaling factors for partially covered surfaces */
	__u16 src_scan_width;
	__u16 src_scan_height;
	/* output crtc description */
	__u32 crtc_id;
	__u16 dst_x;
	__u16 dst_y;
	__u16 dst_width;
	__u16 dst_height;
};
/* flags for drm_intel_overlay_attrs.flags: select which attribute groups
 * the ioctl should update */
#define I915_OVERLAY_UPDATE_ATTRS (1&lt;&lt;0)
#define I915_OVERLAY_UPDATE_GAMMA (1&lt;&lt;1)
/* Argument for DRM_IOCTL_I915_OVERLAY_ATTRS: get/set overlay color
 * attributes (color key, brightness/contrast/saturation, gamma ramp). */
struct drm_intel_overlay_attrs {
	__u32 flags;
	__u32 color_key;
	__s32 brightness;
	__u32 contrast;
	__u32 saturation;
	__u32 gamma0;
	__u32 gamma1;
	__u32 gamma2;
	__u32 gamma3;
	__u32 gamma4;
	__u32 gamma5;
};
/* |
* Intel sprite handling |
* |
* Color keying works with a min/mask/max tuple. Both source and destination |
* color keying is allowed. |
* |
* Source keying: |
* Sprite pixels within the min & max values, masked against the color channels |
* specified in the mask field, will be transparent. All other pixels will |
* be displayed on top of the primary plane. For RGB surfaces, only the min |
* and mask fields will be used; ranged compares are not allowed. |
* |
* Destination keying: |
* Primary plane pixels that match the min value, masked against the color |
* channels specified in the mask field, will be replaced by corresponding |
* pixels from the sprite plane. |
* |
* Note that source & destination keying are exclusive; only one can be |
* active on a given plane. |
*/ |
#define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */ |
#define I915_SET_COLORKEY_DESTINATION (1<<1) |
#define I915_SET_COLORKEY_SOURCE (1<<2) |
/* Argument for the sprite color-key ioctl; see the keying description and
 * the I915_SET_COLORKEY_* flags above. */
struct drm_intel_sprite_colorkey {
	__u32 plane_id;
	__u32 min_value;
	__u32 channel_mask;
	__u32 max_value;
	__u32 flags;
};
/* Argument for DRM_IOCTL_I915_GEM_WAIT: wait for rendering on a buffer
 * object to complete, with an optional timeout. */
struct drm_i915_gem_wait {
	/** Handle of BO we shall wait on */
	__u32 bo_handle;
	__u32 flags;
	/** Number of nanoseconds to wait, Returns time remaining. */
	__s64 timeout_ns;
};
/* Argument for DRM_IOCTL_I915_GEM_CONTEXT_CREATE. */
struct drm_i915_gem_context_create {
	/* output: id of new context*/
	__u32 ctx_id;
	__u32 pad;
};
/* Argument for DRM_IOCTL_I915_GEM_CONTEXT_DESTROY. */
struct drm_i915_gem_context_destroy {
	__u32 ctx_id;
	__u32 pad;
};
/* Argument for DRM_IOCTL_I915_REG_READ: read a single GPU register. */
struct drm_i915_reg_read {
	__u64 offset;
	__u64 val; /* Return value */
};
/* Argument for DRM_IOCTL_I915_GET_RESET_STATS: per-context GPU reset
 * statistics. All counter fields are outputs. */
struct drm_i915_reset_stats {
	__u32 ctx_id;
	__u32 flags;
	/* All resets since boot/module reload, for all contexts */
	__u32 reset_count;
	/* Number of batches lost when active in GPU, for this context */
	__u32 batch_active;
	/* Number of batches lost pending for execution, for this context */
	__u32 batch_pending;
	__u32 pad;
};
/* Argument for DRM_IOCTL_I915_GEM_USERPTR: create a GEM object backed by
 * an existing user memory range (see I915_USERPTR_* flags below). */
struct drm_i915_gem_userptr {
	__u64 user_ptr;
	__u64 user_size;
	__u32 flags;
#define I915_USERPTR_READ_ONLY 0x1
#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
	/**
	 * Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;
};
/* NOTE(review): the three structs below are not part of the upstream Linux
 * i915 uAPI; they appear to be KolibriOS-specific extensions — confirm their
 * semantics against the driver code that services these ioctls. */
struct drm_i915_mask {
	__u32 handle;
	__u32 width;
	__u32 height;
	__u32 bo_size;
	__u32 bo_pitch;
	__u32 bo_map;	/* presumably a CPU mapping of the mask BO — verify */
};
/* Framebuffer description (name/geometry/tiling and the crtc/pipe that
 * scans it out). */
struct drm_i915_fb_info {
	__u32 name;
	__u32 width;
	__u32 height;
	__u32 pitch;
	__u32 tiling;
	__u32 crtc;
	__u32 pipe;
};
/* Partial update of a mask object: (dx,dy) origin plus width/height of the
 * region to refresh. */
struct drm_i915_mask_update {
	__u32 handle;
	__u32 dx;
	__u32 dy;
	__u32 width;
	__u32 height;
	__u32 bo_pitch;
	__u32 bo_map;
};
#endif /* _UAPI_I915_DRM_H_ */ |
/drivers/include/uapi/drm/radeon_drm.h |
---|
0,0 → 1,1064 |
/* radeon_drm.h -- Public header for the radeon driver -*- linux-c -*- |
* |
* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. |
* Copyright 2000 VA Linux Systems, Inc., Fremont, California. |
* Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas. |
* All rights reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
* DEALINGS IN THE SOFTWARE. |
* |
* Authors: |
* Kevin E. Martin <martin@valinux.com> |
* Gareth Hughes <gareth@valinux.com> |
* Keith Whitwell <keith@tungstengraphics.com> |
*/ |
#ifndef __RADEON_DRM_H__ |
#define __RADEON_DRM_H__ |
#include <drm/drm.h> |
/* WARNING: If you change any of these defines, make sure to change the |
* defines in the X server file (radeon_sarea.h) |
*/ |
#ifndef __RADEON_SAREA_DEFINES__ |
#define __RADEON_SAREA_DEFINES__ |
/* Old style state flags, required for sarea interface (1.1 and 1.2 |
* clears) and 1.2 drm_vertex2 ioctl. |
*/ |
#define RADEON_UPLOAD_CONTEXT 0x00000001 |
#define RADEON_UPLOAD_VERTFMT 0x00000002 |
#define RADEON_UPLOAD_LINE 0x00000004 |
#define RADEON_UPLOAD_BUMPMAP 0x00000008 |
#define RADEON_UPLOAD_MASKS 0x00000010 |
#define RADEON_UPLOAD_VIEWPORT 0x00000020 |
#define RADEON_UPLOAD_SETUP 0x00000040 |
#define RADEON_UPLOAD_TCL 0x00000080 |
#define RADEON_UPLOAD_MISC 0x00000100 |
#define RADEON_UPLOAD_TEX0 0x00000200 |
#define RADEON_UPLOAD_TEX1 0x00000400 |
#define RADEON_UPLOAD_TEX2 0x00000800 |
#define RADEON_UPLOAD_TEX0IMAGES 0x00001000 |
#define RADEON_UPLOAD_TEX1IMAGES 0x00002000 |
#define RADEON_UPLOAD_TEX2IMAGES 0x00004000 |
#define RADEON_UPLOAD_CLIPRECTS 0x00008000 /* handled client-side */ |
#define RADEON_REQUIRE_QUIESCENCE 0x00010000 |
#define RADEON_UPLOAD_ZBIAS 0x00020000 /* version 1.2 and newer */ |
#define RADEON_UPLOAD_ALL 0x003effff |
#define RADEON_UPLOAD_CONTEXT_ALL 0x003e01ff |
/* New style per-packet identifiers for use in cmd_buffer ioctl with |
* the RADEON_EMIT_PACKET command. Comments relate new packets to old |
* state bits and the packet size: |
*/ |
#define RADEON_EMIT_PP_MISC 0 /* context/7 */ |
#define RADEON_EMIT_PP_CNTL 1 /* context/3 */ |
#define RADEON_EMIT_RB3D_COLORPITCH 2 /* context/1 */ |
#define RADEON_EMIT_RE_LINE_PATTERN 3 /* line/2 */ |
#define RADEON_EMIT_SE_LINE_WIDTH 4 /* line/1 */ |
#define RADEON_EMIT_PP_LUM_MATRIX 5 /* bumpmap/1 */ |
#define RADEON_EMIT_PP_ROT_MATRIX_0 6 /* bumpmap/2 */ |
#define RADEON_EMIT_RB3D_STENCILREFMASK 7 /* masks/3 */ |
#define RADEON_EMIT_SE_VPORT_XSCALE 8 /* viewport/6 */ |
#define RADEON_EMIT_SE_CNTL 9 /* setup/2 */ |
#define RADEON_EMIT_SE_CNTL_STATUS 10 /* setup/1 */ |
#define RADEON_EMIT_RE_MISC 11 /* misc/1 */ |
#define RADEON_EMIT_PP_TXFILTER_0 12 /* tex0/6 */ |
#define RADEON_EMIT_PP_BORDER_COLOR_0 13 /* tex0/1 */ |
#define RADEON_EMIT_PP_TXFILTER_1 14 /* tex1/6 */ |
#define RADEON_EMIT_PP_BORDER_COLOR_1 15 /* tex1/1 */ |
#define RADEON_EMIT_PP_TXFILTER_2 16 /* tex2/6 */ |
#define RADEON_EMIT_PP_BORDER_COLOR_2 17 /* tex2/1 */ |
#define RADEON_EMIT_SE_ZBIAS_FACTOR 18 /* zbias/2 */ |
#define RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT 19 /* tcl/11 */ |
#define RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED 20 /* material/17 */ |
#define R200_EMIT_PP_TXCBLEND_0 21 /* tex0/4 */ |
#define R200_EMIT_PP_TXCBLEND_1 22 /* tex1/4 */ |
#define R200_EMIT_PP_TXCBLEND_2 23 /* tex2/4 */ |
#define R200_EMIT_PP_TXCBLEND_3 24 /* tex3/4 */ |
#define R200_EMIT_PP_TXCBLEND_4 25 /* tex4/4 */ |
#define R200_EMIT_PP_TXCBLEND_5 26 /* tex5/4 */ |
#define R200_EMIT_PP_TXCBLEND_6 27 /* /4 */ |
#define R200_EMIT_PP_TXCBLEND_7 28 /* /4 */ |
#define R200_EMIT_TCL_LIGHT_MODEL_CTL_0 29 /* tcl/7 */ |
#define R200_EMIT_TFACTOR_0 30 /* tf/7 */ |
#define R200_EMIT_VTX_FMT_0 31 /* vtx/5 */ |
#define R200_EMIT_VAP_CTL 32 /* vap/1 */ |
#define R200_EMIT_MATRIX_SELECT_0 33 /* msl/5 */ |
#define R200_EMIT_TEX_PROC_CTL_2 34 /* tcg/5 */ |
#define R200_EMIT_TCL_UCP_VERT_BLEND_CTL 35 /* tcl/1 */ |
#define R200_EMIT_PP_TXFILTER_0 36 /* tex0/6 */ |
#define R200_EMIT_PP_TXFILTER_1 37 /* tex1/6 */ |
#define R200_EMIT_PP_TXFILTER_2 38 /* tex2/6 */ |
#define R200_EMIT_PP_TXFILTER_3 39 /* tex3/6 */ |
#define R200_EMIT_PP_TXFILTER_4 40 /* tex4/6 */ |
#define R200_EMIT_PP_TXFILTER_5 41 /* tex5/6 */ |
#define R200_EMIT_PP_TXOFFSET_0 42 /* tex0/1 */ |
#define R200_EMIT_PP_TXOFFSET_1 43 /* tex1/1 */ |
#define R200_EMIT_PP_TXOFFSET_2 44 /* tex2/1 */ |
#define R200_EMIT_PP_TXOFFSET_3 45 /* tex3/1 */ |
#define R200_EMIT_PP_TXOFFSET_4 46 /* tex4/1 */ |
#define R200_EMIT_PP_TXOFFSET_5 47 /* tex5/1 */ |
#define R200_EMIT_VTE_CNTL 48 /* vte/1 */ |
#define R200_EMIT_OUTPUT_VTX_COMP_SEL 49 /* vtx/1 */ |
#define R200_EMIT_PP_TAM_DEBUG3 50 /* tam/1 */ |
#define R200_EMIT_PP_CNTL_X 51 /* cst/1 */ |
#define R200_EMIT_RB3D_DEPTHXY_OFFSET 52 /* cst/1 */ |
#define R200_EMIT_RE_AUX_SCISSOR_CNTL 53 /* cst/1 */ |
#define R200_EMIT_RE_SCISSOR_TL_0 54 /* cst/2 */ |
#define R200_EMIT_RE_SCISSOR_TL_1 55 /* cst/2 */ |
#define R200_EMIT_RE_SCISSOR_TL_2 56 /* cst/2 */ |
#define R200_EMIT_SE_VAP_CNTL_STATUS 57 /* cst/1 */ |
#define R200_EMIT_SE_VTX_STATE_CNTL 58 /* cst/1 */ |
#define R200_EMIT_RE_POINTSIZE 59 /* cst/1 */ |
#define R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0 60 /* cst/4 */ |
#define R200_EMIT_PP_CUBIC_FACES_0 61 |
#define R200_EMIT_PP_CUBIC_OFFSETS_0 62 |
#define R200_EMIT_PP_CUBIC_FACES_1 63 |
#define R200_EMIT_PP_CUBIC_OFFSETS_1 64 |
#define R200_EMIT_PP_CUBIC_FACES_2 65 |
#define R200_EMIT_PP_CUBIC_OFFSETS_2 66 |
#define R200_EMIT_PP_CUBIC_FACES_3 67 |
#define R200_EMIT_PP_CUBIC_OFFSETS_3 68 |
#define R200_EMIT_PP_CUBIC_FACES_4 69 |
#define R200_EMIT_PP_CUBIC_OFFSETS_4 70 |
#define R200_EMIT_PP_CUBIC_FACES_5 71 |
#define R200_EMIT_PP_CUBIC_OFFSETS_5 72 |
#define RADEON_EMIT_PP_TEX_SIZE_0 73 |
#define RADEON_EMIT_PP_TEX_SIZE_1 74 |
#define RADEON_EMIT_PP_TEX_SIZE_2 75 |
#define R200_EMIT_RB3D_BLENDCOLOR 76 |
#define R200_EMIT_TCL_POINT_SPRITE_CNTL 77 |
#define RADEON_EMIT_PP_CUBIC_FACES_0 78 |
#define RADEON_EMIT_PP_CUBIC_OFFSETS_T0 79 |
#define RADEON_EMIT_PP_CUBIC_FACES_1 80 |
#define RADEON_EMIT_PP_CUBIC_OFFSETS_T1 81 |
#define RADEON_EMIT_PP_CUBIC_FACES_2 82 |
#define RADEON_EMIT_PP_CUBIC_OFFSETS_T2 83 |
#define R200_EMIT_PP_TRI_PERF_CNTL 84 |
#define R200_EMIT_PP_AFS_0 85 |
#define R200_EMIT_PP_AFS_1 86 |
#define R200_EMIT_ATF_TFACTOR 87 |
#define R200_EMIT_PP_TXCTLALL_0 88 |
#define R200_EMIT_PP_TXCTLALL_1 89 |
#define R200_EMIT_PP_TXCTLALL_2 90 |
#define R200_EMIT_PP_TXCTLALL_3 91 |
#define R200_EMIT_PP_TXCTLALL_4 92 |
#define R200_EMIT_PP_TXCTLALL_5 93 |
#define R200_EMIT_VAP_PVS_CNTL 94 |
#define RADEON_MAX_STATE_PACKETS 95 |
/* Commands understood by cmd_buffer ioctl. More can be added but |
* obviously these can't be removed or changed: |
*/ |
#define RADEON_CMD_PACKET 1 /* emit one of the register packets above */ |
#define RADEON_CMD_SCALARS 2 /* emit scalar data */ |
#define RADEON_CMD_VECTORS 3 /* emit vector data */ |
#define RADEON_CMD_DMA_DISCARD 4 /* discard current dma buf */ |
#define RADEON_CMD_PACKET3 5 /* emit hw packet */ |
#define RADEON_CMD_PACKET3_CLIP 6 /* emit hw packet wrapped in cliprects */ |
#define RADEON_CMD_SCALARS2 7 /* r200 stopgap */ |
#define RADEON_CMD_WAIT 8 /* emit hw wait commands -- note: |
* doesn't make the cpu wait, just |
* the graphics hardware */ |
#define RADEON_CMD_VECLINEAR 9 /* another r200 stopgap */ |
/* Header for the legacy DRM_RADEON_CMDBUF command stream: every command
 * starts with one of these 32-bit words. cmd_type holds one of the
 * RADEON_CMD_* values above and selects which member of the union the
 * remaining three bytes are interpreted through. */
typedef union {
	int i;
	struct {
		unsigned char cmd_type, pad0, pad1, pad2;
	} header;
	struct {
		unsigned char cmd_type, packet_id, pad0, pad1;
	} packet;
	struct {
		unsigned char cmd_type, offset, stride, count;
	} scalars;
	struct {
		unsigned char cmd_type, offset, stride, count;
	} vectors;
	struct {
		unsigned char cmd_type, addr_lo, addr_hi, count;
	} veclinear;
	struct {
		unsigned char cmd_type, buf_idx, pad0, pad1;
	} dma;
	struct {
		unsigned char cmd_type, flags, pad0, pad1;
	} wait;
} drm_radeon_cmd_header_t;
#define RADEON_WAIT_2D 0x1 |
#define RADEON_WAIT_3D 0x2 |
/* Allowed parameters for R300_CMD_PACKET3 |
*/ |
#define R300_CMD_PACKET3_CLEAR 0 |
#define R300_CMD_PACKET3_RAW 1 |
/* Commands understood by cmd_buffer ioctl for R300. |
* The interface has not been stabilized, so some of these may be removed |
* and eventually reordered before stabilization. |
*/ |
#define R300_CMD_PACKET0 1 |
#define R300_CMD_VPU 2 /* emit vertex program upload */ |
#define R300_CMD_PACKET3 3 /* emit a packet3 */ |
#define R300_CMD_END3D 4 /* emit sequence ending 3d rendering */ |
#define R300_CMD_CP_DELAY 5 |
#define R300_CMD_DMA_DISCARD 6 |
#define R300_CMD_WAIT 7 |
# define R300_WAIT_2D 0x1 |
# define R300_WAIT_3D 0x2 |
/* these two defines are DOING IT WRONG - however |
* we have userspace which relies on using these. |
* The wait interface is backwards compat new |
* code should use the NEW_WAIT defines below |
* THESE ARE NOT BIT FIELDS |
*/ |
# define R300_WAIT_2D_CLEAN 0x3 |
# define R300_WAIT_3D_CLEAN 0x4 |
# define R300_NEW_WAIT_2D_3D 0x3 |
# define R300_NEW_WAIT_2D_2D_CLEAN 0x4 |
# define R300_NEW_WAIT_3D_3D_CLEAN 0x6 |
# define R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN 0x8 |
#define R300_CMD_SCRATCH 8 |
#define R300_CMD_R500FP 9 |
/* R300-family variant of the command-buffer header: cmd_type holds one of
 * the R300_CMD_* values above and selects the interpretation of the other
 * three bytes. */
typedef union {
	unsigned int u;
	struct {
		unsigned char cmd_type, pad0, pad1, pad2;
	} header;
	struct {
		unsigned char cmd_type, count, reglo, reghi;
	} packet0;
	struct {
		unsigned char cmd_type, count, adrlo, adrhi;
	} vpu;
	struct {
		unsigned char cmd_type, packet, pad0, pad1;
	} packet3;
	struct {
		unsigned char cmd_type, packet;
		unsigned short count; /* amount of packet2 to emit */
	} delay;
	struct {
		unsigned char cmd_type, buf_idx, pad0, pad1;
	} dma;
	struct {
		unsigned char cmd_type, flags, pad0, pad1;
	} wait;
	struct {
		unsigned char cmd_type, reg, n_bufs, flags;
	} scratch;
	struct {
		unsigned char cmd_type, count, adrlo, adrhi_flags;
	} r500fp;
} drm_r300_cmd_header_t;
#define RADEON_FRONT 0x1 |
#define RADEON_BACK 0x2 |
#define RADEON_DEPTH 0x4 |
#define RADEON_STENCIL 0x8 |
#define RADEON_CLEAR_FASTZ 0x80000000 |
#define RADEON_USE_HIERZ 0x40000000 |
#define RADEON_USE_COMP_ZBUF 0x20000000 |
#define R500FP_CONSTANT_TYPE (1 << 1) |
#define R500FP_CONSTANT_CLAMP (1 << 2) |
/* Primitive types |
*/ |
#define RADEON_POINTS 0x1 |
#define RADEON_LINES 0x2 |
#define RADEON_LINE_STRIP 0x3 |
#define RADEON_TRIANGLES 0x4 |
#define RADEON_TRIANGLE_FAN 0x5 |
#define RADEON_TRIANGLE_STRIP 0x6 |
/* Vertex/indirect buffer size |
*/ |
#define RADEON_BUFFER_SIZE 65536 |
/* Byte offsets for indirect buffer data |
*/ |
#define RADEON_INDEX_PRIM_OFFSET 20 |
#define RADEON_SCRATCH_REG_OFFSET 32 |
#define R600_SCRATCH_REG_OFFSET 256 |
#define RADEON_NR_SAREA_CLIPRECTS 12 |
/* There are 2 heaps (local/GART). Each region within a heap is a |
* minimum of 64k, and there are at most 64 of them per heap. |
*/ |
#define RADEON_LOCAL_TEX_HEAP 0 |
#define RADEON_GART_TEX_HEAP 1 |
#define RADEON_NR_TEX_HEAPS 2 |
#define RADEON_NR_TEX_REGIONS 64 |
#define RADEON_LOG_TEX_GRANULARITY 16 |
#define RADEON_MAX_TEXTURE_LEVELS 12 |
#define RADEON_MAX_TEXTURE_UNITS 3 |
#define RADEON_MAX_SURFACES 8 |
/* Blits have strict offset rules. All blit offset must be aligned on |
* a 1K-byte boundary. |
*/ |
#define RADEON_OFFSET_SHIFT 10 |
#define RADEON_OFFSET_ALIGN (1 << RADEON_OFFSET_SHIFT) |
#define RADEON_OFFSET_MASK (RADEON_OFFSET_ALIGN - 1) |
#endif /* __RADEON_SAREA_DEFINES__ */ |
/* One RGBA color value, one register-sized word per channel. */
typedef struct {
	unsigned int red;
	unsigned int green;
	unsigned int blue;
	unsigned int alpha;
} radeon_color_regs_t;
/* Shadow copy of the 3D context register state uploaded via the sarea /
 * vertex ioctls. The hex comments give the hardware register offsets of
 * the groups. */
typedef struct {
	/* Context state */
	unsigned int pp_misc; /* 0x1c14 */
	unsigned int pp_fog_color;
	unsigned int re_solid_color;
	unsigned int rb3d_blendcntl;
	unsigned int rb3d_depthoffset;
	unsigned int rb3d_depthpitch;
	unsigned int rb3d_zstencilcntl;
	unsigned int pp_cntl; /* 0x1c38 */
	unsigned int rb3d_cntl;
	unsigned int rb3d_coloroffset;
	unsigned int re_width_height;
	unsigned int rb3d_colorpitch;
	unsigned int se_cntl;
	/* Vertex format state */
	unsigned int se_coord_fmt; /* 0x1c50 */
	/* Line state */
	unsigned int re_line_pattern; /* 0x1cd0 */
	unsigned int re_line_state;
	unsigned int se_line_width; /* 0x1db8 */
	/* Bumpmap state */
	unsigned int pp_lum_matrix; /* 0x1d00 */
	unsigned int pp_rot_matrix_0; /* 0x1d58 */
	unsigned int pp_rot_matrix_1;
	/* Mask state */
	unsigned int rb3d_stencilrefmask; /* 0x1d7c */
	unsigned int rb3d_ropcntl;
	unsigned int rb3d_planemask;
	/* Viewport state */
	unsigned int se_vport_xscale; /* 0x1d98 */
	unsigned int se_vport_xoffset;
	unsigned int se_vport_yscale;
	unsigned int se_vport_yoffset;
	unsigned int se_vport_zscale;
	unsigned int se_vport_zoffset;
	/* Setup state */
	unsigned int se_cntl_status; /* 0x2140 */
	/* Misc state */
	unsigned int re_top_left; /* 0x26c0 */
	unsigned int re_misc;
} drm_radeon_context_regs_t;
/* Additional context registers introduced with interface version 1.2. */
typedef struct {
	/* Zbias state */
	unsigned int se_zbias_factor; /* 0x1dac */
	unsigned int se_zbias_constant;
} drm_radeon_context2_regs_t;
/* Setup registers for each texture unit |
*/ |
/* Setup registers for one texture unit (there are
 * RADEON_MAX_TEXTURE_UNITS of these). */
typedef struct {
	unsigned int pp_txfilter;
	unsigned int pp_txformat;
	unsigned int pp_txoffset;
	unsigned int pp_txcblend;
	unsigned int pp_txablend;
	unsigned int pp_tfactor;
	unsigned int pp_border_color;
} drm_radeon_texture_regs_t;
/* One primitive within a drm_radeon_vertex2 submission: a range
 * [start,finish) of the vertex buffer plus its type and state index. */
typedef struct {
	unsigned int start;
	unsigned int finish;
	unsigned int prim:8;
	unsigned int stateidx:8;
	unsigned int numverts:16; /* overloaded as offset/64 for elt prims */
	unsigned int vc_format; /* vertex format */
} drm_radeon_prim_t;
/* A complete snapshot of emit-able state, referenced by index from
 * drm_radeon_prim_t.stateidx. */
typedef struct {
	drm_radeon_context_regs_t context;
	drm_radeon_texture_regs_t tex[RADEON_MAX_TEXTURE_UNITS];
	drm_radeon_context2_regs_t context2;
	unsigned int dirty;
} drm_radeon_state_t;
/* Layout of the shared memory area (sarea) used by the legacy (pre-KMS)
 * interface to exchange state, cliprects and throttling counters between
 * the DRM and user-space clients. */
typedef struct {
	/* The channel for communication of state information to the
	 * kernel on firing a vertex buffer with either of the
	 * obsoleted vertex/index ioctls.
	 */
	drm_radeon_context_regs_t context_state;
	drm_radeon_texture_regs_t tex_state[RADEON_MAX_TEXTURE_UNITS];
	unsigned int dirty;
	unsigned int vertsize;
	unsigned int vc_format;
	/* The current cliprects, or a subset thereof.
	 */
	struct drm_clip_rect boxes[RADEON_NR_SAREA_CLIPRECTS];
	unsigned int nbox;
	/* Counters for client-side throttling of rendering clients.
	 */
	unsigned int last_frame;
	unsigned int last_dispatch;
	unsigned int last_clear;
	struct drm_tex_region tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS +
	1];
	unsigned int tex_age[RADEON_NR_TEX_HEAPS];
	int ctx_owner;
	int pfState; /* number of 3d windows (0, 1, 2 or more) */
	int pfCurrentPage; /* which buffer is being displayed? */
	int crtc2_base; /* CRTC2 frame offset */
	int tiling_enabled; /* set by drm, read by 2d + 3d clients */
} drm_radeon_sarea_t;
/* WARNING: If you change any of these defines, make sure to change the |
* defines in the Xserver file (xf86drmRadeon.h) |
* |
* KW: actually it's illegal to change any of this (backwards compatibility). |
*/ |
/* Radeon specific ioctls |
* The device specific ioctl range is 0x40 to 0x79. |
*/ |
#define DRM_RADEON_CP_INIT 0x00 |
#define DRM_RADEON_CP_START 0x01 |
#define DRM_RADEON_CP_STOP 0x02 |
#define DRM_RADEON_CP_RESET 0x03 |
#define DRM_RADEON_CP_IDLE 0x04 |
#define DRM_RADEON_RESET 0x05 |
#define DRM_RADEON_FULLSCREEN 0x06 |
#define DRM_RADEON_SWAP 0x07 |
#define DRM_RADEON_CLEAR 0x08 |
#define DRM_RADEON_VERTEX 0x09 |
#define DRM_RADEON_INDICES 0x0A |
#define DRM_RADEON_NOT_USED |
#define DRM_RADEON_STIPPLE 0x0C |
#define DRM_RADEON_INDIRECT 0x0D |
#define DRM_RADEON_TEXTURE 0x0E |
#define DRM_RADEON_VERTEX2 0x0F |
#define DRM_RADEON_CMDBUF 0x10 |
#define DRM_RADEON_GETPARAM 0x11 |
#define DRM_RADEON_FLIP 0x12 |
#define DRM_RADEON_ALLOC 0x13 |
#define DRM_RADEON_FREE 0x14 |
#define DRM_RADEON_INIT_HEAP 0x15 |
#define DRM_RADEON_IRQ_EMIT 0x16 |
#define DRM_RADEON_IRQ_WAIT 0x17 |
#define DRM_RADEON_CP_RESUME 0x18 |
#define DRM_RADEON_SETPARAM 0x19 |
#define DRM_RADEON_SURF_ALLOC 0x1a |
#define DRM_RADEON_SURF_FREE 0x1b |
/* KMS ioctl */ |
#define DRM_RADEON_GEM_INFO 0x1c |
#define DRM_RADEON_GEM_CREATE 0x1d |
#define DRM_RADEON_GEM_MMAP 0x1e |
#define DRM_RADEON_GEM_PREAD 0x21 |
#define DRM_RADEON_GEM_PWRITE 0x22 |
#define DRM_RADEON_GEM_SET_DOMAIN 0x23 |
#define DRM_RADEON_GEM_WAIT_IDLE 0x24 |
#define DRM_RADEON_CS 0x26 |
#define DRM_RADEON_INFO 0x27 |
#define DRM_RADEON_GEM_SET_TILING 0x28 |
#define DRM_RADEON_GEM_GET_TILING 0x29 |
#define DRM_RADEON_GEM_BUSY 0x2a |
#define DRM_RADEON_GEM_VA 0x2b |
#define DRM_RADEON_GEM_OP 0x2c |
#define DRM_RADEON_GEM_USERPTR 0x2d |
#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t) |
#define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START) |
#define DRM_IOCTL_RADEON_CP_STOP DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_STOP, drm_radeon_cp_stop_t) |
#define DRM_IOCTL_RADEON_CP_RESET DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_RESET) |
#define DRM_IOCTL_RADEON_CP_IDLE DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_IDLE) |
#define DRM_IOCTL_RADEON_RESET DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_RESET) |
#define DRM_IOCTL_RADEON_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FULLSCREEN, drm_radeon_fullscreen_t) |
#define DRM_IOCTL_RADEON_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_SWAP) |
#define DRM_IOCTL_RADEON_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CLEAR, drm_radeon_clear_t) |
#define DRM_IOCTL_RADEON_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX, drm_radeon_vertex_t) |
#define DRM_IOCTL_RADEON_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INDICES, drm_radeon_indices_t) |
#define DRM_IOCTL_RADEON_STIPPLE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_STIPPLE, drm_radeon_stipple_t) |
#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INDIRECT, drm_radeon_indirect_t) |
#define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_TEXTURE, drm_radeon_texture_t) |
#define DRM_IOCTL_RADEON_VERTEX2 DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX2, drm_radeon_vertex2_t) |
#define DRM_IOCTL_RADEON_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CMDBUF, drm_radeon_cmd_buffer_t) |
#define DRM_IOCTL_RADEON_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GETPARAM, drm_radeon_getparam_t) |
#define DRM_IOCTL_RADEON_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_FLIP) |
#define DRM_IOCTL_RADEON_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_ALLOC, drm_radeon_mem_alloc_t) |
#define DRM_IOCTL_RADEON_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FREE, drm_radeon_mem_free_t) |
#define DRM_IOCTL_RADEON_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INIT_HEAP, drm_radeon_mem_init_heap_t) |
#define DRM_IOCTL_RADEON_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_IRQ_EMIT, drm_radeon_irq_emit_t) |
#define DRM_IOCTL_RADEON_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_IRQ_WAIT, drm_radeon_irq_wait_t) |
#define DRM_IOCTL_RADEON_CP_RESUME DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_RESUME) |
#define DRM_IOCTL_RADEON_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SETPARAM, drm_radeon_setparam_t) |
#define DRM_IOCTL_RADEON_SURF_ALLOC DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_ALLOC, drm_radeon_surface_alloc_t) |
#define DRM_IOCTL_RADEON_SURF_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_FREE, drm_radeon_surface_free_t) |
/* KMS */ |
#define DRM_IOCTL_RADEON_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_INFO, struct drm_radeon_gem_info) |
#define DRM_IOCTL_RADEON_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_CREATE, struct drm_radeon_gem_create) |
#define DRM_IOCTL_RADEON_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_MMAP, struct drm_radeon_gem_mmap) |
#define DRM_IOCTL_RADEON_GEM_PREAD DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PREAD, struct drm_radeon_gem_pread) |
#define DRM_IOCTL_RADEON_GEM_PWRITE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PWRITE, struct drm_radeon_gem_pwrite) |
#define DRM_IOCTL_RADEON_GEM_SET_DOMAIN DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_DOMAIN, struct drm_radeon_gem_set_domain) |
#define DRM_IOCTL_RADEON_GEM_WAIT_IDLE DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_GEM_WAIT_IDLE, struct drm_radeon_gem_wait_idle) |
#define DRM_IOCTL_RADEON_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs) |
#define DRM_IOCTL_RADEON_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INFO, struct drm_radeon_info) |
#define DRM_IOCTL_RADEON_GEM_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling) |
#define DRM_IOCTL_RADEON_GEM_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling) |
#define DRM_IOCTL_RADEON_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy) |
#define DRM_IOCTL_RADEON_GEM_VA DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_VA, struct drm_radeon_gem_va) |
#define DRM_IOCTL_RADEON_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_OP, struct drm_radeon_gem_op) |
#define DRM_IOCTL_RADEON_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_USERPTR, struct drm_radeon_gem_userptr) |
/* Argument for DRM_IOCTL_RADEON_CP_INIT: initializes or tears down the
 * Command Processor for the given ASIC family, describing ring, framebuffer
 * and GART layout. */
typedef struct drm_radeon_init {
	enum {
		RADEON_INIT_CP = 0x01,
		RADEON_CLEANUP_CP = 0x02,
		RADEON_INIT_R200_CP = 0x03,
		RADEON_INIT_R300_CP = 0x04,
		RADEON_INIT_R600_CP = 0x05
	} func;
	unsigned long sarea_priv_offset;
	int is_pci;
	int cp_mode;
	int gart_size;
	int ring_size;
	int usec_timeout;
	unsigned int fb_bpp;
	unsigned int front_offset, front_pitch;
	unsigned int back_offset, back_pitch;
	unsigned int depth_bpp;
	unsigned int depth_offset, depth_pitch;
	unsigned long fb_offset;
	unsigned long mmio_offset;
	unsigned long ring_offset;
	unsigned long ring_rptr_offset;
	unsigned long buffers_offset;
	unsigned long gart_textures_offset;
} drm_radeon_init_t;
/* Argument for DRM_IOCTL_RADEON_CP_STOP. */
typedef struct drm_radeon_cp_stop {
	int flush;
	int idle;
} drm_radeon_cp_stop_t;
/* Argument for DRM_IOCTL_RADEON_FULLSCREEN (enter/leave fullscreen). */
typedef struct drm_radeon_fullscreen {
	enum {
		RADEON_INIT_FULLSCREEN = 0x01,
		RADEON_CLEANUP_FULLSCREEN = 0x02
	} func;
} drm_radeon_fullscreen_t;
/* Indices into drm_radeon_clear_rect_t.f/ui below. */
#define CLEAR_X1 0
#define CLEAR_Y1 1
#define CLEAR_X2 2
#define CLEAR_Y2 3
#define CLEAR_DEPTH 4
/* One clear rectangle: two corners plus a depth value, accessible as
 * floats or raw words via the CLEAR_* indices. */
typedef union drm_radeon_clear_rect {
	float f[5];
	unsigned int ui[5];
} drm_radeon_clear_rect_t;
/* Argument for DRM_IOCTL_RADEON_CLEAR. */
typedef struct drm_radeon_clear {
	unsigned int flags;
	unsigned int clear_color;
	unsigned int clear_depth;
	unsigned int color_mask;
	unsigned int depth_mask; /* misnamed field: should be stencil */
	drm_radeon_clear_rect_t __user *depth_boxes;
} drm_radeon_clear_t;
/* Argument for DRM_IOCTL_RADEON_VERTEX (v1.1, obsoleted by vertex2). */
typedef struct drm_radeon_vertex {
	int prim;
	int idx; /* Index of vertex buffer */
	int count; /* Number of vertices in buffer */
	int discard; /* Client finished with buffer? */
} drm_radeon_vertex_t;
/* Argument for DRM_IOCTL_RADEON_INDICES (v1.1, obsoleted by vertex2). */
typedef struct drm_radeon_indices {
	int prim;
	int idx;
	int start;
	int end;
	int discard; /* Client finished with buffer? */
} drm_radeon_indices_t;
/* v1.2 - obsoletes drm_radeon_vertex and drm_radeon_indices
 * - allows multiple primitives and state changes in a single ioctl
 * - supports driver change to emit native primitives
 */
typedef struct drm_radeon_vertex2 {
	int idx; /* Index of vertex buffer */
	int discard; /* Client finished with buffer? */
	int nr_states;
	drm_radeon_state_t __user *state;
	int nr_prims;
	drm_radeon_prim_t __user *prim;
} drm_radeon_vertex2_t;
/* v1.3 - obsoletes drm_radeon_vertex2 |
* - allows arbitrarily large cliprect list |
* - allows updating of tcl packet, vector and scalar state |
* - allows memory-efficient description of state updates |
* - allows state to be emitted without a primitive |
* (for clears, ctx switches) |
* - allows more than one dma buffer to be referenced per ioctl |
* - supports tcl driver |
* - may be extended in future versions with new cmd types, packets |
*/ |
/* Argument for DRM_IOCTL_RADEON_CMDBUF (v1.3): a stream of commands, each
 * led by a drm_radeon_cmd_header_t, plus an optional cliprect list. */
typedef struct drm_radeon_cmd_buffer {
	int bufsz;
	char __user *buf;
	int nbox;
	struct drm_clip_rect __user *boxes;
} drm_radeon_cmd_buffer_t;
/* One sub-image blit of a texture upload (used by drm_radeon_texture). */
typedef struct drm_radeon_tex_image {
	unsigned int x, y; /* Blit coordinates */
	unsigned int width, height;
	const void __user *data;
} drm_radeon_tex_image_t;
/* Argument for DRM_IOCTL_RADEON_TEXTURE: upload texel data to a texture. */
typedef struct drm_radeon_texture {
	unsigned int offset;
	int pitch;
	int format;
	int width; /* Texture image coordinates */
	int height;
	drm_radeon_tex_image_t __user *image;
} drm_radeon_texture_t;
/* Argument for DRM_IOCTL_RADEON_STIPPLE: set the polygon stipple mask. */
typedef struct drm_radeon_stipple {
	unsigned int __user *mask;
} drm_radeon_stipple_t;
/* Argument for DRM_IOCTL_RADEON_INDIRECT: fire a range of an indirect
 * buffer. */
typedef struct drm_radeon_indirect {
	int idx;
	int start;
	int end;
	int discard;
} drm_radeon_indirect_t;
/* enum for card type parameters */ |
#define RADEON_CARD_PCI 0 |
#define RADEON_CARD_AGP 1 |
#define RADEON_CARD_PCIE 2 |
/* 1.3: An ioctl to get parameters that aren't available to the 3d |
* client any other way. |
*/ |
#define RADEON_PARAM_GART_BUFFER_OFFSET 1 /* card offset of 1st GART buffer */ |
#define RADEON_PARAM_LAST_FRAME 2 |
#define RADEON_PARAM_LAST_DISPATCH 3 |
#define RADEON_PARAM_LAST_CLEAR 4 |
/* Added with DRM version 1.6. */ |
#define RADEON_PARAM_IRQ_NR 5 |
#define RADEON_PARAM_GART_BASE 6 /* card offset of GART base */ |
/* Added with DRM version 1.8. */ |
#define RADEON_PARAM_REGISTER_HANDLE 7 /* for drmMap() */ |
#define RADEON_PARAM_STATUS_HANDLE 8 |
#define RADEON_PARAM_SAREA_HANDLE 9 |
#define RADEON_PARAM_GART_TEX_HANDLE 10 |
#define RADEON_PARAM_SCRATCH_OFFSET 11 |
#define RADEON_PARAM_CARD_TYPE 12 |
#define RADEON_PARAM_VBLANK_CRTC 13 /* VBLANK CRTC */ |
#define RADEON_PARAM_FB_LOCATION 14 /* FB location */ |
#define RADEON_PARAM_NUM_GB_PIPES 15 /* num GB pipes */ |
#define RADEON_PARAM_DEVICE_ID 16 |
#define RADEON_PARAM_NUM_Z_PIPES 17 /* num Z pipes */ |
/* Argument for DRM_IOCTL_RADEON_GETPARAM: param is one of the
 * RADEON_PARAM_* values above; the result is written through value. */
typedef struct drm_radeon_getparam {
	int param;
	void __user *value;
} drm_radeon_getparam_t;
/* 1.6: Set up a memory manager for regions of shared memory:
 */
#define RADEON_MEM_REGION_GART 1
#define RADEON_MEM_REGION_FB 2
/* Argument for DRM_IOCTL_RADEON_ALLOC. */
typedef struct drm_radeon_mem_alloc {
	int region;
	int alignment;
	int size;
	int __user *region_offset; /* offset from start of fb or GART */
} drm_radeon_mem_alloc_t;
/* Argument for DRM_IOCTL_RADEON_FREE. */
typedef struct drm_radeon_mem_free {
	int region;
	int region_offset;
} drm_radeon_mem_free_t;
/* Argument for DRM_IOCTL_RADEON_INIT_HEAP. */
typedef struct drm_radeon_mem_init_heap {
	int region;
	int size;
	int start;
} drm_radeon_mem_init_heap_t;
/* 1.6: Userspace can request & wait on irq's: |
*/ |
/* Argument for DRM_IOCTL_RADEON_IRQ_EMIT: the emitted sequence number is
 * written through irq_seq. */
typedef struct drm_radeon_irq_emit {
	int __user *irq_seq;
} drm_radeon_irq_emit_t;
/* Argument for DRM_IOCTL_RADEON_IRQ_WAIT. */
typedef struct drm_radeon_irq_wait {
	int irq_seq;
} drm_radeon_irq_wait_t;
/* 1.10: Clients tell the DRM where they think the framebuffer is located in
 * the card's address space, via a new generic ioctl to set parameters
 */
typedef struct drm_radeon_setparam {
	unsigned int param;	/* one of the RADEON_SETPARAM_* values below */
	__s64 value;
} drm_radeon_setparam_t;
#define RADEON_SETPARAM_FB_LOCATION 1 /* determined framebuffer location */ |
#define RADEON_SETPARAM_SWITCH_TILING 2 /* enable/disable color tiling */ |
#define RADEON_SETPARAM_PCIGART_LOCATION 3 /* PCI Gart Location */ |
#define RADEON_SETPARAM_NEW_MEMMAP 4 /* Use new memory map */ |
#define RADEON_SETPARAM_PCIGART_TABLE_SIZE 5 /* PCI GART Table Size */ |
#define RADEON_SETPARAM_VBLANK_CRTC 6 /* VBLANK CRTC */ |
/* 1.14: Clients can allocate/free a surface |
*/ |
typedef struct drm_radeon_surface_alloc { |
unsigned int address; |
unsigned int size; |
unsigned int flags; |
} drm_radeon_surface_alloc_t; |
typedef struct drm_radeon_surface_free { |
unsigned int address; |
} drm_radeon_surface_free_t; |
#define DRM_RADEON_VBLANK_CRTC1 1 |
#define DRM_RADEON_VBLANK_CRTC2 2 |
/*
 * Kernel modesetting world below.
 */
#define RADEON_GEM_DOMAIN_CPU 0x1
#define RADEON_GEM_DOMAIN_GTT 0x2
#define RADEON_GEM_DOMAIN_VRAM 0x4
/* Global memory information returned by the GEM info ioctl (bytes). */
struct drm_radeon_gem_info {
	uint64_t gart_size;
	uint64_t vram_size;
	uint64_t vram_visible;	/* CPU-accessible portion of VRAM */
};
#define RADEON_GEM_NO_BACKING_STORE (1 << 0)
#define RADEON_GEM_GTT_UC (1 << 1)
#define RADEON_GEM_GTT_WC (1 << 2)
/* BO is expected to be accessed by the CPU */
#define RADEON_GEM_CPU_ACCESS (1 << 3)
/* CPU access is not expected to work for this BO */
#define RADEON_GEM_NO_CPU_ACCESS (1 << 4)
/* Create a GEM buffer object. */
struct drm_radeon_gem_create {
	uint64_t size;		/* requested size in bytes (in) */
	uint64_t alignment;	/* requested alignment (in) */
	uint32_t handle;	/* GEM handle of the new object (out) */
	uint32_t initial_domain;	/* RADEON_GEM_DOMAIN_* (in) */
	uint32_t flags;		/* RADEON_GEM_* flags above (in) */
};
/*
 * This is not a reliable API and you should expect it to fail for any
 * number of reasons, and have a fallback path that does not use userptr to
 * perform any operation.
 */
#define RADEON_GEM_USERPTR_READONLY (1 << 0)
#define RADEON_GEM_USERPTR_ANONONLY (1 << 1)
#define RADEON_GEM_USERPTR_VALIDATE (1 << 2)
#define RADEON_GEM_USERPTR_REGISTER (1 << 3)
/* Wrap a range of user memory in a GEM object. */
struct drm_radeon_gem_userptr {
	uint64_t addr;		/* user virtual address (in) */
	uint64_t size;		/* size of the range in bytes (in) */
	uint32_t flags;		/* RADEON_GEM_USERPTR_* (in) */
	uint32_t handle;	/* GEM handle (out) */
};
#define RADEON_TILING_MACRO 0x1
#define RADEON_TILING_MICRO 0x2
#define RADEON_TILING_SWAP_16BIT 0x4
#define RADEON_TILING_SWAP_32BIT 0x8
/* this object requires a surface when mapped - i.e. front buffer */
#define RADEON_TILING_SURFACE 0x10
#define RADEON_TILING_MICRO_SQUARE 0x20
/* Evergreen+: bank width/height, tile aspect and tile split are packed
 * into tiling_flags at the shifts/masks below. */
#define RADEON_TILING_EG_BANKW_SHIFT 8
#define RADEON_TILING_EG_BANKW_MASK 0xf
#define RADEON_TILING_EG_BANKH_SHIFT 12
#define RADEON_TILING_EG_BANKH_MASK 0xf
#define RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT 16
#define RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK 0xf
#define RADEON_TILING_EG_TILE_SPLIT_SHIFT 24
#define RADEON_TILING_EG_TILE_SPLIT_MASK 0xf
#define RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT 28
#define RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK 0xf
/* Attach tiling metadata to a buffer object. */
struct drm_radeon_gem_set_tiling {
	uint32_t handle;	/* GEM handle */
	uint32_t tiling_flags;	/* RADEON_TILING_* bits */
	uint32_t pitch;
};
/* Query tiling metadata previously attached to a buffer object. */
struct drm_radeon_gem_get_tiling {
	uint32_t handle;
	uint32_t tiling_flags;
	uint32_t pitch;
};
/* Argument to the GEM mmap ioctl. */
struct drm_radeon_gem_mmap {
	uint32_t handle;	/* GEM handle (in) */
	uint32_t pad;		/* padding to align the 64-bit members */
	uint64_t offset;	/* offset into the object (in) */
	uint64_t size;		/* size to map (in) */
	uint64_t addr_ptr;	/* mapping offset/address as a u64 (out) */
};
/* Move a buffer object to the given read/write domains. */
struct drm_radeon_gem_set_domain {
	uint32_t handle;	/* GEM handle */
	uint32_t read_domains;	/* mask of RADEON_GEM_DOMAIN_* */
	uint32_t write_domain;	/* RADEON_GEM_DOMAIN_* */
};
/* Wait for all GPU use of a buffer object to finish. */
struct drm_radeon_gem_wait_idle {
	uint32_t handle;	/* GEM handle */
	uint32_t pad;		/* explicit padding */
};
/* Query whether a buffer object is still in use by the GPU. */
struct drm_radeon_gem_busy {
	uint32_t handle;	/* GEM handle (in) */
	uint32_t domain;	/* domain information (out) */
};
struct drm_radeon_gem_pread {
	/** Handle for the object being read. */
	uint32_t handle;
	uint32_t pad;
	/** Offset into the object to read from */
	uint64_t offset;
	/** Length of data to read */
	uint64_t size;
	/** Pointer to write the data into. */
	/* void *, but pointers are not 32/64 compatible */
	uint64_t data_ptr;
};
struct drm_radeon_gem_pwrite {
	/** Handle for the object being written to. */
	uint32_t handle;
	uint32_t pad;
	/** Offset into the object to write to */
	uint64_t offset;
	/** Length of data to write */
	uint64_t size;
	/** Pointer to read the data from. */
	/* void *, but pointers are not 32/64 compatible */
	uint64_t data_ptr;
};
/* Sets or returns a value associated with a buffer. */
struct drm_radeon_gem_op {
	uint32_t handle; /* buffer */
	uint32_t op;     /* RADEON_GEM_OP_* */
	uint64_t value;  /* input or return value */
};
#define RADEON_GEM_OP_GET_INITIAL_DOMAIN 0
#define RADEON_GEM_OP_SET_INITIAL_DOMAIN 1
/* gem_va operations */
#define RADEON_VA_MAP 1
#define RADEON_VA_UNMAP 2
/* gem_va result codes */
#define RADEON_VA_RESULT_OK 0
#define RADEON_VA_RESULT_ERROR 1
#define RADEON_VA_RESULT_VA_EXIST 2
/* per-page mapping attributes */
#define RADEON_VM_PAGE_VALID (1 << 0)
#define RADEON_VM_PAGE_READABLE (1 << 1)
#define RADEON_VM_PAGE_WRITEABLE (1 << 2)
#define RADEON_VM_PAGE_SYSTEM (1 << 3)
#define RADEON_VM_PAGE_SNOOPED (1 << 4)
/* Map or unmap a buffer object in a GPU virtual address space. */
struct drm_radeon_gem_va {
	uint32_t handle;	/* GEM handle */
	uint32_t operation;	/* RADEON_VA_MAP / RADEON_VA_UNMAP (in),
				 * RADEON_VA_RESULT_* (out) */
	uint32_t vm_id;		/* virtual memory context id */
	uint32_t flags;		/* RADEON_VM_PAGE_* */
	uint64_t offset;	/* virtual address to (un)map at */
};
#define RADEON_CHUNK_ID_RELOCS 0x01
#define RADEON_CHUNK_ID_IB 0x02
#define RADEON_CHUNK_ID_FLAGS 0x03
#define RADEON_CHUNK_ID_CONST_IB 0x04
/* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */
#define RADEON_CS_KEEP_TILING_FLAGS 0x01
#define RADEON_CS_USE_VM 0x02
#define RADEON_CS_END_OF_FRAME 0x04 /* a hint from userspace which CS is the last one */
/* The second dword of RADEON_CHUNK_ID_FLAGS is a uint32 that sets the ring type */
#define RADEON_CS_RING_GFX 0
#define RADEON_CS_RING_COMPUTE 1
#define RADEON_CS_RING_DMA 2
#define RADEON_CS_RING_UVD 3
#define RADEON_CS_RING_VCE 4
/* The third dword of RADEON_CHUNK_ID_FLAGS is a sint32 that sets the priority */
/* 0 = normal, + = higher priority, - = lower priority */
/* One chunk of a command submission. */
struct drm_radeon_cs_chunk {
	uint32_t chunk_id;	/* RADEON_CHUNK_ID_* */
	uint32_t length_dw;	/* chunk length in dwords */
	uint64_t chunk_data;	/* user pointer to the chunk data, as a u64 */
};
/* drm_radeon_cs_reloc.flags */
#define RADEON_RELOC_PRIO_MASK (0xf << 0)
/* Buffer relocation entry inside a RADEON_CHUNK_ID_RELOCS chunk. */
struct drm_radeon_cs_reloc {
	uint32_t handle;	/* GEM handle of the referenced buffer */
	uint32_t read_domains;	/* mask of RADEON_GEM_DOMAIN_* */
	uint32_t write_domain;	/* RADEON_GEM_DOMAIN_* */
	uint32_t flags;		/* RADEON_RELOC_PRIO_MASK etc. */
};
/* Top-level command submission argument. */
struct drm_radeon_cs {
	uint32_t num_chunks;
	uint32_t cs_id;
	/* this points to uint64_t * which point to cs chunks */
	uint64_t chunks;
	/* updates to the limits after this CS ioctl */
	uint64_t gart_limit;
	uint64_t vram_limit;
};
#define RADEON_INFO_DEVICE_ID 0x00
#define RADEON_INFO_NUM_GB_PIPES 0x01
#define RADEON_INFO_NUM_Z_PIPES 0x02
#define RADEON_INFO_ACCEL_WORKING 0x03
#define RADEON_INFO_CRTC_FROM_ID 0x04
#define RADEON_INFO_ACCEL_WORKING2 0x05
#define RADEON_INFO_TILING_CONFIG 0x06
#define RADEON_INFO_WANT_HYPERZ 0x07
#define RADEON_INFO_WANT_CMASK 0x08 /* get access to CMASK on r300 */
#define RADEON_INFO_CLOCK_CRYSTAL_FREQ 0x09 /* clock crystal frequency */
#define RADEON_INFO_NUM_BACKENDS 0x0a /* DB/backends for r600+ - need for OQ */
#define RADEON_INFO_NUM_TILE_PIPES 0x0b /* tile pipes for r600+ */
#define RADEON_INFO_FUSION_GART_WORKING 0x0c /* fusion writes to GTT were broken before this */
#define RADEON_INFO_BACKEND_MAP 0x0d /* pipe to backend map, needed by mesa */
/* virtual address start, va < start are reserved by the kernel */
#define RADEON_INFO_VA_START 0x0e
/* maximum size of ib using the virtual memory cs */
#define RADEON_INFO_IB_VM_MAX_SIZE 0x0f
/* max pipes - needed for compute shaders */
#define RADEON_INFO_MAX_PIPES 0x10
/* timestamp for GL_ARB_timer_query (OpenGL), returns the current GPU clock */
#define RADEON_INFO_TIMESTAMP 0x11
/* max shader engines (SE) - needed for geometry shaders, etc. */
#define RADEON_INFO_MAX_SE 0x12
/* max SH per SE */
#define RADEON_INFO_MAX_SH_PER_SE 0x13
/* fast fb access is enabled */
#define RADEON_INFO_FASTFB_WORKING 0x14
/* query if a RADEON_CS_RING_* submission is supported */
#define RADEON_INFO_RING_WORKING 0x15
/* SI tile mode array */
#define RADEON_INFO_SI_TILE_MODE_ARRAY 0x16
/* query if CP DMA is supported on the compute ring */
#define RADEON_INFO_SI_CP_DMA_COMPUTE 0x17
/* CIK macrotile mode array */
#define RADEON_INFO_CIK_MACROTILE_MODE_ARRAY 0x18
/* query the number of render backends */
#define RADEON_INFO_SI_BACKEND_ENABLED_MASK 0x19
/* max engine clock - needed for OpenCL */
#define RADEON_INFO_MAX_SCLK 0x1a
/* version of VCE firmware */
#define RADEON_INFO_VCE_FW_VERSION 0x1b
/* version of VCE feedback */
#define RADEON_INFO_VCE_FB_VERSION 0x1c
#define RADEON_INFO_NUM_BYTES_MOVED 0x1d
#define RADEON_INFO_VRAM_USAGE 0x1e
#define RADEON_INFO_GTT_USAGE 0x1f
#define RADEON_INFO_ACTIVE_CU_COUNT 0x20
/* Generic device query: @request selects one of the RADEON_INFO_* items
 * above; @value carries the data (NOTE(review): for most requests this is
 * a user pointer cast to u64 — confirm against the kernel implementation). */
struct drm_radeon_info {
	uint32_t request;	/* RADEON_INFO_* selector */
	uint32_t pad;		/* padding to align the 64-bit member */
	uint64_t value;
};
/* Those correspond to the tile index to use, this is to explicitly state
 * the API that is implicitly defined by the tile mode array.
 */
#define SI_TILE_MODE_COLOR_LINEAR_ALIGNED 8
#define SI_TILE_MODE_COLOR_1D 13
#define SI_TILE_MODE_COLOR_1D_SCANOUT 9
#define SI_TILE_MODE_COLOR_2D_8BPP 14
#define SI_TILE_MODE_COLOR_2D_16BPP 15
#define SI_TILE_MODE_COLOR_2D_32BPP 16
#define SI_TILE_MODE_COLOR_2D_64BPP 17
#define SI_TILE_MODE_COLOR_2D_SCANOUT_16BPP 11
#define SI_TILE_MODE_COLOR_2D_SCANOUT_32BPP 12
#define SI_TILE_MODE_DEPTH_STENCIL_1D 4
#define SI_TILE_MODE_DEPTH_STENCIL_2D 0
/* 2AA and 4AA intentionally share tile index 3 */
#define SI_TILE_MODE_DEPTH_STENCIL_2D_2AA 3
#define SI_TILE_MODE_DEPTH_STENCIL_2D_4AA 3
#define SI_TILE_MODE_DEPTH_STENCIL_2D_8AA 2
#define CIK_TILE_MODE_DEPTH_STENCIL_1D 5
#endif
/drivers/include/uapi/drm/vmwgfx_drm.h |
---|
0,0 → 1,1062 |
/************************************************************************** |
* |
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA |
* All Rights Reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the |
* "Software"), to deal in the Software without restriction, including |
* without limitation the rights to use, copy, modify, merge, publish, |
* distribute, sub license, and/or sell copies of the Software, and to |
* permit persons to whom the Software is furnished to do so, subject to |
* the following conditions: |
* |
* The above copyright notice and this permission notice (including the |
* next paragraph) shall be included in all copies or substantial portions |
* of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
* USE OR OTHER DEALINGS IN THE SOFTWARE. |
* |
**************************************************************************/ |
#ifndef __VMWGFX_DRM_H__
#define __VMWGFX_DRM_H__
#ifndef __KERNEL__
#include <drm/drm.h>
#endif
#define DRM_VMW_MAX_SURFACE_FACES 6
#define DRM_VMW_MAX_MIP_LEVELS 24
/* Driver-private ioctl numbers (offsets from DRM_COMMAND_BASE). */
#define DRM_VMW_GET_PARAM 0
#define DRM_VMW_ALLOC_DMABUF 1
#define DRM_VMW_UNREF_DMABUF 2
#define DRM_VMW_CURSOR_BYPASS 3
/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0*/
#define DRM_VMW_CONTROL_STREAM 4
#define DRM_VMW_CLAIM_STREAM 5
#define DRM_VMW_UNREF_STREAM 6
/* guarded by DRM_VMW_PARAM_3D == 1 */
#define DRM_VMW_CREATE_CONTEXT 7
#define DRM_VMW_UNREF_CONTEXT 8
#define DRM_VMW_CREATE_SURFACE 9
#define DRM_VMW_UNREF_SURFACE 10
#define DRM_VMW_REF_SURFACE 11
#define DRM_VMW_EXECBUF 12
#define DRM_VMW_GET_3D_CAP 13
#define DRM_VMW_FENCE_WAIT 14
#define DRM_VMW_FENCE_SIGNALED 15
#define DRM_VMW_FENCE_UNREF 16
#define DRM_VMW_FENCE_EVENT 17
#define DRM_VMW_PRESENT 18
#define DRM_VMW_PRESENT_READBACK 19
#define DRM_VMW_UPDATE_LAYOUT 20
#define DRM_VMW_CREATE_SHADER 21
#define DRM_VMW_UNREF_SHADER 22
#define DRM_VMW_GB_SURFACE_CREATE 23
#define DRM_VMW_GB_SURFACE_REF 24
#define DRM_VMW_SYNCCPU 25
/*************************************************************************/
/**
 * DRM_VMW_GET_PARAM - get device information.
 *
 * DRM_VMW_PARAM_FIFO_OFFSET:
 * Offset to use to map the first page of the FIFO read-only.
 * The fifo is mapped using the mmap() system call on the drm device.
 *
 * DRM_VMW_PARAM_OVERLAY_IOCTL:
 * Does the driver support the overlay ioctl.
 */
#define DRM_VMW_PARAM_NUM_STREAMS 0
#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
#define DRM_VMW_PARAM_3D 2
#define DRM_VMW_PARAM_HW_CAPS 3
#define DRM_VMW_PARAM_FIFO_CAPS 4
#define DRM_VMW_PARAM_MAX_FB_SIZE 5
#define DRM_VMW_PARAM_FIFO_HW_VERSION 6
#define DRM_VMW_PARAM_MAX_SURF_MEMORY 7
#define DRM_VMW_PARAM_3D_CAPS_SIZE 8
#define DRM_VMW_PARAM_MAX_MOB_MEMORY 9
#define DRM_VMW_PARAM_MAX_MOB_SIZE 10
/**
 * enum drm_vmw_handle_type - handle type for ref ioctls
 *
 */
enum drm_vmw_handle_type {
	DRM_VMW_HANDLE_LEGACY = 0,
	DRM_VMW_HANDLE_PRIME = 1
};
/**
 * struct drm_vmw_getparam_arg
 *
 * @value: Returned value. //Out
 * @param: Parameter to query (DRM_VMW_PARAM_*). //In.
 * @pad64: Explicit padding to a 64-bit boundary.
 *
 * Argument to the DRM_VMW_GET_PARAM Ioctl.
 */
struct drm_vmw_getparam_arg {
	uint64_t value;
	uint32_t param;
	uint32_t pad64;
};
/*************************************************************************/
/**
 * DRM_VMW_CREATE_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */
/**
 * struct drm_vmw_context_arg
 *
 * @cid: Device unique context ID.
 * @pad64: Explicit padding to a 64-bit boundary.
 *
 * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
 * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
 */
struct drm_vmw_context_arg {
	int32_t cid;
	uint32_t pad64;
};
/*************************************************************************/
/**
 * DRM_VMW_UNREF_CONTEXT - Free a host context.
 *
 * Frees a global context id, and queues a destroy host command for the host.
 * Does not wait for host completion. The context ID can be used directly
 * in the command stream and shows up as the same context ID on the host.
 */
/*************************************************************************/
/**
 * DRM_VMW_CREATE_SURFACE - Create a host surface.
 *
 * Allocates a device unique surface id, and queues a create surface command
 * for the host. Does not wait for host completion. The surface ID can be
 * used directly in the command stream and shows up as the same surface
 * ID on the host.
 */
/**
 * struct drm_vmw_surface_create_req
 *
 * @flags: Surface flags as understood by the host.
 * @format: Surface format as understood by the host.
 * @mip_levels: Number of mip levels for each face.
 * An unused face should have 0 encoded.
 * @size_addr: Address of a user-space array of struct drm_vmw_size
 * cast to an uint64_t for 32-64 bit compatibility.
 * The size of the array should equal the total number of mipmap levels.
 * @shareable: Boolean whether other clients (as identified by file descriptors)
 * may reference this surface.
 * @scanout: Boolean whether the surface is intended to be used as a
 * scanout.
 *
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 * Output data from the DRM_VMW_REF_SURFACE Ioctl.
 */
struct drm_vmw_surface_create_req {
	uint32_t flags;
	uint32_t format;
	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	uint64_t size_addr;
	int32_t shareable;
	int32_t scanout;
};
/**
 * struct drm_vmw_surface_arg
 *
 * @sid: Surface id of created surface or surface to destroy or reference.
 * @handle_type: Handle type for DRM_VMW_REF_SURFACE Ioctl.
 *
 * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
 * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
 * Input argument to the DRM_VMW_REF_SURFACE Ioctl.
 */
struct drm_vmw_surface_arg {
	int32_t sid;
	enum drm_vmw_handle_type handle_type;
};
/**
 * struct drm_vmw_size
 *
 * @width - mip level width
 * @height - mip level height
 * @depth - mip level depth
 * @pad64 - explicit padding to a 64-bit boundary
 *
 * Description of a mip level.
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 */
struct drm_vmw_size {
	uint32_t width;
	uint32_t height;
	uint32_t depth;
	uint32_t pad64;
};
/**
 * union drm_vmw_surface_create_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_CREATE_SURFACE Ioctl.
 */
union drm_vmw_surface_create_arg {
	struct drm_vmw_surface_arg rep;
	struct drm_vmw_surface_create_req req;
};
/*************************************************************************/
/**
 * DRM_VMW_REF_SURFACE - Reference a host surface.
 *
 * Puts a reference on a host surface with a given sid, as previously
 * returned by the DRM_VMW_CREATE_SURFACE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface ID in the command
 * stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * in the DRM_VMW_CREATE_SURFACE ioctl.
 */
/**
 * union drm_vmw_surface_reference_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_REF_SURFACE Ioctl.
 */
union drm_vmw_surface_reference_arg {
	struct drm_vmw_surface_create_req rep;
	struct drm_vmw_surface_arg req;
};
/*************************************************************************/
/**
 * DRM_VMW_UNREF_SURFACE - Unreference a host surface.
 *
 * Clear a reference previously put on a host surface.
 * When all references are gone, including the one implicitly placed
 * on creation,
 * a destroy surface command will be queued for the host.
 * Does not wait for completion.
 */
/*************************************************************************/
/**
 * DRM_VMW_EXECBUF
 *
 * Submit a command buffer for execution on the host, and return a
 * fence seqno that when signaled, indicates that the command buffer has
 * executed.
 */
/**
 * struct drm_vmw_execbuf_arg
 *
 * @commands: User-space address of a command buffer cast to an uint64_t.
 * @command_size: Size in bytes of the command buffer.
 * @throttle_us: Sleep until software is less than @throttle_us
 * microseconds ahead of hardware. The driver may round this value
 * to the nearest kernel tick.
 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
 * uint64_t.
 * @version: Allows expanding the execbuf ioctl parameters without breaking
 * backwards compatibility, since user-space will always tell the kernel
 * which version it uses.
 * @flags: Execbuf flags. None currently.
 *
 * Argument to the DRM_VMW_EXECBUF Ioctl.
 */
#define DRM_VMW_EXECBUF_VERSION 1
struct drm_vmw_execbuf_arg {
	uint64_t commands;
	uint32_t command_size;
	uint32_t throttle_us;
	uint64_t fence_rep;
	uint32_t version;
	uint32_t flags;
};
/**
 * struct drm_vmw_fence_rep
 *
 * @handle: Fence object handle for fence associated with a command submission.
 * @mask: Fence flags relevant for this fence object.
 * @seqno: Fence sequence number in fifo. A fence object with a lower
 * seqno will signal the EXEC flag before a fence object with a higher
 * seqno. This can be used by user-space to avoid kernel calls to determine
 * whether a fence has signaled the EXEC flag. Note that @seqno will
 * wrap at 32-bit.
 * @passed_seqno: The highest seqno number processed by the hardware
 * so far. This can be used to mark user-space fence objects as signaled, and
 * to determine whether a fence seqno might be stale.
 * @pad64: Explicit padding to a 64-bit boundary.
 * @error: This member should've been set to -EFAULT on submission.
 * The following actions should be take on completion:
 * error == -EFAULT: Fence communication failed. The host is synchronized.
 * Use the last fence id read from the FIFO fence register.
 * error != 0 && error != -EFAULT:
 * Fence submission failed. The host is synchronized. Use the fence_seq member.
 * error == 0: All is OK, The host may not be synchronized.
 * Use the fence_seq member.
 *
 * Input / Output data to the DRM_VMW_EXECBUF Ioctl.
 */
struct drm_vmw_fence_rep {
	uint32_t handle;
	uint32_t mask;
	uint32_t seqno;
	uint32_t passed_seqno;
	uint32_t pad64;
	int32_t error;
};
/*************************************************************************/
/**
 * DRM_VMW_ALLOC_DMABUF
 *
 * Allocate a DMA buffer that is visible also to the host.
 * NOTE: The buffer is
 * identified by a handle and an offset, which are private to the guest, but
 * useable in the command stream. The guest kernel may translate these
 * and patch up the command stream accordingly. In the future, the offset may
 * be zero at all times, or it may disappear from the interface before it is
 * fixed.
 *
 * The DMA buffer may stay user-space mapped in the guest at all times,
 * and is thus suitable for sub-allocation.
 *
 * DMA buffers are mapped using the mmap() syscall on the drm device.
 */
/**
 * struct drm_vmw_alloc_dmabuf_req
 *
 * @size: Required minimum size of the buffer.
 * @pad64: Explicit padding to a 64-bit boundary.
 *
 * Input data to the DRM_VMW_ALLOC_DMABUF Ioctl.
 */
struct drm_vmw_alloc_dmabuf_req {
	uint32_t size;
	uint32_t pad64;
};
/**
 * struct drm_vmw_dmabuf_rep
 *
 * @map_handle: Offset to use in the mmap() call used to map the buffer.
 * @handle: Handle unique to this buffer. Used for unreferencing.
 * @cur_gmr_id: GMR id to use in the command stream when this buffer is
 * referenced. See note above.
 * @cur_gmr_offset: Offset to use in the command stream when this buffer is
 * referenced. See note above.
 * @pad64: Explicit padding to a 64-bit boundary.
 *
 * Output data from the DRM_VMW_ALLOC_DMABUF Ioctl.
 */
struct drm_vmw_dmabuf_rep {
	uint64_t map_handle;
	uint32_t handle;
	uint32_t cur_gmr_id;
	uint32_t cur_gmr_offset;
	uint32_t pad64;
};
/**
 * union drm_vmw_alloc_dmabuf_arg
 *
 * @req: Input data as described above.
 * @rep: Output data as described above.
 *
 * Argument to the DRM_VMW_ALLOC_DMABUF Ioctl.
 */
union drm_vmw_alloc_dmabuf_arg {
	struct drm_vmw_alloc_dmabuf_req req;
	struct drm_vmw_dmabuf_rep rep;
};
/*************************************************************************/
/**
 * DRM_VMW_UNREF_DMABUF - Free a DMA buffer.
 *
 */
/**
 * struct drm_vmw_unref_dmabuf_arg
 *
 * @handle: Handle indicating what buffer to free. Obtained from the
 * DRM_VMW_ALLOC_DMABUF Ioctl.
 * @pad64: Explicit padding to a 64-bit boundary.
 *
 * Argument to the DRM_VMW_UNREF_DMABUF Ioctl.
 */
struct drm_vmw_unref_dmabuf_arg {
	uint32_t handle;
	uint32_t pad64;
};
/*************************************************************************/
/**
 * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
 *
 * This IOCTL controls the overlay units of the svga device.
 * The SVGA overlay units does not work like regular hardware units in
 * that they do not automatically read back the contents of the given dma
 * buffer. But instead only read back for each call to this ioctl, and
 * at any point between this call being made and a following call that
 * either changes the buffer or disables the stream.
 */
/**
 * struct drm_vmw_rect
 *
 * Defines a rectangle. Used in the overlay ioctl to define
 * source and destination rectangle.
 */
struct drm_vmw_rect {
	int32_t x;
	int32_t y;
	uint32_t w;
	uint32_t h;
};
/**
 * struct drm_vmw_control_stream_arg
 *
 * @stream_id: Stream to control
 * @enabled: If false all following arguments are ignored.
 * @handle: Handle to buffer for getting data from.
 * @format: Format of the overlay as understood by the host.
 * @width: Width of the overlay.
 * @height: Height of the overlay.
 * @size: Size of the overlay in bytes.
 * @pitch: Array of pitches, the two last are only used for YUV12 formats.
 * @offset: Offset from start of dma buffer to overlay.
 * @src: Source rect, must be within the defined area above.
 * @dst: Destination rect, x and y may be negative.
 *
 * Argument to the DRM_VMW_CONTROL_STREAM Ioctl.
 */
struct drm_vmw_control_stream_arg {
	uint32_t stream_id;
	uint32_t enabled;
	uint32_t flags;
	uint32_t color_key;
	uint32_t handle;
	uint32_t offset;
	int32_t format;
	uint32_t size;
	uint32_t width;
	uint32_t height;
	uint32_t pitch[3];
	uint32_t pad64;
	struct drm_vmw_rect src;
	struct drm_vmw_rect dst;
};
/*************************************************************************/
/**
 * DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass.
 *
 */
#define DRM_VMW_CURSOR_BYPASS_ALL (1 << 0)
#define DRM_VMW_CURSOR_BYPASS_FLAGS (1)
/**
 * struct drm_vmw_cursor_bypass_arg
 *
 * @flags: Flags.
 * @crtc_id: Crtc id, only used if DRM_VMW_CURSOR_BYPASS_ALL isn't passed.
 * @xpos: X position of cursor.
 * @ypos: Y position of cursor.
 * @xhot: X hotspot.
 * @yhot: Y hotspot.
 *
 * Argument to the DRM_VMW_CURSOR_BYPASS Ioctl.
 */
struct drm_vmw_cursor_bypass_arg {
	uint32_t flags;
	uint32_t crtc_id;
	int32_t xpos;
	int32_t ypos;
	int32_t xhot;
	int32_t yhot;
};
/*************************************************************************/
/**
 * DRM_VMW_CLAIM_STREAM - Claim a single stream.
 */
/**
 * struct drm_vmw_stream_arg
 *
 * @stream_id: Device unique stream ID.
 * @pad64: Explicit padding to a 64-bit boundary.
 *
 * Output argument to the DRM_VMW_CLAIM_STREAM Ioctl.
 * Input argument to the DRM_VMW_UNREF_STREAM Ioctl.
 */
struct drm_vmw_stream_arg {
	uint32_t stream_id;
	uint32_t pad64;
};
/*************************************************************************/
/**
 * DRM_VMW_UNREF_STREAM - Unclaim a stream.
 *
 * Return a single stream that was claimed by this process. Also makes
 * sure that the stream has been stopped.
 */
/*************************************************************************/
/**
 * DRM_VMW_GET_3D_CAP
 *
 * Read 3D capabilities from the FIFO
 *
 */
/**
 * struct drm_vmw_get_3d_cap_arg
 *
 * @buffer: Pointer to a buffer for capability data, cast to an uint64_t
 * @max_size: Max size to copy
 * @pad64: Explicit padding to a 64-bit boundary.
 *
 * Input argument to the DRM_VMW_GET_3D_CAP_IOCTL
 * ioctls.
 */
struct drm_vmw_get_3d_cap_arg {
	uint64_t buffer;
	uint32_t max_size;
	uint32_t pad64;
};
/*************************************************************************/
/**
 * DRM_VMW_FENCE_WAIT
 *
 * Waits for a fence object to signal. The wait is interruptible, so that
 * signals may be delivered during the interrupt. The wait may timeout,
 * in which case the calls returns -EBUSY. If the wait is restarted,
 * that is restarting without resetting @cookie_valid to zero,
 * the timeout is computed from the first call.
 *
 * The flags argument to the DRM_VMW_FENCE_WAIT ioctl indicates what to wait
 * on:
 * DRM_VMW_FENCE_FLAG_EXEC: All commands ahead of the fence in the command
 * stream
 * have executed.
 * DRM_VMW_FENCE_FLAG_QUERY: All query results resulting from query finish
 * commands
 * in the buffer given to the EXECBUF ioctl returning the fence object handle
 * are available to user-space.
 *
 * DRM_VMW_WAIT_OPTION_UNREF: If this wait option is given, and the
 * fence wait ioctl returns 0, the fence object has been unreferenced after
 * the wait.
 */
#define DRM_VMW_FENCE_FLAG_EXEC (1 << 0)
#define DRM_VMW_FENCE_FLAG_QUERY (1 << 1)
#define DRM_VMW_WAIT_OPTION_UNREF (1 << 0)
/**
 * struct drm_vmw_fence_wait_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @cookie_valid: Must be reset to 0 on first call. Left alone on restart.
 * @kernel_cookie: Set to 0 on first call. Left alone on restart.
 * @timeout_us: Wait timeout in microseconds. 0 for indefinite timeout.
 * @lazy: Set to 1 if timing is not critical. Allow more than a kernel tick
 * before returning.
 * @flags: Fence flags to wait on.
 * @wait_options: Options that control the behaviour of the wait ioctl.
 *
 * Input argument to the DRM_VMW_FENCE_WAIT ioctl.
 */
struct drm_vmw_fence_wait_arg {
	uint32_t handle;
	int32_t cookie_valid;
	uint64_t kernel_cookie;
	uint64_t timeout_us;
	int32_t lazy;
	int32_t flags;
	int32_t wait_options;
	int32_t pad64;
};
/*************************************************************************/
/**
 * DRM_VMW_FENCE_SIGNALED
 *
 * Checks if a fence object is signaled.
 */
/**
 * struct drm_vmw_fence_signaled_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @flags: Fence object flags input to DRM_VMW_FENCE_SIGNALED ioctl
 * @signaled: Out: Flags signaled.
 * @passed_seqno: Out: Highest sequence passed so far. Can be used to signal
 * the EXEC flag of user-space fence objects.
 * @pad64: Explicit padding to a 64-bit boundary.
 *
 * Input/Output argument to the DRM_VMW_FENCE_SIGNALED and DRM_VMW_FENCE_UNREF
 * ioctls.
 */
struct drm_vmw_fence_signaled_arg {
	uint32_t handle;
	uint32_t flags;
	int32_t signaled;
	uint32_t passed_seqno;
	uint32_t signaled_flags;
	uint32_t pad64;
};
/*************************************************************************/
/**
 * DRM_VMW_FENCE_UNREF
 *
 * Unreferences a fence object, and causes it to be destroyed if there are no
 * other references to it.
 *
 */
/**
 * struct drm_vmw_fence_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @pad64: Explicit padding to a 64-bit boundary.
 *
 * Input/Output argument to the DRM_VMW_FENCE_UNREF ioctl.
 */
struct drm_vmw_fence_arg {
	uint32_t handle;
	uint32_t pad64;
};
/*************************************************************************/
/**
 * DRM_VMW_FENCE_EVENT
 *
 * Queues an event on a fence to be delivered on the drm character device
 * when the fence has signaled the DRM_VMW_FENCE_FLAG_EXEC flag.
 * Optionally the approximate time when the fence signaled is
 * given by the event.
 */
/*
 * The event type
 */
#define DRM_VMW_EVENT_FENCE_SIGNALED 0x80000000
/* Event delivered through the drm character device when the fence signals. */
struct drm_vmw_event_fence {
	struct drm_event base;	/* generic DRM event header */
	uint64_t user_data;	/* user data given in the fence-event arg */
	uint32_t tv_sec;	/* signal time, seconds part */
	uint32_t tv_usec;	/* signal time, microseconds part */
};
/*
 * Flags that may be given to the command.
 */
/* Request fence signaled time on the event. */
#define DRM_VMW_FE_FLAG_REQ_TIME (1 << 0)
/**
 * struct drm_vmw_fence_event_arg
 *
 * @fence_rep: Pointer to fence_rep structure cast to uint64_t or 0 if
 * the fence is not supposed to be referenced by user-space.
 * @user_data: Info to be delivered with the event.
 * @handle: Attach the event to this fence only.
 * @flags: A set of flags as defined above.
 */
struct drm_vmw_fence_event_arg {
	uint64_t fence_rep;
	uint64_t user_data;
	uint32_t handle;
	uint32_t flags;
};
/*************************************************************************/
/**
 * DRM_VMW_PRESENT
 *
 * Executes an SVGA present on a given fb for a given surface. The surface
 * is placed on the framebuffer. Cliprects are given relative to the given
 * point (the point designated by dest_{x|y}).
 *
 */
/**
 * struct drm_vmw_present_arg
 * @fb_id: framebuffer id to present / read back from.
 * @sid: Surface id to present from.
 * @dest_x: X placement coordinate for surface.
 * @dest_y: Y placement coordinate for surface.
 * @clips_ptr: Pointer to an array of clip rects cast to an uint64_t.
 * @num_clips: Number of cliprects given relative to the framebuffer origin,
 * in the same coordinate space as the frame buffer.
 * @pad64: Unused 64-bit padding.
 *
 * Input argument to the DRM_VMW_PRESENT ioctl.
 */
struct drm_vmw_present_arg {
	uint32_t fb_id;
	uint32_t sid;
	int32_t dest_x;
	int32_t dest_y;
	uint64_t clips_ptr;
	uint32_t num_clips;
	uint32_t pad64;
};
/*************************************************************************/
/**
 * DRM_VMW_PRESENT_READBACK
 *
 * Executes an SVGA present readback from a given fb to the dma buffer
 * currently bound as the fb. If there is no dma buffer bound to the fb,
 * an error will be returned.
 *
 */
/**
 * struct drm_vmw_present_readback_arg
 * @fb_id: fb_id to present / read back from.
 * @num_clips: Number of cliprects.
 * @clips_ptr: Pointer to an array of clip rects cast to an uint64_t.
 * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an uint64_t.
 * If this member is NULL, then the ioctl should not return a fence.
 */
struct drm_vmw_present_readback_arg {
	uint32_t fb_id;
	uint32_t num_clips;
	uint64_t clips_ptr;
	uint64_t fence_rep;
};
/*************************************************************************/ |
/** |
* DRM_VMW_UPDATE_LAYOUT - Update layout |
* |
* Updates the preferred modes and connection status for connectors. The |
* command consists of one drm_vmw_update_layout_arg pointing to an array |
* of num_outputs drm_vmw_rect's. |
*/ |
/** |
* struct drm_vmw_update_layout_arg |
* |
* @num_outputs: number of active connectors |
* @rects: pointer to array of drm_vmw_rect cast to an uint64_t |
* |
* Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl. |
*/ |
struct drm_vmw_update_layout_arg { |
uint32_t num_outputs; |
uint32_t pad64; |
uint64_t rects; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_CREATE_SHADER - Create shader |
* |
* Creates a shader and optionally binds it to a dma buffer containing |
* the shader byte-code. |
*/ |
/** |
* enum drm_vmw_shader_type - Shader types |
*/ |
enum drm_vmw_shader_type { |
drm_vmw_shader_type_vs = 0, |
drm_vmw_shader_type_ps, |
drm_vmw_shader_type_gs |
}; |
/** |
* struct drm_vmw_shader_create_arg |
* |
* @shader_type: Shader type of the shader to create. |
* @size: Size of the byte-code in bytes. |
* where the shader byte-code starts |
* @buffer_handle: Buffer handle identifying the buffer containing the |
* shader byte-code |
* @shader_handle: On successful completion contains a handle that |
* can be used to subsequently identify the shader. |
* @offset: Offset in bytes into the buffer given by @buffer_handle, |
* |
* Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl. |
*/ |
struct drm_vmw_shader_create_arg { |
enum drm_vmw_shader_type shader_type; |
uint32_t size; |
uint32_t buffer_handle; |
uint32_t shader_handle; |
uint64_t offset; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_UNREF_SHADER - Unreferences a shader |
* |
* Destroys a user-space reference to a shader, optionally destroying |
* it. |
*/ |
/** |
* struct drm_vmw_shader_arg |
* |
* @handle: Handle identifying the shader to destroy. |
* |
* Input argument to the DRM_VMW_UNREF_SHADER ioctl. |
*/ |
struct drm_vmw_shader_arg { |
uint32_t handle; |
uint32_t pad64; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface. |
* |
* Allocates a surface handle and queues a create surface command |
* for the host on the first use of the surface. The surface ID can |
* be used as the surface ID in commands referencing the surface. |
*/ |
/** |
* enum drm_vmw_surface_flags |
* |
* @drm_vmw_surface_flag_shareable: Whether the surface is shareable |
* @drm_vmw_surface_flag_scanout: Whether the surface is a scanout |
* surface. |
* @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is |
* given. |
*/ |
enum drm_vmw_surface_flags { |
drm_vmw_surface_flag_shareable = (1 << 0), |
drm_vmw_surface_flag_scanout = (1 << 1), |
drm_vmw_surface_flag_create_buffer = (1 << 2) |
}; |
/** |
* struct drm_vmw_gb_surface_create_req |
* |
* @svga3d_flags: SVGA3d surface flags for the device. |
* @format: SVGA3d format. |
* @mip_level: Number of mip levels for all faces. |
* @drm_surface_flags Flags as described above. |
* @multisample_count Future use. Set to 0. |
* @autogen_filter Future use. Set to 0. |
* @buffer_handle Buffer handle of backup buffer. SVGA3D_INVALID_ID |
* if none. |
* @base_size Size of the base mip level for all faces. |
* |
* Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl. |
* Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl. |
*/ |
struct drm_vmw_gb_surface_create_req { |
uint32_t svga3d_flags; |
uint32_t format; |
uint32_t mip_levels; |
enum drm_vmw_surface_flags drm_surface_flags; |
uint32_t multisample_count; |
uint32_t autogen_filter; |
uint32_t buffer_handle; |
uint32_t pad64; |
struct drm_vmw_size base_size; |
}; |
/** |
* struct drm_vmw_gb_surface_create_rep |
* |
* @handle: Surface handle. |
* @backup_size: Size of backup buffers for this surface. |
* @buffer_handle: Handle of backup buffer. SVGA3D_INVALID_ID if none. |
* @buffer_size: Actual size of the buffer identified by |
* @buffer_handle |
* @buffer_map_handle: Offset into device address space for the buffer |
* identified by @buffer_handle. |
* |
* Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl. |
* Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl. |
*/ |
struct drm_vmw_gb_surface_create_rep { |
uint32_t handle; |
uint32_t backup_size; |
uint32_t buffer_handle; |
uint32_t buffer_size; |
uint64_t buffer_map_handle; |
}; |
/** |
* union drm_vmw_gb_surface_create_arg |
* |
* @req: Input argument as described above. |
* @rep: Output argument as described above. |
* |
* Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl. |
*/ |
union drm_vmw_gb_surface_create_arg { |
struct drm_vmw_gb_surface_create_rep rep; |
struct drm_vmw_gb_surface_create_req req; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_GB_SURFACE_REF - Reference a host surface. |
* |
* Puts a reference on a host surface with a given handle, as previously |
* returned by the DRM_VMW_GB_SURFACE_CREATE ioctl. |
* A reference will make sure the surface isn't destroyed while we hold |
* it and will allow the calling client to use the surface handle in |
* the command stream. |
* |
* On successful return, the Ioctl returns the surface information given |
* to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl. |
*/ |
/** |
* struct drm_vmw_gb_surface_reference_arg |
* |
* @creq: The data used as input when the surface was created, as described |
* above at "struct drm_vmw_gb_surface_create_req" |
* @crep: Additional data output when the surface was created, as described |
* above at "struct drm_vmw_gb_surface_create_rep" |
* |
* Output Argument to the DRM_VMW_GB_SURFACE_REF ioctl. |
*/ |
struct drm_vmw_gb_surface_ref_rep { |
struct drm_vmw_gb_surface_create_req creq; |
struct drm_vmw_gb_surface_create_rep crep; |
}; |
/** |
* union drm_vmw_gb_surface_reference_arg |
* |
* @req: Input data as described above at "struct drm_vmw_surface_arg" |
* @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep" |
* |
* Argument to the DRM_VMW_GB_SURFACE_REF Ioctl. |
*/ |
union drm_vmw_gb_surface_reference_arg { |
struct drm_vmw_gb_surface_ref_rep rep; |
struct drm_vmw_surface_arg req; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access. |
* |
* Idles any previously submitted GPU operations on the buffer and |
* by default blocks command submissions that reference the buffer. |
* If the file descriptor used to grab a blocking CPU sync is closed, the |
* cpu sync is released. |
* The flags argument indicates how the grab / release operation should be |
* performed: |
*/ |
/** |
* enum drm_vmw_synccpu_flags - Synccpu flags: |
* |
* @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a |
* hint to the kernel to allow command submissions that references the buffer |
* for read-only. |
* @drm_vmw_synccpu_write: Sync for write. Block all command submissions |
* referencing this buffer. |
* @drm_vmw_synccpu_dontblock: Dont wait for GPU idle, but rather return |
* -EBUSY should the buffer be busy. |
* @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer |
* while the buffer is synced for CPU. This is similar to the GEM bo idle |
* behavior. |
*/ |
enum drm_vmw_synccpu_flags { |
drm_vmw_synccpu_read = (1 << 0), |
drm_vmw_synccpu_write = (1 << 1), |
drm_vmw_synccpu_dontblock = (1 << 2), |
drm_vmw_synccpu_allow_cs = (1 << 3) |
}; |
/** |
* enum drm_vmw_synccpu_op - Synccpu operations: |
* |
* @drm_vmw_synccpu_grab: Grab the buffer for CPU operations |
* @drm_vmw_synccpu_release: Release a previous grab. |
*/ |
enum drm_vmw_synccpu_op { |
drm_vmw_synccpu_grab, |
drm_vmw_synccpu_release |
}; |
/** |
* struct drm_vmw_synccpu_arg |
* |
* @op: The synccpu operation as described above. |
* @handle: Handle identifying the buffer object. |
* @flags: Flags as described above. |
*/ |
struct drm_vmw_synccpu_arg { |
enum drm_vmw_synccpu_op op; |
enum drm_vmw_synccpu_flags flags; |
uint32_t handle; |
uint32_t pad64; |
}; |
#endif |
/drivers/include/uapi/linux/const.h |
---|
0,0 → 1,27 |
/* const.h: Macros for dealing with constants. */ |
#ifndef _LINUX_CONST_H |
#define _LINUX_CONST_H |
/* Some constant macros are used in both assembler and |
* C code. Therefore we cannot annotate them always with |
* 'UL' and other type specifiers unilaterally. We |
* use the following macros to deal with this. |
* |
* Similarly, _AT() will cast an expression with a type in C, but |
* leave it unchanged in asm. |
*/ |
#ifdef __ASSEMBLY__ |
#define _AC(X,Y) X |
#define _AT(T,X) X |
#else |
#define __AC(X,Y) (X##Y) |
#define _AC(X,Y) __AC(X,Y) |
#define _AT(T,X) ((T)(X)) |
#endif |
#define _BITUL(x) (_AC(1,UL) << (x)) |
#define _BITULL(x) (_AC(1,ULL) << (x)) |
#endif /* !(_LINUX_CONST_H) */ |
/drivers/include/uapi/linux/errno.h |
---|
0,0 → 1,0 |
#include <asm/errno.h> |
/drivers/include/uapi/linux/ioctl.h |
---|
0,0 → 1,7 |
#ifndef _LINUX_IOCTL_H |
#define _LINUX_IOCTL_H |
#include <asm/ioctl.h> |
#endif /* _LINUX_IOCTL_H */ |
/drivers/include/uapi/linux/kernel.h |
---|
0,0 → 1,13 |
#ifndef _UAPI_LINUX_KERNEL_H |
#define _UAPI_LINUX_KERNEL_H |
//#include <linux/sysinfo.h> |
/* |
* 'kernel.h' contains some often-used function prototypes etc |
*/ |
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1) |
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask)) |
#endif /* _UAPI_LINUX_KERNEL_H */ |
/drivers/include/uapi/linux/personality.h |
---|
0,0 → 1,69 |
#ifndef _UAPI_LINUX_PERSONALITY_H |
#define _UAPI_LINUX_PERSONALITY_H |
/* |
* Flags for bug emulation. |
* |
* These occupy the top three bytes. |
*/ |
enum { |
UNAME26 = 0x0020000, |
ADDR_NO_RANDOMIZE = 0x0040000, /* disable randomization of VA space */ |
FDPIC_FUNCPTRS = 0x0080000, /* userspace function ptrs point to descriptors |
* (signal handling) |
*/ |
MMAP_PAGE_ZERO = 0x0100000, |
ADDR_COMPAT_LAYOUT = 0x0200000, |
READ_IMPLIES_EXEC = 0x0400000, |
ADDR_LIMIT_32BIT = 0x0800000, |
SHORT_INODE = 0x1000000, |
WHOLE_SECONDS = 0x2000000, |
STICKY_TIMEOUTS = 0x4000000, |
ADDR_LIMIT_3GB = 0x8000000, |
}; |
/* |
* Security-relevant compatibility flags that must be |
* cleared upon setuid or setgid exec: |
*/ |
#define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \ |
ADDR_NO_RANDOMIZE | \ |
ADDR_COMPAT_LAYOUT | \ |
MMAP_PAGE_ZERO) |
/* |
* Personality types. |
* |
* These go in the low byte. Avoid using the top bit, it will |
* conflict with error returns. |
*/ |
enum { |
PER_LINUX = 0x0000, |
PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT, |
PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS, |
PER_SVR4 = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO, |
PER_SVR3 = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE, |
PER_SCOSVR3 = 0x0003 | STICKY_TIMEOUTS | |
WHOLE_SECONDS | SHORT_INODE, |
PER_OSR5 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS, |
PER_WYSEV386 = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE, |
PER_ISCR4 = 0x0005 | STICKY_TIMEOUTS, |
PER_BSD = 0x0006, |
PER_SUNOS = 0x0006 | STICKY_TIMEOUTS, |
PER_XENIX = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE, |
PER_LINUX32 = 0x0008, |
PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB, |
PER_IRIX32 = 0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */ |
PER_IRIXN32 = 0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */ |
PER_IRIX64 = 0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */ |
PER_RISCOS = 0x000c, |
PER_SOLARIS = 0x000d | STICKY_TIMEOUTS, |
PER_UW7 = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO, |
PER_OSF4 = 0x000f, /* OSF/1 v4 */ |
PER_HPUX = 0x0010, |
PER_MASK = 0x00ff, |
}; |
#endif /* _UAPI_LINUX_PERSONALITY_H */ |
/drivers/include/uapi/linux/stddef.h |
---|
0,0 → 1,0 |
#include <linux/compiler.h> |
/drivers/include/uapi/linux/string.h |
---|
0,0 → 1,9 |
#ifndef _UAPI_LINUX_STRING_H_ |
#define _UAPI_LINUX_STRING_H_ |
/* We don't want strings.h stuff being used by user stuff by accident */ |
#ifndef __KERNEL__ |
#include <string.h> |
#endif /* __KERNEL__ */ |
#endif /* _UAPI_LINUX_STRING_H_ */ |
/drivers/include/uapi/linux/sysinfo.h |
---|
0,0 → 1,24 |
#ifndef _LINUX_SYSINFO_H |
#define _LINUX_SYSINFO_H |
#include <linux/types.h> |
#define SI_LOAD_SHIFT 16 |
struct sysinfo { |
__kernel_long_t uptime; /* Seconds since boot */ |
__kernel_ulong_t loads[3]; /* 1, 5, and 15 minute load averages */ |
__kernel_ulong_t totalram; /* Total usable main memory size */ |
__kernel_ulong_t freeram; /* Available memory size */ |
__kernel_ulong_t sharedram; /* Amount of shared memory */ |
__kernel_ulong_t bufferram; /* Memory used by buffers */ |
__kernel_ulong_t totalswap; /* Total swap space size */ |
__kernel_ulong_t freeswap; /* swap space still available */ |
__u16 procs; /* Number of current processes */ |
__u16 pad; /* Explicit padding for m68k */ |
__kernel_ulong_t totalhigh; /* Total high memory size */ |
__kernel_ulong_t freehigh; /* Available high memory size */ |
__u32 mem_unit; /* Memory unit size in bytes */ |
char _f[20-2*sizeof(__kernel_ulong_t)-sizeof(__u32)]; /* Padding: libc5 uses this.. */ |
}; |
#endif /* _LINUX_SYSINFO_H */ |
/drivers/include/uapi/linux/time.h |
---|
0,0 → 1,69 |
#ifndef _UAPI_LINUX_TIME_H |
#define _UAPI_LINUX_TIME_H |
#include <linux/types.h> |
#ifndef _STRUCT_TIMESPEC |
#define _STRUCT_TIMESPEC |
struct timespec { |
__kernel_time_t tv_sec; /* seconds */ |
long tv_nsec; /* nanoseconds */ |
}; |
#endif |
struct timeval { |
__kernel_time_t tv_sec; /* seconds */ |
__kernel_suseconds_t tv_usec; /* microseconds */ |
}; |
struct timezone { |
int tz_minuteswest; /* minutes west of Greenwich */ |
int tz_dsttime; /* type of dst correction */ |
}; |
/* |
* Names of the interval timers, and structure |
* defining a timer setting: |
*/ |
#define ITIMER_REAL 0 |
#define ITIMER_VIRTUAL 1 |
#define ITIMER_PROF 2 |
struct itimerspec { |
struct timespec it_interval; /* timer period */ |
struct timespec it_value; /* timer expiration */ |
}; |
struct itimerval { |
struct timeval it_interval; /* timer interval */ |
struct timeval it_value; /* current value */ |
}; |
/* |
* The IDs of the various system clocks (for POSIX.1b interval timers): |
*/ |
#define CLOCK_REALTIME 0 |
#define CLOCK_MONOTONIC 1 |
#define CLOCK_PROCESS_CPUTIME_ID 2 |
#define CLOCK_THREAD_CPUTIME_ID 3 |
#define CLOCK_MONOTONIC_RAW 4 |
#define CLOCK_REALTIME_COARSE 5 |
#define CLOCK_MONOTONIC_COARSE 6 |
#define CLOCK_BOOTTIME 7 |
#define CLOCK_REALTIME_ALARM 8 |
#define CLOCK_BOOTTIME_ALARM 9 |
#define CLOCK_SGI_CYCLE 10 /* Hardware specific */ |
#define CLOCK_TAI 11 |
#define MAX_CLOCKS 16 |
#define CLOCKS_MASK (CLOCK_REALTIME | CLOCK_MONOTONIC) |
#define CLOCKS_MONO CLOCK_MONOTONIC |
/* |
* The various flags for setting POSIX.1b interval timers: |
*/ |
#define TIMER_ABSTIME 0x01 |
#endif /* _UAPI_LINUX_TIME_H */ |
/drivers/include/uapi/linux/types.h |
---|
0,0 → 1,56 |
#ifndef _UAPI_LINUX_TYPES_H |
#define _UAPI_LINUX_TYPES_H |
#include <asm/types.h> |
#ifndef __ASSEMBLY__ |
#ifndef __KERNEL__ |
#ifndef __EXPORTED_HEADERS__ |
#warning "Attempt to use kernel headers from user space, see http://kernelnewbies.org/KernelHeaders" |
#endif /* __EXPORTED_HEADERS__ */ |
#endif |
#include <linux/posix_types.h> |
/* |
* Below are truly Linux-specific types that should never collide with |
* any application/library that wants linux/types.h. |
*/ |
#ifdef __CHECKER__ |
#define __bitwise__ __attribute__((bitwise)) |
#else |
#define __bitwise__ |
#endif |
#ifdef __CHECK_ENDIAN__ |
#define __bitwise __bitwise__ |
#else |
#define __bitwise |
#endif |
typedef __u16 __bitwise __le16; |
typedef __u16 __bitwise __be16; |
typedef __u32 __bitwise __le32; |
typedef __u32 __bitwise __be32; |
typedef __u64 __bitwise __le64; |
typedef __u64 __bitwise __be64; |
typedef __u16 __bitwise __sum16; |
typedef __u32 __bitwise __wsum; |
/* |
* aligned_u64 should be used in defining kernel<->userspace ABIs to avoid |
* common 32/64-bit compat problems. |
* 64-bit values align to 4-byte boundaries on x86_32 (and possibly other |
* architectures) and to 8-byte boundaries on 64-bit architectures. The new |
* aligned_64 type enforces 8-byte alignment so that structs containing |
* aligned_64 values have the same alignment on 32-bit and 64-bit architectures. |
* No conversions are necessary between 32-bit user-space and a 64-bit kernel. |
*/ |
#define __aligned_u64 __u64 __attribute__((aligned(8))) |
#define __aligned_be64 __be64 __attribute__((aligned(8))) |
#define __aligned_le64 __le64 __attribute__((aligned(8))) |
#endif /* __ASSEMBLY__ */ |
#endif /* _UAPI_LINUX_TYPES_H */ |