/drivers/include/asm/acenv.h |
---|
0,0 → 1,45 |
/* |
* X86 specific ACPICA environments and implementation |
* |
* Copyright (C) 2014, Intel Corporation |
* Author: Lv Zheng <lv.zheng@intel.com> |
* |
* This program is free software; you can redistribute it and/or modify |
* it under the terms of the GNU General Public License version 2 as |
* published by the Free Software Foundation. |
*/ |
#ifndef _ASM_X86_ACENV_H |
#define _ASM_X86_ACENV_H |
#include <asm/special_insns.h> |
/* Asm macros */

/* Write back and invalidate all caches (used around sleep-state entry). */
#define ACPI_FLUSH_CPU_CACHE()	wbinvd()

/*
 * ACPI global hardware lock helpers, implemented elsewhere in assembly.
 * The macros assign the helper's return value to Acq so ACPICA can test
 * whether the lock operation succeeded; both operate on the lock word
 * embedded in the FACS table (facs->global_lock).
 */
int __acpi_acquire_global_lock(unsigned int *lock);
int __acpi_release_global_lock(unsigned int *lock);

#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \
	((Acq) = __acpi_acquire_global_lock(&facs->global_lock))

#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
	((Acq) = __acpi_release_global_lock(&facs->global_lock))
/*
 * Math helper asm macros
 */

/*
 * Unsigned 64-by-32 divide via DIVL: the dividend is EDX:EAX
 * (n_hi:n_lo), the quotient lands in q32 (EAX) and the remainder in
 * r32 (EDX).  NOTE(review): divl faults (#DE) if the quotient does not
 * fit in 32 bits — callers are presumably expected to guarantee that.
 */
#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \
	asm("divl %2;" \
	    : "=a"(q32), "=d"(r32) \
	    : "r"(d32), \
	      "0"(n_lo), "1"(n_hi))

/*
 * Logical right shift of the 64-bit value n_hi:n_lo by one bit, done
 * in place: SHR shifts the high half and pushes its low bit into CF,
 * RCR rotates that carry into the top of the low half.
 */
#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \
	asm("shrl $1,%2 ;" \
	    "rcrl $1,%3;" \
	    : "=r"(n_hi), "=r"(n_lo) \
	    : "0"(n_hi), "1"(n_lo))
#endif /* _ASM_X86_ACENV_H */ |
/drivers/include/asm/acpi.h |
---|
0,0 → 1,170 |
#ifndef _ASM_X86_ACPI_H |
#define _ASM_X86_ACPI_H |
/* |
* Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> |
* Copyright (C) 2001 Patrick Mochel <mochel@osdl.org> |
* |
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
* |
* This program is free software; you can redistribute it and/or modify |
* it under the terms of the GNU General Public License as published by |
* the Free Software Foundation; either version 2 of the License, or |
* (at your option) any later version. |
* |
* This program is distributed in the hope that it will be useful, |
* but WITHOUT ANY WARRANTY; without even the implied warranty of |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
* GNU General Public License for more details. |
* |
* You should have received a copy of the GNU General Public License |
* along with this program; if not, write to the Free Software |
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
* |
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
*/ |
#include <acpi/pdc_intel.h> |
#include <asm/numa.h> |
#include <asm/fixmap.h> |
#include <asm/processor.h> |
#ifdef CONFIG_ACPI_APEI |
# include <asm/pgtable_types.h> |
#endif |
#ifdef CONFIG_ACPI |
extern int acpi_lapic; |
extern int acpi_ioapic; |
extern int acpi_noirq; |
extern int acpi_strict; |
extern int acpi_disabled; |
extern int acpi_pci_disabled; |
extern int acpi_skip_timer_override; |
extern int acpi_use_timer_override; |
extern int acpi_fix_pin2_polarity; |
extern int acpi_disable_cmcff; |
extern u8 acpi_sci_flags; |
extern int acpi_sci_override_gsi; |
void acpi_pic_sci_set_trigger(unsigned int, u16); |
extern int (*__acpi_register_gsi)(struct device *dev, u32 gsi, |
int trigger, int polarity); |
extern void (*__acpi_unregister_gsi)(u32 gsi); |
/*
 * Turn ACPI off completely: sets the master disable flag plus the
 * PCI-use and IRQ-routing disable flags declared above.
 */
static inline void disable_acpi(void)
{
	acpi_disabled = 1;
	acpi_pci_disabled = 1;
	acpi_noirq = 1;
}
extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq); |
/* Stop using ACPI for interrupt routing (tables stay in use). */
static inline void acpi_noirq_set(void) { acpi_noirq = 1; }

/* Stop using ACPI for PCI; implies no ACPI interrupt routing either. */
static inline void acpi_disable_pci(void)
{
	acpi_pci_disabled = 1;
	acpi_noirq_set();
}
/* Low-level suspend routine. */ |
extern int (*acpi_suspend_lowlevel)(void); |
/* Physical address to resume after wakeup */ |
#define acpi_wakeup_address ((unsigned long)(real_mode_header->wakeup_start)) |
/* |
* Check if the CPU can handle C2 and deeper |
*/ |
static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate) |
{ |
/* |
* Early models (<=5) of AMD Opterons are not supposed to go into |
* C2 state. |
* |
* Steppings 0x0A and later are good |
*/ |
if (boot_cpu_data.x86 == 0x0F && |
boot_cpu_data.x86_vendor == X86_VENDOR_AMD && |
boot_cpu_data.x86_model <= 0x05 && |
boot_cpu_data.x86_mask < 0x0A) |
return 1; |
else if (amd_e400_c1e_detected) |
return 1; |
else |
return max_cstate; |
} |
static inline bool arch_has_acpi_pdc(void) |
{ |
struct cpuinfo_x86 *c = &cpu_data(0); |
return (c->x86_vendor == X86_VENDOR_INTEL || |
c->x86_vendor == X86_VENDOR_CENTAUR); |
} |
/*
 * Fill in the x86-specific capability bits of a _PDC buffer (buf[2])
 * based on the boot CPU's feature flags.
 */
static inline void arch_acpi_set_pdc_bits(u32 *buf)
{
	struct cpuinfo_x86 *cpu = &cpu_data(0);
	u32 caps = buf[2];

	/* SMP C-state coordination is always advertised. */
	caps |= ACPI_PDC_C_CAPABILITY_SMP;

	if (cpu_has(cpu, X86_FEATURE_EST))
		caps |= ACPI_PDC_EST_CAPABILITY_SWSMP;

	if (cpu_has(cpu, X86_FEATURE_ACPI))
		caps |= ACPI_PDC_T_FFH;

	/* Without MONITOR/MWAIT, FFH-based C2/C3 entry cannot be used. */
	if (!cpu_has(cpu, X86_FEATURE_MWAIT))
		caps &= ~(ACPI_PDC_C_C2C3_FFH);

	buf[2] = caps;
}
/*
 * True when acpi_lapic is set — presumably set during MADT parsing
 * when at least one local APIC (CPU) entry was found; verify against
 * the table-parsing code.
 */
static inline bool acpi_has_cpu_in_madt(void)
{
	return !!acpi_lapic;
}
#else /* !CONFIG_ACPI */ |
#define acpi_lapic 0 |
#define acpi_ioapic 0 |
#define acpi_disable_cmcff 0 |
/* !CONFIG_ACPI: keep callers building — these become no-ops. */
static inline void acpi_noirq_set(void) { }
static inline void acpi_disable_pci(void) { }
static inline void disable_acpi(void) { }
#endif /* !CONFIG_ACPI */ |
#define ARCH_HAS_POWER_INIT 1 |
#ifdef CONFIG_ACPI_NUMA |
extern int acpi_numa; |
extern int x86_acpi_numa_init(void); |
#endif /* CONFIG_ACPI_NUMA */ |
#define acpi_unlazy_tlb(x) leave_mm(x) |
#ifdef CONFIG_ACPI_APEI |
/*
 * Page protection APEI should use when mapping firmware error regions.
 * The address argument is currently ignored — see below.
 */
static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
{
	/*
	 * We currently have no way to look up the EFI memory map
	 * attributes for a region in a consistent way, because the
	 * memmap is discarded after efi_free_boot_services(). So if
	 * you call efi_mem_attributes() during boot and at runtime,
	 * you could theoretically see different attributes.
	 *
	 * Since we are yet to see any x86 platforms that require
	 * anything other than PAGE_KERNEL (some arm64 platforms
	 * require the equivalent of PAGE_KERNEL_NOCACHE), return that
	 * until we know differently.
	 */
	return PAGE_KERNEL;
}
#endif |
#endif /* _ASM_X86_ACPI_H */ |
/drivers/include/asm/apicdef.h |
---|
0,0 → 1,445 |
#ifndef _ASM_X86_APICDEF_H |
#define _ASM_X86_APICDEF_H |
/* |
* Constants for various Intel APICs. (local APIC, IOAPIC, etc.) |
* |
* Alan Cox <Alan.Cox@linux.org>, 1995. |
* Ingo Molnar <mingo@redhat.com>, 1999, 2000 |
*/ |
/* Default physical MMIO base addresses of the IO-APIC and local APIC. */
#define IO_APIC_DEFAULT_PHYS_BASE	0xfec00000
#define APIC_DEFAULT_PHYS_BASE		0xfee00000

/*
 * This is the IO-APIC register space as specified
 * by Intel docs:
 */
#define IO_APIC_SLOT_SIZE		1024

/* Local APIC register offsets, plus field masks/accessors per register. */
#define APIC_ID		0x20

#define APIC_LVR	0x30			/* version register */
#define APIC_LVR_MASK			0xFF00FF
#define APIC_LVR_DIRECTED_EOI		(1 << 24)
#define GET_APIC_VERSION(x)		((x) & 0xFFu)
#define GET_APIC_MAXLVT(x)		(((x) >> 16) & 0xFFu)
#ifdef CONFIG_X86_32
# define APIC_INTEGRATED(x)		((x) & 0xF0u)
#else
# define APIC_INTEGRATED(x)		(1)	/* all 64-bit CPUs have an integrated APIC */
#endif
#define APIC_XAPIC(x)			((x) >= 0x14)
#define APIC_EXT_SPACE(x)		((x) & 0x80000000)

#define APIC_TASKPRI	0x80
#define APIC_TPRI_MASK			0xFFu

#define APIC_ARBPRI	0x90
#define APIC_ARBPRI_MASK		0xFFu

#define APIC_PROCPRI	0xA0

#define APIC_EOI	0xB0
#define APIC_EOI_ACK			0x0 /* Docs say 0 for future compat. */

#define APIC_RRR	0xC0

#define APIC_LDR	0xD0			/* logical destination */
#define APIC_LDR_MASK			(0xFFu << 24)
#define GET_APIC_LOGICAL_ID(x)		(((x) >> 24) & 0xFFu)
#define SET_APIC_LOGICAL_ID(x)		(((x) << 24))
#define APIC_ALL_CPUS			0xFFu

#define APIC_DFR	0xE0			/* destination format */
#define APIC_DFR_CLUSTER		0x0FFFFFFFul
#define APIC_DFR_FLAT			0xFFFFFFFFul

#define APIC_SPIV	0xF0			/* spurious interrupt vector */
#define APIC_SPIV_DIRECTED_EOI		(1 << 12)
#define APIC_SPIV_FOCUS_DISABLED	(1 << 9)
#define APIC_SPIV_APIC_ENABLED		(1 << 8)

/* ISR/TMR/IRR are each 256-bit bitmaps spread over eight 16-byte slots. */
#define APIC_ISR	0x100
#define APIC_ISR_NR	0x8 /* Number of 32 bit ISR registers. */
#define APIC_TMR	0x180
#define APIC_IRR	0x200

/* Error status register and its individual error bits. */
#define APIC_ESR	0x280
#define APIC_ESR_SEND_CS	0x00001
#define APIC_ESR_RECV_CS	0x00002
#define APIC_ESR_SEND_ACC	0x00004
#define APIC_ESR_RECV_ACC	0x00008
#define APIC_ESR_SENDILL	0x00020
#define APIC_ESR_RECVILL	0x00040
#define APIC_ESR_ILLREGA	0x00080

#define APIC_LVTCMCI	0x2f0

/* Interrupt command register (low word) and its field encodings. */
#define APIC_ICR	0x300
#define APIC_DEST_SELF		0x40000
#define APIC_DEST_ALLINC	0x80000
#define APIC_DEST_ALLBUT	0xC0000
#define APIC_ICR_RR_MASK	0x30000
#define APIC_ICR_RR_INVALID	0x00000
#define APIC_ICR_RR_INPROG	0x10000
#define APIC_ICR_RR_VALID	0x20000
#define APIC_INT_LEVELTRIG	0x08000
#define APIC_INT_ASSERT		0x04000
#define APIC_ICR_BUSY		0x01000
#define APIC_DEST_LOGICAL	0x00800
#define APIC_DEST_PHYSICAL	0x00000
/* Delivery modes (bits 8-10 of the ICR and of LVT entries). */
#define APIC_DM_FIXED		0x00000
#define APIC_DM_FIXED_MASK	0x00700
#define APIC_DM_LOWEST		0x00100
#define APIC_DM_SMI		0x00200
#define APIC_DM_REMRD		0x00300
#define APIC_DM_NMI		0x00400
#define APIC_DM_INIT		0x00500
#define APIC_DM_STARTUP		0x00600
#define APIC_DM_EXTINT		0x00700
#define APIC_VECTOR_MASK	0x000FF

#define APIC_ICR2	0x310			/* ICR high word: destination */
#define GET_APIC_DEST_FIELD(x)	(((x) >> 24) & 0xFF)
#define SET_APIC_DEST_FIELD(x)	((x) << 24)

/* Local vector table entries and their shared field encodings. */
#define APIC_LVTT	0x320			/* timer */
#define APIC_LVTTHMR	0x330			/* thermal */
#define APIC_LVTPC	0x340			/* perf counter */
#define APIC_LVT0	0x350			/* LINT0 */
#define APIC_LVT_TIMER_BASE_MASK	(0x3 << 18)
#define GET_APIC_TIMER_BASE(x)		(((x) >> 18) & 0x3)
#define SET_APIC_TIMER_BASE(x)		(((x) << 18))
#define APIC_TIMER_BASE_CLKIN		0x0
#define APIC_TIMER_BASE_TMBASE		0x1
#define APIC_TIMER_BASE_DIV		0x2
#define APIC_LVT_TIMER_ONESHOT		(0 << 17)
#define APIC_LVT_TIMER_PERIODIC		(1 << 17)
#define APIC_LVT_TIMER_TSCDEADLINE	(2 << 17)
#define APIC_LVT_MASKED			(1 << 16)
#define APIC_LVT_LEVEL_TRIGGER		(1 << 15)
#define APIC_LVT_REMOTE_IRR		(1 << 14)
#define APIC_INPUT_POLARITY		(1 << 13)
#define APIC_SEND_PENDING		(1 << 12)
#define APIC_MODE_MASK			0x700
#define GET_APIC_DELIVERY_MODE(x)	(((x) >> 8) & 0x7)
#define SET_APIC_DELIVERY_MODE(x, y)	(((x) & ~0x700) | ((y) << 8))
#define APIC_MODE_FIXED			0x0
#define APIC_MODE_NMI			0x4
#define APIC_MODE_EXTINT		0x7
#define APIC_LVT1	0x360			/* LINT1 */
#define APIC_LVTERR	0x370			/* error */

/* Timer registers. */
#define APIC_TMICT	0x380			/* initial count */
#define APIC_TMCCT	0x390			/* current count */
#define APIC_TDCR	0x3E0			/* divide configuration */
#define APIC_SELF_IPI	0x3F0
/* Divider encodings for the TDCR (bit 2 is the TMBASE select). */
#define APIC_TDR_DIV_TMBASE	(1 << 2)
#define APIC_TDR_DIV_1		0xB
#define APIC_TDR_DIV_2		0x0
#define APIC_TDR_DIV_4		0x1
#define APIC_TDR_DIV_8		0x2
#define APIC_TDR_DIV_16		0x3
#define APIC_TDR_DIV_32		0x8
#define APIC_TDR_DIV_64		0x9
#define APIC_TDR_DIV_128	0xA

/* AMD extended APIC feature/control registers. */
#define APIC_EFEAT	0x400
#define APIC_ECTRL	0x410
/*
 * AMD extended LVT registers start at offset 0x500, one 16-byte slot
 * per entry.  The argument is parenthesized so that expression
 * arguments (e.g. APIC_EILVTn(i + 1)) expand correctly; the original
 * unparenthesized form would evaluate 0x10 * i + 1 instead.
 */
#define APIC_EILVTn(n)	(0x500 + 0x10 * (n))
/* Number of extended LVT entries per AMD family. */
#define APIC_EILVT_NR_AMD_K8	1	/* # of extended interrupts */
#define APIC_EILVT_NR_AMD_10H	4
#define APIC_EILVT_NR_MAX	APIC_EILVT_NR_AMD_10H
/* Fields of an extended LVT entry. */
#define APIC_EILVT_LVTOFF(x)	(((x) >> 4) & 0xF)
#define APIC_EILVT_MSG_FIX	0x0
#define APIC_EILVT_MSG_SMI	0x2
#define APIC_EILVT_MSG_NMI	0x4
#define APIC_EILVT_MSG_EXT	0x7
#define APIC_EILVT_MASKED	(1 << 16)

/* Fixmap virtual address where the local APIC MMIO window is mapped. */
#define APIC_BASE	(fix_to_virt(FIX_APIC_BASE))
/* IA32_APIC_BASE MSR and its enable bits. */
#define APIC_BASE_MSR	0x800
#define XAPIC_ENABLE	(1UL << 11)
#define X2APIC_ENABLE	(1UL << 10)

#ifdef CONFIG_X86_32
# define MAX_IO_APICS	64
# define MAX_LOCAL_APIC	256
#else
# define MAX_IO_APICS	128
# define MAX_LOCAL_APIC	32768
#endif

/*
 * All x86-64 systems are xAPIC compatible.
 * In the following, "apicid" is a physical APIC ID.
 */
#define XAPIC_DEST_CPUS_SHIFT	4
#define XAPIC_DEST_CPUS_MASK	((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
#define XAPIC_DEST_CLUSTER_MASK	(XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
/* Split an apicid into its cluster (high nibble) and CPU (low nibble). */
#define APIC_CLUSTER(apicid)	((apicid) & XAPIC_DEST_CLUSTER_MASK)
#define APIC_CLUSTERID(apicid)	(APIC_CLUSTER(apicid) >> XAPIC_DEST_CPUS_SHIFT)
#define APIC_CPUID(apicid)	((apicid) & XAPIC_DEST_CPUS_MASK)
#define NUM_APIC_CLUSTERS	((BAD_APICID + 1) >> XAPIC_DEST_CPUS_SHIFT)
/* |
* the local APIC register structure, memory mapped. Not terribly well |
* tested, but we might eventually use this one in the future - the |
* problem why we cannot use it right now is the P5 APIC, it has an |
* errata which cannot take 8-bit reads and writes, only 32-bit ones ... |
*/ |
/*
 * "u32" is spelled via the preprocessor here — presumably to keep this
 * layout self-contained without a types.h dependency; note the matching
 * #undef after the struct.  It is a macro, not a typedef.
 */
#define u32 unsigned int

/* Each architectural register occupies a 16-byte (4 x u32) slot. */
struct local_apic {

/*000*/	struct { u32 __reserved[4]; } __reserved_01;

/*010*/	struct { u32 __reserved[4]; } __reserved_02;

/*020*/	struct { /* APIC ID Register */
		u32   __reserved_1	: 24,
			phys_apic_id	:  4,
			__reserved_2	:  4;
		u32 __reserved[3];
	} id;

/*030*/	const
	struct { /* APIC Version Register */
		u32   version		:  8,
			__reserved_1	:  8,
			max_lvt		:  8,
			__reserved_2	:  8;
		u32 __reserved[3];
	} version;

/*040*/	struct { u32 __reserved[4]; } __reserved_03;

/*050*/	struct { u32 __reserved[4]; } __reserved_04;

/*060*/	struct { u32 __reserved[4]; } __reserved_05;

/*070*/	struct { u32 __reserved[4]; } __reserved_06;

/*080*/	struct { /* Task Priority Register */
		u32   priority	:  8,
			__reserved_1	: 24;
		u32 __reserved_2[3];
	} tpr;

/*090*/	const
	struct { /* Arbitration Priority Register */
		u32   priority	:  8,
			__reserved_1	: 24;
		u32 __reserved_2[3];
	} apr;

/*0A0*/	const
	struct { /* Processor Priority Register */
		u32   priority	:  8,
			__reserved_1	: 24;
		u32 __reserved_2[3];
	} ppr;

/*0B0*/	struct { /* End Of Interrupt Register */
		u32   eoi;
		u32 __reserved[3];
	} eoi;

/*0C0*/	struct { u32 __reserved[4]; } __reserved_07;

/*0D0*/	struct { /* Logical Destination Register */
		u32   __reserved_1	: 24,
			logical_dest	:  8;
		u32 __reserved_2[3];
	} ldr;

/*0E0*/	struct { /* Destination Format Register */
		u32   __reserved_1	: 28,
			model		:  4;
		u32 __reserved_2[3];
	} dfr;

/*0F0*/	struct { /* Spurious Interrupt Vector Register */
		u32	spurious_vector	:  8,
			apic_enabled	:  1,
			focus_cpu	:  1,
			__reserved_2	: 22;
		u32 __reserved_3[3];
	} svr;

	/* ISR/TMR/IRR: 256-bit bitmaps, 32 bits per 16-byte slot. */
/*100*/	struct { /* In Service Register */
/*170*/		u32 bitfield;
		u32 __reserved[3];
	} isr [8];

/*180*/	struct { /* Trigger Mode Register */
/*1F0*/		u32 bitfield;
		u32 __reserved[3];
	} tmr [8];

/*200*/	struct { /* Interrupt Request Register */
/*270*/		u32 bitfield;
		u32 __reserved[3];
	} irr [8];

/*280*/	union { /* Error Status Register */
		struct {
			u32   send_cs_error			:  1,
				receive_cs_error		:  1,
				send_accept_error		:  1,
				receive_accept_error		:  1,
				__reserved_1			:  1,
				send_illegal_vector		:  1,
				receive_illegal_vector		:  1,
				illegal_register_address	:  1,
				__reserved_2			: 24;
			u32 __reserved_3[3];
		} error_bits;
		struct {
			u32 errors;
			u32 __reserved_3[3];
		} all_errors;
	} esr;

/*290*/	struct { u32 __reserved[4]; } __reserved_08;

/*2A0*/	struct { u32 __reserved[4]; } __reserved_09;

/*2B0*/	struct { u32 __reserved[4]; } __reserved_10;

/*2C0*/	struct { u32 __reserved[4]; } __reserved_11;

/*2D0*/	struct { u32 __reserved[4]; } __reserved_12;

/*2E0*/	struct { u32 __reserved[4]; } __reserved_13;

/*2F0*/	struct { u32 __reserved[4]; } __reserved_14;

/*300*/	struct { /* Interrupt Command Register 1 */
		u32   vector			:  8,
			delivery_mode		:  3,
			destination_mode	:  1,
			delivery_status		:  1,
			__reserved_1		:  1,
			level			:  1,
			trigger			:  1,
			__reserved_2		:  2,
			shorthand		:  2,
			__reserved_3		: 12;
		u32 __reserved_4[3];
	} icr1;

/*310*/	struct { /* Interrupt Command Register 2 */
		union {
			u32   __reserved_1	: 24,
				phys_dest	:  4,
				__reserved_2	:  4;
			u32   __reserved_3	: 24,
				logical_dest	:  8;
		} dest;
		u32 __reserved_4[3];
	} icr2;

/*320*/	struct { /* LVT - Timer */
		u32   vector		:  8,
			__reserved_1	:  4,
			delivery_status	:  1,
			__reserved_2	:  3,
			mask		:  1,
			timer_mode	:  1,
			__reserved_3	: 14;
		u32 __reserved_4[3];
	} lvt_timer;

/*330*/	struct { /* LVT - Thermal Sensor */
		u32  vector		:  8,
			delivery_mode	:  3,
			__reserved_1	:  1,
			delivery_status	:  1,
			__reserved_2	:  3,
			mask		:  1,
			__reserved_3	: 15;
		u32 __reserved_4[3];
	} lvt_thermal;

/*340*/	struct { /* LVT - Performance Counter */
		u32   vector		:  8,
			delivery_mode	:  3,
			__reserved_1	:  1,
			delivery_status	:  1,
			__reserved_2	:  3,
			mask		:  1,
			__reserved_3	: 15;
		u32 __reserved_4[3];
	} lvt_pc;

/*350*/	struct { /* LVT - LINT0 */
		u32   vector		:  8,
			delivery_mode	:  3,
			__reserved_1	:  1,
			delivery_status	:  1,
			polarity	:  1,
			remote_irr	:  1,
			trigger		:  1,
			mask		:  1,
			__reserved_2	: 15;
		u32 __reserved_3[3];
	} lvt_lint0;

/*360*/	struct { /* LVT - LINT1 */
		u32   vector		:  8,
			delivery_mode	:  3,
			__reserved_1	:  1,
			delivery_status	:  1,
			polarity	:  1,
			remote_irr	:  1,
			trigger		:  1,
			mask		:  1,
			__reserved_2	: 15;
		u32 __reserved_3[3];
	} lvt_lint1;

/*370*/	struct { /* LVT - Error */
		u32   vector		:  8,
			__reserved_1	:  4,
			delivery_status	:  1,
			__reserved_2	:  3,
			mask		:  1,
			__reserved_3	: 15;
		u32 __reserved_4[3];
	} lvt_error;

/*380*/	struct { /* Timer Initial Count Register */
		u32   initial_count;
		u32 __reserved_2[3];
	} timer_icr;

/*390*/	const
	struct { /* Timer Current Count Register */
		u32   curr_count;
		u32 __reserved_2[3];
	} timer_ccr;

/*3A0*/	struct { u32 __reserved[4]; } __reserved_16;

/*3B0*/	struct { u32 __reserved[4]; } __reserved_17;

/*3C0*/	struct { u32 __reserved[4]; } __reserved_18;

/*3D0*/	struct { u32 __reserved[4]; } __reserved_19;

/*3E0*/	struct { /* Timer Divide Configuration Register */
		u32   divisor		:  4,
			__reserved_1	: 28;
		u32 __reserved_2[3];
	} timer_dcr;

/*3F0*/	struct { u32 __reserved[4]; } __reserved_20;

} __attribute__ ((packed));

#undef u32
/* Sentinel for "no/invalid APIC id"; 8-bit ids on 32-bit, 16-bit otherwise. */
#ifdef CONFIG_X86_32
 #define BAD_APICID 0xFFu
#else
 #define BAD_APICID 0xFFFFu
#endif

/*
 * Delivery modes for IO-APIC redirection entries; the numeric values
 * match the APIC_DM_* encodings above (ICR/LVT bits 8-10).
 */
enum ioapic_irq_destination_types {
	dest_Fixed		= 0,
	dest_LowestPrio		= 1,
	dest_SMI		= 2,
	dest__reserved_1	= 3,
	dest_NMI		= 4,
	dest_INIT		= 5,
	dest__reserved_2	= 6,
	dest_ExtINT		= 7
};
#endif /* _ASM_X86_APICDEF_H */ |
/drivers/include/asm/atomic64_32.h |
---|
312,4 → 312,18 |
#undef alternative_atomic64 |
#undef __alternative_atomic64 |
/*
 * Generate atomic64_and/or/xor for 32-bit x86 as a compare-and-swap
 * loop: read the current value, compute (old c_op i), and attempt to
 * swap it in; retry with the observed value until no other CPU changed
 * the variable in between.
 */
#define ATOMIC64_OP(op, c_op)						\
static inline void atomic64_##op(long long i, atomic64_t *v)		\
{									\
	long long old, c = 0;						\
	while ((old = atomic64_cmpxchg(v, c, c c_op i)) != c)		\
		c = old;						\
}

ATOMIC64_OP(and, &)
ATOMIC64_OP(or, |)
ATOMIC64_OP(xor, ^)

#undef ATOMIC64_OP
#endif /* _ASM_X86_ATOMIC64_32_H */ |
/drivers/include/asm/fixmap.h |
---|
0,0 → 1,170 |
/* |
* fixmap.h: compile-time virtual memory allocation |
* |
* This file is subject to the terms and conditions of the GNU General Public |
* License. See the file "COPYING" in the main directory of this archive |
* for more details. |
* |
* Copyright (C) 1998 Ingo Molnar |
* |
* Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 |
* x86_32 and x86_64 integration by Gustavo F. Padovan, February 2009 |
*/ |
#ifndef _ASM_X86_FIXMAP_H |
#define _ASM_X86_FIXMAP_H |
#ifndef __ASSEMBLY__ |
#include <linux/kernel.h> |
#include <asm/acpi.h> |
#include <asm/apicdef.h> |
#include <asm/page.h> |
#include <asm/pvclock.h> |
#ifdef CONFIG_X86_32 |
#include <linux/threads.h> |
#include <asm/kmap_types.h> |
#else |
#include <uapi/asm/vsyscall.h> |
#endif |
/* |
* We can't declare FIXADDR_TOP as variable for x86_64 because vsyscall |
* uses fixmaps that relies on FIXADDR_TOP for proper address calculation. |
* Because of this, FIXADDR_TOP x86 integration was left as later work. |
*/ |
#ifdef CONFIG_X86_32 |
/* used by vmalloc.c, vsyscall.lds.S. |
* |
* Leave one empty page between vmalloc'ed areas and |
* the start of the fixmap. |
*/ |
extern unsigned long __FIXADDR_TOP; |
#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP) |
#else |
#define FIXADDR_TOP (round_up(VSYSCALL_ADDR + PAGE_SIZE, 1<<PMD_SHIFT) - \ |
PAGE_SIZE) |
#endif |
/* |
* Here we define all the compile-time 'special' virtual |
* addresses. The point is to have a constant address at |
* compile time, but to set the physical address only |
* in the boot process. |
* for x86_32: We allocate these special addresses |
* from the end of virtual memory (0xfffff000) backwards. |
* Also this lets us do fail-safe vmalloc(), we |
* can guarantee that these special addresses and |
* vmalloc()-ed addresses never overlap. |
* |
* These 'compile-time allocated' memory buffers are |
* fixed-size 4k pages (or larger if used with an increment |
* higher than 1). Use set_fixmap(idx,phys) to associate |
* physical memory with fixmap indices. |
* |
* TLB entries of such buffers will not be flushed across |
* task switches. |
*/ |
/*
 * Index of every compile-time fixmap slot; virtual addresses are
 * assigned downward from FIXADDR_TOP, so a larger index means a lower
 * address.  Ordering and the config conditionals are ABI for this
 * kernel build — do not reorder.
 */
enum fixed_addresses {
#ifdef CONFIG_X86_32
	FIX_HOLE,
#else
#ifdef CONFIG_X86_VSYSCALL_EMULATION
	VSYSCALL_PAGE = (FIXADDR_TOP - VSYSCALL_ADDR) >> PAGE_SHIFT,
#endif
#ifdef CONFIG_PARAVIRT_CLOCK
	PVCLOCK_FIXMAP_BEGIN,
	PVCLOCK_FIXMAP_END = PVCLOCK_FIXMAP_BEGIN+PVCLOCK_VSYSCALL_NR_PAGES-1,
#endif
#endif
	FIX_DBGP_BASE,
	FIX_EARLYCON_MEM_BASE,
#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
	FIX_OHCI1394_BASE,
#endif
#ifdef CONFIG_X86_LOCAL_APIC
	FIX_APIC_BASE,	/* local (CPU) APIC) -- required for SMP or not */
#endif
#ifdef CONFIG_X86_IO_APIC
	FIX_IO_APIC_BASE_0,
	FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
#endif
	FIX_RO_IDT,	/* Virtual mapping for read-only IDT */
#ifdef CONFIG_X86_32
	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
#ifdef CONFIG_PCI_MMCONFIG
	FIX_PCIE_MCFG,
#endif
#endif
#ifdef CONFIG_PARAVIRT
	FIX_PARAVIRT_BOOTMAP,
#endif
	FIX_TEXT_POKE1,	/* reserve 2 pages for text_poke() */
	FIX_TEXT_POKE0,	/* first page is last, because allocation is backward */
#ifdef CONFIG_X86_INTEL_MID
	FIX_LNW_VRTC,
#endif
	__end_of_permanent_fixed_addresses,

	/*
	 * 512 temporary boot-time mappings, used by early_ioremap(),
	 * before ioremap() is functional.
	 *
	 * If necessary we round it up to the next 512 pages boundary so
	 * that we can have a single pgd entry and a single pte table:
	 */
#define NR_FIX_BTMAPS		64
#define FIX_BTMAPS_SLOTS	8
#define TOTAL_FIX_BTMAPS	(NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
	/*
	 * If the btmap range would straddle a PTRS_PER_PTE boundary,
	 * bump FIX_BTMAP_END up so the whole range shares one pte table.
	 */
	FIX_BTMAP_END =
	 (__end_of_permanent_fixed_addresses ^
	  (__end_of_permanent_fixed_addresses + TOTAL_FIX_BTMAPS - 1)) &
	 -PTRS_PER_PTE
	 ? __end_of_permanent_fixed_addresses + TOTAL_FIX_BTMAPS -
	   (__end_of_permanent_fixed_addresses & (TOTAL_FIX_BTMAPS - 1))
	 : __end_of_permanent_fixed_addresses,
	FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
#ifdef CONFIG_X86_32
	FIX_WP_TEST,
#endif
#ifdef CONFIG_INTEL_TXT
	FIX_TBOOT_BASE,
#endif
	__end_of_fixed_addresses
};
extern void reserve_top_address(unsigned long reserve); |
#define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) |
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) |
extern int fixmaps_set; |
extern pte_t *kmap_pte; |
extern pgprot_t kmap_prot; |
extern pte_t *pkmap_page_table; |
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte); |
void native_set_fixmap(enum fixed_addresses idx, |
phys_addr_t phys, pgprot_t flags); |
#ifndef CONFIG_PARAVIRT
/* Without paravirt, installing a fixmap is just the native PTE write. */
static inline void __set_fixmap(enum fixed_addresses idx,
				phys_addr_t phys, pgprot_t flags)
{
	native_set_fixmap(idx, phys, flags);
}
#endif
#include <asm-generic/fixmap.h> |
#define __late_set_fixmap(idx, phys, flags) __set_fixmap(idx, phys, flags) |
#define __late_clear_fixmap(idx) __set_fixmap(idx, 0, __pgprot(0)) |
void __early_set_fixmap(enum fixed_addresses idx, |
phys_addr_t phys, pgprot_t flags); |
#endif /* !__ASSEMBLY__ */ |
#endif /* _ASM_X86_FIXMAP_H */ |
/drivers/include/asm/kmap_types.h |
---|
0,0 → 1,12 |
#ifndef _ASM_X86_KMAP_TYPES_H |
#define _ASM_X86_KMAP_TYPES_H |
/*
 * Ask asm-generic to insert guard (fence) slots between kmap types when
 * debugging highmem on 32-bit; the temporary macro is consumed by the
 * include below and removed again.
 */
#if defined(CONFIG_X86_32) && defined(CONFIG_DEBUG_HIGHMEM)
#define  __WITH_KM_FENCE
#endif

#include <asm-generic/kmap_types.h>

#undef __WITH_KM_FENCE
#endif /* _ASM_X86_KMAP_TYPES_H */ |
/drivers/include/asm/numa.h |
---|
0,0 → 1,82 |
#ifndef _ASM_X86_NUMA_H |
#define _ASM_X86_NUMA_H |
#include <linux/nodemask.h> |
#include <asm/topology.h> |
#include <asm/apicdef.h> |
#ifdef CONFIG_NUMA |
#define NR_NODE_MEMBLKS (MAX_NUMNODES*2) |
/* |
* Too small node sizes may confuse the VM badly. Usually they |
* result from BIOS bugs. So dont recognize nodes as standalone |
* NUMA entities that have less than this amount of RAM listed: |
*/ |
#define NODE_MIN_SIZE (4*1024*1024) |
extern int numa_off; |
/* |
* __apicid_to_node[] stores the raw mapping between physical apicid and |
* node and is used to initialize cpu_to_node mapping. |
* |
* The mapping may be overridden by apic->numa_cpu_node() on 32bit and thus |
* should be accessed by the accessors - set_apicid_to_node() and |
* numa_cpu_node(). |
*/ |
extern s16 __apicid_to_node[MAX_LOCAL_APIC]; |
extern nodemask_t numa_nodes_parsed __initdata; |
extern int __init numa_add_memblk(int nodeid, u64 start, u64 end); |
extern void __init numa_set_distance(int from, int to, int distance); |
/*
 * Record the NUMA node of a physical apicid in the raw table above;
 * read back through numa_cpu_node().  No bounds check — apicid must be
 * < MAX_LOCAL_APIC.
 */
static inline void set_apicid_to_node(int apicid, s16 node)
{
	__apicid_to_node[apicid] = node;
}
extern int numa_cpu_node(int cpu); |
#else /* CONFIG_NUMA */ |
/* !CONFIG_NUMA: recording is a no-op, lookups report "no node". */
static inline void set_apicid_to_node(int apicid, s16 node)
{
}

static inline int numa_cpu_node(int cpu)
{
	return NUMA_NO_NODE;
}
#endif /* CONFIG_NUMA */ |
#ifdef CONFIG_X86_32 |
# include <asm/numa_32.h> |
#endif |
#ifdef CONFIG_NUMA |
extern void numa_set_node(int cpu, int node); |
extern void numa_clear_node(int cpu); |
extern void __init init_cpu_to_node(void); |
extern void numa_add_cpu(int cpu); |
extern void numa_remove_cpu(int cpu); |
#else /* CONFIG_NUMA */ |
/* !CONFIG_NUMA: all per-cpu node bookkeeping collapses to no-ops. */
static inline void numa_set_node(int cpu, int node)	{ }
static inline void numa_clear_node(int cpu)		{ }
static inline void init_cpu_to_node(void)		{ }
static inline void numa_add_cpu(int cpu)		{ }
static inline void numa_remove_cpu(int cpu)		{ }
#endif /* CONFIG_NUMA */ |
#ifdef CONFIG_DEBUG_PER_CPU_MAPS |
void debug_cpumask_set_cpu(int cpu, int node, bool enable); |
#endif |
#ifdef CONFIG_NUMA_EMU |
#define FAKE_NODE_MIN_SIZE ((u64)32 << 20) |
#define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1UL)) |
void numa_emu_cmdline(char *); |
#endif /* CONFIG_NUMA_EMU */ |
#endif /* _ASM_X86_NUMA_H */ |
/drivers/include/asm/numa_32.h |
---|
0,0 → 1,12 |
#ifndef _ASM_X86_NUMA_32_H |
#define _ASM_X86_NUMA_32_H |
#ifdef CONFIG_HIGHMEM
extern void set_highmem_pages_init(void);
#else
/* Without HIGHMEM there are no highmem pages to hand to the allocator. */
static inline void set_highmem_pages_init(void)
{
}
#endif
#endif /* _ASM_X86_NUMA_32_H */ |
/drivers/include/asm/pgtable_types.h |
---|
363,10 → 363,9 |
} |
static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot) |
{ |
pgprotval_t val = pgprot_val(pgprot); |
pgprot_t new; |
unsigned long val; |
val = pgprot_val(pgprot); |
pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) | |
((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT)); |
return new; |
373,10 → 372,9 |
} |
static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot) |
{ |
pgprotval_t val = pgprot_val(pgprot); |
pgprot_t new; |
unsigned long val; |
val = pgprot_val(pgprot); |
pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) | |
((val & _PAGE_PAT_LARGE) >> |
(_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT)); |
/drivers/include/asm/pvclock-abi.h |
---|
0,0 → 1,47 |
#ifndef _ASM_X86_PVCLOCK_ABI_H |
#define _ASM_X86_PVCLOCK_ABI_H |
#ifndef __ASSEMBLY__ |
/* |
* These structs MUST NOT be changed. |
* They are the ABI between hypervisor and guest OS. |
* Both Xen and KVM are using this. |
* |
* pvclock_vcpu_time_info holds the system time and the tsc timestamp |
* of the last update. So the guest can use the tsc delta to get a |
* more precise system time. There is one per virtual cpu. |
* |
* pvclock_wall_clock references the point in time when the system |
* time was zero (usually boot time), thus the guest calculates the |
* current wall clock by adding the system time. |
* |
* Protocol for the "version" fields is: hypervisor raises it (making |
* it uneven) before it starts updating the fields and raises it again |
* (making it even) when it is done. Thus the guest can make sure the |
* time values it got are consistent by checking the version before |
* and after reading them. |
*/ |
/* Per-vcpu time page shared with the hypervisor — layout is ABI. */
struct pvclock_vcpu_time_info {
	u32   version;		/* seqlock: odd while the host updates */
	u32   pad0;
	u64   tsc_timestamp;	/* TSC value at the last update */
	u64   system_time;	/* system time at tsc_timestamp */
	u32   tsc_to_system_mul; /* TSC->ns multiplier (fixed point) */
	s8    tsc_shift;	/* shift applied to the TSC delta first */
	u8    flags;		/* PVCLOCK_* bits below */
	u8    pad[2];
} __attribute__((__packed__)); /* 32 bytes */

/* Wall-clock reference (time when system time was zero) — layout is ABI. */
struct pvclock_wall_clock {
	u32   version;		/* same odd/even update protocol */
	u32   sec;
	u32   nsec;
} __attribute__((__packed__));
#define PVCLOCK_TSC_STABLE_BIT (1 << 0) |
#define PVCLOCK_GUEST_STOPPED (1 << 1) |
/* PVCLOCK_COUNTS_FROM_ZERO broke ABI and can't be used anymore. */ |
#define PVCLOCK_COUNTS_FROM_ZERO (1 << 2) |
#endif /* __ASSEMBLY__ */ |
#endif /* _ASM_X86_PVCLOCK_ABI_H */ |
/drivers/include/asm/pvclock.h |
---|
0,0 → 1,74 |
#ifndef _ASM_X86_PVCLOCK_H |
#define _ASM_X86_PVCLOCK_H |
#include <linux/clocksource.h> |
#include <asm/pvclock-abi.h> |
/* some helper functions for xen and kvm pv clock sources */ |
cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src); |
u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src); |
void pvclock_set_flags(u8 flags); |
unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src); |
void pvclock_read_wallclock(struct pvclock_wall_clock *wall, |
struct pvclock_vcpu_time_info *vcpu, |
struct timespec *ts); |
void pvclock_resume(void); |
void pvclock_touch_watchdogs(void); |
/* |
* Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction, |
* yielding a 64-bit result. |
*/ |
/* Computes ((delta << shift) * mul_frac) >> 32 without 128-bit types. */
static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
{
	u64 product;
#ifdef __i386__
	u32 tmp1, tmp2;
#else
	ulong tmp;
#endif

	/* A negative shift scales down, a positive one scales up. */
	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;

#ifdef __i386__
	/*
	 * 64x32 multiply built from two 32x32 MULs; the partial
	 * products are combined so EDX:EAX ends up holding bits 32..95
	 * of the 96-bit product, i.e. the result already shifted
	 * right by 32.
	 */
	__asm__ (
		"mul %5 ; "
		"mov %4,%%eax ; "
		"mov %%edx,%4 ; "
		"mul %5 ; "
		"xor %5,%5 ; "
		"add %4,%%eax ; "
		"adc %5,%%edx ; "
		: "=A" (product), "=r" (tmp1), "=r" (tmp2)
		: "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
#elif defined(__x86_64__)
	/* MULQ gives the full 128-bit product; SHRD extracts bits 32..95. */
	__asm__ (
		"mulq %[mul_frac] ; shrd $32, %[hi], %[lo]"
		: [lo]"=a"(product),
		  [hi]"=d"(tmp)
		: "0"(delta),
		  [mul_frac]"rm"((u64)mul_frac));
#else
#error implement me!
#endif

	return product;
}
struct pvclock_vsyscall_time_info { |
struct pvclock_vcpu_time_info pvti; |
} __attribute__((__aligned__(SMP_CACHE_BYTES))); |
#define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info) |
#define PVCLOCK_VSYSCALL_NR_PAGES (((NR_CPUS-1)/(PAGE_SIZE/PVTI_SIZE))+1) |
int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i, |
int size); |
struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu); |
#endif /* _ASM_X86_PVCLOCK_H */ |
/drivers/include/asm/scatterlist.h |
---|
36,6 → 36,4 |
int dma_map_sg(struct device *dev, struct scatterlist *sglist,
	       int nelems, int dir);
/* Unmapping expands to nothing here; the macro only keeps callers compiling. */
#define dma_unmap_sg(d, s, n, r)
#endif /* __ASM_GENERIC_SCATTERLIST_H */ |
/drivers/include/asm/topology.h |
---|
0,0 → 1,138 |
/* |
* Written by: Matthew Dobson, IBM Corporation |
* |
* Copyright (C) 2002, IBM Corp. |
* |
* All rights reserved. |
* |
* This program is free software; you can redistribute it and/or modify |
* it under the terms of the GNU General Public License as published by |
* the Free Software Foundation; either version 2 of the License, or |
* (at your option) any later version. |
* |
* This program is distributed in the hope that it will be useful, but |
* WITHOUT ANY WARRANTY; without even the implied warranty of |
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or |
* NON INFRINGEMENT. See the GNU General Public License for more |
* details. |
* |
* You should have received a copy of the GNU General Public License |
* along with this program; if not, write to the Free Software |
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
* |
* Send feedback to <colpatch@us.ibm.com> |
*/ |
#ifndef _ASM_X86_TOPOLOGY_H |
#define _ASM_X86_TOPOLOGY_H |
#ifdef CONFIG_X86_32 |
# ifdef CONFIG_SMP |
# define ENABLE_TOPO_DEFINES |
# endif |
#else |
# ifdef CONFIG_SMP |
# define ENABLE_TOPO_DEFINES |
# endif |
#endif |
/* |
* to preserve the visibility of NUMA_NO_NODE definition, |
* moved to there from here. May be used independent of |
* CONFIG_NUMA. |
*/ |
#include <linux/numa.h> |
#ifdef CONFIG_NUMA |
#include <linux/cpumask.h> |
#include <asm/mpspec.h> |
/* Mappings between logical cpu number and node number */ |
DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map); |
#ifdef CONFIG_DEBUG_PER_CPU_MAPS |
/* |
* override generic percpu implementation of cpu_to_node |
*/ |
extern int __cpu_to_node(int cpu); |
#define cpu_to_node __cpu_to_node |
extern int early_cpu_to_node(int cpu); |
#else /* !CONFIG_DEBUG_PER_CPU_MAPS */ |
/* Same function but used if called before per_cpu areas are setup */ |
/* cpu -> node lookup that reads the early per-cpu copy of the map. */
static inline int early_cpu_to_node(int cpu)
{
	return early_per_cpu(x86_cpu_to_node_map, cpu);
}
#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ |
/* Mappings between node number and cpus on that node. */ |
extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; |
#ifdef CONFIG_DEBUG_PER_CPU_MAPS |
extern const struct cpumask *cpumask_of_node(int node); |
#else |
/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
static inline const struct cpumask *cpumask_of_node(int node)
{
	/* No bounds check — callers must pass a valid node id. */
	return node_to_cpumask_map[node];
}
#endif |
extern void setup_node_to_cpumask_map(void); |
/* |
* Returns the number of the node containing Node 'node'. This |
* architecture is flat, so it is a pretty simple function! |
*/ |
#define parent_node(node) (node) |
#define pcibus_to_node(bus) __pcibus_to_node(bus) |
extern int __node_distance(int, int); |
#define node_distance(a, b) __node_distance(a, b) |
#else /* !CONFIG_NUMA */ |
/* !CONFIG_NUMA: the whole machine is a single node 0. */
static inline int numa_node_id(void)
{
	return 0;
}
/*
 * indicate override:
 */
#define numa_node_id numa_node_id

static inline int early_cpu_to_node(int cpu)
{
	return 0;
}
static inline void setup_node_to_cpumask_map(void) { } |
#endif |
#include <asm-generic/topology.h> |
extern const struct cpumask *cpu_coregroup_mask(int cpu); |
#define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id) |
#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id) |
#ifdef ENABLE_TOPO_DEFINES |
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu)) |
#define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu)) |
#endif |
/* Hook for fixing up physical package ids — deliberately empty on x86. */
static inline void arch_fix_phys_package_id(int num, u32 slot)
{
}
struct pci_bus; |
int x86_pci_root_bus_node(int bus); |
void x86_pci_root_bus_resources(int bus, struct list_head *resources); |
#endif /* _ASM_X86_TOPOLOGY_H */ |