/drivers/include/asm/alternative.h |
---|
5,6 → 5,7 |
#include <linux/stddef.h> |
#include <linux/stringify.h> |
#include <asm/asm.h> |
#include <asm/ptrace.h> |
/* |
* Alternative inline assembly for SMP. |
47,9 → 48,16 |
s32 repl_offset; /* offset to replacement instruction */ |
u16 cpuid; /* cpuid bit set for replacement */ |
u8 instrlen; /* length of original instruction */ |
u8 replacementlen; /* length of new instruction, <= instrlen */ |
}; |
u8 replacementlen; /* length of new instruction */ |
u8 padlen; /* length of build-time padding */ |
} __packed; |
/* |
* Debug flag that can be tested to see whether alternative |
* instructions were patched in already: |
*/ |
extern int alternatives_patched; |
extern void alternative_instructions(void); |
extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); |
75,50 → 83,69 |
} |
#endif /* CONFIG_SMP */ |
#define OLDINSTR(oldinstr) "661:\n\t" oldinstr "\n662:\n" |
#define b_replacement(num) "664"#num |
#define e_replacement(num) "665"#num |
#define b_replacement(number) "663"#number |
#define e_replacement(number) "664"#number |
#define alt_end_marker "663" |
#define alt_slen "662b-661b" |
#define alt_rlen(number) e_replacement(number)"f-"b_replacement(number)"f" |
#define alt_pad_len alt_end_marker"b-662b" |
#define alt_total_slen alt_end_marker"b-661b" |
#define alt_rlen(num) e_replacement(num)"f-"b_replacement(num)"f" |
#define ALTINSTR_ENTRY(feature, number) \ |
#define __OLDINSTR(oldinstr, num) \ |
"661:\n\t" oldinstr "\n662:\n" \ |
".skip -(((" alt_rlen(num) ")-(" alt_slen ")) > 0) * " \ |
"((" alt_rlen(num) ")-(" alt_slen ")),0x90\n" |
#define OLDINSTR(oldinstr, num) \ |
__OLDINSTR(oldinstr, num) \ |
alt_end_marker ":\n" |
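The `.skip` expression above is a branchless max-with-zero: gas evaluates a true comparison as -1, so `-(((rlen)-(slen)) > 0)` becomes 1 exactly when the replacement is longer than the original, and the original instruction gets padded with that many NOPs (0x90). A minimal C sketch of the same arithmetic:

```c
/* Sketch: what the .skip expression computes, i.e. pad only when
 * the replacement is longer than the original instruction. */
static inline int oldinstr_pad_len(int rlen, int slen)
{
	return (rlen > slen) ? rlen - slen : 0;	/* NOP (0x90) count */
}
```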
/* |
* max without conditionals. Idea adapted from: |
* http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax |
* |
* The additional "-" is needed because gas works with s32s. |
*/ |
#define alt_max_short(a, b) "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") - (" b ")))))" |
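The macro encodes the cited bithacks branchless max at assemble time; note that later kernels changed the inner subtraction to a `<` comparison so the expression is a true max (gas evaluates a true comparison as -1, hence the extra negation). In C, where a comparison yields 0 or 1, the same idea is this minimal sketch:

```c
/* Sketch: branchless max(a, b). When a < b the mask is all ones
 * and a ^ (a ^ b) == b; otherwise the mask is zero and a wins. */
static inline int max_branchless(int a, int b)
{
	return a ^ ((a ^ b) & -(a < b));
}
```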
/* |
* Pad the second replacement alternative with additional NOPs if it is |
* longer than the first replacement alternative. |
*/ |
#define OLDINSTR_2(oldinstr, num1, num2) \ |
"661:\n\t" oldinstr "\n662:\n" \ |
".skip -((" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")) > 0) * " \ |
"(" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")), 0x90\n" \ |
alt_end_marker ":\n" |
#define ALTINSTR_ENTRY(feature, num) \ |
" .long 661b - .\n" /* label */ \ |
" .long " b_replacement(number)"f - .\n" /* new instruction */ \ |
" .long " b_replacement(num)"f - .\n" /* new instruction */ \ |
" .word " __stringify(feature) "\n" /* feature bit */ \ |
" .byte " alt_slen "\n" /* source len */ \ |
" .byte " alt_rlen(number) "\n" /* replacement len */ |
" .byte " alt_total_slen "\n" /* source len */ \ |
" .byte " alt_rlen(num) "\n" /* replacement len */ \ |
" .byte " alt_pad_len "\n" /* pad len */ |
#define DISCARD_ENTRY(number) /* rlen <= slen */ \ |
" .byte 0xff + (" alt_rlen(number) ") - (" alt_slen ")\n" |
#define ALTINSTR_REPLACEMENT(newinstr, feature, num) /* replacement */ \ |
b_replacement(num)":\n\t" newinstr "\n" e_replacement(num) ":\n\t" |
#define ALTINSTR_REPLACEMENT(newinstr, feature, number) /* replacement */ \ |
b_replacement(number)":\n\t" newinstr "\n" e_replacement(number) ":\n\t" |
/* alternative assembly primitive: */ |
#define ALTERNATIVE(oldinstr, newinstr, feature) \ |
OLDINSTR(oldinstr) \ |
OLDINSTR(oldinstr, 1) \ |
".pushsection .altinstructions,\"a\"\n" \ |
ALTINSTR_ENTRY(feature, 1) \ |
".popsection\n" \ |
".pushsection .discard,\"aw\",@progbits\n" \ |
DISCARD_ENTRY(1) \ |
".popsection\n" \ |
".pushsection .altinstr_replacement, \"ax\"\n" \ |
ALTINSTR_REPLACEMENT(newinstr, feature, 1) \ |
".popsection" |
#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\ |
OLDINSTR(oldinstr) \ |
OLDINSTR_2(oldinstr, 1, 2) \ |
".pushsection .altinstructions,\"a\"\n" \ |
ALTINSTR_ENTRY(feature1, 1) \ |
ALTINSTR_ENTRY(feature2, 2) \ |
".popsection\n" \ |
".pushsection .discard,\"aw\",@progbits\n" \ |
DISCARD_ENTRY(1) \ |
DISCARD_ENTRY(2) \ |
".popsection\n" \ |
".pushsection .altinstr_replacement, \"ax\"\n" \ |
ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \ |
ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \ |
145,6 → 172,9 |
#define alternative(oldinstr, newinstr, feature) \ |
asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory") |
#define alternative_2(oldinstr, newinstr1, feature1, newinstr2, feature2) \ |
asm volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2) ::: "memory") |
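As a usage sketch: the caller supplies the default instruction, the replacement, and the feature bit gating the patch, and apply_alternatives() rewrites the site once at boot. The rdtsc_barrier() helper that the barrier.h hunk below removes followed exactly this pattern:

```c
/* Sketch: NOPs by default, live-patched to a fence on CPUs that
 * advertise the feature (ASM_NOP3 comes from <asm/nops.h>). */
static __always_inline void rdtsc_barrier(void)
{
	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
}
```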
/* |
* Alternative inline assembly with input. |
* |
/drivers/include/asm/arch_hweight.h |
---|
21,15 → 21,13 |
* ARCH_HWEIGHT_CFLAGS in <arch/x86/Kconfig> for the respective |
* compiler switches. |
*/ |
static inline unsigned int __arch_hweight32(unsigned int w) |
static __always_inline unsigned int __arch_hweight32(unsigned int w) |
{ |
unsigned int res = 0; |
asm ("call __sw_hweight32" |
: "="REG_OUT (res) |
: REG_IN (w)); |
return res; |
unsigned int res = w - ((w >> 1) & 0x55555555); |
res = (res & 0x33333333) + ((res >> 2) & 0x33333333); |
res = (res + (res >> 4)) & 0x0F0F0F0F; |
res = res + (res >> 8); |
return (res + (res >> 16)) & 0x000000FF; |
} |
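The new body is the classic SWAR population count: each step folds neighboring bit-sums into wider fields. A quick hosted-C sanity check of the same reduction (assert/main are obviously not part of the header):

```c
#include <assert.h>

/* Same SWAR steps as __arch_hweight32 above. */
static unsigned int swar_hweight32(unsigned int w)
{
	w = w - ((w >> 1) & 0x55555555);		/* 2-bit sums */
	w = (w & 0x33333333) + ((w >> 2) & 0x33333333);	/* 4-bit sums */
	w = (w + (w >> 4)) & 0x0F0F0F0F;		/* 8-bit sums */
	w += w >> 8;					/* fold bytes */
	return (w + (w >> 16)) & 0xFF;
}

int main(void)
{
	assert(swar_hweight32(0) == 0);
	assert(swar_hweight32(0xFFFFFFFFu) == 32);
	assert(swar_hweight32(0x12345678) == 13);
	return 0;
}
```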
static inline unsigned int __arch_hweight16(unsigned int w) |
42,20 → 40,23 |
return __arch_hweight32(w & 0xff); |
} |
#ifdef CONFIG_X86_32 |
static inline unsigned long __arch_hweight64(__u64 w) |
{ |
unsigned long res = 0; |
#ifdef CONFIG_X86_32 |
return __arch_hweight32((u32)w) + |
__arch_hweight32((u32)(w >> 32)); |
} |
#else |
static __always_inline unsigned long __arch_hweight64(__u64 w) |
{ |
unsigned long res = 0; |
asm (ALTERNATIVE("call __sw_hweight64", POPCNT64, X86_FEATURE_POPCNT) |
: "="REG_OUT (res) |
: REG_IN (w)); |
#endif /* CONFIG_X86_32 */ |
return res; |
} |
#endif /* CONFIG_X86_32 */ |
#endif |
/drivers/include/asm/asm.h |
---|
63,6 → 63,31 |
_ASM_ALIGN ; \ |
_ASM_PTR (entry); \ |
.popsection |
.macro ALIGN_DESTINATION |
/* check for bad alignment of destination */ |
movl %edi,%ecx |
andl $7,%ecx |
jz 102f /* already aligned */ |
subl $8,%ecx |
negl %ecx |
subl %ecx,%edx |
100: movb (%rsi),%al |
101: movb %al,(%rdi) |
incq %rsi |
incq %rdi |
decl %ecx |
jnz 100b |
102: |
.section .fixup,"ax" |
103: addl %ecx,%edx /* ecx is zerorest also */ |
jmp copy_user_handle_tail |
.previous |
_ASM_EXTABLE(100b,103b) |
_ASM_EXTABLE(101b,103b) |
.endm |
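In C terms, the macro's register bookkeeping looks roughly like the sketch below (the .fixup/_ASM_EXTABLE entries handle faults during the head copy, which a plain C rendering cannot express):

```c
/* Sketch: copy head bytes until dst reaches an 8-byte boundary,
 * returning the remaining (now aligned) length. Mirrors the
 * ecx/edx arithmetic in ALIGN_DESTINATION. */
static inline unsigned long align_destination(char **dst, const char **src,
					      unsigned long len)
{
	unsigned long head = (unsigned long)*dst & 7;

	if (head) {
		head = 8 - head;	/* bytes up to the boundary */
		len -= head;
		while (head--)
			*(*dst)++ = *(*src)++;
	}
	return len;
}
```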
#else |
# define _ASM_EXTABLE(from,to) \ |
" .pushsection \"__ex_table\",\"a\"\n" \ |
/drivers/include/asm/atomic.h |
---|
22,9 → 22,9 |
* |
* Atomically reads the value of @v. |
*/ |
static inline int atomic_read(const atomic_t *v) |
static __always_inline int atomic_read(const atomic_t *v) |
{ |
return ACCESS_ONCE((v)->counter); |
return READ_ONCE((v)->counter); |
} |
/** |
34,9 → 34,9 |
* |
* Atomically sets the value of @v to @i. |
*/ |
static inline void atomic_set(atomic_t *v, int i) |
static __always_inline void atomic_set(atomic_t *v, int i) |
{ |
v->counter = i; |
WRITE_ONCE(v->counter, i); |
} |
/** |
46,7 → 46,7 |
* |
* Atomically adds @i to @v. |
*/ |
static inline void atomic_add(int i, atomic_t *v) |
static __always_inline void atomic_add(int i, atomic_t *v) |
{ |
asm volatile(LOCK_PREFIX "addl %1,%0" |
: "+m" (v->counter) |
60,7 → 60,7 |
* |
* Atomically subtracts @i from @v. |
*/ |
static inline void atomic_sub(int i, atomic_t *v) |
static __always_inline void atomic_sub(int i, atomic_t *v) |
{ |
asm volatile(LOCK_PREFIX "subl %1,%0" |
: "+m" (v->counter) |
76,7 → 76,7 |
* true if the result is zero, or false for all |
* other cases. |
*/ |
static inline int atomic_sub_and_test(int i, atomic_t *v) |
static __always_inline int atomic_sub_and_test(int i, atomic_t *v) |
{ |
GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e"); |
} |
87,7 → 87,7 |
* |
* Atomically increments @v by 1. |
*/ |
static inline void atomic_inc(atomic_t *v) |
static __always_inline void atomic_inc(atomic_t *v) |
{ |
asm volatile(LOCK_PREFIX "incl %0" |
: "+m" (v->counter)); |
99,7 → 99,7 |
* |
* Atomically decrements @v by 1. |
*/ |
static inline void atomic_dec(atomic_t *v) |
static __always_inline void atomic_dec(atomic_t *v) |
{ |
asm volatile(LOCK_PREFIX "decl %0" |
: "+m" (v->counter)); |
113,7 → 113,7 |
* returns true if the result is 0, or false for all other |
* cases. |
*/ |
static inline int atomic_dec_and_test(atomic_t *v) |
static __always_inline int atomic_dec_and_test(atomic_t *v) |
{ |
GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e"); |
} |
126,7 → 126,7 |
* and returns true if the result is zero, or false for all |
* other cases. |
*/ |
static inline int atomic_inc_and_test(atomic_t *v) |
static __always_inline int atomic_inc_and_test(atomic_t *v) |
{ |
GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e"); |
} |
140,7 → 140,7 |
* if the result is negative, or false when |
* result is greater than or equal to zero. |
*/ |
static inline int atomic_add_negative(int i, atomic_t *v) |
static __always_inline int atomic_add_negative(int i, atomic_t *v) |
{ |
GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s"); |
} |
152,7 → 152,7 |
* |
* Atomically adds @i to @v and returns @i + @v |
*/ |
static inline int atomic_add_return(int i, atomic_t *v) |
static __always_inline int atomic_add_return(int i, atomic_t *v) |
{ |
return i + xadd(&v->counter, i); |
} |
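xadd() exchanges and adds, returning the counter's previous value, so adding @i on top yields the post-add result. A small usage sketch:

```c
/* Sketch: xadd returns the old value (1), so the call returns 3. */
static int demo_add_return(void)
{
	atomic_t v = ATOMIC_INIT(1);

	return atomic_add_return(2, &v);
}
```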
164,7 → 164,7 |
* |
* Atomically subtracts @i from @v and returns @v - @i |
*/ |
static inline int atomic_sub_return(int i, atomic_t *v) |
static __always_inline int atomic_sub_return(int i, atomic_t *v) |
{ |
return atomic_add_return(-i, v); |
} |
172,7 → 172,7 |
#define atomic_inc_return(v) (atomic_add_return(1, v)) |
#define atomic_dec_return(v) (atomic_sub_return(1, v)) |
static inline int atomic_cmpxchg(atomic_t *v, int old, int new) |
static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new) |
{ |
return cmpxchg(&v->counter, old, new); |
} |
182,6 → 182,21 |
return xchg(&v->counter, new); |
} |
#define ATOMIC_OP(op) \ |
static inline void atomic_##op(int i, atomic_t *v) \ |
{ \ |
asm volatile(LOCK_PREFIX #op"l %1,%0" \ |
: "+m" (v->counter) \ |
: "ir" (i) \ |
: "memory"); \ |
} |
ATOMIC_OP(and) |
ATOMIC_OP(or) |
ATOMIC_OP(xor) |
#undef ATOMIC_OP |
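Expanded by hand for op=and, the generator above produces:

```c
/* Sketch: the expansion of ATOMIC_OP(and). */
static inline void atomic_and(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "andl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i)
		     : "memory");
}
```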
/** |
* __atomic_add_unless - add unless the number is already a given value |
* @v: pointer of type atomic_t |
191,7 → 206,7 |
* Atomically adds @a to @v, so long as @v was not already @u. |
* Returns the old value of @v. |
*/ |
static inline int __atomic_add_unless(atomic_t *v, int a, int u) |
static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u) |
{ |
int c, old; |
c = atomic_read(v); |
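The hunk truncates the body here; upstream, the function continues as the usual cmpxchg retry loop, sketched in full:

```c
/* Sketch of the full upstream body: retry until cmpxchg succeeds
 * on an unchanged counter, or the value reaches @u. */
static __always_inline int add_unless_sketch(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
```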
213,22 → 228,12 |
* Atomically adds 1 to @v |
* Returns the new value of @v |
*/ |
static inline short int atomic_inc_short(short int *v) |
static __always_inline short int atomic_inc_short(short int *v) |
{ |
asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v)); |
return *v; |
} |
/* These are x86-specific, used by some header files */ |
#define atomic_clear_mask(mask, addr) \ |
asm volatile(LOCK_PREFIX "andl %0,%1" \ |
: : "r" (~(mask)), "m" (*(addr)) : "memory") |
#define atomic_set_mask(mask, addr) \ |
asm volatile(LOCK_PREFIX "orl %0,%1" \ |
: : "r" ((unsigned)(mask)), "m" (*(addr)) \ |
: "memory") |
#ifdef CONFIG_X86_32 |
# include <asm/atomic64_32.h> |
#else |
/drivers/include/asm/atomic64_32.h |
---|
4,7 → 4,7 |
#include <linux/compiler.h> |
#include <linux/types.h> |
#include <asm/processor.h> |
//#include <asm/cmpxchg.h> |
#include <asm/cmpxchg.h> |
/* An 64bit atomic type */ |
/drivers/include/asm/barrier.h |
---|
35,12 → 35,12 |
#define smp_mb() mb() |
#define smp_rmb() dma_rmb() |
#define smp_wmb() barrier() |
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0) |
#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0) |
#else /* !SMP */ |
#define smp_mb() barrier() |
#define smp_rmb() barrier() |
#define smp_wmb() barrier() |
#define set_mb(var, value) do { var = value; barrier(); } while (0) |
#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0) |
#endif /* SMP */ |
#define read_barrier_depends() do { } while (0) |
57,12 → 57,12 |
do { \ |
compiletime_assert_atomic_type(*p); \ |
smp_mb(); \ |
ACCESS_ONCE(*p) = (v); \ |
WRITE_ONCE(*p, v); \ |
} while (0) |
#define smp_load_acquire(p) \ |
({ \ |
typeof(*p) ___p1 = ACCESS_ONCE(*p); \ |
typeof(*p) ___p1 = READ_ONCE(*p); \ |
compiletime_assert_atomic_type(*p); \ |
smp_mb(); \ |
___p1; \ |
74,12 → 74,12 |
do { \ |
compiletime_assert_atomic_type(*p); \ |
barrier(); \ |
ACCESS_ONCE(*p) = (v); \ |
WRITE_ONCE(*p, v); \ |
} while (0) |
#define smp_load_acquire(p) \ |
({ \ |
typeof(*p) ___p1 = ACCESS_ONCE(*p); \ |
typeof(*p) ___p1 = READ_ONCE(*p); \ |
compiletime_assert_atomic_type(*p); \ |
barrier(); \ |
___p1; \ |
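A usage sketch of the pairing these macros exist for: the release store publishes data before the flag, and the acquire load orders the flag check before the data read:

```c
static int payload;
static int ready;

static void producer(void)
{
	payload = 42;
	smp_store_release(&ready, 1);	/* payload visible before flag */
}

static int consumer(void)
{
	if (smp_load_acquire(&ready))	/* pairs with the release above */
		return payload;		/* guaranteed to observe 42 */
	return -1;
}
```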
91,17 → 91,4 |
#define smp_mb__before_atomic() barrier() |
#define smp_mb__after_atomic() barrier() |
/* |
* Stop RDTSC speculation. This is needed when you need to use RDTSC |
* (or get_cycles or vread that possibly accesses the TSC) in a defined |
* code region. |
* |
* (Could use an alternative three way for this if there was one.) |
*/ |
static __always_inline void rdtsc_barrier(void) |
{ |
alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC); |
alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC); |
} |
#endif /* _ASM_X86_BARRIER_H */ |
/drivers/include/asm/cacheflush.h |
---|
8,7 → 8,7 |
/* |
* The set_memory_* API can be used to change various attributes of a virtual |
* address range. The attributes include: |
* Cacheability : UnCached, WriteCombining, WriteBack |
* Cacheability : UnCached, WriteCombining, WriteThrough, WriteBack |
* Executability : eXecutable, NoteXecutable |
* Read/Write : ReadOnly, ReadWrite |
* Presence : NotPresent |
35,9 → 35,11 |
int _set_memory_uc(unsigned long addr, int numpages); |
int _set_memory_wc(unsigned long addr, int numpages); |
int _set_memory_wt(unsigned long addr, int numpages); |
int _set_memory_wb(unsigned long addr, int numpages); |
int set_memory_uc(unsigned long addr, int numpages); |
int set_memory_wc(unsigned long addr, int numpages); |
int set_memory_wt(unsigned long addr, int numpages); |
int set_memory_wb(unsigned long addr, int numpages); |
int set_memory_x(unsigned long addr, int numpages); |
int set_memory_nx(unsigned long addr, int numpages); |
48,10 → 50,12 |
int set_memory_array_uc(unsigned long *addr, int addrinarray); |
int set_memory_array_wc(unsigned long *addr, int addrinarray); |
int set_memory_array_wt(unsigned long *addr, int addrinarray); |
int set_memory_array_wb(unsigned long *addr, int addrinarray); |
int set_pages_array_uc(struct page **pages, int addrinarray); |
int set_pages_array_wc(struct page **pages, int addrinarray); |
int set_pages_array_wt(struct page **pages, int addrinarray); |
int set_pages_array_wb(struct page **pages, int addrinarray); |
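A usage sketch for the attribute API (buffer and page count are illustrative; each call returns 0 on success):

```c
/* Sketch: map a buffer write-combining for streaming stores,
 * then restore write-back cacheability before releasing it. */
static int stream_through_wc(unsigned long buf, int nr_pages)
{
	int ret = set_memory_wc(buf, nr_pages);

	if (ret)
		return ret;
	/* ... streaming writes to buf ... */
	return set_memory_wb(buf, nr_pages);
}
```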
/* |
105,9 → 109,10 |
}; |
void clflush_cache_range(void *addr, unsigned int size); |
#define mmio_flush_range(addr, size) clflush_cache_range(addr, size) |
#ifdef CONFIG_DEBUG_RODATA |
void mark_rodata_ro(void); |
extern const int rodata_test_data; |
/drivers/include/asm/cpufeature.h |
---|
12,7 → 12,7 |
#include <asm/disabled-features.h> |
#endif |
#define NCAPINTS 11 /* N 32-bit words worth of info */ |
#define NCAPINTS 14 /* N 32-bit words worth of info */ |
#define NBUGINTS 1 /* N 32-bit bug flags */ |
/* |
119,6 → 119,7 |
#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */ |
#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */ |
#define X86_FEATURE_CID ( 4*32+10) /* Context ID */ |
#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */ |
#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */ |
#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */ |
#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */ |
174,7 → 175,9 |
#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */ |
#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */ |
#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */ |
#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */ |
#define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */ |
#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */ |
/* |
* Auxiliary flags: Linux defined - For features scattered in various |
190,10 → 193,11 |
#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ |
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ |
#define X86_FEATURE_HWP ( 7*32+ 10) /* "hwp" Intel HWP */ |
#define X86_FEATURE_HWP_NOITFY ( 7*32+ 11) /* Intel HWP_NOTIFY */ |
#define X86_FEATURE_HWP_NOTIFY ( 7*32+ 11) /* Intel HWP_NOTIFY */ |
#define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */ |
#define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */ |
#define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */ |
#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */ |
/* Virtualization flags: Linux defined, word 8 */ |
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ |
212,6 → 216,7 |
#define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */ |
#define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */ |
#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */ |
#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */ |
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ |
225,15 → 230,19 |
#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */ |
#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */ |
#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */ |
#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */ |
#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */ |
#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */ |
#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */ |
#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */ |
#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */ |
#define X86_FEATURE_PCOMMIT ( 9*32+22) /* PCOMMIT instruction */ |
#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */ |
#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */ |
#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ |
#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */ |
#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */ |
#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */ |
/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */ |
#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */ |
241,6 → 250,15 |
#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */ |
#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */ |
/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */ |
#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */ |
/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */ |
#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */ |
/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */ |
#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */ |
/* |
* BUG word(s) |
*/ |
254,6 → 272,7 |
#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */ |
#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */ |
#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */ |
#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */ |
#if defined(__KERNEL__) && !defined(__ASSEMBLY__) |
388,6 → 407,7 |
#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16) |
#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU) |
#define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT) |
#define cpu_has_bpext boot_cpu_has(X86_FEATURE_BPEXT) |
#if __GNUC__ >= 4 |
extern void warn_pre_alternatives(void); |
416,6 → 436,7 |
" .word %P0\n" /* 1: do replace */ |
" .byte 2b - 1b\n" /* source len */ |
" .byte 0\n" /* replacement len */ |
" .byte 0\n" /* pad len */ |
".previous\n" |
/* skipping size check since replacement size = 0 */ |
: : "i" (X86_FEATURE_ALWAYS) : : t_warn); |
430,6 → 451,7 |
" .word %P0\n" /* feature bit */ |
" .byte 2b - 1b\n" /* source len */ |
" .byte 0\n" /* replacement len */ |
" .byte 0\n" /* pad len */ |
".previous\n" |
/* skipping size check since replacement size = 0 */ |
: : "i" (bit) : : t_no); |
455,6 → 477,7 |
" .word %P1\n" /* feature bit */ |
" .byte 2b - 1b\n" /* source len */ |
" .byte 4f - 3f\n" /* replacement len */ |
" .byte 0\n" /* pad len */ |
".previous\n" |
".section .discard,\"aw\",@progbits\n" |
" .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */ |
481,31 → 504,30 |
static __always_inline __pure bool _static_cpu_has_safe(u16 bit) |
{ |
#ifdef CC_HAVE_ASM_GOTO |
/* |
* We need to spell the jumps to the compiler because, depending on the offset, |
* the replacement jump can be bigger than the original jump, and this we cannot |
* have. Thus, we force the jump to the widest, 4-byte, signed relative |
* offset even though the last would often fit in less bytes. |
*/ |
asm_volatile_goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n" |
asm_volatile_goto("1: jmp %l[t_dynamic]\n" |
"2:\n" |
".skip -(((5f-4f) - (2b-1b)) > 0) * " |
"((5f-4f) - (2b-1b)),0x90\n" |
"3:\n" |
".section .altinstructions,\"a\"\n" |
" .long 1b - .\n" /* src offset */ |
" .long 3f - .\n" /* repl offset */ |
" .long 4f - .\n" /* repl offset */ |
" .word %P1\n" /* always replace */ |
" .byte 2b - 1b\n" /* src len */ |
" .byte 4f - 3f\n" /* repl len */ |
" .byte 3b - 1b\n" /* src len */ |
" .byte 5f - 4f\n" /* repl len */ |
" .byte 3b - 2b\n" /* pad len */ |
".previous\n" |
".section .altinstr_replacement,\"ax\"\n" |
"3: .byte 0xe9\n .long %l[t_no] - 2b\n" |
"4:\n" |
"4: jmp %l[t_no]\n" |
"5:\n" |
".previous\n" |
".section .altinstructions,\"a\"\n" |
" .long 1b - .\n" /* src offset */ |
" .long 0\n" /* no replacement */ |
" .word %P0\n" /* feature bit */ |
" .byte 2b - 1b\n" /* src len */ |
" .byte 3b - 1b\n" /* src len */ |
" .byte 0\n" /* repl len */ |
" .byte 0\n" /* pad len */ |
".previous\n" |
: : "i" (bit), "i" (X86_FEATURE_ALWAYS) |
: : t_dynamic, t_no); |
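A usage sketch of what this buys: boot_cpu_has() loads the feature bitmap on every call, while static_cpu_has() compiles to a jump that gets patched once at boot (the copy helpers below are hypothetical names, not from this tree):

```c
/* Sketch: feature-dispatched fast path with no bitmap load. */
static void copy_dispatch(void *to, const void *from)
{
	if (static_cpu_has(X86_FEATURE_ERMS))
		copy_fast(to, from);	/* hypothetical ERMS path */
	else
		copy_generic(to, from);	/* hypothetical fallback */
}
```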
525,6 → 547,7 |
" .word %P2\n" /* always replace */ |
" .byte 2b - 1b\n" /* source len */ |
" .byte 4f - 3f\n" /* replacement len */ |
" .byte 0\n" /* pad len */ |
".previous\n" |
".section .discard,\"aw\",@progbits\n" |
" .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */ |
539,6 → 562,7 |
" .word %P1\n" /* feature bit */ |
" .byte 4b - 3b\n" /* src len */ |
" .byte 6f - 5f\n" /* repl len */ |
" .byte 0\n" /* pad len */ |
".previous\n" |
".section .discard,\"aw\",@progbits\n" |
" .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */ |
/drivers/include/asm/delay.h |
---|
4,5 → 4,6 |
#include <asm-generic/delay.h> |
void use_tsc_delay(void); |
void use_mwaitx_delay(void); |
#endif /* _ASM_X86_DELAY_H */ |
/drivers/include/asm/e820.h |
---|
40,14 → 40,6 |
} |
#endif |
#ifdef CONFIG_MEMTEST |
extern void early_memtest(unsigned long start, unsigned long end); |
#else |
static inline void early_memtest(unsigned long start, unsigned long end) |
{ |
} |
#endif |
extern unsigned long e820_end_of_ram_pfn(void); |
extern unsigned long e820_end_of_low_ram_pfn(void); |
extern u64 early_reserve_e820(u64 sizet, u64 align); |
/drivers/include/asm/fpu/types.h |
---|
0,0 → 1,355 |
/* |
* FPU data structures: |
*/ |
#ifndef _ASM_X86_FPU_H |
#define _ASM_X86_FPU_H |
/* |
* The legacy x87 FPU state format, as saved by FSAVE and |
* restored by the FRSTOR instructions: |
*/ |
struct fregs_state { |
u32 cwd; /* FPU Control Word */ |
u32 swd; /* FPU Status Word */ |
u32 twd; /* FPU Tag Word */ |
u32 fip; /* FPU IP Offset */ |
u32 fcs; /* FPU IP Selector */ |
u32 foo; /* FPU Operand Pointer Offset */ |
u32 fos; /* FPU Operand Pointer Selector */ |
/* 8*10 bytes for each FP-reg = 80 bytes: */ |
u32 st_space[20]; |
/* Software status information [not touched by FSAVE]: */ |
u32 status; |
}; |
/* |
* The legacy fx SSE/MMX FPU state format, as saved by FXSAVE and |
* restored by the FXRSTOR instructions. It's similar to the FSAVE |
* format, but differs in some areas, plus has extensions at |
* the end for the XMM registers. |
*/ |
struct fxregs_state { |
u16 cwd; /* Control Word */ |
u16 swd; /* Status Word */ |
u16 twd; /* Tag Word */ |
u16 fop; /* Last Instruction Opcode */ |
union { |
struct { |
u64 rip; /* Instruction Pointer */ |
u64 rdp; /* Data Pointer */ |
}; |
struct { |
u32 fip; /* FPU IP Offset */ |
u32 fcs; /* FPU IP Selector */ |
u32 foo; /* FPU Operand Offset */ |
u32 fos; /* FPU Operand Selector */ |
}; |
}; |
u32 mxcsr; /* MXCSR Register State */ |
u32 mxcsr_mask; /* MXCSR Mask */ |
/* 8*16 bytes for each FP-reg = 128 bytes: */ |
u32 st_space[32]; |
/* 16*16 bytes for each XMM-reg = 256 bytes: */ |
u32 xmm_space[64]; |
u32 padding[12]; |
union { |
u32 padding1[12]; |
u32 sw_reserved[12]; |
}; |
} __attribute__((aligned(16))); |
/* Default value for fxregs_state.mxcsr: */ |
#define MXCSR_DEFAULT 0x1f80 |
/* |
* Software based FPU emulation state. This is arbitrary really, |
* it matches the x87 format to make it easier to understand: |
*/ |
struct swregs_state { |
u32 cwd; |
u32 swd; |
u32 twd; |
u32 fip; |
u32 fcs; |
u32 foo; |
u32 fos; |
/* 8*10 bytes for each FP-reg = 80 bytes: */ |
u32 st_space[20]; |
u8 ftop; |
u8 changed; |
u8 lookahead; |
u8 no_update; |
u8 rm; |
u8 alimit; |
struct math_emu_info *info; |
u32 entry_eip; |
}; |
/* |
* List of XSAVE features Linux knows about: |
*/ |
enum xfeature { |
XFEATURE_FP, |
XFEATURE_SSE, |
/* |
* Values above here are "legacy states". |
* Those below are "extended states". |
*/ |
XFEATURE_YMM, |
XFEATURE_BNDREGS, |
XFEATURE_BNDCSR, |
XFEATURE_OPMASK, |
XFEATURE_ZMM_Hi256, |
XFEATURE_Hi16_ZMM, |
XFEATURE_MAX, |
}; |
#define XFEATURE_MASK_FP (1 << XFEATURE_FP) |
#define XFEATURE_MASK_SSE (1 << XFEATURE_SSE) |
#define XFEATURE_MASK_YMM (1 << XFEATURE_YMM) |
#define XFEATURE_MASK_BNDREGS (1 << XFEATURE_BNDREGS) |
#define XFEATURE_MASK_BNDCSR (1 << XFEATURE_BNDCSR) |
#define XFEATURE_MASK_OPMASK (1 << XFEATURE_OPMASK) |
#define XFEATURE_MASK_ZMM_Hi256 (1 << XFEATURE_ZMM_Hi256) |
#define XFEATURE_MASK_Hi16_ZMM (1 << XFEATURE_Hi16_ZMM) |
#define XFEATURE_MASK_FPSSE (XFEATURE_MASK_FP | XFEATURE_MASK_SSE) |
#define XFEATURE_MASK_AVX512 (XFEATURE_MASK_OPMASK \ |
| XFEATURE_MASK_ZMM_Hi256 \ |
| XFEATURE_MASK_Hi16_ZMM) |
#define FIRST_EXTENDED_XFEATURE XFEATURE_YMM |
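A small sketch using the masks: AVX-512 state is only complete when all three of its components are present in a saved bitmap:

```c
/* Sketch: check a saved xfeatures bitmap for full AVX-512 state. */
static inline int xfeatures_have_avx512(u64 xfeatures)
{
	return (xfeatures & XFEATURE_MASK_AVX512) == XFEATURE_MASK_AVX512;
}
```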
struct reg_128_bit { |
u8 regbytes[128/8]; |
}; |
struct reg_256_bit { |
u8 regbytes[256/8]; |
}; |
struct reg_512_bit { |
u8 regbytes[512/8]; |
}; |
/* |
* State component 2: |
* |
* There are 16x 256-bit AVX registers named YMM0-YMM15. |
* The low 128 bits are aliased to the 16 SSE registers (XMM0-XMM15) |
* and are stored in 'struct fxregs_state::xmm_space[]' in the |
* "legacy" area. |
* |
* The high 128 bits are stored here. |
*/ |
struct ymmh_struct { |
struct reg_128_bit hi_ymm[16]; |
} __packed; |
/* Intel MPX support: */ |
struct mpx_bndreg { |
u64 lower_bound; |
u64 upper_bound; |
} __packed; |
/* |
* State component 3 is used for the 4 128-bit bounds registers |
*/ |
struct mpx_bndreg_state { |
struct mpx_bndreg bndreg[4]; |
} __packed; |
/* |
* State component 4 is used for the 64-bit user-mode MPX |
* configuration register BNDCFGU and the 64-bit MPX status |
* register BNDSTATUS. We call the pair "BNDCSR". |
*/ |
struct mpx_bndcsr { |
u64 bndcfgu; |
u64 bndstatus; |
} __packed; |
/* |
* The BNDCSR state is padded out to be 64-bytes in size. |
*/ |
struct mpx_bndcsr_state { |
union { |
struct mpx_bndcsr bndcsr; |
u8 pad_to_64_bytes[64]; |
}; |
} __packed; |
/* AVX-512 Components: */ |
/* |
* State component 5 is used for the 8 64-bit opmask registers |
* k0-k7 (opmask state). |
*/ |
struct avx_512_opmask_state { |
u64 opmask_reg[8]; |
} __packed; |
/* |
* State component 6 is used for the upper 256 bits of the |
* registers ZMM0-ZMM15. These 16 256-bit values are denoted |
* ZMM0_H-ZMM15_H (ZMM_Hi256 state). |
*/ |
struct avx_512_zmm_uppers_state { |
struct reg_256_bit zmm_upper[16]; |
} __packed; |
/* |
* State component 7 is used for the 16 512-bit registers |
* ZMM16-ZMM31 (Hi16_ZMM state). |
*/ |
struct avx_512_hi16_state { |
struct reg_512_bit hi16_zmm[16]; |
} __packed; |
struct xstate_header { |
u64 xfeatures; |
u64 xcomp_bv; |
u64 reserved[6]; |
} __attribute__((packed)); |
/* |
* This is our most modern FPU state format, as saved by the XSAVE |
* and restored by the XRSTOR instructions. |
* |
* It consists of a legacy fxregs portion, an xstate header and |
* subsequent areas as defined by the xstate header. Not all CPUs |
* support all the extensions, so the size of the extended area |
* can vary quite a bit between CPUs. |
*/ |
struct xregs_state { |
struct fxregs_state i387; |
struct xstate_header header; |
u8 extended_state_area[0]; |
} __attribute__ ((packed, aligned (64))); |
/* |
* This is a union of all the possible FPU state formats |
* put together, so that we can pick the right one at runtime. |
* |
* The size of the structure is determined by the largest |
* member - which is the xsave area. The padding is there |
* to ensure that statically-allocated task_structs (just |
* the init_task today) have enough space. |
*/ |
union fpregs_state { |
struct fregs_state fsave; |
struct fxregs_state fxsave; |
struct swregs_state soft; |
struct xregs_state xsave; |
u8 __padding[PAGE_SIZE]; |
}; |
/* |
* Highest level per task FPU state data structure that |
* contains the FPU register state plus various FPU |
* state fields: |
*/ |
struct fpu { |
/* |
* @last_cpu: |
* |
* Records the last CPU on which this context was loaded into |
* FPU registers. (In the lazy-restore case we might be |
* able to reuse FPU registers across multiple context switches |
* this way, if no intermediate task used the FPU.) |
* |
* A value of -1 is used to indicate that the FPU state in context |
* memory is newer than the FPU state in registers, and that the |
* FPU state should be reloaded next time the task is run. |
*/ |
unsigned int last_cpu; |
/* |
* @fpstate_active: |
* |
* This flag indicates whether this context is active: if the task |
* is not running then we can restore from this context, if the task |
* is running then we should save into this context. |
*/ |
unsigned char fpstate_active; |
/* |
* @fpregs_active: |
* |
* This flag determines whether a given context is actively |
* loaded into the FPU's registers and that those registers |
* represent the task's current FPU state. |
* |
* Note the interaction with fpstate_active: |
* |
* # task does not use the FPU: |
* fpstate_active == 0 |
* |
* # task uses the FPU and regs are active: |
* fpstate_active == 1 && fpregs_active == 1 |
* |
* # the regs are inactive but still match fpstate: |
* fpstate_active == 1 && fpregs_active == 0 && fpregs_owner == fpu |
* |
* The third state is what we use for the lazy restore optimization |
* on lazy-switching CPUs. |
*/ |
unsigned char fpregs_active; |
/* |
* @counter: |
* |
* This counter contains the number of consecutive context switches |
* during which the FPU stays used. If this is over a threshold, the |
* lazy FPU restore logic becomes eager, to save the trap overhead. |
* This is an unsigned char so that after 256 iterations the counter |
* wraps and the context switch behavior turns lazy again; this is to |
* deal with bursty apps that only use the FPU for a short time: |
*/ |
unsigned char counter; |
/* |
* @state: |
* |
* In-memory copy of all FPU registers that we save/restore |
* over context switches. If the task is using the FPU then |
* the registers in the FPU are more recent than this state |
* copy. If the task context-switches away then they get |
* saved here and represent the FPU state. |
* |
* After context switches there may be a (short) time period |
* during which the in-FPU hardware registers are unchanged |
* and still perfectly match this state, if the tasks |
* scheduled afterwards are not using the FPU. |
* |
* This is the 'lazy restore' window of optimization, which |
* we track through 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'. |
* |
* We detect whether a subsequent task uses the FPU via setting |
* CR0::TS to 1, which causes any FPU use to raise a #NM fault. |
* |
* During this window, if the task gets scheduled again, we |
* might be able to skip having to do a restore from this |
* memory buffer to the hardware registers - at the cost of |
* incurring the overhead of #NM fault traps. |
* |
* Note that on modern CPUs that support the XSAVEOPT (or other |
* optimized XSAVE instructions), we don't use #NM traps anymore, |
* as the hardware can track whether FPU registers need saving |
* or not. On such CPUs we activate the non-lazy ('eagerfpu') |
* logic, which unconditionally saves/restores all FPU state |
* across context switches. (if FPU state exists.) |
*/ |
union fpregs_state state; |
/* |
* WARNING: 'state' is dynamically-sized. Do not put |
* anything after it here. |
*/ |
}; |
#endif /* _ASM_X86_FPU_H */ |
/drivers/include/asm/intel-mid.h |
---|
0,0 → 1,151 |
/* |
* intel-mid.h: Intel MID specific setup code |
* |
* (C) Copyright 2009 Intel Corporation |
* |
* This program is free software; you can redistribute it and/or |
* modify it under the terms of the GNU General Public License |
* as published by the Free Software Foundation; version 2 |
* of the License. |
*/ |
#ifndef _ASM_X86_INTEL_MID_H |
#define _ASM_X86_INTEL_MID_H |
#include <linux/sfi.h> |
//#include <linux/platform_device.h> |
extern int intel_mid_pci_init(void); |
extern int get_gpio_by_name(const char *name); |
extern void intel_scu_device_register(struct platform_device *pdev); |
extern int __init sfi_parse_mrtc(struct sfi_table_header *table); |
extern int __init sfi_parse_mtmr(struct sfi_table_header *table); |
extern int sfi_mrtc_num; |
extern struct sfi_rtc_table_entry sfi_mrtc_array[]; |
/* |
* This defines the array of device platform data that IAFW exports |
* through the SFI "DEVS" table; we use name and type to match a device |
* to its platform data. |
*/ |
struct devs_id { |
char name[SFI_NAME_LEN + 1]; |
u8 type; |
u8 delay; |
void *(*get_platform_data)(void *info); |
/* Custom handler for devices */ |
void (*device_handler)(struct sfi_device_table_entry *pentry, |
struct devs_id *dev); |
}; |
#define sfi_device(i) \ |
static const struct devs_id *const __intel_mid_sfi_##i##_dev __used \ |
__attribute__((__section__(".x86_intel_mid_dev.init"))) = &i |
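A registration sketch in the style of the kernel's device_libs files (names here are illustrative, not from this tree):

```c
/* Sketch: hook a device's platform data into the SFI DEVS match. */
static void *example_platform_data(void *info)
{
	return NULL;	/* nothing beyond the SFI entry itself */
}

static const struct devs_id example_dev_id __initconst = {
	.name = "example_dev",
	.type = SFI_DEV_TYPE_IPC,
	.delay = 1,
	.get_platform_data = &example_platform_data,
};

sfi_device(example_dev_id);
```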
/* |
* Medfield is the follow-up to Moorestown; it combines the two-chip solution |
* into one, and also adds always-on, constant TSC and LAPIC timers. Medfield |
* is the platform name, and the chip name is Penwell; we treat |
* Medfield/Penwell as a variant of Moorestown. Penwell can be identified |
* via MSRs. |
*/ |
enum intel_mid_cpu_type { |
/* 1 was Moorestown */ |
INTEL_MID_CPU_CHIP_PENWELL = 2, |
INTEL_MID_CPU_CHIP_CLOVERVIEW, |
INTEL_MID_CPU_CHIP_TANGIER, |
}; |
extern enum intel_mid_cpu_type __intel_mid_cpu_chip; |
/** |
* struct intel_mid_ops - Interface between intel-mid & sub archs |
* @arch_setup: arch_setup function to re-initialize platform |
* structures (x86_init, x86_platform_init) |
* |
* This structure can be extended if any new interface is required |
* between intel-mid & its sub arch files. |
*/ |
struct intel_mid_ops { |
void (*arch_setup)(void); |
}; |
/* Helper API's for INTEL_MID_OPS_INIT */ |
#define DECLARE_INTEL_MID_OPS_INIT(cpuname, cpuid) \ |
[cpuid] = get_##cpuname##_ops |
/* Maximum number of CPU ops */ |
#define MAX_CPU_OPS(a) (sizeof(a)/sizeof(void *)) |
/* |
* For every new cpu addition, a weak get_<cpuname>_ops() function needs to be |
* declared in arch/x86/platform/intel_mid/intel_mid_weak_decls.h. |
*/ |
#define INTEL_MID_OPS_INIT {\ |
DECLARE_INTEL_MID_OPS_INIT(penwell, INTEL_MID_CPU_CHIP_PENWELL), \ |
DECLARE_INTEL_MID_OPS_INIT(cloverview, INTEL_MID_CPU_CHIP_CLOVERVIEW), \ |
DECLARE_INTEL_MID_OPS_INIT(tangier, INTEL_MID_CPU_CHIP_TANGIER) \ |
}; |
#ifdef CONFIG_X86_INTEL_MID |
static inline enum intel_mid_cpu_type intel_mid_identify_cpu(void) |
{ |
return __intel_mid_cpu_chip; |
} |
static inline bool intel_mid_has_msic(void) |
{ |
return (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_PENWELL); |
} |
#else /* !CONFIG_X86_INTEL_MID */ |
#define intel_mid_identify_cpu() (0) |
#define intel_mid_has_msic() (0) |
#endif /* !CONFIG_X86_INTEL_MID */ |
enum intel_mid_timer_options { |
INTEL_MID_TIMER_DEFAULT, |
INTEL_MID_TIMER_APBT_ONLY, |
INTEL_MID_TIMER_LAPIC_APBT, |
}; |
extern enum intel_mid_timer_options intel_mid_timer_options; |
/* |
* Penwell uses a spread-spectrum clock, so the frequency number is not |
* exactly the same as that reported via the MSR per the SDM. |
*/ |
#define FSB_FREQ_83SKU 83200 |
#define FSB_FREQ_100SKU 99840 |
#define FSB_FREQ_133SKU 133000 |
#define FSB_FREQ_167SKU 167000 |
#define FSB_FREQ_200SKU 200000 |
#define FSB_FREQ_267SKU 267000 |
#define FSB_FREQ_333SKU 333000 |
#define FSB_FREQ_400SKU 400000 |
/* Bus Select SoC Fuse value */ |
#define BSEL_SOC_FUSE_MASK 0x7 |
#define BSEL_SOC_FUSE_001 0x1 /* FSB 133MHz */ |
#define BSEL_SOC_FUSE_101 0x5 /* FSB 100MHz */ |
#define BSEL_SOC_FUSE_111 0x7 /* FSB 83MHz */ |
#define SFI_MTMR_MAX_NUM 8 |
#define SFI_MRTC_MAX 8 |
extern void intel_scu_devices_create(void); |
extern void intel_scu_devices_destroy(void); |
/* VRTC timer */ |
#define MRST_VRTC_MAP_SZ (1024) |
/*#define MRST_VRTC_PGOFFSET (0xc00) */ |
extern void intel_mid_rtc_init(void); |
/* the offset for the mapping of global gpio pin to irq */ |
#define INTEL_MID_IRQ_OFFSET 0x100 |
#endif /* _ASM_X86_INTEL_MID_H */ |
/drivers/include/asm/irqflags.h |
---|
136,10 → 136,6 |
#define USERGS_SYSRET32 \ |
swapgs; \ |
sysretl |
#define ENABLE_INTERRUPTS_SYSEXIT32 \ |
swapgs; \ |
sti; \ |
sysexit |
#else |
#define INTERRUPT_RETURN iret |
163,22 → 159,27 |
return arch_irqs_disabled_flags(flags); |
} |
#endif /* !__ASSEMBLY__ */ |
#ifdef __ASSEMBLY__ |
#ifdef CONFIG_TRACE_IRQFLAGS |
# define TRACE_IRQS_ON call trace_hardirqs_on_thunk; |
# define TRACE_IRQS_OFF call trace_hardirqs_off_thunk; |
#else |
# define TRACE_IRQS_ON |
# define TRACE_IRQS_OFF |
#endif |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
#ifdef CONFIG_X86_64 |
#define ARCH_LOCKDEP_SYS_EXIT call lockdep_sys_exit_thunk |
#define ARCH_LOCKDEP_SYS_EXIT_IRQ \ |
# define LOCKDEP_SYS_EXIT call lockdep_sys_exit_thunk |
# define LOCKDEP_SYS_EXIT_IRQ \ |
TRACE_IRQS_ON; \ |
sti; \ |
SAVE_REST; \ |
LOCKDEP_SYS_EXIT; \ |
RESTORE_REST; \ |
call lockdep_sys_exit_thunk; \ |
cli; \ |
TRACE_IRQS_OFF; |
#else |
#define ARCH_LOCKDEP_SYS_EXIT \ |
# define LOCKDEP_SYS_EXIT \ |
pushl %eax; \ |
pushl %ecx; \ |
pushl %edx; \ |
186,24 → 187,12 |
popl %edx; \ |
popl %ecx; \ |
popl %eax; |
#define ARCH_LOCKDEP_SYS_EXIT_IRQ |
# define LOCKDEP_SYS_EXIT_IRQ |
#endif |
#ifdef CONFIG_TRACE_IRQFLAGS |
# define TRACE_IRQS_ON call trace_hardirqs_on_thunk; |
# define TRACE_IRQS_OFF call trace_hardirqs_off_thunk; |
#else |
# define TRACE_IRQS_ON |
# define TRACE_IRQS_OFF |
#endif |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# define LOCKDEP_SYS_EXIT ARCH_LOCKDEP_SYS_EXIT |
# define LOCKDEP_SYS_EXIT_IRQ ARCH_LOCKDEP_SYS_EXIT_IRQ |
# else |
# define LOCKDEP_SYS_EXIT |
# define LOCKDEP_SYS_EXIT_IRQ |
# endif |
#endif /* __ASSEMBLY__ */ |
#endif /* __ASSEMBLY__ */ |
#endif |
/drivers/include/asm/math_emu.h |
---|
2,7 → 2,6 |
#define _ASM_X86_MATH_EMU_H |
#include <asm/ptrace.h> |
#include <asm/vm86.h> |
/* This structure matches the layout of the data saved to the stack |
following a device-not-present interrupt, part of it saved |
10,9 → 9,6 |
*/ |
struct math_emu_info { |
long ___orig_eip; |
union { |
struct pt_regs *regs; |
struct kernel_vm86_regs *vm86; |
}; |
}; |
#endif /* _ASM_X86_MATH_EMU_H */ |
/drivers/include/asm/msr-index.h |
---|
0,0 → 1,694 |
#ifndef _ASM_X86_MSR_INDEX_H |
#define _ASM_X86_MSR_INDEX_H |
/* CPU model specific register (MSR) numbers */ |
/* x86-64 specific MSRs */ |
#define MSR_EFER 0xc0000080 /* extended feature register */ |
#define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target */ |
#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */ |
#define MSR_CSTAR 0xc0000083 /* compat mode SYSCALL target */ |
#define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */ |
#define MSR_FS_BASE 0xc0000100 /* 64bit FS base */ |
#define MSR_GS_BASE 0xc0000101 /* 64bit GS base */ |
#define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow */ |
#define MSR_TSC_AUX 0xc0000103 /* Auxiliary TSC */ |
/* EFER bits: */ |
#define _EFER_SCE 0 /* SYSCALL/SYSRET */ |
#define _EFER_LME 8 /* Long mode enable */ |
#define _EFER_LMA 10 /* Long mode active (read-only) */ |
#define _EFER_NX 11 /* No execute enable */ |
#define _EFER_SVME 12 /* Enable virtualization */ |
#define _EFER_LMSLE 13 /* Long Mode Segment Limit Enable */ |
#define _EFER_FFXSR 14 /* Enable Fast FXSAVE/FXRSTOR */ |
#define EFER_SCE (1<<_EFER_SCE) |
#define EFER_LME (1<<_EFER_LME) |
#define EFER_LMA (1<<_EFER_LMA) |
#define EFER_NX (1<<_EFER_NX) |
#define EFER_SVME (1<<_EFER_SVME) |
#define EFER_LMSLE (1<<_EFER_LMSLE) |
#define EFER_FFXSR (1<<_EFER_FFXSR) |
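The _EFER_* values are bit positions and the EFER_* values the corresponding masks; a typical read-modify-write sketch (rdmsrl/wrmsrl as in <asm/msr.h>):

```c
/* Sketch: set the NX enable bit in EFER if it is not on yet. */
static void enable_nx(void)
{
	u64 efer;

	rdmsrl(MSR_EFER, efer);
	if (!(efer & EFER_NX))
		wrmsrl(MSR_EFER, efer | EFER_NX);
}
```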
/* Intel MSRs. Some also available on other CPUs */ |
#define MSR_IA32_PERFCTR0 0x000000c1 |
#define MSR_IA32_PERFCTR1 0x000000c2 |
#define MSR_FSB_FREQ 0x000000cd |
#define MSR_PLATFORM_INFO 0x000000ce |
#define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2 |
#define NHM_C3_AUTO_DEMOTE (1UL << 25) |
#define NHM_C1_AUTO_DEMOTE (1UL << 26) |
#define ATM_LNC_C6_AUTO_DEMOTE (1UL << 25) |
#define SNB_C1_AUTO_UNDEMOTE (1UL << 27) |
#define SNB_C3_AUTO_UNDEMOTE (1UL << 28) |
#define MSR_MTRRcap 0x000000fe |
#define MSR_IA32_BBL_CR_CTL 0x00000119 |
#define MSR_IA32_BBL_CR_CTL3 0x0000011e |
#define MSR_IA32_SYSENTER_CS 0x00000174 |
#define MSR_IA32_SYSENTER_ESP 0x00000175 |
#define MSR_IA32_SYSENTER_EIP 0x00000176 |
#define MSR_IA32_MCG_CAP 0x00000179 |
#define MSR_IA32_MCG_STATUS 0x0000017a |
#define MSR_IA32_MCG_CTL 0x0000017b |
#define MSR_IA32_MCG_EXT_CTL 0x000004d0 |
#define MSR_OFFCORE_RSP_0 0x000001a6 |
#define MSR_OFFCORE_RSP_1 0x000001a7 |
#define MSR_NHM_TURBO_RATIO_LIMIT 0x000001ad |
#define MSR_IVT_TURBO_RATIO_LIMIT 0x000001ae |
#define MSR_TURBO_RATIO_LIMIT 0x000001ad |
#define MSR_TURBO_RATIO_LIMIT1 0x000001ae |
#define MSR_TURBO_RATIO_LIMIT2 0x000001af |
#define MSR_LBR_SELECT 0x000001c8 |
#define MSR_LBR_TOS 0x000001c9 |
#define MSR_LBR_NHM_FROM 0x00000680 |
#define MSR_LBR_NHM_TO 0x000006c0 |
#define MSR_LBR_CORE_FROM 0x00000040 |
#define MSR_LBR_CORE_TO 0x00000060 |
#define MSR_LBR_INFO_0 0x00000dc0 /* ... 0xddf for _31 */ |
#define LBR_INFO_MISPRED BIT_ULL(63) |
#define LBR_INFO_IN_TX BIT_ULL(62) |
#define LBR_INFO_ABORT BIT_ULL(61) |
#define LBR_INFO_CYCLES 0xffff |
#define MSR_IA32_PEBS_ENABLE 0x000003f1 |
#define MSR_IA32_DS_AREA 0x00000600 |
#define MSR_IA32_PERF_CAPABILITIES 0x00000345 |
#define MSR_PEBS_LD_LAT_THRESHOLD 0x000003f6 |
#define MSR_IA32_RTIT_CTL 0x00000570 |
#define RTIT_CTL_TRACEEN BIT(0) |
#define RTIT_CTL_CYCLEACC BIT(1) |
#define RTIT_CTL_OS BIT(2) |
#define RTIT_CTL_USR BIT(3) |
#define RTIT_CTL_CR3EN BIT(7) |
#define RTIT_CTL_TOPA BIT(8) |
#define RTIT_CTL_MTC_EN BIT(9) |
#define RTIT_CTL_TSC_EN BIT(10) |
#define RTIT_CTL_DISRETC BIT(11) |
#define RTIT_CTL_BRANCH_EN BIT(13) |
#define RTIT_CTL_MTC_RANGE_OFFSET 14 |
#define RTIT_CTL_MTC_RANGE (0x0full << RTIT_CTL_MTC_RANGE_OFFSET) |
#define RTIT_CTL_CYC_THRESH_OFFSET 19 |
#define RTIT_CTL_CYC_THRESH (0x0full << RTIT_CTL_CYC_THRESH_OFFSET) |
#define RTIT_CTL_PSB_FREQ_OFFSET 24 |
#define RTIT_CTL_PSB_FREQ (0x0full << RTIT_CTL_PSB_FREQ_OFFSET) |
#define MSR_IA32_RTIT_STATUS 0x00000571 |
#define RTIT_STATUS_CONTEXTEN BIT(1) |
#define RTIT_STATUS_TRIGGEREN BIT(2) |
#define RTIT_STATUS_ERROR BIT(4) |
#define RTIT_STATUS_STOPPED BIT(5) |
#define MSR_IA32_RTIT_CR3_MATCH 0x00000572 |
#define MSR_IA32_RTIT_OUTPUT_BASE 0x00000560 |
#define MSR_IA32_RTIT_OUTPUT_MASK 0x00000561 |
#define MSR_MTRRfix64K_00000 0x00000250 |
#define MSR_MTRRfix16K_80000 0x00000258 |
#define MSR_MTRRfix16K_A0000 0x00000259 |
#define MSR_MTRRfix4K_C0000 0x00000268 |
#define MSR_MTRRfix4K_C8000 0x00000269 |
#define MSR_MTRRfix4K_D0000 0x0000026a |
#define MSR_MTRRfix4K_D8000 0x0000026b |
#define MSR_MTRRfix4K_E0000 0x0000026c |
#define MSR_MTRRfix4K_E8000 0x0000026d |
#define MSR_MTRRfix4K_F0000 0x0000026e |
#define MSR_MTRRfix4K_F8000 0x0000026f |
#define MSR_MTRRdefType 0x000002ff |
#define MSR_IA32_CR_PAT 0x00000277 |
#define MSR_IA32_DEBUGCTLMSR 0x000001d9 |
#define MSR_IA32_LASTBRANCHFROMIP 0x000001db |
#define MSR_IA32_LASTBRANCHTOIP 0x000001dc |
#define MSR_IA32_LASTINTFROMIP 0x000001dd |
#define MSR_IA32_LASTINTTOIP 0x000001de |
/* DEBUGCTLMSR bits (others vary by model): */ |
#define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */ |
#define DEBUGCTLMSR_BTF (1UL << 1) /* single-step on branches */ |
#define DEBUGCTLMSR_TR (1UL << 6) |
#define DEBUGCTLMSR_BTS (1UL << 7) |
#define DEBUGCTLMSR_BTINT (1UL << 8) |
#define DEBUGCTLMSR_BTS_OFF_OS (1UL << 9) |
#define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10) |
#define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11) |
#define MSR_PEBS_FRONTEND 0x000003f7 |
#define MSR_IA32_POWER_CTL 0x000001fc |
#define MSR_IA32_MC0_CTL 0x00000400 |
#define MSR_IA32_MC0_STATUS 0x00000401 |
#define MSR_IA32_MC0_ADDR 0x00000402 |
#define MSR_IA32_MC0_MISC 0x00000403 |
/* C-state Residency Counters */ |
#define MSR_PKG_C3_RESIDENCY 0x000003f8 |
#define MSR_PKG_C6_RESIDENCY 0x000003f9 |
#define MSR_PKG_C7_RESIDENCY 0x000003fa |
#define MSR_CORE_C3_RESIDENCY 0x000003fc |
#define MSR_CORE_C6_RESIDENCY 0x000003fd |
#define MSR_CORE_C7_RESIDENCY 0x000003fe |
#define MSR_KNL_CORE_C6_RESIDENCY 0x000003ff |
#define MSR_PKG_C2_RESIDENCY 0x0000060d |
#define MSR_PKG_C8_RESIDENCY 0x00000630 |
#define MSR_PKG_C9_RESIDENCY 0x00000631 |
#define MSR_PKG_C10_RESIDENCY 0x00000632 |
/* Run Time Average Power Limiting (RAPL) Interface */ |
#define MSR_RAPL_POWER_UNIT 0x00000606 |
#define MSR_PKG_POWER_LIMIT 0x00000610 |
#define MSR_PKG_ENERGY_STATUS 0x00000611 |
#define MSR_PKG_PERF_STATUS 0x00000613 |
#define MSR_PKG_POWER_INFO 0x00000614 |
#define MSR_DRAM_POWER_LIMIT 0x00000618 |
#define MSR_DRAM_ENERGY_STATUS 0x00000619 |
#define MSR_DRAM_PERF_STATUS 0x0000061b |
#define MSR_DRAM_POWER_INFO 0x0000061c |
#define MSR_PP0_POWER_LIMIT 0x00000638 |
#define MSR_PP0_ENERGY_STATUS 0x00000639 |
#define MSR_PP0_POLICY 0x0000063a |
#define MSR_PP0_PERF_STATUS 0x0000063b |
#define MSR_PP1_POWER_LIMIT 0x00000640 |
#define MSR_PP1_ENERGY_STATUS 0x00000641 |
#define MSR_PP1_POLICY 0x00000642 |
#define MSR_CONFIG_TDP_NOMINAL 0x00000648 |
#define MSR_CONFIG_TDP_LEVEL_1 0x00000649 |
#define MSR_CONFIG_TDP_LEVEL_2 0x0000064A |
#define MSR_CONFIG_TDP_CONTROL 0x0000064B |
#define MSR_TURBO_ACTIVATION_RATIO 0x0000064C |
#define MSR_PKG_WEIGHTED_CORE_C0_RES 0x00000658 |
#define MSR_PKG_ANY_CORE_C0_RES 0x00000659 |
#define MSR_PKG_ANY_GFXE_C0_RES 0x0000065A |
#define MSR_PKG_BOTH_CORE_GFXE_C0_RES 0x0000065B |
#define MSR_CORE_C1_RES 0x00000660 |
#define MSR_CC6_DEMOTION_POLICY_CONFIG 0x00000668 |
#define MSR_MC6_DEMOTION_POLICY_CONFIG 0x00000669 |
#define MSR_CORE_PERF_LIMIT_REASONS 0x00000690 |
#define MSR_GFX_PERF_LIMIT_REASONS 0x000006B0 |
#define MSR_RING_PERF_LIMIT_REASONS 0x000006B1 |
/* Config TDP MSRs */ |
#define MSR_CONFIG_TDP_NOMINAL 0x00000648 |
#define MSR_CONFIG_TDP_LEVEL1 0x00000649 |
#define MSR_CONFIG_TDP_LEVEL2 0x0000064A |
#define MSR_CONFIG_TDP_CONTROL 0x0000064B |
#define MSR_TURBO_ACTIVATION_RATIO 0x0000064C |
/* Hardware P state interface */ |
#define MSR_PPERF 0x0000064e |
#define MSR_PERF_LIMIT_REASONS 0x0000064f |
#define MSR_PM_ENABLE 0x00000770 |
#define MSR_HWP_CAPABILITIES 0x00000771 |
#define MSR_HWP_REQUEST_PKG 0x00000772 |
#define MSR_HWP_INTERRUPT 0x00000773 |
#define MSR_HWP_REQUEST 0x00000774 |
#define MSR_HWP_STATUS 0x00000777 |
/* CPUID.6.EAX */ |
#define HWP_BASE_BIT (1<<7) |
#define HWP_NOTIFICATIONS_BIT (1<<8) |
#define HWP_ACTIVITY_WINDOW_BIT (1<<9) |
#define HWP_ENERGY_PERF_PREFERENCE_BIT (1<<10) |
#define HWP_PACKAGE_LEVEL_REQUEST_BIT (1<<11) |
/* IA32_HWP_CAPABILITIES */ |
#define HWP_HIGHEST_PERF(x) (x & 0xff) |
#define HWP_GUARANTEED_PERF(x) ((x & (0xff << 8)) >>8) |
#define HWP_MOSTEFFICIENT_PERF(x) ((x & (0xff << 16)) >>16) |
#define HWP_LOWEST_PERF(x) ((x & (0xff << 24)) >>24) |
/* IA32_HWP_REQUEST */ |
#define HWP_MIN_PERF(x) (x & 0xff) |
#define HWP_MAX_PERF(x) ((x & 0xff) << 8) |
#define HWP_DESIRED_PERF(x) ((x & 0xff) << 16) |
#define HWP_ENERGY_PERF_PREFERENCE(x) ((x & 0xff) << 24) |
#define HWP_ACTIVITY_WINDOW(x) ((x & 0xff3) << 32) |
#define HWP_PACKAGE_CONTROL(x) ((x & 0x1) << 42) |
/* IA32_HWP_STATUS */ |
#define HWP_GUARANTEED_CHANGE(x) (x & 0x1) |
#define HWP_EXCURSION_TO_MINIMUM(x) (x & 0x4) |
/* IA32_HWP_INTERRUPT */ |
#define HWP_CHANGE_TO_GUARANTEED_INT(x) (x & 0x1) |
#define HWP_EXCURSION_TO_MINIMUM_INT(x) (x & 0x2) |
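A decoding sketch for the capability accessors above (rdmsrl/pr_info as in <asm/msr.h> and <linux/printk.h>):

```c
/* Sketch: read and report the HWP performance range. */
static void report_hwp_caps(void)
{
	u64 cap;

	rdmsrl(MSR_HWP_CAPABILITIES, cap);
	pr_info("HWP: highest=%llu guaranteed=%llu efficient=%llu lowest=%llu\n",
		HWP_HIGHEST_PERF(cap), HWP_GUARANTEED_PERF(cap),
		HWP_MOSTEFFICIENT_PERF(cap), HWP_LOWEST_PERF(cap));
}
```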
#define MSR_AMD64_MC0_MASK 0xc0010044 |
#define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x)) |
#define MSR_IA32_MCx_STATUS(x) (MSR_IA32_MC0_STATUS + 4*(x)) |
#define MSR_IA32_MCx_ADDR(x) (MSR_IA32_MC0_ADDR + 4*(x)) |
#define MSR_IA32_MCx_MISC(x) (MSR_IA32_MC0_MISC + 4*(x)) |
#define MSR_AMD64_MCx_MASK(x) (MSR_AMD64_MC0_MASK + (x)) |
/* These are consecutive and not in the normal block of four MCE bank registers */ |
#define MSR_IA32_MC0_CTL2 0x00000280 |
#define MSR_IA32_MCx_CTL2(x) (MSR_IA32_MC0_CTL2 + (x)) |
#define MSR_P6_PERFCTR0 0x000000c1 |
#define MSR_P6_PERFCTR1 0x000000c2 |
#define MSR_P6_EVNTSEL0 0x00000186 |
#define MSR_P6_EVNTSEL1 0x00000187 |
#define MSR_KNC_PERFCTR0 0x00000020 |
#define MSR_KNC_PERFCTR1 0x00000021 |
#define MSR_KNC_EVNTSEL0 0x00000028 |
#define MSR_KNC_EVNTSEL1 0x00000029 |
/* Alternative perfctr range with full access. */ |
#define MSR_IA32_PMC0 0x000004c1 |
/* AMD64 MSRs. Not complete. See the architecture manual for a more |
complete list. */ |
#define MSR_AMD64_PATCH_LEVEL 0x0000008b |
#define MSR_AMD64_TSC_RATIO 0xc0000104 |
#define MSR_AMD64_NB_CFG 0xc001001f |
#define MSR_AMD64_PATCH_LOADER 0xc0010020 |
#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140 |
#define MSR_AMD64_OSVW_STATUS 0xc0010141 |
#define MSR_AMD64_LS_CFG 0xc0011020 |
#define MSR_AMD64_DC_CFG 0xc0011022 |
#define MSR_AMD64_BU_CFG2 0xc001102a |
#define MSR_AMD64_IBSFETCHCTL 0xc0011030 |
#define MSR_AMD64_IBSFETCHLINAD 0xc0011031 |
#define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032 |
#define MSR_AMD64_IBSFETCH_REG_COUNT 3 |
#define MSR_AMD64_IBSFETCH_REG_MASK ((1UL<<MSR_AMD64_IBSFETCH_REG_COUNT)-1) |
#define MSR_AMD64_IBSOPCTL 0xc0011033 |
#define MSR_AMD64_IBSOPRIP 0xc0011034 |
#define MSR_AMD64_IBSOPDATA 0xc0011035 |
#define MSR_AMD64_IBSOPDATA2 0xc0011036 |
#define MSR_AMD64_IBSOPDATA3 0xc0011037 |
#define MSR_AMD64_IBSDCLINAD 0xc0011038 |
#define MSR_AMD64_IBSDCPHYSAD 0xc0011039 |
#define MSR_AMD64_IBSOP_REG_COUNT 7 |
#define MSR_AMD64_IBSOP_REG_MASK ((1UL<<MSR_AMD64_IBSOP_REG_COUNT)-1) |
#define MSR_AMD64_IBSCTL 0xc001103a |
#define MSR_AMD64_IBSBRTARGET 0xc001103b |
#define MSR_AMD64_IBSOPDATA4 0xc001103d |
#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */ |
/* Fam 16h MSRs */ |
#define MSR_F16H_L2I_PERF_CTL 0xc0010230 |
#define MSR_F16H_L2I_PERF_CTR 0xc0010231 |
#define MSR_F16H_DR1_ADDR_MASK 0xc0011019 |
#define MSR_F16H_DR2_ADDR_MASK 0xc001101a |
#define MSR_F16H_DR3_ADDR_MASK 0xc001101b |
#define MSR_F16H_DR0_ADDR_MASK 0xc0011027 |
/* Fam 15h MSRs */ |
#define MSR_F15H_PERF_CTL 0xc0010200 |
#define MSR_F15H_PERF_CTR 0xc0010201 |
#define MSR_F15H_NB_PERF_CTL 0xc0010240 |
#define MSR_F15H_NB_PERF_CTR 0xc0010241 |
/* Fam 10h MSRs */ |
#define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058 |
#define FAM10H_MMIO_CONF_ENABLE (1<<0) |
#define FAM10H_MMIO_CONF_BUSRANGE_MASK 0xf |
#define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2 |
#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL |
#define FAM10H_MMIO_CONF_BASE_SHIFT 20 |
#define MSR_FAM10H_NODE_ID 0xc001100c |
/* K8 MSRs */ |
#define MSR_K8_TOP_MEM1 0xc001001a |
#define MSR_K8_TOP_MEM2 0xc001001d |
#define MSR_K8_SYSCFG 0xc0010010 |
#define MSR_K8_INT_PENDING_MSG 0xc0010055 |
/* C1E active bits in int pending message */ |
#define K8_INTP_C1E_ACTIVE_MASK 0x18000000 |
#define MSR_K8_TSEG_ADDR 0xc0010112 |
#define MSR_K8_TSEG_MASK 0xc0010113 |
#define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */ |
#define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */ |
#define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */ |
/* K7 MSRs */ |
#define MSR_K7_EVNTSEL0 0xc0010000 |
#define MSR_K7_PERFCTR0 0xc0010004 |
#define MSR_K7_EVNTSEL1 0xc0010001 |
#define MSR_K7_PERFCTR1 0xc0010005 |
#define MSR_K7_EVNTSEL2 0xc0010002 |
#define MSR_K7_PERFCTR2 0xc0010006 |
#define MSR_K7_EVNTSEL3 0xc0010003 |
#define MSR_K7_PERFCTR3 0xc0010007 |
#define MSR_K7_CLK_CTL 0xc001001b |
#define MSR_K7_HWCR 0xc0010015 |
#define MSR_K7_FID_VID_CTL 0xc0010041 |
#define MSR_K7_FID_VID_STATUS 0xc0010042 |
/* K6 MSRs */ |
#define MSR_K6_WHCR 0xc0000082 |
#define MSR_K6_UWCCR 0xc0000085 |
#define MSR_K6_EPMR 0xc0000086 |
#define MSR_K6_PSOR 0xc0000087 |
#define MSR_K6_PFIR 0xc0000088 |
/* Centaur-Hauls/IDT defined MSRs. */ |
#define MSR_IDT_FCR1 0x00000107 |
#define MSR_IDT_FCR2 0x00000108 |
#define MSR_IDT_FCR3 0x00000109 |
#define MSR_IDT_FCR4 0x0000010a |
#define MSR_IDT_MCR0 0x00000110 |
#define MSR_IDT_MCR1 0x00000111 |
#define MSR_IDT_MCR2 0x00000112 |
#define MSR_IDT_MCR3 0x00000113 |
#define MSR_IDT_MCR4 0x00000114 |
#define MSR_IDT_MCR5 0x00000115 |
#define MSR_IDT_MCR6 0x00000116 |
#define MSR_IDT_MCR7 0x00000117 |
#define MSR_IDT_MCR_CTRL 0x00000120 |
/* VIA Cyrix defined MSRs */ |
#define MSR_VIA_FCR 0x00001107 |
#define MSR_VIA_LONGHAUL 0x0000110a |
#define MSR_VIA_RNG 0x0000110b |
#define MSR_VIA_BCR2 0x00001147 |
/* Transmeta defined MSRs */ |
#define MSR_TMTA_LONGRUN_CTRL 0x80868010 |
#define MSR_TMTA_LONGRUN_FLAGS 0x80868011 |
#define MSR_TMTA_LRTI_READOUT 0x80868018 |
#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a |
/* Intel defined MSRs. */ |
#define MSR_IA32_P5_MC_ADDR 0x00000000 |
#define MSR_IA32_P5_MC_TYPE 0x00000001 |
#define MSR_IA32_TSC 0x00000010 |
#define MSR_IA32_PLATFORM_ID 0x00000017 |
#define MSR_IA32_EBL_CR_POWERON 0x0000002a |
#define MSR_EBC_FREQUENCY_ID 0x0000002c |
#define MSR_SMI_COUNT 0x00000034 |
#define MSR_IA32_FEATURE_CONTROL 0x0000003a |
#define MSR_IA32_TSC_ADJUST 0x0000003b |
#define MSR_IA32_BNDCFGS 0x00000d90 |
#define MSR_IA32_XSS 0x00000da0 |
#define FEATURE_CONTROL_LOCKED (1<<0) |
#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1<<1) |
#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2) |
#define FEATURE_CONTROL_LMCE (1<<20) |
#define MSR_IA32_APICBASE 0x0000001b |
#define MSR_IA32_APICBASE_BSP (1<<8) |
#define MSR_IA32_APICBASE_ENABLE (1<<11) |
#define MSR_IA32_APICBASE_BASE (0xfffff<<12) |
#define MSR_IA32_TSCDEADLINE 0x000006e0 |
#define MSR_IA32_UCODE_WRITE 0x00000079 |
#define MSR_IA32_UCODE_REV 0x0000008b |
#define MSR_IA32_SMM_MONITOR_CTL 0x0000009b |
#define MSR_IA32_SMBASE 0x0000009e |
#define MSR_IA32_PERF_STATUS 0x00000198 |
#define MSR_IA32_PERF_CTL 0x00000199 |
#define INTEL_PERF_CTL_MASK 0xffff |
#define MSR_AMD_PSTATE_DEF_BASE 0xc0010064 |
#define MSR_AMD_PERF_STATUS 0xc0010063 |
#define MSR_AMD_PERF_CTL 0xc0010062 |
#define MSR_IA32_MPERF 0x000000e7 |
#define MSR_IA32_APERF 0x000000e8 |
#define MSR_IA32_THERM_CONTROL 0x0000019a |
#define MSR_IA32_THERM_INTERRUPT 0x0000019b |
#define THERM_INT_HIGH_ENABLE (1 << 0) |
#define THERM_INT_LOW_ENABLE (1 << 1) |
#define THERM_INT_PLN_ENABLE (1 << 24) |
#define MSR_IA32_THERM_STATUS 0x0000019c |
#define THERM_STATUS_PROCHOT (1 << 0) |
#define THERM_STATUS_POWER_LIMIT (1 << 10) |
#define MSR_THERM2_CTL 0x0000019d |
#define MSR_THERM2_CTL_TM_SELECT (1ULL << 16) |
#define MSR_IA32_MISC_ENABLE 0x000001a0 |
#define MSR_IA32_TEMPERATURE_TARGET 0x000001a2 |
#define MSR_MISC_PWR_MGMT 0x000001aa |
#define MSR_IA32_ENERGY_PERF_BIAS 0x000001b0 |
#define ENERGY_PERF_BIAS_PERFORMANCE 0 |
#define ENERGY_PERF_BIAS_NORMAL 6 |
#define ENERGY_PERF_BIAS_POWERSAVE 15 |
#define MSR_IA32_PACKAGE_THERM_STATUS 0x000001b1 |
#define PACKAGE_THERM_STATUS_PROCHOT (1 << 0) |
#define PACKAGE_THERM_STATUS_POWER_LIMIT (1 << 10) |
#define MSR_IA32_PACKAGE_THERM_INTERRUPT 0x000001b2 |
#define PACKAGE_THERM_INT_HIGH_ENABLE (1 << 0) |
#define PACKAGE_THERM_INT_LOW_ENABLE (1 << 1) |
#define PACKAGE_THERM_INT_PLN_ENABLE (1 << 24) |
/* Thermal Thresholds Support */ |
#define THERM_INT_THRESHOLD0_ENABLE (1 << 15) |
#define THERM_SHIFT_THRESHOLD0 8 |
#define THERM_MASK_THRESHOLD0 (0x7f << THERM_SHIFT_THRESHOLD0) |
#define THERM_INT_THRESHOLD1_ENABLE (1 << 23) |
#define THERM_SHIFT_THRESHOLD1 16 |
#define THERM_MASK_THRESHOLD1 (0x7f << THERM_SHIFT_THRESHOLD1) |
#define THERM_STATUS_THRESHOLD0 (1 << 6) |
#define THERM_LOG_THRESHOLD0 (1 << 7) |
#define THERM_STATUS_THRESHOLD1 (1 << 8) |
#define THERM_LOG_THRESHOLD1 (1 << 9) |
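/* |
* Sketch, not from this patch: the shift/mask pairs above pack two 7-bit |
* threshold fields into MSR_IA32_THERM_INTERRUPT. Programming threshold 0 |
* with a hypothetical helper (rdmsrl()/wrmsrl() are the accessors defined |
* in asm/msr.h below) would look like: |
*/ |
static inline void set_therm_threshold0(u64 temp) |
{ |
u64 val; |
rdmsrl(MSR_IA32_THERM_INTERRUPT, val); |
val &= ~(u64)THERM_MASK_THRESHOLD0; /* clear the old 7-bit field */ |
val |= (temp << THERM_SHIFT_THRESHOLD0) & THERM_MASK_THRESHOLD0; |
val |= THERM_INT_THRESHOLD0_ENABLE; /* arm the threshold interrupt */ |
wrmsrl(MSR_IA32_THERM_INTERRUPT, val); |
} |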
/* MISC_ENABLE bits: architectural */ |
#define MSR_IA32_MISC_ENABLE_FAST_STRING_BIT 0 |
#define MSR_IA32_MISC_ENABLE_FAST_STRING (1ULL << MSR_IA32_MISC_ENABLE_FAST_STRING_BIT) |
#define MSR_IA32_MISC_ENABLE_TCC_BIT 1 |
#define MSR_IA32_MISC_ENABLE_TCC (1ULL << MSR_IA32_MISC_ENABLE_TCC_BIT) |
#define MSR_IA32_MISC_ENABLE_EMON_BIT 7 |
#define MSR_IA32_MISC_ENABLE_EMON (1ULL << MSR_IA32_MISC_ENABLE_EMON_BIT) |
#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL_BIT 11 |
#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL (1ULL << MSR_IA32_MISC_ENABLE_BTS_UNAVAIL_BIT) |
#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL_BIT 12 |
#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL (1ULL << MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL_BIT) |
#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP_BIT 16 |
#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP (1ULL << MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP_BIT) |
#define MSR_IA32_MISC_ENABLE_MWAIT_BIT 18 |
#define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << MSR_IA32_MISC_ENABLE_MWAIT_BIT) |
#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT 22 |
#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID (1ULL << MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) |
#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT 23 |
#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT) |
#define MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT 34 |
#define MSR_IA32_MISC_ENABLE_XD_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT) |
/* MISC_ENABLE bits: model-specific, meaning may vary from core to core */ |
#define MSR_IA32_MISC_ENABLE_X87_COMPAT_BIT 2 |
#define MSR_IA32_MISC_ENABLE_X87_COMPAT (1ULL << MSR_IA32_MISC_ENABLE_X87_COMPAT_BIT) |
#define MSR_IA32_MISC_ENABLE_TM1_BIT 3 |
#define MSR_IA32_MISC_ENABLE_TM1 (1ULL << MSR_IA32_MISC_ENABLE_TM1_BIT) |
#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE_BIT 4 |
#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE_BIT) |
#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE_BIT 6 |
#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE_BIT) |
#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK_BIT 8 |
#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK (1ULL << MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK_BIT) |
#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT 9 |
#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) |
#define MSR_IA32_MISC_ENABLE_FERR_BIT 10 |
#define MSR_IA32_MISC_ENABLE_FERR (1ULL << MSR_IA32_MISC_ENABLE_FERR_BIT) |
#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX_BIT 10 |
#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX (1ULL << MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX_BIT) |
#define MSR_IA32_MISC_ENABLE_TM2_BIT 13 |
#define MSR_IA32_MISC_ENABLE_TM2 (1ULL << MSR_IA32_MISC_ENABLE_TM2_BIT) |
#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE_BIT 19 |
#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE_BIT) |
#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK_BIT 20 |
#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK (1ULL << MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK_BIT) |
#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT_BIT 24 |
#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT (1ULL << MSR_IA32_MISC_ENABLE_L1D_CONTEXT_BIT) |
#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE_BIT 37 |
#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE_BIT) |
#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT 38 |
#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT) |
#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT 39 |
#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT) |
#define MSR_IA32_TSC_DEADLINE 0x000006E0 |
/* P4/Xeon+ specific */ |
#define MSR_IA32_MCG_EAX 0x00000180 |
#define MSR_IA32_MCG_EBX 0x00000181 |
#define MSR_IA32_MCG_ECX 0x00000182 |
#define MSR_IA32_MCG_EDX 0x00000183 |
#define MSR_IA32_MCG_ESI 0x00000184 |
#define MSR_IA32_MCG_EDI 0x00000185 |
#define MSR_IA32_MCG_EBP 0x00000186 |
#define MSR_IA32_MCG_ESP 0x00000187 |
#define MSR_IA32_MCG_EFLAGS 0x00000188 |
#define MSR_IA32_MCG_EIP 0x00000189 |
#define MSR_IA32_MCG_RESERVED 0x0000018a |
/* Pentium IV performance counter MSRs */ |
#define MSR_P4_BPU_PERFCTR0 0x00000300 |
#define MSR_P4_BPU_PERFCTR1 0x00000301 |
#define MSR_P4_BPU_PERFCTR2 0x00000302 |
#define MSR_P4_BPU_PERFCTR3 0x00000303 |
#define MSR_P4_MS_PERFCTR0 0x00000304 |
#define MSR_P4_MS_PERFCTR1 0x00000305 |
#define MSR_P4_MS_PERFCTR2 0x00000306 |
#define MSR_P4_MS_PERFCTR3 0x00000307 |
#define MSR_P4_FLAME_PERFCTR0 0x00000308 |
#define MSR_P4_FLAME_PERFCTR1 0x00000309 |
#define MSR_P4_FLAME_PERFCTR2 0x0000030a |
#define MSR_P4_FLAME_PERFCTR3 0x0000030b |
#define MSR_P4_IQ_PERFCTR0 0x0000030c |
#define MSR_P4_IQ_PERFCTR1 0x0000030d |
#define MSR_P4_IQ_PERFCTR2 0x0000030e |
#define MSR_P4_IQ_PERFCTR3 0x0000030f |
#define MSR_P4_IQ_PERFCTR4 0x00000310 |
#define MSR_P4_IQ_PERFCTR5 0x00000311 |
#define MSR_P4_BPU_CCCR0 0x00000360 |
#define MSR_P4_BPU_CCCR1 0x00000361 |
#define MSR_P4_BPU_CCCR2 0x00000362 |
#define MSR_P4_BPU_CCCR3 0x00000363 |
#define MSR_P4_MS_CCCR0 0x00000364 |
#define MSR_P4_MS_CCCR1 0x00000365 |
#define MSR_P4_MS_CCCR2 0x00000366 |
#define MSR_P4_MS_CCCR3 0x00000367 |
#define MSR_P4_FLAME_CCCR0 0x00000368 |
#define MSR_P4_FLAME_CCCR1 0x00000369 |
#define MSR_P4_FLAME_CCCR2 0x0000036a |
#define MSR_P4_FLAME_CCCR3 0x0000036b |
#define MSR_P4_IQ_CCCR0 0x0000036c |
#define MSR_P4_IQ_CCCR1 0x0000036d |
#define MSR_P4_IQ_CCCR2 0x0000036e |
#define MSR_P4_IQ_CCCR3 0x0000036f |
#define MSR_P4_IQ_CCCR4 0x00000370 |
#define MSR_P4_IQ_CCCR5 0x00000371 |
#define MSR_P4_ALF_ESCR0 0x000003ca |
#define MSR_P4_ALF_ESCR1 0x000003cb |
#define MSR_P4_BPU_ESCR0 0x000003b2 |
#define MSR_P4_BPU_ESCR1 0x000003b3 |
#define MSR_P4_BSU_ESCR0 0x000003a0 |
#define MSR_P4_BSU_ESCR1 0x000003a1 |
#define MSR_P4_CRU_ESCR0 0x000003b8 |
#define MSR_P4_CRU_ESCR1 0x000003b9 |
#define MSR_P4_CRU_ESCR2 0x000003cc |
#define MSR_P4_CRU_ESCR3 0x000003cd |
#define MSR_P4_CRU_ESCR4 0x000003e0 |
#define MSR_P4_CRU_ESCR5 0x000003e1 |
#define MSR_P4_DAC_ESCR0 0x000003a8 |
#define MSR_P4_DAC_ESCR1 0x000003a9 |
#define MSR_P4_FIRM_ESCR0 0x000003a4 |
#define MSR_P4_FIRM_ESCR1 0x000003a5 |
#define MSR_P4_FLAME_ESCR0 0x000003a6 |
#define MSR_P4_FLAME_ESCR1 0x000003a7 |
#define MSR_P4_FSB_ESCR0 0x000003a2 |
#define MSR_P4_FSB_ESCR1 0x000003a3 |
#define MSR_P4_IQ_ESCR0 0x000003ba |
#define MSR_P4_IQ_ESCR1 0x000003bb |
#define MSR_P4_IS_ESCR0 0x000003b4 |
#define MSR_P4_IS_ESCR1 0x000003b5 |
#define MSR_P4_ITLB_ESCR0 0x000003b6 |
#define MSR_P4_ITLB_ESCR1 0x000003b7 |
#define MSR_P4_IX_ESCR0 0x000003c8 |
#define MSR_P4_IX_ESCR1 0x000003c9 |
#define MSR_P4_MOB_ESCR0 0x000003aa |
#define MSR_P4_MOB_ESCR1 0x000003ab |
#define MSR_P4_MS_ESCR0 0x000003c0 |
#define MSR_P4_MS_ESCR1 0x000003c1 |
#define MSR_P4_PMH_ESCR0 0x000003ac |
#define MSR_P4_PMH_ESCR1 0x000003ad |
#define MSR_P4_RAT_ESCR0 0x000003bc |
#define MSR_P4_RAT_ESCR1 0x000003bd |
#define MSR_P4_SAAT_ESCR0 0x000003ae |
#define MSR_P4_SAAT_ESCR1 0x000003af |
#define MSR_P4_SSU_ESCR0 0x000003be |
#define MSR_P4_SSU_ESCR1 0x000003bf /* guess: not in manual */ |
#define MSR_P4_TBPU_ESCR0 0x000003c2 |
#define MSR_P4_TBPU_ESCR1 0x000003c3 |
#define MSR_P4_TC_ESCR0 0x000003c4 |
#define MSR_P4_TC_ESCR1 0x000003c5 |
#define MSR_P4_U2L_ESCR0 0x000003b0 |
#define MSR_P4_U2L_ESCR1 0x000003b1 |
#define MSR_P4_PEBS_MATRIX_VERT 0x000003f2 |
/* Intel Core-based CPU performance counters */ |
#define MSR_CORE_PERF_FIXED_CTR0 0x00000309 |
#define MSR_CORE_PERF_FIXED_CTR1 0x0000030a |
#define MSR_CORE_PERF_FIXED_CTR2 0x0000030b |
#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x0000038d |
#define MSR_CORE_PERF_GLOBAL_STATUS 0x0000038e |
#define MSR_CORE_PERF_GLOBAL_CTRL 0x0000038f |
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x00000390 |
/* Geode defined MSRs */ |
#define MSR_GEODE_BUSCONT_CONF0 0x00001900 |
/* Intel VT MSRs */ |
#define MSR_IA32_VMX_BASIC 0x00000480 |
#define MSR_IA32_VMX_PINBASED_CTLS 0x00000481 |
#define MSR_IA32_VMX_PROCBASED_CTLS 0x00000482 |
#define MSR_IA32_VMX_EXIT_CTLS 0x00000483 |
#define MSR_IA32_VMX_ENTRY_CTLS 0x00000484 |
#define MSR_IA32_VMX_MISC 0x00000485 |
#define MSR_IA32_VMX_CR0_FIXED0 0x00000486 |
#define MSR_IA32_VMX_CR0_FIXED1 0x00000487 |
#define MSR_IA32_VMX_CR4_FIXED0 0x00000488 |
#define MSR_IA32_VMX_CR4_FIXED1 0x00000489 |
#define MSR_IA32_VMX_VMCS_ENUM 0x0000048a |
#define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b |
#define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c |
#define MSR_IA32_VMX_TRUE_PINBASED_CTLS 0x0000048d |
#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x0000048e |
#define MSR_IA32_VMX_TRUE_EXIT_CTLS 0x0000048f |
#define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x00000490 |
#define MSR_IA32_VMX_VMFUNC 0x00000491 |
/* VMX_BASIC bits and bitmasks */ |
#define VMX_BASIC_VMCS_SIZE_SHIFT 32 |
#define VMX_BASIC_TRUE_CTLS (1ULL << 55) |
#define VMX_BASIC_64 0x0001000000000000LLU |
#define VMX_BASIC_MEM_TYPE_SHIFT 50 |
#define VMX_BASIC_MEM_TYPE_MASK 0x003c000000000000LLU |
#define VMX_BASIC_MEM_TYPE_WB 6LLU |
#define VMX_BASIC_INOUT 0x0040000000000000LLU |
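/* |
* Sketch, not from this patch: decoding MSR_IA32_VMX_BASIC with the masks |
* above. The 13-bit width of the size field (bits 44:32) is an assumption |
* taken from the SDM layout rather than from this header. |
*/ |
static inline u32 vmcs_region_size(void) /* hypothetical helper */ |
{ |
u64 basic; |
rdmsrl(MSR_IA32_VMX_BASIC, basic); |
return (basic >> VMX_BASIC_VMCS_SIZE_SHIFT) & 0x1fff; /* bits 44:32 */ |
} |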
/* MSR_IA32_VMX_MISC bits */ |
#define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29) |
#define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE 0x1F |
/* AMD-V MSRs */ |
#define MSR_VM_CR 0xc0010114 |
#define MSR_VM_IGNNE 0xc0010115 |
#define MSR_VM_HSAVE_PA 0xc0010117 |
#endif /* _ASM_X86_MSR_INDEX_H */ |
/drivers/include/asm/msr.h |
---|
1,7 → 1,7 |
#ifndef _ASM_X86_MSR_H |
#define _ASM_X86_MSR_H |
#include <uapi/asm/msr.h> |
#include "msr-index.h" |
#ifndef __ASSEMBLY__ |
8,6 → 8,7 |
#include <asm/asm.h> |
#include <asm/errno.h> |
#include <asm/cpumask.h> |
#include <uapi/asm/msr.h> |
struct msr { |
union { |
46,14 → 47,13 |
* it means rax *or* rdx. |
*/ |
#ifdef CONFIG_X86_64 |
#define DECLARE_ARGS(val, low, high) unsigned low, high |
#define EAX_EDX_VAL(val, low, high) ((low) | ((u64)(high) << 32)) |
#define EAX_EDX_ARGS(val, low, high) "a" (low), "d" (high) |
/* Using 64-bit values saves one instruction clearing the high half of low */ |
#define DECLARE_ARGS(val, low, high) unsigned long low, high |
#define EAX_EDX_VAL(val, low, high) ((low) | (high) << 32) |
#define EAX_EDX_RET(val, low, high) "=a" (low), "=d" (high) |
#else |
#define DECLARE_ARGS(val, low, high) unsigned long long val |
#define EAX_EDX_VAL(val, low, high) (val) |
#define EAX_EDX_ARGS(val, low, high) "A" (val) |
#define EAX_EDX_RET(val, low, high) "=A" (val) |
#endif |
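/* |
* A sketch of how these helpers compose, mirroring the native_read_msr() |
* pattern used in this file (example_rdmsr() itself is illustrative, not |
* a real kernel function). On 64-bit, the unsigned long low/high let |
* EAX_EDX_VAL() merge edx:eax without an extra instruction to clear the |
* high half of low. |
*/ |
static __always_inline unsigned long long example_rdmsr(unsigned int msr) |
{ |
DECLARE_ARGS(val, low, high); |
asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr)); |
return EAX_EDX_VAL(val, low, high); |
} |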
105,12 → 105,19 |
return err; |
} |
extern unsigned long long native_read_tsc(void); |
extern int rdmsr_safe_regs(u32 regs[8]); |
extern int wrmsr_safe_regs(u32 regs[8]); |
static __always_inline unsigned long long __native_read_tsc(void) |
/** |
* rdtsc() - returns the current TSC without ordering constraints |
* |
* rdtsc() returns the result of RDTSC as a 64-bit integer. The |
* only ordering constraint it supplies is the ordering implied by |
* "asm volatile": it will put the RDTSC in the place you expect. The |
* CPU can and will speculatively execute that RDTSC, though, so the |
* results can be non-monotonic if compared on different CPUs. |
*/ |
static __always_inline unsigned long long rdtsc(void) |
{ |
DECLARE_ARGS(val, low, high); |
152,8 → 159,10 |
#define rdmsrl(msr, val) \ |
((val) = native_read_msr((msr))) |
#define wrmsrl(msr, val) \ |
native_write_msr((msr), (u32)((u64)(val)), (u32)((u64)(val) >> 32)) |
static inline void wrmsrl(unsigned msr, u64 val) |
{ |
native_write_msr(msr, (u32)val, (u32)(val >> 32)); |
} |
/* wrmsr with exception handling */ |
static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high) |
179,12 → 188,6 |
return err; |
} |
#define rdtscl(low) \ |
((low) = (u32)__native_read_tsc()) |
#define rdtscll(val) \ |
((val) = __native_read_tsc()) |
#define rdpmc(counter, low, high) \ |
do { \ |
u64 _l = native_read_pmc((counter)); \ |
194,19 → 197,15 |
#define rdpmcl(counter, val) ((val) = native_read_pmc(counter)) |
#define rdtscp(low, high, aux) \ |
do { \ |
unsigned long long _val = native_read_tscp(&(aux)); \ |
(low) = (u32)_val; \ |
(high) = (u32)(_val >> 32); \ |
} while (0) |
#define rdtscpll(val, aux) (val) = native_read_tscp(&(aux)) |
#endif /* !CONFIG_PARAVIRT */ |
#define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val), \ |
(u32)((val) >> 32)) |
/* |
* 64-bit version of wrmsr_safe(): |
*/ |
static inline int wrmsrl_safe(u32 msr, u64 val) |
{ |
return wrmsr_safe(msr, (u32)val, (u32)(val >> 32)); |
} |
#define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high)) |
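/* |
* Usage sketch (hypothetical snippet, not from this patch): rdtsc() deltas |
* are only meaningful on one CPU, per the comment above, and wrmsrl_safe() |
* returns non-zero if the WRMSR faults, so it doubles as an MSR probe. |
* Note the probe below also clears any prior TSC adjustment. |
*/ |
static inline void msr_usage_example(void) |
{ |
unsigned long long t0 = rdtsc(); |
/* ... code being timed, pinned to one CPU ... */ |
unsigned long long cycles = rdtsc() - t0; |
(void)cycles; |
if (wrmsrl_safe(MSR_IA32_TSC_ADJUST, 0)) |
; /* MSR absent: the write took the fault path */ |
} |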
/drivers/include/asm/page_types.h |
---|
0,0 → 1,75 |
#ifndef _ASM_X86_PAGE_DEFS_H |
#define _ASM_X86_PAGE_DEFS_H |
#include <linux/const.h> |
#include <linux/types.h> |
/* PAGE_SHIFT determines the page size */ |
#define PAGE_SHIFT 12 |
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) |
#define PAGE_MASK (~(PAGE_SIZE-1)) |
#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT) |
#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1)) |
#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) |
#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) |
#define __PHYSICAL_MASK ((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1)) |
#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1) |
/* Cast *PAGE_MASK to a signed type so that it is sign-extended if |
virtual addresses are 32 bits but physical addresses are larger |
(i.e., 32-bit PAE). */ |
#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK) |
#define PHYSICAL_PMD_PAGE_MASK (((signed long)PMD_PAGE_MASK) & __PHYSICAL_MASK) |
#define PHYSICAL_PUD_PAGE_MASK (((signed long)PUD_PAGE_MASK) & __PHYSICAL_MASK) |
#define HPAGE_SHIFT PMD_SHIFT |
#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT) |
#define HPAGE_MASK (~(HPAGE_SIZE - 1)) |
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) |
#define HUGE_MAX_HSTATE 2 |
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) |
#define VM_DATA_DEFAULT_FLAGS \ |
(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \ |
VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) |
#define __PHYSICAL_START ALIGN(CONFIG_PHYSICAL_START, \ |
CONFIG_PHYSICAL_ALIGN) |
#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START) |
#ifdef CONFIG_X86_64 |
#include <asm/page_64_types.h> |
#define IOREMAP_MAX_ORDER (PUD_SHIFT) |
#else |
#include <asm/page_32_types.h> |
#define IOREMAP_MAX_ORDER (PMD_SHIFT) |
#endif /* CONFIG_X86_64 */ |
#ifndef __ASSEMBLY__ |
extern int devmem_is_allowed(unsigned long pagenr); |
extern unsigned long max_low_pfn_mapped; |
extern unsigned long max_pfn_mapped; |
static inline phys_addr_t get_max_mapped(void) |
{ |
return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT; |
} |
bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn); |
extern unsigned long init_memory_mapping(unsigned long start, |
unsigned long end); |
extern void initmem_init(void); |
#endif /* !__ASSEMBLY__ */ |
#endif /* _ASM_X86_PAGE_DEFS_H */ |
/drivers/include/asm/pgtable-2level.h |
---|
62,44 → 62,8 |
return ((value >> rightshift) & mask) << leftshift; |
} |
/* |
* Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken, |
* split up the 29 bits of offset into this range. |
*/ |
#define PTE_FILE_MAX_BITS 29 |
#define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1) |
#define PTE_FILE_SHIFT2 (_PAGE_BIT_FILE + 1) |
#define PTE_FILE_SHIFT3 (_PAGE_BIT_PROTNONE + 1) |
#define PTE_FILE_BITS1 (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1) |
#define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1) |
#define PTE_FILE_MASK1 ((1U << PTE_FILE_BITS1) - 1) |
#define PTE_FILE_MASK2 ((1U << PTE_FILE_BITS2) - 1) |
#define PTE_FILE_LSHIFT2 (PTE_FILE_BITS1) |
#define PTE_FILE_LSHIFT3 (PTE_FILE_BITS1 + PTE_FILE_BITS2) |
static __always_inline pgoff_t pte_to_pgoff(pte_t pte) |
{ |
return (pgoff_t) |
(pte_bitop(pte.pte_low, PTE_FILE_SHIFT1, PTE_FILE_MASK1, 0) + |
pte_bitop(pte.pte_low, PTE_FILE_SHIFT2, PTE_FILE_MASK2, PTE_FILE_LSHIFT2) + |
pte_bitop(pte.pte_low, PTE_FILE_SHIFT3, -1UL, PTE_FILE_LSHIFT3)); |
} |
static __always_inline pte_t pgoff_to_pte(pgoff_t off) |
{ |
return (pte_t){ |
.pte_low = |
pte_bitop(off, 0, PTE_FILE_MASK1, PTE_FILE_SHIFT1) + |
pte_bitop(off, PTE_FILE_LSHIFT2, PTE_FILE_MASK2, PTE_FILE_SHIFT2) + |
pte_bitop(off, PTE_FILE_LSHIFT3, -1UL, PTE_FILE_SHIFT3) + |
_PAGE_FILE, |
}; |
} |
/* Encode and de-code a swap entry */ |
#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1) |
#define SWP_TYPE_BITS 5 |
#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1) |
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS) |
/drivers/include/asm/pgtable-2level_types.h |
---|
17,7 → 17,6 |
#endif /* !__ASSEMBLY__ */ |
#define SHARED_KERNEL_PMD 0 |
#define PAGETABLE_LEVELS 2 |
/* |
* traditional i386 two-level paging structure: |
/drivers/include/asm/pgtable.h |
---|
19,7 → 19,14 |
#include <asm/x86_init.h> |
void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd); |
void ptdump_walk_pgd_level_checkwx(void); |
#ifdef CONFIG_DEBUG_WX |
#define debug_checkwx() ptdump_walk_pgd_level_checkwx() |
#else |
#define debug_checkwx() do { } while (0) |
#endif |
/* |
* ZERO_PAGE is a global shared page that is always zero: used |
* for zero-mapped memory areas, etc. |
115,11 → 122,6 |
return pte_flags(pte) & _PAGE_RW; |
} |
static inline int pte_file(pte_t pte) |
{ |
return pte_flags(pte) & _PAGE_FILE; |
} |
static inline int pte_huge(pte_t pte) |
{ |
return pte_flags(pte) & _PAGE_PSE; |
137,13 → 139,7 |
static inline int pte_special(pte_t pte) |
{ |
/* |
* See CONFIG_NUMA_BALANCING pte_numa in include/asm-generic/pgtable.h. |
* On x86 we have _PAGE_BIT_NUMA == _PAGE_BIT_GLOBAL+1 == |
* __PAGE_BIT_SOFTW1 == _PAGE_BIT_SPECIAL. |
*/ |
return (pte_flags(pte) & _PAGE_SPECIAL) && |
(pte_flags(pte) & (_PAGE_PRESENT|_PAGE_PROTNONE)); |
return pte_flags(pte) & _PAGE_SPECIAL; |
} |
static inline unsigned long pte_pfn(pte_t pte) |
153,12 → 149,12 |
static inline unsigned long pmd_pfn(pmd_t pmd) |
{ |
return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT; |
return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT; |
} |
static inline unsigned long pud_pfn(pud_t pud) |
{ |
return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT; |
return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT; |
} |
#define pte_page(pte) pfn_to_page(pte_pfn(pte)) |
305,7 → 301,7 |
static inline pmd_t pmd_mknotpresent(pmd_t pmd) |
{ |
return pmd_clear_flags(pmd, _PAGE_PRESENT); |
return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE); |
} |
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY |
329,21 → 325,16 |
return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY); |
} |
static inline pte_t pte_file_clear_soft_dirty(pte_t pte) |
static inline pte_t pte_clear_soft_dirty(pte_t pte) |
{ |
return pte_clear_flags(pte, _PAGE_SOFT_DIRTY); |
} |
static inline pte_t pte_file_mksoft_dirty(pte_t pte) |
static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd) |
{ |
return pte_set_flags(pte, _PAGE_SOFT_DIRTY); |
return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY); |
} |
static inline int pte_file_soft_dirty(pte_t pte) |
{ |
return pte_flags(pte) & _PAGE_SOFT_DIRTY; |
} |
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */ |
/* |
405,7 → 396,9 |
return __pgprot(preservebits | addbits); |
} |
#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK) |
#define pte_pgprot(x) __pgprot(pte_flags(x)) |
#define pmd_pgprot(x) __pgprot(pmd_flags(x)) |
#define pud_pgprot(x) __pgprot(pud_flags(x)) |
#define canon_pgprot(p) __pgprot(massage_pgprot(p)) |
424,11 → 417,17 |
* requested memtype: |
* - request is uncached, return cannot be write-back |
* - request is write-combine, return cannot be write-back |
* - request is write-through, return cannot be write-back |
* - request is write-through, return cannot be write-combine |
*/ |
if ((pcm == _PAGE_CACHE_MODE_UC_MINUS && |
new_pcm == _PAGE_CACHE_MODE_WB) || |
(pcm == _PAGE_CACHE_MODE_WC && |
new_pcm == _PAGE_CACHE_MODE_WB)) { |
new_pcm == _PAGE_CACHE_MODE_WB) || |
(pcm == _PAGE_CACHE_MODE_WT && |
new_pcm == _PAGE_CACHE_MODE_WB) || |
(pcm == _PAGE_CACHE_MODE_WT && |
new_pcm == _PAGE_CACHE_MODE_WC)) { |
return 0; |
} |
463,13 → 462,6 |
static inline int pte_present(pte_t a) |
{ |
return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE | |
_PAGE_NUMA); |
} |
#define pte_present_nonuma pte_present_nonuma |
static inline int pte_present_nonuma(pte_t a) |
{ |
return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE); |
} |
479,7 → 471,7 |
if (pte_flags(a) & _PAGE_PRESENT) |
return true; |
if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) && |
if ((pte_flags(a) & _PAGE_PROTNONE) && |
mm_tlb_flush_pending(mm)) |
return true; |
499,10 → 491,27 |
* the _PAGE_PSE flag will remain set at all times while the |
* _PAGE_PRESENT bit is clear). |
*/ |
return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE | |
_PAGE_NUMA); |
return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE); |
} |
#ifdef CONFIG_NUMA_BALANCING |
/* |
* These work without NUMA balancing but the kernel does not care. See the |
* comment in include/asm-generic/pgtable.h |
*/ |
static inline int pte_protnone(pte_t pte) |
{ |
return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT)) |
== _PAGE_PROTNONE; |
} |
static inline int pmd_protnone(pmd_t pmd) |
{ |
return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT)) |
== _PAGE_PROTNONE; |
} |
#endif /* CONFIG_NUMA_BALANCING */ |
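/* |
* Sketch of the combinations pte_protnone() separates. The PROTNONE bit |
* shares its position with _PAGE_GLOBAL, which is why _PAGE_PRESENT must |
* be tested as well (bit assignments per pgtable_types.h in this patch): |
* |
* PRESENT PROTNONE meaning |
* 1 x normal mapping (the bit doubles as GLOBAL) |
* 0 1 PROT_NONE / NUMA-hinting entry |
* 0 0 swap entry or empty |
*/ |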
static inline int pmd_none(pmd_t pmd) |
{ |
/* Only check low word on 32-bit platforms, since it might be |
512,7 → 521,7 |
static inline unsigned long pmd_page_vaddr(pmd_t pmd) |
{ |
return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK); |
return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd)); |
} |
/* |
519,7 → 528,8 |
* Currently stuck as a macro due to indirect forward reference to |
* linux/mmzone.h's __section_mem_map_addr() definition: |
*/ |
#define pmd_page(pmd) pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT) |
#define pmd_page(pmd) \ |
pfn_to_page((pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT) |
/* |
* the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD] |
559,11 → 569,6 |
static inline int pmd_bad(pmd_t pmd) |
{ |
#ifdef CONFIG_NUMA_BALANCING |
/* pmd_numa check */ |
if ((pmd_flags(pmd) & (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA) |
return 0; |
#endif |
return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE; |
} |
572,7 → 577,7 |
return npg >> (20 - PAGE_SHIFT); |
} |
#if PAGETABLE_LEVELS > 2 |
#if CONFIG_PGTABLE_LEVELS > 2 |
static inline int pud_none(pud_t pud) |
{ |
return native_pud_val(pud) == 0; |
585,7 → 590,7 |
static inline unsigned long pud_page_vaddr(pud_t pud) |
{ |
return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK); |
return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud)); |
} |
/* |
592,7 → 597,8 |
* Currently stuck as a macro due to indirect forward reference to |
* linux/mmzone.h's __section_mem_map_addr() definition: |
*/ |
#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT) |
#define pud_page(pud) \ |
pfn_to_page((pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT) |
/* Find an entry in the second-level page table. */ |
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) |
615,9 → 621,9 |
{ |
return 0; |
} |
#endif /* PAGETABLE_LEVELS > 2 */ |
#endif /* CONFIG_PGTABLE_LEVELS > 2 */ |
#if PAGETABLE_LEVELS > 3 |
#if CONFIG_PGTABLE_LEVELS > 3 |
static inline int pgd_present(pgd_t pgd) |
{ |
return pgd_flags(pgd) & _PAGE_PRESENT; |
654,7 → 660,7 |
{ |
return !native_pgd_val(pgd); |
} |
#endif /* PAGETABLE_LEVELS > 3 */ |
#endif /* CONFIG_PGTABLE_LEVELS > 3 */ |
#endif /* __ASSEMBLY__ */ |
820,8 → 826,8 |
return pmd_flags(pmd) & _PAGE_RW; |
} |
#define __HAVE_ARCH_PMDP_GET_AND_CLEAR |
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr, |
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR |
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr, |
pmd_t *pmdp) |
{ |
pmd_t pmd = native_pmdp_get_and_clear(pmdp); |
882,19 → 888,16 |
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY |
static inline pte_t pte_swp_mksoft_dirty(pte_t pte) |
{ |
VM_BUG_ON(pte_present_nonuma(pte)); |
return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY); |
} |
static inline int pte_swp_soft_dirty(pte_t pte) |
{ |
VM_BUG_ON(pte_present_nonuma(pte)); |
return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY; |
} |
static inline pte_t pte_swp_clear_soft_dirty(pte_t pte) |
{ |
VM_BUG_ON(pte_present_nonuma(pte)); |
return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY); |
} |
#endif |
/drivers/include/asm/pgtable_types.h |
---|
4,7 → 4,7 |
#include <linux/const.h> |
#include <asm/page_types.h> |
#define FIRST_USER_ADDRESS 0 |
#define FIRST_USER_ADDRESS 0UL |
#define _PAGE_BIT_PRESENT 0 /* is present */ |
#define _PAGE_BIT_RW 1 /* writeable */ |
27,19 → 27,9 |
#define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_SOFTW3 /* software dirty tracking */ |
#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ |
/* |
* Swap offsets on configurations that allow automatic NUMA balancing use the |
* bits after _PAGE_BIT_GLOBAL. To uniquely distinguish NUMA hinting PTEs from |
* swap entries, we use the first bit after _PAGE_BIT_GLOBAL and shrink the |
* maximum possible swap space from 16TB to 8TB. |
*/ |
#define _PAGE_BIT_NUMA (_PAGE_BIT_GLOBAL+1) |
/* If _PAGE_BIT_PRESENT is clear, we use these: */ |
/* - if the user mapped it with PROT_NONE; pte_present gives true */ |
#define _PAGE_BIT_PROTNONE _PAGE_BIT_GLOBAL |
/* - set: nonlinear file mapping, saved PTE; unset:swap */ |
#define _PAGE_BIT_FILE _PAGE_BIT_DIRTY |
#define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT) |
#define _PAGE_RW (_AT(pteval_t, 1) << _PAGE_BIT_RW) |
78,21 → 68,6 |
#endif |
/* |
* _PAGE_NUMA distinguishes between a numa hinting minor fault and a page |
* that is not present. The hinting fault gathers numa placement statistics |
* (see pte_numa()). The bit is always zero when the PTE is not present. |
* |
* The bit picked must always be zero both when the pmd is present and |
* when it is not, so that we don't lose information when we set it while |
* atomically clearing the present bit. |
*/ |
#ifdef CONFIG_NUMA_BALANCING |
#define _PAGE_NUMA (_AT(pteval_t, 1) << _PAGE_BIT_NUMA) |
#else |
#define _PAGE_NUMA (_AT(pteval_t, 0)) |
#endif |
/* |
* Tracking the soft dirty bit when a page goes to swap is tricky. |
* We need a bit which can be stored in pte _and_ not conflict |
* with swap entry format. On x86 bits 6 and 7 are *not* involved |
114,7 → 89,6 |
#define _PAGE_NX (_AT(pteval_t, 0)) |
#endif |
#define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE) |
#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE) |
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \ |
125,8 → 99,8 |
/* Set of bits not changed in pte_modify */ |
#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \ |
_PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \ |
_PAGE_SOFT_DIRTY | _PAGE_NUMA) |
#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_NUMA) |
_PAGE_SOFT_DIRTY) |
#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE) |
/* |
* The cache modes defined here are used to translate between pure SW usage |
235,10 → 209,10 |
#include <linux/types.h> |
/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */ |
/* Extracts the PFN from a (pte|pmd|pud|pgd)val_t of a 4KB page */ |
#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK) |
/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */ |
/* Extracts the flags from a (pte|pmd|pud|pgd)val_t of a 4KB page */ |
#define PTE_FLAGS_MASK (~PTE_PFN_MASK) |
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t; |
260,7 → 234,7 |
return native_pgd_val(pgd) & PTE_FLAGS_MASK; |
} |
#if PAGETABLE_LEVELS > 3 |
#if CONFIG_PGTABLE_LEVELS > 3 |
typedef struct { pudval_t pud; } pud_t; |
static inline pud_t native_make_pud(pmdval_t val) |
281,7 → 255,7 |
} |
#endif |
#if PAGETABLE_LEVELS > 2 |
#if CONFIG_PGTABLE_LEVELS > 2 |
typedef struct { pmdval_t pmd; } pmd_t; |
static inline pmd_t native_make_pmd(pmdval_t val) |
302,14 → 276,40 |
} |
#endif |
static inline pudval_t pud_pfn_mask(pud_t pud) |
{ |
if (native_pud_val(pud) & _PAGE_PSE) |
return PHYSICAL_PUD_PAGE_MASK; |
else |
return PTE_PFN_MASK; |
} |
static inline pudval_t pud_flags_mask(pud_t pud) |
{ |
return ~pud_pfn_mask(pud); |
} |
static inline pudval_t pud_flags(pud_t pud) |
{ |
return native_pud_val(pud) & PTE_FLAGS_MASK; |
return native_pud_val(pud) & pud_flags_mask(pud); |
} |
static inline pmdval_t pmd_pfn_mask(pmd_t pmd) |
{ |
if (native_pmd_val(pmd) & _PAGE_PSE) |
return PHYSICAL_PMD_PAGE_MASK; |
else |
return PTE_PFN_MASK; |
} |
static inline pmdval_t pmd_flags_mask(pmd_t pmd) |
{ |
return ~pmd_pfn_mask(pmd); |
} |
static inline pmdval_t pmd_flags(pmd_t pmd) |
{ |
return native_pmd_val(pmd) & PTE_FLAGS_MASK; |
return native_pmd_val(pmd) & pmd_flags_mask(pmd); |
} |
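/* |
* Why the _PAGE_PSE test above matters, as a sketch: in a 2MB (PSE) pmd, |
* bit 12 is the huge-page PAT bit rather than part of the PFN, so the |
* narrow PTE_PFN_MASK would fold a flag bit into the frame number; |
* PHYSICAL_PMD_PAGE_MASK strips everything below PMD_SHIFT instead. |
* example_pmd_pfn() is hypothetical and mirrors pmd_pfn() in pgtable.h. |
*/ |
static inline unsigned long example_pmd_pfn(pmd_t pmd) |
{ |
return (native_pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT; |
} |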
static inline pte_t native_make_pte(pteval_t val) |
327,20 → 327,6 |
return native_pte_val(pte) & PTE_FLAGS_MASK; |
} |
#ifdef CONFIG_NUMA_BALANCING |
/* Set of bits that distinguishes present, prot_none and numa ptes */ |
#define _PAGE_NUMA_MASK (_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT) |
static inline pteval_t ptenuma_flags(pte_t pte) |
{ |
return pte_flags(pte) & _PAGE_NUMA_MASK; |
} |
static inline pmdval_t pmdnuma_flags(pmd_t pmd) |
{ |
return pmd_flags(pmd) & _PAGE_NUMA_MASK; |
} |
#endif /* CONFIG_NUMA_BALANCING */ |
#define pgprot_val(x) ((x).pgprot) |
#define __pgprot(x) ((pgprot_t) { (x) } ) |
407,6 → 393,9 |
#define pgprot_writecombine pgprot_writecombine |
extern pgprot_t pgprot_writecombine(pgprot_t prot); |
#define pgprot_writethrough pgprot_writethrough |
extern pgprot_t pgprot_writethrough(pgprot_t prot); |
/* Indicate that x86 has its own track and untrack pfn vma functions */ |
#define __HAVE_PFNMAP_TRACKING |
/drivers/include/asm/preempt.h |
---|
30,12 → 30,9 |
/* |
* must be macros to avoid header recursion hell |
*/ |
#define init_task_preempt_count(p) do { \ |
task_thread_info(p)->saved_preempt_count = PREEMPT_DISABLED; \ |
} while (0) |
#define init_task_preempt_count(p) do { } while (0) |
#define init_idle_preempt_count(p, cpu) do { \ |
task_thread_info(p)->saved_preempt_count = PREEMPT_ENABLED; \ |
per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \ |
} while (0) |
90,9 → 87,9 |
/* |
* Returns true when we need to resched and can (barring IRQ state). |
*/ |
static __always_inline bool should_resched(void) |
static __always_inline bool should_resched(int preempt_offset) |
{ |
return unlikely(!raw_cpu_read_4(__preempt_count)); |
return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset); |
} |
#ifdef CONFIG_PREEMPT |
99,11 → 96,9 |
extern asmlinkage void ___preempt_schedule(void); |
# define __preempt_schedule() asm ("call ___preempt_schedule") |
extern asmlinkage void preempt_schedule(void); |
# ifdef CONFIG_CONTEXT_TRACKING |
extern asmlinkage void ___preempt_schedule_context(void); |
# define __preempt_schedule_context() asm ("call ___preempt_schedule_context") |
extern asmlinkage void preempt_schedule_context(void); |
extern asmlinkage void ___preempt_schedule_notrace(void); |
# define __preempt_schedule_notrace() asm ("call ___preempt_schedule_notrace") |
extern asmlinkage void preempt_schedule_notrace(void); |
# endif |
#endif |
#endif /* __ASM_PREEMPT_H */ |
/drivers/include/asm/processor.h |
---|
6,12 → 6,12 |
/* Forward declaration, a strange C thing */ |
struct task_struct; |
struct mm_struct; |
struct vm86; |
#include <asm/vm86.h> |
#include <asm/math_emu.h> |
#include <asm/segment.h> |
#include <asm/types.h> |
#include <asm/sigcontext.h> |
#include <uapi/asm/sigcontext.h> |
#include <asm/current.h> |
#include <asm/cpufeature.h> |
#include <asm/page.h> |
21,6 → 21,7 |
#include <asm/desc_defs.h> |
#include <asm/nops.h> |
#include <asm/special_insns.h> |
#include <asm/fpu/types.h> |
#include <linux/personality.h> |
#include <linux/cpumask.h> |
52,6 → 53,11 |
return pc; |
} |
/* |
* These alignment constraints are for performance in the vSMP case, |
* but in the task_struct case we must also meet hardware imposed |
* alignment requirements of the FPU state: |
*/ |
#ifdef CONFIG_X86_VSMP |
# define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT) |
# define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT) |
109,6 → 115,9 |
/* in KB - valid for CPUS which support this call: */ |
int x86_cache_size; |
int x86_cache_alignment; /* In bytes */ |
/* Cache QoS architectural values: */ |
int x86_cache_max_rmid; /* max index */ |
int x86_cache_occ_scale; /* scale to bytes */ |
int x86_power; |
unsigned long loops_per_jiffy; |
/* cpuid returned max cores value: */ |
160,10 → 169,7 |
extern const struct seq_operations cpuinfo_op; |
#define cache_line_size() (x86_cache_alignment) |
extern void cpu_detect(struct cpuinfo_x86 *c); |
extern void fpu_detect(struct cpuinfo_x86 *c); |
extern void early_cpu_init(void); |
extern void identify_boot_cpu(void); |
210,8 → 216,23 |
unsigned long sp0; |
unsigned short ss0, __ss0h; |
unsigned long sp1; |
/* ss1 caches MSR_IA32_SYSENTER_CS: */ |
unsigned short ss1, __ss1h; |
/* |
* We don't use ring 1, so ss1 is a convenient scratch space in |
* the same cacheline as sp0. We use ss1 to cache the value in |
* MSR_IA32_SYSENTER_CS. When we context switch |
* MSR_IA32_SYSENTER_CS, we first check if the new value being |
* written matches ss1, and, if it's not, then we wrmsr the new |
* value and update ss1. |
* |
* The only reason we context switch MSR_IA32_SYSENTER_CS is |
* that we set it to zero in vm86 tasks to avoid corrupting the |
* stack if we were to go through the sysenter path from vm86 |
* mode. |
*/ |
unsigned short ss1; /* MSR_IA32_SYSENTER_CS */ |
unsigned short __ss1h; |
unsigned long sp2; |
unsigned short ss2, __ss2h; |
unsigned long __cr3; |
276,14 → 297,18 |
unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; |
/* |
* .. and then another 0x100 bytes for the emergency kernel stack: |
* Space for the temporary SYSENTER stack: |
*/ |
unsigned long stack[64]; |
unsigned long SYSENTER_stack[64]; |
} ____cacheline_aligned; |
DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss); |
DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss); |
#ifdef CONFIG_X86_32 |
DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack); |
#endif |
/* |
* Save the original ist values for checking stack pointers during debugging |
*/ |
291,128 → 316,6 |
unsigned long ist[7]; |
}; |
#define MXCSR_DEFAULT 0x1f80 |
struct i387_fsave_struct { |
u32 cwd; /* FPU Control Word */ |
u32 swd; /* FPU Status Word */ |
u32 twd; /* FPU Tag Word */ |
u32 fip; /* FPU IP Offset */ |
u32 fcs; /* FPU IP Selector */ |
u32 foo; /* FPU Operand Pointer Offset */ |
u32 fos; /* FPU Operand Pointer Selector */ |
/* 8*10 bytes for each FP-reg = 80 bytes: */ |
u32 st_space[20]; |
/* Software status information [not touched by FSAVE ]: */ |
u32 status; |
}; |
struct i387_fxsave_struct { |
u16 cwd; /* Control Word */ |
u16 swd; /* Status Word */ |
u16 twd; /* Tag Word */ |
u16 fop; /* Last Instruction Opcode */ |
union { |
struct { |
u64 rip; /* Instruction Pointer */ |
u64 rdp; /* Data Pointer */ |
}; |
struct { |
u32 fip; /* FPU IP Offset */ |
u32 fcs; /* FPU IP Selector */ |
u32 foo; /* FPU Operand Offset */ |
u32 fos; /* FPU Operand Selector */ |
}; |
}; |
u32 mxcsr; /* MXCSR Register State */ |
u32 mxcsr_mask; /* MXCSR Mask */ |
/* 8*16 bytes for each FP-reg = 128 bytes: */ |
u32 st_space[32]; |
/* 16*16 bytes for each XMM-reg = 256 bytes: */ |
u32 xmm_space[64]; |
u32 padding[12]; |
union { |
u32 padding1[12]; |
u32 sw_reserved[12]; |
}; |
} __attribute__((aligned(16))); |
struct i387_soft_struct { |
u32 cwd; |
u32 swd; |
u32 twd; |
u32 fip; |
u32 fcs; |
u32 foo; |
u32 fos; |
/* 8*10 bytes for each FP-reg = 80 bytes: */ |
u32 st_space[20]; |
u8 ftop; |
u8 changed; |
u8 lookahead; |
u8 no_update; |
u8 rm; |
u8 alimit; |
struct math_emu_info *info; |
u32 entry_eip; |
}; |
struct ymmh_struct { |
/* 16 * 16 bytes for each YMMH-reg = 256 bytes */ |
u32 ymmh_space[64]; |
}; |
/* We don't support LWP yet: */ |
struct lwp_struct { |
u8 reserved[128]; |
}; |
struct bndreg { |
u64 lower_bound; |
u64 upper_bound; |
} __packed; |
struct bndcsr { |
u64 bndcfgu; |
u64 bndstatus; |
} __packed; |
struct xsave_hdr_struct { |
u64 xstate_bv; |
u64 xcomp_bv; |
u64 reserved[6]; |
} __attribute__((packed)); |
struct xsave_struct { |
struct i387_fxsave_struct i387; |
struct xsave_hdr_struct xsave_hdr; |
struct ymmh_struct ymmh; |
struct lwp_struct lwp; |
struct bndreg bndreg[4]; |
struct bndcsr bndcsr; |
/* new processor state extensions will go here */ |
} __attribute__ ((packed, aligned (64))); |
union thread_xstate { |
struct i387_fsave_struct fsave; |
struct i387_fxsave_struct fxsave; |
struct i387_soft_struct soft; |
struct xsave_struct xsave; |
}; |
struct fpu { |
unsigned int last_cpu; |
unsigned int has_fpu; |
union thread_xstate *state; |
}; |
#ifdef CONFIG_X86_64 |
DECLARE_PER_CPU(struct orig_ist, orig_ist); |
461,8 → 364,6 |
#endif /* X86_64 */ |
extern unsigned int xstate_size; |
extern void free_thread_xstate(struct task_struct *); |
extern struct kmem_cache *task_xstate_cachep; |
struct perf_event; |
474,7 → 375,6 |
#ifdef CONFIG_X86_32 |
unsigned long sysenter_cs; |
#else |
unsigned long usersp; /* Copy from PDA */ |
unsigned short es; |
unsigned short ds; |
unsigned short fsindex; |
487,6 → 387,7 |
unsigned long fs; |
#endif |
unsigned long gs; |
/* Save middle states of ptrace breakpoints */ |
struct perf_event *ptrace_bps[HBP_NUM]; |
/* Debug status used for traps, single steps, etc... */ |
497,17 → 398,9 |
unsigned long cr2; |
unsigned long trap_nr; |
unsigned long error_code; |
/* floating point and extended processor state */ |
struct fpu fpu; |
#ifdef CONFIG_X86_32 |
#ifdef CONFIG_VM86 |
/* Virtual 86 mode info */ |
struct vm86_struct __user *vm86_info; |
unsigned long screen_bitmap; |
unsigned long v86flags; |
unsigned long v86mask; |
unsigned long saved_sp0; |
unsigned int saved_fs; |
unsigned int saved_gs; |
struct vm86 *vm86; |
#endif |
/* IO permissions: */ |
unsigned long *io_bitmap_ptr; |
514,15 → 407,13 |
unsigned long iopl; |
/* Max allowed port in the bitmap, in bytes: */ |
unsigned io_bitmap_max; |
/* Floating point and extended processor state */ |
struct fpu fpu; |
/* |
* fpu_counter contains the number of consecutive context switches |
* during which the FPU is used. If this is over a threshold, the lazy |
* FPU saving becomes unlazy to save the trap. This is an unsigned char |
* so that after 256 times the counter wraps and the behavior turns |
* lazy again; this is to deal with bursty apps that only use the FPU |
* for a short time. |
* WARNING: 'fpu' is dynamically-sized. It *MUST* be at |
* the end. |
*/ |
unsigned char fpu_counter; |
}; |
/* |
564,11 → 455,13 |
#endif |
} |
#ifdef CONFIG_PARAVIRT |
#include <asm/paravirt.h> |
#else |
#define __cpuid native_cpuid |
#define paravirt_enabled() 0 |
#define paravirt_has(x) 0 |
static inline void load_sp0(struct tss_struct *tss, |
struct thread_struct *thread) |
579,39 → 472,6 |
#define set_iopl_mask native_set_iopl_mask |
#endif /* CONFIG_PARAVIRT */ |
/* |
* Save the cr4 feature set we're using (i.e. |
* Pentium 4MB enable and PPro Global page |
* enable), so that any CPUs that boot up |
* after us can get the correct flags. |
*/ |
extern unsigned long mmu_cr4_features; |
extern u32 *trampoline_cr4_features; |
static inline void set_in_cr4(unsigned long mask) |
{ |
unsigned long cr4; |
mmu_cr4_features |= mask; |
if (trampoline_cr4_features) |
*trampoline_cr4_features = mmu_cr4_features; |
cr4 = read_cr4(); |
cr4 |= mask; |
write_cr4(cr4); |
} |
static inline void clear_in_cr4(unsigned long mask) |
{ |
unsigned long cr4; |
mmu_cr4_features &= ~mask; |
if (trampoline_cr4_features) |
*trampoline_cr4_features = mmu_cr4_features; |
cr4 = read_cr4(); |
cr4 &= ~mask; |
write_cr4(cr4); |
} |
typedef struct { |
unsigned long seg; |
} mm_segment_t; |
686,12 → 546,12 |
} |
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ |
static inline void rep_nop(void) |
static __always_inline void rep_nop(void) |
{ |
asm volatile("rep; nop" ::: "memory"); |
} |
static inline void cpu_relax(void) |
static __always_inline void cpu_relax(void) |
{ |
rep_nop(); |
} |
775,14 → 635,6 |
extern void set_task_blockstep(struct task_struct *task, bool on); |
/* |
* from system description table in BIOS. Mostly for MCA use, but |
* others may find it useful: |
*/ |
extern unsigned int machine_id; |
extern unsigned int machine_submodel_id; |
extern unsigned int BIOS_revision; |
/* Boot loader type from the setup header: */ |
extern int bootloader_type; |
extern int bootloader_version; |
794,10 → 646,10 |
#define ARCH_HAS_SPINLOCK_PREFETCH |
#ifdef CONFIG_X86_32 |
# define BASE_PREFETCH ASM_NOP4 |
# define BASE_PREFETCH "" |
# define ARCH_HAS_PREFETCH |
#else |
# define BASE_PREFETCH "prefetcht0 (%1)" |
# define BASE_PREFETCH "prefetcht0 %P1" |
#endif |
/* |
832,6 → 684,9 |
prefetchw(x); |
} |
#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \ |
TOP_OF_KERNEL_STACK_PADDING) |
#ifdef CONFIG_X86_32 |
/* |
* User space process size: 3GB (default). |
842,39 → 697,15 |
#define STACK_TOP_MAX STACK_TOP |
#define INIT_THREAD { \ |
.sp0 = sizeof(init_stack) + (long)&init_stack, \ |
.vm86_info = NULL, \ |
.sp0 = TOP_OF_INIT_STACK, \ |
.sysenter_cs = __KERNEL_CS, \ |
.io_bitmap_ptr = NULL, \ |
} |
/* |
* Note that the .io_bitmap member must be extra-big. This is because |
* the CPU will access an additional byte beyond the end of the IO |
* permission bitmap. The extra byte must be all 1 bits, and must |
* be within the limit. |
*/ |
#define INIT_TSS { \ |
.x86_tss = { \ |
.sp0 = sizeof(init_stack) + (long)&init_stack, \ |
.ss0 = __KERNEL_DS, \ |
.ss1 = __KERNEL_CS, \ |
.io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \ |
}, \ |
.io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, \ |
} |
extern unsigned long thread_saved_pc(struct task_struct *tsk); |
#define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long)) |
#define KSTK_TOP(info) \ |
({ \ |
unsigned long *__ptr = (unsigned long *)(info); \ |
(unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \ |
}) |
/* |
* The below -8 is to reserve 8 bytes on top of the ring0 stack. |
* TOP_OF_KERNEL_STACK_PADDING reserves 8 bytes on top of the ring0 stack. |
* This is necessary to guarantee that the entire "struct pt_regs" |
* is accessible even if the CPU hasn't stored the SS/ESP registers |
* on the stack (interrupt gate does not save these registers |
885,9 → 716,9 |
*/ |
#define task_pt_regs(task) \ |
({ \ |
struct pt_regs *__regs__; \ |
__regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \ |
__regs__ - 1; \ |
unsigned long __ptr = (unsigned long)task_stack_page(task); \ |
__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING; \ |
((struct pt_regs *)__ptr) - 1; \ |
}) |
#define KSTK_ESP(task) (task_pt_regs(task)->sp) |
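/* |
* Sketched stack layout implied by task_pt_regs() above (assuming the |
* padding is 8 bytes on this configuration): |
* |
* task_stack_page(task) + THREAD_SIZE -> top of the stack page |
* [ TOP_OF_KERNEL_STACK_PADDING ] -> room for the missing SS/ESP |
* task_pt_regs(task) + 1 |
* [ struct pt_regs ] |
* task_pt_regs(task) -> value the macro yields |
* [ ... kernel stack grows down ] |
*/ |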
919,13 → 750,9 |
#define STACK_TOP_MAX TASK_SIZE_MAX |
#define INIT_THREAD { \ |
.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ |
.sp0 = TOP_OF_INIT_STACK \ |
} |
#define INIT_TSS { \ |
.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ |
} |
/* |
* Return saved PC of a blocked thread. |
* What is this good for? It will always be the scheduler or ret_from_fork. |
935,11 → 762,6 |
#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1) |
extern unsigned long KSTK_ESP(struct task_struct *task); |
/* |
* User space RSP while inside the SYSCALL fast path |
*/ |
DECLARE_PER_CPU(unsigned long, old_rsp); |
#endif /* CONFIG_X86_64 */ |
extern void start_thread(struct pt_regs *regs, unsigned long new_ip, |
961,18 → 783,18 |
extern int set_tsc_mode(unsigned int val); |
/* Register/unregister a process' MPX related resource */ |
#define MPX_ENABLE_MANAGEMENT(tsk) mpx_enable_management((tsk)) |
#define MPX_DISABLE_MANAGEMENT(tsk) mpx_disable_management((tsk)) |
#define MPX_ENABLE_MANAGEMENT() mpx_enable_management() |
#define MPX_DISABLE_MANAGEMENT() mpx_disable_management() |
#ifdef CONFIG_X86_INTEL_MPX |
extern int mpx_enable_management(struct task_struct *tsk); |
extern int mpx_disable_management(struct task_struct *tsk); |
extern int mpx_enable_management(void); |
extern int mpx_disable_management(void); |
#else |
static inline int mpx_enable_management(struct task_struct *tsk) |
static inline int mpx_enable_management(void) |
{ |
return -EINVAL; |
} |
static inline int mpx_disable_management(struct task_struct *tsk) |
static inline int mpx_disable_management(void) |
{ |
return -EINVAL; |
} |
979,6 → 801,7 |
#endif /* CONFIG_X86_INTEL_MPX */ |
extern u16 amd_get_nb_id(int cpu); |
extern u32 amd_get_nodes_per_socket(void); |
static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves) |
{ |
/drivers/include/asm/sigcontext.h |
---|
1,79 → 1,8 |
#ifndef _ASM_X86_SIGCONTEXT_H |
#define _ASM_X86_SIGCONTEXT_H |
/* This is a legacy header - all kernel code includes <uapi/asm/sigcontext.h> directly. */ |
#include <uapi/asm/sigcontext.h> |
#ifdef __i386__ |
struct sigcontext { |
unsigned short gs, __gsh; |
unsigned short fs, __fsh; |
unsigned short es, __esh; |
unsigned short ds, __dsh; |
unsigned long di; |
unsigned long si; |
unsigned long bp; |
unsigned long sp; |
unsigned long bx; |
unsigned long dx; |
unsigned long cx; |
unsigned long ax; |
unsigned long trapno; |
unsigned long err; |
unsigned long ip; |
unsigned short cs, __csh; |
unsigned long flags; |
unsigned long sp_at_signal; |
unsigned short ss, __ssh; |
/* |
* fpstate is really (struct _fpstate *) or (struct _xstate *) |
* depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved |
* bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end |
* of extended memory layout. See comments at the definition of |
* (struct _fpx_sw_bytes) |
*/ |
void __user *fpstate; /* zero when no FPU/extended context */ |
unsigned long oldmask; |
unsigned long cr2; |
}; |
#else /* __i386__ */ |
struct sigcontext { |
unsigned long r8; |
unsigned long r9; |
unsigned long r10; |
unsigned long r11; |
unsigned long r12; |
unsigned long r13; |
unsigned long r14; |
unsigned long r15; |
unsigned long di; |
unsigned long si; |
unsigned long bp; |
unsigned long bx; |
unsigned long dx; |
unsigned long ax; |
unsigned long cx; |
unsigned long sp; |
unsigned long ip; |
unsigned long flags; |
unsigned short cs; |
unsigned short gs; |
unsigned short fs; |
unsigned short __pad0; |
unsigned long err; |
unsigned long trapno; |
unsigned long oldmask; |
unsigned long cr2; |
/* |
* fpstate is really (struct _fpstate *) or (struct _xstate *) |
* depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved |
* bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end |
* of extended memory layout. See comments at the definition of |
* (struct _fpx_sw_bytes) |
*/ |
void __user *fpstate; /* zero when no FPU/extended context */ |
unsigned long reserved1[8]; |
}; |
#endif /* !__i386__ */ |
#endif /* _ASM_X86_SIGCONTEXT_H */ |
/drivers/include/asm/special_insns.h |
---|
137,17 → 137,17 |
native_write_cr3(x); |
} |
static inline unsigned long read_cr4(void) |
static inline unsigned long __read_cr4(void) |
{ |
return native_read_cr4(); |
} |
static inline unsigned long read_cr4_safe(void) |
static inline unsigned long __read_cr4_safe(void) |
{ |
return native_read_cr4_safe(); |
} |
static inline void write_cr4(unsigned long x) |
static inline void __write_cr4(unsigned long x) |
{ |
native_write_cr4(x); |
} |
/drivers/include/asm/x86_init.h |
---|
1,7 → 1,6 |
#ifndef _ASM_X86_PLATFORM_H |
#define _ASM_X86_PLATFORM_H |
#include <asm/pgtable_types.h> |
//#include <asm/bootparam.h> |
struct mpc_bus; |
171,38 → 170,17 |
}; |
struct pci_dev; |
struct msi_msg; |
struct x86_msi_ops { |
int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type); |
void (*compose_msi_msg)(struct pci_dev *dev, unsigned int irq, |
unsigned int dest, struct msi_msg *msg, |
u8 hpet_id); |
void (*teardown_msi_irq)(unsigned int irq); |
void (*teardown_msi_irqs)(struct pci_dev *dev); |
void (*restore_msi_irqs)(struct pci_dev *dev); |
int (*setup_hpet_msi)(unsigned int irq, unsigned int id); |
}; |
struct IO_APIC_route_entry; |
struct io_apic_irq_attr; |
struct irq_data; |
struct cpumask; |
struct x86_io_apic_ops { |
void (*init) (void); |
unsigned int (*read) (unsigned int apic, unsigned int reg); |
void (*write) (unsigned int apic, unsigned int reg, unsigned int value); |
void (*modify) (unsigned int apic, unsigned int reg, unsigned int value); |
void (*disable)(void); |
void (*print_entries)(unsigned int apic, unsigned int nr_entries); |
int (*set_affinity)(struct irq_data *data, |
const struct cpumask *mask, |
bool force); |
int (*setup_entry)(int irq, struct IO_APIC_route_entry *entry, |
unsigned int destination, int vector, |
struct io_apic_irq_attr *attr); |
void (*eoi_ioapic_pin)(int apic, int pin, int vector); |
}; |
extern struct x86_init_ops x86_init; |