Rev 7143 → Rev 9078

The two revisions differ by two added lines, marked below: one extra #include among the asm headers, and a new "#define X86_VENDOR_HYGON 9" after the CPU vendor list (note that X86_VENDOR_NUM stays at 9, so the vendor count does not cover the new Hygon entry).
#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
struct vm86;

#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <uapi/asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include			/* added in Rev 9078 */
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>
#include <asm/fpu/types.h>

#include <linux/personality.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/err.h>
#include <linux/irqflags.h>

/*
 * We handle most unaligned accesses in hardware.  On the other hand
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0

#define HBP_NUM 4
/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
	void *pc;

	asm volatile("mov $1f, %0; 1:":"=r" (pc));

	return pc;
}

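/*
 * Hypothetical usage sketch (not part of either revision): the address
 * returned above can tag diagnostics with the caller's kernel-text
 * location.  "where_am_i" is a made-up name; printk() is the standard
 * kernel logger.
 *
 *	static void where_am_i(void)
 *	{
 *		printk(KERN_DEBUG "executing near %p\n", current_text_addr());
 *	}
 */
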
/*
 * These alignment constraints are for performance in the vSMP case,
 * but in the task_struct case we must also meet hardware imposed
 * alignment requirements of the FPU state:
 */
#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		16
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif

enum tlb_infos {
	ENTRIES,
	NR_INFO
};

extern u16 __read_mostly tlb_lli_4k[NR_INFO];
extern u16 __read_mostly tlb_lli_2m[NR_INFO];
extern u16 __read_mostly tlb_lli_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_4k[NR_INFO];
extern u16 __read_mostly tlb_lld_2m[NR_INFO];
extern u16 __read_mostly tlb_lld_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_1g[NR_INFO];

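/*
 * Usage sketch (hypothetical helper, not from this header): these arrays
 * hold the last-level instruction-TLB (lli) and data-TLB (lld) entry
 * counts per page size, detected at boot.  For example, the number of
 * 4K dTLB entries reads as:
 *
 *	static unsigned int dtlb_4k_entries(void)
 *	{
 *		return tlb_lld_4k[ENTRIES];
 *	}
 */
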
/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head.S, so think twice
 * before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8			x86;		/* CPU family */
	__u8			x86_vendor;	/* CPU vendor */
	__u8			x86_model;
	__u8			x86_mask;
#ifdef CONFIG_X86_32
	char			wp_works_ok;	/* It doesn't on 386's */

	/* Problems on some 486Dx4's and old 386's: */
	char			rfu;
	char			pad0;
	char			pad1;
#else
	/* Number of 4K pages in DTLB/ITLB combined (in pages): */
	int			x86_tlbsize;
#endif
	__u8			x86_virt_bits;
	__u8			x86_phys_bits;
	/* CPUID returned core id bits: */
	__u8			x86_coreid_bits;
	/* Max extended CPUID function supported: */
	__u32			extended_cpuid_level;
	/* Maximum supported CPUID level, -1=no CPUID: */
	int			cpuid_level;
	__u32			x86_capability[NCAPINTS + NBUGINTS];
	char			x86_vendor_id[16];
	char			x86_model_id[64];
	/* in KB - valid for CPUs which support this call: */
	int			x86_cache_size;
	int			x86_cache_alignment;	/* In bytes */
	/* Cache QoS architectural values: */
	int			x86_cache_max_rmid;	/* max index */
	int			x86_cache_occ_scale;	/* scale to bytes */
	int			x86_power;
	unsigned long		loops_per_jiffy;
	/* cpuid returned max cores value: */
	u16			x86_max_cores;
	u16			apicid;
	u16			initial_apicid;
	u16			x86_clflush_size;
	/* number of cores as seen by the OS: */
	u16			booted_cores;
	/* Physical processor id: */
	u16			phys_proc_id;
	/* Logical processor id: */
	u16			logical_proc_id;
	/* Core id: */
	u16			cpu_core_id;
	/* Index into per_cpu list: */
	u16			cpu_index;
	u32			microcode;
};

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_NUM		9
#define X86_VENDOR_HYGON	9	/* added in Rev 9078; aliases X86_VENDOR_NUM */
#define X86_VENDOR_UNKNOWN	0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86	boot_cpu_data;
extern struct cpuinfo_x86	new_cpu_data;

extern struct tss_struct	doublefault_tss;
extern __u32			cpu_caps_cleared[NCAPINTS];
extern __u32			cpu_caps_set[NCAPINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#else
#define cpu_info		boot_cpu_data
#define cpu_data(cpu)		boot_cpu_data
#endif

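/*
 * Usage sketch (made-up function name): cpu_data(cpu) resolves to the
 * per-CPU cpuinfo_x86 on SMP and to boot_cpu_data otherwise, so callers
 * can be written uniformly:
 *
 *	static void show_fms(int cpu)
 *	{
 *		struct cpuinfo_x86 *c = &cpu_data(cpu);
 *
 *		pr_info("CPU%d: family %u model %u stepping %u\n",
 *			cpu, c->x86, c->x86_model, c->x86_mask);
 *	}
 */
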
extern const struct seq_operations cpuinfo_op;

extern void cpu_detect(struct cpuinfo_x86 *c);

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);

extern void detect_extended_topology(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);

#ifdef CONFIG_X86_32
extern int have_cpuid_p(void);
#else
static inline int have_cpuid_p(void)
{
	return 1;
}
#endif
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	asm volatile("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx)
	    : "memory");
}

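/*
 * Usage sketch (hypothetical helper): leaf 0 returns the vendor string
 * in EBX, EDX, ECX, in that order.  eax selects the leaf and ecx must be
 * initialized as well, since both are inputs to the asm above:
 *
 *	static void read_vendor(char buf[13])
 *	{
 *		unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
 *
 *		native_cpuid(&eax, &ebx, &ecx, &edx);
 *		memcpy(buf + 0, &ebx, 4);
 *		memcpy(buf + 4, &edx, 4);
 *		memcpy(buf + 8, &ecx, 4);
 *		buf[12] = '\0';
 *	}
 */
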
static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__pa(pgdir));
}

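/*
 * Note (editorial, not from either revision): writing CR3 also flushes
 * all non-global TLB entries, which is why loading a new page-table root
 * this way needs no separate TLB flush.
 */
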
#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;
	unsigned short		ss0, __ss0h;
	unsigned long		sp1;

	/*
	 * We don't use ring 1, so ss1 is a convenient scratch space in
	 * the same cacheline as sp0.  We use ss1 to cache the value in
	 * MSR_IA32_SYSENTER_CS.  When we context switch
	 * MSR_IA32_SYSENTER_CS, we first check if the new value being
	 * written matches ss1, and, if it's not, then we wrmsr the new
	 * value and update ss1.
	 *
	 * The only reason we context switch MSR_IA32_SYSENTER_CS is
	 * that we set it to zero in vm86 tasks to avoid corrupting the
	 * stack if we were to go through the sysenter path from vm86
	 * mode.
	 */
	unsigned short		ss1;	/* MSR_IA32_SYSENTER_CS */

	unsigned short		__ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
	u32			reserved1;
	u64			sp0;
	u64			sp1;
	u64			sp2;
	u64			reserved2;
	u64			ist[7];
	u32			reserved3;
	u32			reserved4;
	u16			reserved5;
	u16			io_bitmap_base;

} __attribute__((packed)) ____cacheline_aligned;
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET		offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET	0x8000

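/*
 * Worked out: 65536 ports at one bit each gives IO_BITMAP_BYTES = 8192,
 * i.e. IO_BITMAP_LONGS = 1024 on a 64-bit build (2048 on 32-bit).
 * INVALID_IO_BITMAP_OFFSET = 0x8000 points past the TSS limit, so any
 * user I/O port access faults instead of consulting a bitmap.
 */
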
struct tss_struct {
	/*
	 * The hardware state:
	 */
	struct x86_hw_tss	x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap.  The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];

#ifdef CONFIG_X86_32
	/*
	 * Space for the temporary SYSENTER stack.
	 */
	unsigned long		SYSENTER_stack_canary;
	unsigned long		SYSENTER_stack[64];
#endif

} ____cacheline_aligned;

DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);

#ifdef CONFIG_X86_32
DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
#endif

/*
 * Save the original ist values for checking stack pointers during debugging
 */
struct orig_ist {
	unsigned long		ist[7];
};

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);

union irq_stack_union {
	char irq_stack[IRQ_STACK_SIZE];
	/*
	 * GCC hardcodes the stack canary as %gs:40.  Since the
	 * irq_stack is the object at %gs:0, we reserve the bottom
	 * 48 bytes of the irq stack for the canary.
	 */
	struct {
		char gs_base[40];
		unsigned long stack_canary;
	};
};

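/*
 * Layout check (sketch): gs_base spans bytes 0..39 of the union, so
 * stack_canary sits exactly at %gs:40 as GCC expects.  A compile-time
 * assertion of this invariant could read:
 *
 *	BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40);
 */
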
DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __visible;
DECLARE_INIT_PER_CPU(irq_stack_union);

DECLARE_PER_CPU(char *, irq_stack_ptr);
DECLARE_PER_CPU(unsigned int, irq_count);
extern asmlinkage void ignore_sysret(void);
#else	/* X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
/*
 * Make sure stack canary segment base is cache-aligned:
 *   "For Intel Atom processors, avoid non zero segment base address
 *    that is not aligned to cache line boundary at all cost."
 * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
 */
struct stack_canary {
	char __pad[20];		/* canary at %gs:20 */
	unsigned long canary;
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
/*
 * per-CPU IRQ handling stacks
 */
struct irq_stack {
	u32			stack[THREAD_SIZE/sizeof(u32)];
} __aligned(THREAD_SIZE);

DECLARE_PER_CPU(struct irq_stack *, hardirq_stack);
DECLARE_PER_CPU(struct irq_stack *, softirq_stack);
#endif	/* X86_64 */

extern unsigned int xstate_size;

struct perf_event;

struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long		sp0;
	unsigned long		sp;
#ifdef CONFIG_X86_32
	unsigned long		sysenter_cs;
#else
	unsigned short		es;
	unsigned short		ds;
	unsigned short		fsindex;
	unsigned short		gsindex;
#endif
#ifdef CONFIG_X86_32
	unsigned long		ip;
#endif
#ifdef CONFIG_X86_64
	unsigned long		fs;
#endif
	unsigned long		gs;

	/* Save middle states of ptrace breakpoints */
	struct perf_event	*ptrace_bps[HBP_NUM];
	/* Debug status used for traps, single steps, etc... */
	unsigned long		debugreg6;
	/* Keep track of the exact dr7 value set by the user */
	unsigned long		ptrace_dr7;
	/* Fault info: */
	unsigned long		cr2;
	unsigned long		trap_nr;
	unsigned long		error_code;
#ifdef CONFIG_VM86
	/* Virtual 86 mode info */
	struct vm86		*vm86;
#endif
	/* IO permissions: */
	unsigned long		*io_bitmap_ptr;
	unsigned long		iopl;
	/* Max allowed port in the bitmap, in bytes: */
	unsigned		io_bitmap_max;

	/* Floating point and extended processor state */
	struct fpu		fpu;
	/*
	 * WARNING: 'fpu' is dynamically-sized.  It *MUST* be at
	 * the end.
	 */
};

/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;

	asm volatile ("pushfl;"
		      "popl %0;"
		      "andl %1, %0;"
		      "orl %2, %0;"
		      "pushl %0;"
		      "popfl"
		      : "=&r" (reg)
		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}

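/*
 * Worked example: X86_EFLAGS_IOPL is the two-bit mask 0x3000, so
 * set_iopl_mask(X86_EFLAGS_IOPL) raises IOPL to 3 (full port access)
 * and set_iopl_mask(0) drops it back to 0.
 */
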
static inline void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
#endif
}

static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid			native_cpuid
#define paravirt_enabled()	0
#define paravirt_has(x)		0

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	native_load_sp0(tss, thread);
}

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

typedef struct {
	unsigned long		seg;
} mm_segment_t;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);

/*
 * Generic CPUID function.
 * Clear %ecx since some CPUs (Cyrix MII) do not set or clear %ecx,
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}

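/*
 * Usage sketch: Intel's deterministic cache parameters (leaf 4) use ECX
 * as a subleaf index; iterating stops when the low EAX bits report cache
 * type 0, meaning "no more caches":
 *
 *	unsigned int eax, ebx, ecx, edx, i;
 *
 *	for (i = 0; ; i++) {
 *		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
 *		if ((eax & 0x1f) == 0)
 *			break;
 *	}
 */
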
/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return edx;
}

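/*
 * Usage sketch: the EAX result of leaf 0x80000000 is the highest
 * supported extended leaf, which is how the extended_cpuid_level field
 * above gets populated:
 *
 *	unsigned int max_ext = cpuid_eax(0x80000000);
 */
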
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static __always_inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static __always_inline void cpu_relax(void)
{
	rep_nop();
}

#define cpu_relax_lowlatency() cpu_relax()

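/*
 * Usage sketch ("flag" standing in for any shared variable): PAUSE in a
 * polling loop lowers power draw and avoids the memory-order flush
 * penalty when the wait finally ends:
 *
 *	while (!READ_ONCE(flag))
 *		cpu_relax();
 */
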
/* Stop speculative execution and prefetching of modified code. */
static inline void sync_core(void)
{
	int tmp;

#ifdef CONFIG_M486
	/*
	 * Do a CPUID if available, otherwise do a jump.  The jump
	 * can conveniently enough be the jump around CPUID.
	 */
	asm volatile("cmpl %2,%1\n\t"
		     "jl 1f\n\t"
		     "cpuid\n"
		     "1:"
		     : "=a" (tmp)
		     : "rm" (boot_cpu_data.cpuid_level), "ri" (0), "0" (1)
		     : "ebx", "ecx", "edx", "memory");
#else
	/*
	 * CPUID is a barrier to speculative execution.
	 * Prefetched instructions are automatically
	 * invalidated when modified.
	 */
	asm volatile("cpuid"
		     : "=a" (tmp)
		     : "0" (1)
		     : "ebx", "ecx", "edx", "memory");
#endif
}

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void init_amd_e400_c1e_mask(void);

extern unsigned long		boot_option_idle_override;
extern bool			amd_e400_c1e_detected;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
			 IDLE_POLL};

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

extern void early_trap_init(void);
void early_trap_pf_init(void);

/* Defined in head.S */
extern struct desc_ptr		early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);

static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}

extern void set_task_blockstep(struct task_struct *task, bool on);

/* Boot loader type from the setup header: */
extern int			bootloader_type;
extern int			bootloader_version;

extern char			ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH		""
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH		"prefetcht0 %P1"
#endif

/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchnta %P1",
			  X86_FEATURE_XMM,
			  "m" (*(const char *)x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchw %P1",
			  X86_FEATURE_3DNOWPREFETCH,
			  "m" (*(const char *)x));
}

681 | 682 | ||
682 | static inline void spin_lock_prefetch(const void *x) |
683 | static inline void spin_lock_prefetch(const void *x) |
683 | { |
684 | { |
684 | prefetchw(x); |
685 | prefetchw(x); |
685 | } |
686 | } |
686 | 687 | ||
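/*
 * The point of the prefetchw() variant: issued just before taking a
 * contended lock, it pulls the cache line in exclusive state, saving
 * the shared-to-exclusive upgrade that the locked write would
 * otherwise pay for.
 */
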
#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
			   TOP_OF_KERNEL_STACK_PADDING)

#ifdef CONFIG_X86_32
/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE		PAGE_OFFSET
#define TASK_SIZE_MAX		TASK_SIZE
#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		STACK_TOP

#define INIT_THREAD  {						\
	.sp0		= TOP_OF_INIT_STACK,			\
	.sysenter_cs	= __KERNEL_CS,				\
	.io_bitmap_ptr	= NULL,					\
}

extern unsigned long thread_saved_pc(struct task_struct *tsk);

/*
 * TOP_OF_KERNEL_STACK_PADDING reserves 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same priv ring).
 * Therefore beware: accessing the ss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * completely wrong values.
 */
#define task_pt_regs(task) \
({									\
	unsigned long __ptr = (unsigned long)task_stack_page(task);	\
	__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;		\
	((struct pt_regs *)__ptr) - 1;					\
})

#define KSTK_ESP(task)		(task_pt_regs(task)->sp)

#else
/*
 * User space process size.  47 bits minus one guard page.  The guard
 * page is necessary on Intel CPUs: if a SYSCALL instruction is at
 * the highest possible canonical userspace address, then that
 * syscall will enter the kernel with a non-canonical return
 * address, and SYSRET will explode dangerously.  We avoid this
 * particular problem by preventing anything from being mapped
 * at the maximum canonical address.
 */
#define TASK_SIZE_MAX	((1UL << 47) - PAGE_SIZE)

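/*
 * Numerically: (1UL << 47) - PAGE_SIZE = 0x00007ffffffff000, i.e. the
 * topmost canonical user page below the 47-bit boundary is deliberately
 * left unmapped, per the comment above.
 */
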
/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
					0xc0000000 : 0xFFFFe000)

#define TASK_SIZE		(test_thread_flag(TIF_ADDR32) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)

#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		TASK_SIZE_MAX

#define INIT_THREAD  { \
	.sp0 = TOP_OF_INIT_STACK \
}

/*
 * Return saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t)	READ_ONCE_NOCHECK(*(unsigned long *)((t)->thread.sp - 8))

#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
extern unsigned long KSTK_ESP(struct task_struct *task);

#endif /* CONFIG_X86_64 */

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
					       unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

/* Register/unregister a process' MPX related resource */
#define MPX_ENABLE_MANAGEMENT()		mpx_enable_management()
#define MPX_DISABLE_MANAGEMENT()	mpx_disable_management()

#ifdef CONFIG_X86_INTEL_MPX
extern int mpx_enable_management(void);
extern int mpx_disable_management(void);
#else
static inline int mpx_enable_management(void)
{
	return -EINVAL;
}
static inline int mpx_disable_management(void)
{
	return -EINVAL;
}
#endif /* CONFIG_X86_INTEL_MPX */

extern u16 amd_get_nb_id(int cpu);
extern u32 amd_get_nodes_per_socket(void);

static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
{
	uint32_t base, eax, signature[3];

	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
		cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);

		if (!memcmp(sig, signature, 12) &&
		    (leaves == 0 || ((eax - base) >= leaves)))
			return base;
	}

	return 0;
}

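/*
 * Usage sketch: KVM advertises the signature "KVMKVMKVM\0\0\0" somewhere
 * in the 0x40000000-0x4000ffff range, so detection reduces to:
 *
 *	if (hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0))
 *		pr_info("running as a KVM guest\n");
 */
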
extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);
#ifdef CONFIG_XEN
bool xen_set_default_idle(void);
#else
#define xen_set_default_idle 0
#endif

void stop_this_cpu(void *dummy);
void df_debug(struct pt_regs *regs, long error_code);
#endif /* _ASM_X86_PROCESSOR_H */