#ifndef _ASM_X86_MSR_H
#define _ASM_X86_MSR_H

#include "msr-index.h"

#ifndef __ASSEMBLY__

#include
#include
#include
#include

struct msr {
	union {
		struct {
			u32 l;
			u32 h;
		};
		u64 q;
	};
};

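/*
 * Illustrative sketch only (variable names are hypothetical, not part of
 * this header): the anonymous union in struct msr lets the same MSR value
 * be viewed either as two 32-bit halves or as one 64-bit quantity.
 *
 *	struct msr m;
 *
 *	m.q = 0x0000001200000034ULL;	// store the 64-bit view
 *	// now m.l == 0x34 (low 32 bits) and m.h == 0x12 (high 32 bits)
 */
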
struct msr_info {
	u32 msr_no;
	struct msr reg;
	struct msr *msrs;
	int err;
};

struct msr_regs_info {
	u32 *regs;
	int err;
};

struct saved_msr {
	bool valid;
	struct msr_info info;
};

struct saved_msrs {
	unsigned int num;
	struct saved_msr *array;
};

/*
 * Both i386 and x86_64 return a 64-bit value in edx:eax, but gcc's "A"
 * constraint has different meanings. For i386, "A" means exactly
 * edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead,
 * it means rax *or* rdx.
 */
#ifdef CONFIG_X86_64
/* Using 64-bit values saves one instruction clearing the high half of low */
#define DECLARE_ARGS(val, low, high)	unsigned long low, high
#define EAX_EDX_VAL(val, low, high)	((low) | (high) << 32)
#define EAX_EDX_RET(val, low, high)	"=a" (low), "=d" (high)
#else
#define DECLARE_ARGS(val, low, high)	unsigned long long val
#define EAX_EDX_VAL(val, low, high)	(val)
#define EAX_EDX_RET(val, low, high)	"=A" (val)
#endif

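/*
 * Illustrative expansion of the helpers above (a sketch, not additional
 * API): on CONFIG_X86_64 an RDMSR-style read lands in two 32-bit halves
 * that EAX_EDX_VAL() recombines, while on 32-bit the "A" constraint hands
 * back edx:eax as a single 64-bit value.
 *
 *	// CONFIG_X86_64
 *	unsigned long low, high;
 *	asm volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
 *	value = (low) | (high) << 32;
 *
 *	// 32-bit
 *	unsigned long long val;
 *	asm volatile("rdmsr" : "=A" (val) : "c" (msr));
 *	value = val;
 */
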
#ifdef CONFIG_TRACEPOINTS
/*
 * Be very careful with includes. This header is prone to include loops.
 */
#include
#include

extern struct tracepoint __tracepoint_read_msr;
extern struct tracepoint __tracepoint_write_msr;
extern struct tracepoint __tracepoint_rdpmc;
#define msr_tracepoint_active(t) static_key_false(&(t).key)
extern void do_trace_write_msr(unsigned msr, u64 val, int failed);
extern void do_trace_read_msr(unsigned msr, u64 val, int failed);
extern void do_trace_rdpmc(unsigned msr, u64 val, int failed);
#else
#define msr_tracepoint_active(t) false
static inline void do_trace_write_msr(unsigned msr, u64 val, int failed) {}
static inline void do_trace_read_msr(unsigned msr, u64 val, int failed) {}
static inline void do_trace_rdpmc(unsigned msr, u64 val, int failed) {}
#endif

static inline unsigned long long native_read_msr(unsigned int msr)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
	if (msr_tracepoint_active(__tracepoint_read_msr))
		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), 0);
	return EAX_EDX_VAL(val, low, high);
}

static inline unsigned long long native_read_msr_safe(unsigned int msr,
						      int *err)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("2: rdmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
		     : "c" (msr), [fault] "i" (-EIO));
	if (msr_tracepoint_active(__tracepoint_read_msr))
		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
	return EAX_EDX_VAL(val, low, high);
}

static inline void native_write_msr(unsigned int msr,
				    unsigned low, unsigned high)
{
	asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
	if (msr_tracepoint_active(__tracepoint_write_msr))
		do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
}

/* Can be uninlined because referenced by paravirt */
notrace static inline int native_write_msr_safe(unsigned int msr,
						unsigned low, unsigned high)
{
	int err;
	asm volatile("2: wrmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=a" (err)
		     : "c" (msr), "0" (low), "d" (high),
		       [fault] "i" (-EIO)
		     : "memory");
	if (msr_tracepoint_active(__tracepoint_write_msr))
		do_trace_write_msr(msr, ((u64)high << 32 | low), err);
	return err;
}

extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);

/**
 * rdtsc() - returns the current TSC without ordering constraints
 *
 * rdtsc() returns the result of RDTSC as a 64-bit integer. The
 * only ordering constraint it supplies is the ordering implied by
 * "asm volatile": it will put the RDTSC in the place you expect. The
 * CPU can and will speculatively execute that RDTSC, though, so the
 * results can be non-monotonic if compared on different CPUs.
 */
static __always_inline unsigned long long rdtsc(void)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));

	return EAX_EDX_VAL(val, low, high);
}

/**
 * rdtsc_ordered() - read the current TSC in program order
 *
 * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
 * It is ordered like a load to a global in-memory counter. It should
 * be impossible to observe non-monotonic rdtsc_unordered() behavior
 * across multiple CPUs as long as the TSC is synced.
 */
static __always_inline unsigned long long rdtsc_ordered(void)
{
	/*
	 * The RDTSC instruction is not ordered relative to memory
	 * access. The Intel SDM and the AMD APM are both vague on this
	 * point, but empirically an RDTSC instruction can be
	 * speculatively executed before prior loads. An RDTSC
	 * immediately after an appropriate barrier appears to be
	 * ordered as a normal load, that is, it provides the same
	 * ordering guarantees as reading from a global memory location
	 * that some other imaginary CPU is updating continuously with a
	 * time stamp.
	 */
	alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
		      "lfence", X86_FEATURE_LFENCE_RDTSC);
	return rdtsc();
}

/* Deprecated, keep it for a cycle for easier merging: */
#define rdtscll(now)	do { (now) = rdtsc_ordered(); } while (0)

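/*
 * Usage sketch for the TSC helpers above (illustrative only; the routine
 * being timed and the variable names are hypothetical):
 *
 *	unsigned long long t0, t1;
 *
 *	t0 = rdtsc_ordered();
 *	do_work();			// hypothetical code under test
 *	t1 = rdtsc_ordered();
 *	// t1 - t0 is the elapsed TSC tick count as seen by this CPU
 */
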
static inline unsigned long long native_read_pmc(int counter)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
	if (msr_tracepoint_active(__tracepoint_rdpmc))
		do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0);
	return EAX_EDX_VAL(val, low, high);
}

#ifdef CONFIG_PARAVIRT
#include
#else
#include
/*
 * Access to machine-specific registers (available on 586 and better only).
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection); this allows gcc to optimize better.
 */

#define rdmsr(msr, low, high)					\
do {								\
	u64 __val = native_read_msr((msr));			\
	(void)((low) = (u32)__val);				\
	(void)((high) = (u32)(__val >> 32));			\
} while (0)

static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
{
	native_write_msr(msr, low, high);
}

#define rdmsrl(msr, val)			\
	((val) = native_read_msr((msr)))

static inline void wrmsrl(unsigned msr, u64 val)
{
	native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
}

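/*
 * Usage sketch for the accessors above (illustrative; the MSR chosen and
 * the bit manipulated are placeholders, not a recommendation):
 *
 *	u64 misc;
 *
 *	rdmsrl(MSR_IA32_MISC_ENABLE, misc);	// read the full 64-bit value
 *	misc |= (1ULL << 0);			// flip some feature bit
 *	wrmsrl(MSR_IA32_MISC_ENABLE, misc);	// write it back
 */
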
/* wrmsr with exception handling */
static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
{
	return native_write_msr_safe(msr, low, high);
}

/* rdmsr with exception handling */
#define rdmsr_safe(msr, low, high)				\
({								\
	int __err;						\
	u64 __val = native_read_msr_safe((msr), &__err);	\
	(*low) = (u32)__val;					\
	(*high) = (u32)(__val >> 32);				\
	__err;							\
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = native_read_msr_safe(msr, &err);
	return err;
}

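/*
 * Usage sketch for the _safe variants above (illustrative; the MSR and the
 * error handling are placeholders). If the access faults, the fixup path
 * returns -EIO instead of taking the machine down:
 *
 *	u64 val;
 *
 *	if (rdmsrl_safe(MSR_IA32_TSC, &val))
 *		; // MSR not present or not readable: handle the error
 */
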
#define rdpmc(counter, low, high)			\
do {							\
	u64 _l = native_read_pmc((counter));		\
	(low) = (u32)_l;				\
	(high) = (u32)(_l >> 32);			\
} while (0)

#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))

#endif	/* !CONFIG_PARAVIRT */

/*
 * 64-bit version of wrmsr_safe():
 */
static inline int wrmsrl_safe(u32 msr, u64 val)
{
	return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
}

#define write_tsc(low, high)  wrmsr(MSR_IA32_TSC, (low), (high))

#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)

struct msr *msrs_alloc(void);
void msrs_free(struct msr *msrs);
int msr_set_bit(u32 msr, u8 bit);
int msr_clear_bit(u32 msr, u8 bit);

#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
#else /* CONFIG_SMP */
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	rdmsr(msr_no, *l, *h);
	return 0;
}
static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	wrmsr(msr_no, l, h);
	return 0;
}
static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	rdmsrl(msr_no, *q);
	return 0;
}
static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	wrmsrl(msr_no, q);
	return 0;
}
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr *msrs)
{
	rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
}
static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr *msrs)
{
	wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
				    u32 *l, u32 *h)
{
	return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	return wrmsr_safe(msr_no, l, h);
}
static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	return rdmsrl_safe(msr_no, q);
}
static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	return wrmsrl_safe(msr_no, q);
}
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return rdmsr_safe_regs(regs);
}
static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return wrmsr_safe_regs(regs);
}
#endif  /* CONFIG_SMP */
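
/*
 * Usage sketch for the *_on_cpu() wrappers (illustrative; the CPU index and
 * MSR are placeholders). The SMP prototypes above are implemented elsewhere
 * and are expected to perform the access on the requested CPU; on UP builds
 * the inline fallbacks simply touch the local CPU:
 *
 *	u64 tsc;
 *
 *	if (rdmsrl_safe_on_cpu(1, MSR_IA32_TSC, &tsc) == 0)
 *		; // tsc now holds CPU 1's IA32_TSC value
 */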
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_MSR_H */