#ifndef _ASM_X86_MSR_H
#define _ASM_X86_MSR_H

#include "msr-index.h"

#ifndef __ASSEMBLY__

#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/cpumask.h>
#include <uapi/asm/msr.h>
struct msr {
	union {
		struct {
			u32 l;
			u32 h;
		};
		u64 q;
	};
};

struct msr_info {
	u32 msr_no;
	struct msr reg;
	struct msr *msrs;
	int err;
};

struct msr_regs_info {
	u32 *regs;
	int err;
};

struct saved_msr {
	bool valid;
	struct msr_info info;
};

struct saved_msrs {
	unsigned int num;
	struct saved_msr *array;
};

static inline unsigned long long native_read_tscp(unsigned int *aux)
{
	unsigned long low, high;
	asm volatile(".byte 0x0f,0x01,0xf9"
		     : "=a" (low), "=d" (high), "=c" (*aux));
	return low | ((u64)high << 32);
}

/*
 * Both i386 and x86_64 return the 64-bit value in edx:eax, but gcc's "A"
 * constraint has different meanings. For i386, "A" means exactly
 * edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead,
 * it means rax *or* rdx.
 */
#ifdef CONFIG_X86_64
/* Using 64-bit values saves one instruction clearing the high half of low */
#define DECLARE_ARGS(val, low, high)	unsigned long low, high
#define EAX_EDX_VAL(val, low, high)	((low) | (high) << 32)
#define EAX_EDX_RET(val, low, high)	"=a" (low), "=d" (high)
#else
#define DECLARE_ARGS(val, low, high)	unsigned long long val
#define EAX_EDX_VAL(val, low, high)	(val)
#define EAX_EDX_RET(val, low, high)	"=A" (val)
#endif
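
/*
 * Illustrative sketch of how the helpers below compose these macros: on
 * x86_64 the instruction leaves the result split across two registers and
 * EAX_EDX_VAL() stitches the halves together, while on i386 the "=A"
 * constraint already captures the whole edx:eax pair:
 *
 *	DECLARE_ARGS(val, low, high);
 *	asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
 *	return EAX_EDX_VAL(val, low, high);
 *	// x86_64: (low) | (high) << 32;  i386: val
 */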

static inline unsigned long long native_read_msr(unsigned int msr)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
	return EAX_EDX_VAL(val, low, high);
}

static inline unsigned long long native_read_msr_safe(unsigned int msr,
						       int *err)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("2: rdmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
		     : "c" (msr), [fault] "i" (-EIO));
	return EAX_EDX_VAL(val, low, high);
}

static inline void native_write_msr(unsigned int msr,
				    unsigned low, unsigned high)
{
	asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
}

/* Can be uninlined because referenced by paravirt */
notrace static inline int native_write_msr_safe(unsigned int msr,
						unsigned low, unsigned high)
{
	int err;
	asm volatile("2: wrmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=a" (err)
		     : "c" (msr), "0" (low), "d" (high),
		       [fault] "i" (-EIO)
		     : "memory");
	return err;
}

extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);

/**
 * rdtsc() - returns the current TSC without ordering constraints
 *
 * rdtsc() returns the result of RDTSC as a 64-bit integer. The
 * only ordering constraint it supplies is the ordering implied by
 * "asm volatile": it will put the RDTSC in the place you expect. The
 * CPU can and will speculatively execute that RDTSC, though, so the
 * results can be non-monotonic if compared on different CPUs.
 */
static __always_inline unsigned long long rdtsc(void)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));

	return EAX_EDX_VAL(val, low, high);
}

static inline unsigned long long native_read_pmc(int counter)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
	return EAX_EDX_VAL(val, low, high);
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#include <linux/errno.h>
/*
 * Access to machine-specific registers (available on 586 and better only).
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection), which allows gcc to optimize better.
 */

#define rdmsr(msr, low, high)					\
do {								\
	u64 __val = native_read_msr((msr));			\
	(void)((low) = (u32)__val);				\
	(void)((high) = (u32)(__val >> 32));			\
} while (0)
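
/*
 * Usage sketch for the non-safe accessors (MSR_IA32_TSC comes from
 * msr-index.h; low/high are plain lvalues, not pointers, per the note
 * above):
 *
 *	u32 lo, hi;
 *
 *	rdmsr(MSR_IA32_TSC, lo, hi);
 *	wrmsr(MSR_IA32_TSC, lo, hi);	// does not catch a #GP from an
 *					// unknown MSR; use wrmsr_safe()
 *					// below when that can happen
 */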

static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
{
	native_write_msr(msr, low, high);
}

#define rdmsrl(msr, val)			\
	((val) = native_read_msr((msr)))

static inline void wrmsrl(unsigned msr, u64 val)
{
	native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
}

/* wrmsr with exception handling */
static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
{
	return native_write_msr_safe(msr, low, high);
}

/* rdmsr with exception handling */
#define rdmsr_safe(msr, low, high)				\
({								\
	int __err;						\
	u64 __val = native_read_msr_safe((msr), &__err);	\
	(*low) = (u32)__val;					\
	(*high) = (u32)(__val >> 32);				\
	__err;							\
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = native_read_msr_safe(msr, &err);
	return err;
}

#define rdpmc(counter, low, high)			\
do {							\
	u64 _l = native_read_pmc((counter));		\
	(low)  = (u32)_l;				\
	(high) = (u32)(_l >> 32);			\
} while (0)

#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))
199 | 209 | ||
200 | #endif /* !CONFIG_PARAVIRT */ |
210 | #endif /* !CONFIG_PARAVIRT */ |
201 | 211 | ||
202 | /* |
212 | /* |
203 | * 64-bit version of wrmsr_safe(): |
213 | * 64-bit version of wrmsr_safe(): |
204 | */ |
214 | */ |
205 | static inline int wrmsrl_safe(u32 msr, u64 val) |
215 | static inline int wrmsrl_safe(u32 msr, u64 val) |
206 | { |
216 | { |
207 | return wrmsr_safe(msr, (u32)val, (u32)(val >> 32)); |
217 | return wrmsr_safe(msr, (u32)val, (u32)(val >> 32)); |
208 | } |
218 | } |
209 | 219 | ||
210 | #define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high)) |
220 | #define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high)) |
211 | 221 | ||
212 | #define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0) |
222 | #define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0) |
213 | 223 | ||
214 | struct msr *msrs_alloc(void); |
224 | struct msr *msrs_alloc(void); |
215 | void msrs_free(struct msr *msrs); |
225 | void msrs_free(struct msr *msrs); |
216 | int msr_set_bit(u32 msr, u8 bit); |
226 | int msr_set_bit(u32 msr, u8 bit); |
217 | int msr_clear_bit(u32 msr, u8 bit); |
227 | int msr_clear_bit(u32 msr, u8 bit); |

#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
#else  /* CONFIG_SMP */
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	rdmsr(msr_no, *l, *h);
	return 0;
}
static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	wrmsr(msr_no, l, h);
	return 0;
}
static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	rdmsrl(msr_no, *q);
	return 0;
}
static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	wrmsrl(msr_no, q);
	return 0;
}
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr *msrs)
{
	rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
}
static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr *msrs)
{
	wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
				    u32 *l, u32 *h)
{
	return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	return wrmsr_safe(msr_no, l, h);
}
static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	return rdmsrl_safe(msr_no, q);
}
static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	return wrmsrl_safe(msr_no, q);
}
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return rdmsr_safe_regs(regs);
}
static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return wrmsr_safe_regs(regs);
}
#endif  /* CONFIG_SMP */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_MSR_H */