#ifndef _ASM_X86_MSR_H
#define _ASM_X86_MSR_H

#include "msr-index.h"

#ifndef __ASSEMBLY__

#include 
#include 
#include 
#include 

struct msr {
	union {
		struct {
			u32 l;
			u32 h;
		};
		u64 q;
	};
};
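
/*
 * The anonymous union gives two views of the same MSR value: the 32-bit
 * halves l/h used by rdmsr()/wrmsr(), and the combined 64-bit value q,
 * i.e. q == ((u64)h << 32) | l.
 */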

struct msr_info {
	u32 msr_no;
	struct msr reg;
	struct msr *msrs;
	int err;
};

struct msr_regs_info {
	u32 *regs;
	int err;
};

struct saved_msr {
	bool valid;
	struct msr_info info;
};

struct saved_msrs {
	unsigned int num;
	struct saved_msr *array;
};

static inline unsigned long long native_read_tscp(unsigned int *aux)
{
	unsigned long low, high;
	asm volatile(".byte 0x0f,0x01,0xf9"
		     : "=a" (low), "=d" (high), "=c" (*aux));
	return low | ((u64)high << 32);
}
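
/*
 * The ".byte 0x0f,0x01,0xf9" sequence above is the RDTSCP opcode emitted by
 * hand (presumably to support older assemblers). RDTSCP returns the TSC in
 * EDX:EAX and the current value of the IA32_TSC_AUX MSR in ECX, which is
 * what ends up in *aux.
 */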

/*
 * Both i386 and x86_64 return the 64-bit value in edx:eax, but gcc's "A"
 * constraint has different meanings. For i386, "A" means exactly
 * edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead,
 * it means rax *or* rdx.
 */
#ifdef CONFIG_X86_64
/* Using 64-bit values saves one instruction clearing the high half of low */
#define DECLARE_ARGS(val, low, high)	unsigned long low, high
#define EAX_EDX_VAL(val, low, high)	((low) | (high) << 32)
#define EAX_EDX_RET(val, low, high)	"=a" (low), "=d" (high)
#else
#define DECLARE_ARGS(val, low, high)	unsigned long long val
#define EAX_EDX_VAL(val, low, high)	(val)
#define EAX_EDX_RET(val, low, high)	"=A" (val)
#endif
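
/*
 * For reference, with these helpers native_read_msr() below expands roughly
 * as follows (sketch only):
 *
 * on CONFIG_X86_64:
 *	unsigned long low, high;
 *	asm volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
 *	return ((low) | (high) << 32);
 *
 * on 32-bit:
 *	unsigned long long val;
 *	asm volatile("rdmsr" : "=A" (val) : "c" (msr));
 *	return (val);
 */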

static inline unsigned long long native_read_msr(unsigned int msr)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
	return EAX_EDX_VAL(val, low, high);
}

static inline unsigned long long native_read_msr_safe(unsigned int msr,
						      int *err)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("2: rdmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3:  mov %[fault],%[err] ; jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
		     : "c" (msr), [fault] "i" (-EIO));
	return EAX_EDX_VAL(val, low, high);
}
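
/*
 * In the _safe variants, the exception-table entry built by
 * _ASM_EXTABLE(2b, 3b) maps a fault at the rdmsr/wrmsr (label 2) to the
 * fixup code at label 3, which stores -EIO in the error output and resumes
 * at label 1, so accessing a non-existent MSR reports an error instead of
 * leaving the #GP fault unhandled.
 */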

static inline void native_write_msr(unsigned int msr,
				    unsigned low, unsigned high)
{
	asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
}

/* Can be uninlined because referenced by paravirt */
notrace static inline int native_write_msr_safe(unsigned int msr,
					unsigned low, unsigned high)
{
	int err;
	asm volatile("2: wrmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3:  mov %[fault],%[err] ; jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=a" (err)
		     : "c" (msr), "0" (low), "d" (high),
		       [fault] "i" (-EIO)
		     : "memory");
	return err;
}

extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);

/**
 * rdtsc() - returns the current TSC without ordering constraints
 *
 * rdtsc() returns the result of RDTSC as a 64-bit integer.  The
 * only ordering constraint it supplies is the ordering implied by
 * "asm volatile": it will put the RDTSC in the place you expect.  The
 * CPU can and will speculatively execute that RDTSC, though, so the
 * results can be non-monotonic if compared on different CPUs.
 */
static __always_inline unsigned long long rdtsc(void)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));

	return EAX_EDX_VAL(val, low, high);
}
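
/*
 * Illustrative sketch of using rdtsc() for a rough cycle count (do_work()
 * is a placeholder for the code being timed). Because rdtsc() is not
 * serializing, neighbouring instructions may be reordered around it:
 *
 *	u64 t0 = rdtsc();
 *	do_work();
 *	u64 cycles = rdtsc() - t0;
 */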

static inline unsigned long long native_read_pmc(int counter)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
	return EAX_EDX_VAL(val, low, high);
}

#ifdef CONFIG_PARAVIRT
#include 
#else
#include 
/*
 * Access to machine-specific registers (available on 586 and better only)
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection); this allows gcc to optimize better
 */

#define rdmsr(msr, low, high)					\
do {								\
	u64 __val = native_read_msr((msr));			\
	(void)((low) = (u32)__val);				\
	(void)((high) = (u32)(__val >> 32));			\
} while (0)

static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
{
	native_write_msr(msr, low, high);
}
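
/*
 * Note that rdmsr() writes straight into its low/high arguments (they are
 * lvalues, not pointers). Illustrative sketch, using MSR_IA32_TSC from
 * msr-index.h:
 *
 *	u32 lo, hi;
 *	rdmsr(MSR_IA32_TSC, lo, hi);	(no '&' needed)
 *	wrmsr(MSR_IA32_TSC, lo, hi);
 */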

#define rdmsrl(msr, val)			\
	((val) = native_read_msr((msr)))

static inline void wrmsrl(unsigned msr, u64 val)
{
	native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
}

/* wrmsr with exception handling */
static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
{
	return native_write_msr_safe(msr, low, high);
}

/* rdmsr with exception handling */
#define rdmsr_safe(msr, low, high)				\
({								\
	int __err;						\
	u64 __val = native_read_msr_safe((msr), &__err);	\
	(*low) = (u32)__val;					\
	(*high) = (u32)(__val >> 32);				\
	__err;							\
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = native_read_msr_safe(msr, &err);
	return err;
}
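
/*
 * Unlike rdmsr(), rdmsr_safe() takes pointers to low/high and returns an
 * error code instead of faulting; rdmsrl_safe() does the same for the full
 * 64-bit value. Illustrative sketch of probing an MSR (MSR_IA32_TSC is just
 * an example):
 *
 *	u64 val;
 *	if (rdmsrl_safe(MSR_IA32_TSC, &val))
 *		val = 0;	(read faulted, fall back to a default)
 */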

#define rdpmc(counter, low, high)			\
do {							\
	u64 _l = native_read_pmc((counter));		\
	(low)  = (u32)_l;				\
	(high) = (u32)(_l >> 32);			\
} while (0)

#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))

#endif	/* !CONFIG_PARAVIRT */

/*
 * 64-bit version of wrmsr_safe():
 */
static inline int wrmsrl_safe(u32 msr, u64 val)
{
	return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
}

#define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high))

#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)
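
/*
 * write_rdtscp_aux() programs MSR_TSC_AUX, i.e. the value that RDTSCP (see
 * native_read_tscp() above) hands back in *aux; Linux conventionally keeps
 * an encoding of the current CPU number there.
 */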

struct msr *msrs_alloc(void);
void msrs_free(struct msr *msrs);
int msr_set_bit(u32 msr, u8 bit);
int msr_clear_bit(u32 msr, u8 bit);

#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
#else  /*  CONFIG_SMP  */
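/*
 * Uniprocessor fallbacks: without CONFIG_SMP the cpu argument is ignored and
 * each *_on_cpu() helper simply operates on the current (only) CPU.
 */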
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	rdmsr(msr_no, *l, *h);
	return 0;
}
static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	wrmsr(msr_no, l, h);
	return 0;
}
static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	rdmsrl(msr_no, *q);
	return 0;
}
static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	wrmsrl(msr_no, q);
	return 0;
}
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				struct msr *msrs)
{
	rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
}
static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				struct msr *msrs)
{
	wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
				    u32 *l, u32 *h)
{
	return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	return wrmsr_safe(msr_no, l, h);
}
static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	return rdmsrl_safe(msr_no, q);
}
static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	return wrmsrl_safe(msr_no, q);
}
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return rdmsr_safe_regs(regs);
}
static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return wrmsr_safe_regs(regs);
}
#endif  /* CONFIG_SMP */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_MSR_H */