Subversion Repositories Kolibri OS

Rev 6936 → Rev 7143
Line 40... Line 40...
 struct saved_msrs {
 	unsigned int num;
 	struct saved_msr *array;
 };
Line 44... Line -...
-
-static inline unsigned long long native_read_tscp(unsigned int *aux)
-{
-	unsigned long low, high;
-	asm volatile(".byte 0x0f,0x01,0xf9"
-		     : "=a" (low), "=d" (high), "=c" (*aux));
-	return low | ((u64)high << 32);
-}
 
 /*
  * both i386 and x86_64 returns 64-bit value in edx:eax, but gcc's "A"
  * constraint has different meanings. For i386, "A" means exactly
  * edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead,
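The deleted native_read_tscp() encoded RDTSCP as raw opcode bytes (.byte 0x0f,0x01,0xf9) because assemblers of the era lacked the mnemonic; this header is Linux's arch/x86 msr.h as carried in the KolibriOS tree, and the helper was dropped upstream as unused. A minimal sketch of the same read with the mnemonic spelled out, using a hypothetical free-standing helper name rather than anything in this header:

#include <stdint.h>

/* Sketch only: RDTSCP returns the TSC in EDX:EAX and the contents of
 * IA32_TSC_AUX (typically a CPU/node id) in ECX. */
static inline uint64_t read_tscp(uint32_t *aux)
{
	uint32_t low, high;

	asm volatile("rdtscp" : "=a" (low), "=d" (high), "=c" (*aux));
	return low | ((uint64_t)high << 32);
}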
Line 65... Line 57...
 #define DECLARE_ARGS(val, low, high)	unsigned long long val
 #define EAX_EDX_VAL(val, low, high)	(val)
 #define EAX_EDX_RET(val, low, high)	"=A" (val)
 #endif
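The "=A" variant shown here is the 32-bit branch the comment describes. For reference, the x86_64 branch of the same #ifdef in upstream Linux reads the two halves into separate registers and joins them, roughly:

#ifdef CONFIG_X86_64
/* On x86_64, "=A" would not mean edx:eax, so take the halves separately. */
#define DECLARE_ARGS(val, low, high)	unsigned long low, high
#define EAX_EDX_VAL(val, low, high)	((low) | ((u64)(high) << 32))
#define EAX_EDX_RET(val, low, high)	"=a" (low), "=d" (high)
#endif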
Line -... Line 61...
+
+#ifdef CONFIG_TRACEPOINTS
+/*
+ * Be very careful with includes. This header is prone to include loops.
+ */
+#include <asm/atomic.h>
+#include <linux/tracepoint-defs.h>
+
+extern struct tracepoint __tracepoint_read_msr;
+extern struct tracepoint __tracepoint_write_msr;
+extern struct tracepoint __tracepoint_rdpmc;
+#define msr_tracepoint_active(t) static_key_false(&(t).key)
+extern void do_trace_write_msr(unsigned msr, u64 val, int failed);
+extern void do_trace_read_msr(unsigned msr, u64 val, int failed);
+extern void do_trace_rdpmc(unsigned msr, u64 val, int failed);
+#else
+#define msr_tracepoint_active(t) false
+static inline void do_trace_write_msr(unsigned msr, u64 val, int failed) {}
+static inline void do_trace_read_msr(unsigned msr, u64 val, int failed) {}
+static inline void do_trace_rdpmc(unsigned msr, u64 val, int failed) {}
+#endif
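The msr_tracepoint_active()/do_trace_*() pair keeps every MSR access site cheap: static_key_false() compiles to a patchable no-op branch when tracing is off, and the #else stubs make the same call sites vanish entirely without CONFIG_TRACEPOINTS. A simplified sketch of how a call site behaves, with a plain C flag standing in for the kernel's static-key machinery (the names here are illustrative, not kernel API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's static key: a plain flag here; in the
 * kernel, static_key_false() is a patched jump, not a memory load. */
static bool read_msr_key;
#define msr_tracepoint_active(key) (key)

static void do_trace_read_msr(unsigned msr, uint64_t val, int failed)
{
	printf("read_msr: %#x -> %#llx (failed=%d)\n",
	       msr, (unsigned long long)val, failed);
}

static uint64_t traced_read(unsigned msr, uint64_t fake_val)
{
	/* Same shape as the patched native_read_msr() in this diff. */
	if (msr_tracepoint_active(read_msr_key))
		do_trace_read_msr(msr, fake_val, 0);
	return fake_val;
}

int main(void)
{
	traced_read(0x1b, 0xfee00900);	/* key off: no trace output */
	read_msr_key = true;
	traced_read(0x1b, 0xfee00900);	/* key on: trace fires */
	return 0;
}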
 
 static inline unsigned long long native_read_msr(unsigned int msr)
 {
 	DECLARE_ARGS(val, low, high);
 
 	asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
+	if (msr_tracepoint_active(__tracepoint_read_msr))
+		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), 0);
 	return EAX_EDX_VAL(val, low, high);
 }
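Kernel code normally reaches native_read_msr() through wrappers that present the result either split into halves or whole. Simplified from the upstream non-paravirt definitions (exact forms vary by configuration), they look like:

/* Split form: callers get the two 32-bit halves separately. */
#define rdmsr(msr, low, high)					\
do {								\
	u64 __val = native_read_msr(msr);			\
	(low)  = (u32)__val;					\
	(high) = (u32)(__val >> 32);				\
} while (0)

/* Joined form: callers get the whole 64-bit value. */
#define rdmsrl(msr, val)	((val) = native_read_msr(msr))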
Line 86... Line 101...
 		     "3:  mov %[fault],%[err] ; jmp 1b\n\t"
 		     ".previous\n\t"
 		     _ASM_EXTABLE(2b, 3b)
 		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
 		     : "c" (msr), [fault] "i" (-EIO));
+	if (msr_tracepoint_active(__tracepoint_read_msr))
+		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
 	return EAX_EDX_VAL(val, low, high);
 }
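The _ASM_EXTABLE(2b, 3b) entry is what makes this the "safe" variant: an RDMSR that faults on an unimplemented MSR is redirected to label 3, which stores -EIO in *err instead of oopsing. A typical consumer, essentially upstream's rdmsrl_safe():

static inline int rdmsrl_safe(unsigned msr, u64 *p)
{
	int err;

	*p = native_read_msr_safe(msr, &err);
	return err;	/* 0 on success, -EIO if the read faulted */
}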
 
 static inline void native_write_msr(unsigned int msr,
 				    unsigned low, unsigned high)
 {
 	asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
+	if (msr_tracepoint_active(__tracepoint_write_msr))
+		do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
 }
 
 /* Can be uninlined because referenced by paravirt */
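WRMSR consumes the value pre-split into EDX:EAX, so 64-bit callers go through a splitting wrapper; upstream's wrmsrl() is essentially:

static inline void wrmsrl(unsigned msr, u64 val)
{
	native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
}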
Line 110... Line 129...
 		     _ASM_EXTABLE(2b, 3b)
 		     : [err] "=a" (err)
 		     : "c" (msr), "0" (low), "d" (high),
 		       [fault] "i" (-EIO)
 		     : "memory");
+	if (msr_tracepoint_active(__tracepoint_write_msr))
+		do_trace_write_msr(msr, ((u64)high << 32 | low), err);
 	return err;
 }
 
 extern int rdmsr_safe_regs(u32 regs[8]);
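The safe write pairs with the safe read above; a 64-bit convenience wrapper over it, in the same spirit as upstream's wrmsrl_safe() (a sketch: upstream routes through wrmsr_safe(), which resolves to this function when paravirt is off):

static inline int wrmsrl_safe(unsigned msr, u64 val)
{
	return native_write_msr_safe(msr, (u32)val, (u32)(val >> 32));
}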
Line 134... Line 155...
 	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));
 
 	return EAX_EDX_VAL(val, low, high);
 }
+
+/**
+ * rdtsc_ordered() - read the current TSC in program order
+ *
+ * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
+ * It is ordered like a load to a global in-memory counter.  It should
+ * be impossible to observe non-monotonic rdtsc_unordered() behavior
+ * across multiple CPUs as long as the TSC is synced.
+ */
+static __always_inline unsigned long long rdtsc_ordered(void)
+{
+	/*
+	 * The RDTSC instruction is not ordered relative to memory
+	 * access.  The Intel SDM and the AMD APM are both vague on this
+	 * point, but empirically an RDTSC instruction can be
+	 * speculatively executed before prior loads.  An RDTSC
+	 * immediately after an appropriate barrier appears to be
+	 * ordered as a normal load, that is, it provides the same
+	 * ordering guarantees as reading from a global memory location
+	 * that some other imaginary CPU is updating continuously with a
+	 * time stamp.
+	 */
+	alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
+			  "lfence", X86_FEATURE_LFENCE_RDTSC);
+	return rdtsc();
+}
+
+/* Deprecated, keep it for a cycle for easier merging: */
+#define rdtscll(now)	do { (now) = rdtsc_ordered(); } while (0)
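alternative_2() patches in MFENCE or LFENCE at boot depending on CPU capability (historically AMD wanted MFENCE, Intel LFENCE), so the barrier costs nothing on CPUs that need neither. A userspace approximation of the same fence-then-read idea, using compiler builtins rather than the kernel's alternatives machinery:

#include <stdint.h>
#include <x86intrin.h>

/* Userspace stand-in for rdtsc_ordered(): LFENCE keeps the RDTSC from
 * being speculated ahead of earlier loads, as the comment above describes. */
static inline uint64_t rdtsc_ordered_user(void)
{
	_mm_lfence();
	return __rdtsc();
}

Timing a section then reads naturally: t0 = rdtsc_ordered_user(); do the work; cycles = rdtsc_ordered_user() - t0.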
 
 static inline unsigned long long native_read_pmc(int counter)
 {
 	DECLARE_ARGS(val, low, high);
 
 	asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
+	if (msr_tracepoint_active(__tracepoint_rdpmc))
+		do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0);
 	return EAX_EDX_VAL(val, low, high);