#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
#define __percpu_seg		gs
#define __percpu_mov_op		movq
#else
#define __percpu_seg		fs
#define __percpu_mov_op		movl
#endif

#ifdef __ASSEMBLY__

/*
 * PER_CPU finds an address of a per-cpu variable.
 *
 * Args:
 *    var - variable name
 *    reg - 32bit register
 *
 * The resulting address is stored in the "reg" argument.
 *
 * Example:
 *    PER_CPU(cpu_gdt_descr, %ebx)
 */
#ifdef CONFIG_SMP
#define PER_CPU(var, reg)						\
	__percpu_mov_op %__percpu_seg:this_cpu_off, reg;		\
	lea var(reg), reg
#define PER_CPU_VAR(var)	%__percpu_seg:var
#else /* ! SMP */
#define PER_CPU(var, reg)	__percpu_mov_op $var, reg
#define PER_CPU_VAR(var)	var
#endif	/* SMP */
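
/*
 * Illustrative usage (not part of the original file): assembly code
 * typically uses PER_CPU_VAR() as a memory operand, e.g.
 *
 *	movq PER_CPU_VAR(old_rsp), %rdx
 *
 * which assembles to %gs:old_rsp on SMP and to a plain old_rsp
 * reference on UP.
 */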

#ifdef CONFIG_X86_64_SMP
#define INIT_PER_CPU_VAR(var)  init_per_cpu__##var
#else
#define INIT_PER_CPU_VAR(var)  var
#endif

#else /* ...!ASSEMBLY */

#include <linux/kernel.h>
#include <linux/stringify.h>

#ifdef CONFIG_SMP
#define __percpu_prefix		"%%"__stringify(__percpu_seg)":"
#define __my_cpu_offset		this_cpu_read(this_cpu_off)

/*
 * Compared to the generic __my_cpu_offset version, the following
 * saves one instruction and avoids clobbering a temp register.
 */
#define arch_raw_cpu_ptr(ptr)				\
({							\
	unsigned long tcp_ptr__;			\
	asm volatile("add " __percpu_arg(1) ", %0"	\
		     : "=r" (tcp_ptr__)			\
		     : "m" (this_cpu_off), "0" (ptr));	\
	(typeof(*(ptr)) __kernel __force *)tcp_ptr__;	\
})
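/*
 * Illustrative expansion (not part of the original file): with the
 * pointer preloaded into a register, the asm above emits roughly
 *
 *	add %gs:this_cpu_off, %reg
 *
 * so the per-cpu base offset is folded into the pointer with one
 * segment-prefixed add instead of a separate read plus add.
 */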
#else
#define __percpu_prefix		""
#endif

#define __percpu_arg(x)		__percpu_prefix "%" #x

/*
 * Initialized pointers to per-cpu variables needed for the boot
 * processor need to use these macros to get the proper address
 * offset from __per_cpu_load on SMP.
 *
 * There must also be an entry in vmlinux_64.lds.S
 */
#define DECLARE_INIT_PER_CPU(var) \
       extern typeof(var) init_per_cpu_var(var)

#ifdef CONFIG_X86_64_SMP
#define init_per_cpu_var(var)  init_per_cpu__##var
#else
#define init_per_cpu_var(var)  var
#endif

/* For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though). */
extern void __bad_percpu_size(void);

#define percpu_to_op(op, var, val)			\
do {							\
	typedef typeof(var) pto_T__;			\
	if (0) {					\
		pto_T__ pto_tmp__;			\
		pto_tmp__ = (val);			\
		(void)pto_tmp__;			\
	}						\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "qi" ((pto_T__)(val)));		\
		break;					\
	case 2:						\
		asm(op "w %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "ri" ((pto_T__)(val)));		\
		break;					\
	case 4:						\
		asm(op "l %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "ri" ((pto_T__)(val)));		\
		break;					\
	case 8:						\
		asm(op "q %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "re" ((pto_T__)(val)));		\
		break;					\
	default: __bad_percpu_size();			\
	}						\
} while (0)
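
/*
 * Illustrative note (not part of the original file): the dead "if (0)"
 * assignment above exists only so the compiler type-checks that "val"
 * is assignable to the type of "var"; it generates no code.  A write
 * such as this_cpu_write(foo, 1) on a 4-byte foo then becomes roughly
 *
 *	movl $1, %gs:foo
 */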

/*
 * Generate a percpu add-to-memory instruction; when the value added or
 * subtracted is the constant 1, emit inc/dec instead of add.
 */
#define percpu_add_op(var, val)						\
do {									\
	typedef typeof(var) pao_T__;					\
	const int pao_ID__ = (__builtin_constant_p(val) &&		\
			      ((val) == 1 || (val) == -1)) ?		\
				(int)(val) : 0;				\
	if (0) {							\
		pao_T__ pao_tmp__;					\
		pao_tmp__ = (val);					\
		(void)pao_tmp__;					\
	}								\
	switch (sizeof(var)) {						\
	case 1:								\
		if (pao_ID__ == 1)					\
			asm("incb "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decb "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addb %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "qi" ((pao_T__)(val)));			\
		break;							\
	case 2:								\
		if (pao_ID__ == 1)					\
			asm("incw "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decw "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addw %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "ri" ((pao_T__)(val)));			\
		break;							\
	case 4:								\
		if (pao_ID__ == 1)					\
			asm("incl "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decl "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addl %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "ri" ((pao_T__)(val)));			\
		break;							\
	case 8:								\
		if (pao_ID__ == 1)					\
			asm("incq "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decq "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addq %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "re" ((pao_T__)(val)));			\
		break;							\
	default: __bad_percpu_size();					\
	}								\
} while (0)
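
/*
 * Illustrative sketch (not part of the original file): for a
 * hypothetical 8-byte per-cpu counter nr_events, this_cpu_add(nr_events, 1)
 * therefore compiles to a single
 *
 *	incq %gs:nr_events
 *
 * while a non-unit increment uses addq with an immediate or register.
 */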

#define percpu_from_op(op, var)			\
({						\
	typeof(var) pfo_ret__;			\
	switch (sizeof(var)) {			\
	case 1:					\
		asm(op "b "__percpu_arg(1)",%0"	\
		    : "=q" (pfo_ret__)		\
		    : "m" (var));		\
		break;				\
	case 2:					\
		asm(op "w "__percpu_arg(1)",%0"	\
		    : "=r" (pfo_ret__)		\
		    : "m" (var));		\
		break;				\
	case 4:					\
		asm(op "l "__percpu_arg(1)",%0"	\
		    : "=r" (pfo_ret__)		\
		    : "m" (var));		\
		break;				\
	case 8:					\
		asm(op "q "__percpu_arg(1)",%0"	\
		    : "=r" (pfo_ret__)		\
		    : "m" (var));		\
		break;				\
	default: __bad_percpu_size();		\
	}					\
	pfo_ret__;				\
})

#define percpu_stable_op(op, var)		\
({						\
	typeof(var) pfo_ret__;			\
	switch (sizeof(var)) {			\
	case 1:					\
		asm(op "b "__percpu_arg(P1)",%0"\
		    : "=q" (pfo_ret__)		\
		    : "p" (&(var)));		\
		break;				\
	case 2:					\
		asm(op "w "__percpu_arg(P1)",%0"\
		    : "=r" (pfo_ret__)		\
		    : "p" (&(var)));		\
		break;				\
	case 4:					\
		asm(op "l "__percpu_arg(P1)",%0"\
		    : "=r" (pfo_ret__)		\
		    : "p" (&(var)));		\
		break;				\
	case 8:					\
		asm(op "q "__percpu_arg(P1)",%0"\
		    : "=r" (pfo_ret__)		\
		    : "p" (&(var)));		\
		break;				\
	default: __bad_percpu_size();		\
	}					\
	pfo_ret__;				\
})

#define percpu_unary_op(op, var)		\
({						\
	switch (sizeof(var)) {			\
	case 1:					\
		asm(op "b "__percpu_arg(0)	\
		    : "+m" (var));		\
		break;				\
	case 2:					\
		asm(op "w "__percpu_arg(0)	\
		    : "+m" (var));		\
		break;				\
	case 4:					\
		asm(op "l "__percpu_arg(0)	\
		    : "+m" (var));		\
		break;				\
	case 8:					\
		asm(op "q "__percpu_arg(0)	\
		    : "+m" (var));		\
		break;				\
	default: __bad_percpu_size();		\
	}					\
})

/*
 * Add return operation
 */
#define percpu_add_return_op(var, val)					\
({									\
	typeof(var) paro_ret__ = val;					\
	switch (sizeof(var)) {						\
	case 1:								\
		asm("xaddb %0, "__percpu_arg(1)				\
			    : "+q" (paro_ret__), "+m" (var)		\
			    : : "memory");				\
		break;							\
	case 2:								\
		asm("xaddw %0, "__percpu_arg(1)				\
			    : "+r" (paro_ret__), "+m" (var)		\
			    : : "memory");				\
		break;							\
	case 4:								\
		asm("xaddl %0, "__percpu_arg(1)				\
			    : "+r" (paro_ret__), "+m" (var)		\
			    : : "memory");				\
		break;							\
	case 8:								\
		asm("xaddq %0, "__percpu_arg(1)				\
			    : "+re" (paro_ret__), "+m" (var)		\
			    : : "memory");				\
		break;							\
	default: __bad_percpu_size();					\
	}								\
	paro_ret__ += val;						\
	paro_ret__;							\
})
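
/*
 * Illustrative note (not part of the original file): xadd leaves the
 * old value of the memory operand in the register, so the final
 * "paro_ret__ += val" converts fetch-and-add into add-and-return,
 * i.e. this_cpu_add_return(x, 5) yields the value of x after the add.
 */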

/*
 * The xchg operation below is implemented with cmpxchg, which carries
 * no lock prefix here, rather than with the xchg instruction: xchg has
 * an implied lock prefix, which is expensive and keeps the processor
 * from prefetching cachelines.
 */
#define percpu_xchg_op(var, nval)					\
({									\
	typeof(var) pxo_ret__;						\
	typeof(var) pxo_new__ = (nval);					\
	switch (sizeof(var)) {						\
	case 1:								\
		asm("\n\tmov "__percpu_arg(1)",%%al"			\
		    "\n1:\tcmpxchgb %2, "__percpu_arg(1)		\
		    "\n\tjnz 1b"					\
			    : "=&a" (pxo_ret__), "+m" (var)		\
			    : "q" (pxo_new__)				\
			    : "memory");				\
		break;							\
	case 2:								\
		asm("\n\tmov "__percpu_arg(1)",%%ax"			\
		    "\n1:\tcmpxchgw %2, "__percpu_arg(1)		\
		    "\n\tjnz 1b"					\
			    : "=&a" (pxo_ret__), "+m" (var)		\
			    : "r" (pxo_new__)				\
			    : "memory");				\
		break;							\
	case 4:								\
		asm("\n\tmov "__percpu_arg(1)",%%eax"			\
		    "\n1:\tcmpxchgl %2, "__percpu_arg(1)		\
		    "\n\tjnz 1b"					\
			    : "=&a" (pxo_ret__), "+m" (var)		\
			    : "r" (pxo_new__)				\
			    : "memory");				\
		break;							\
	case 8:								\
		asm("\n\tmov "__percpu_arg(1)",%%rax"			\
		    "\n1:\tcmpxchgq %2, "__percpu_arg(1)		\
		    "\n\tjnz 1b"					\
			    : "=&a" (pxo_ret__), "+m" (var)		\
			    : "r" (pxo_new__)				\
			    : "memory");				\
		break;							\
	default: __bad_percpu_size();					\
	}								\
	pxo_ret__;							\
})
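
/*
 * Illustrative note (not part of the original file): the "jnz 1b"
 * retry exists because an interrupt on this CPU may modify the
 * variable between the initial mov and the cmpxchg; on failure,
 * cmpxchg reloads the accumulator with the fresh value, so the loop
 * retries until the exchange succeeds.
 */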

/*
 * cmpxchg has no such implied lock semantics; as a result it is much
 * more efficient for cpu-local operations.
 */
#define percpu_cmpxchg_op(var, oval, nval)				\
({									\
	typeof(var) pco_ret__;						\
	typeof(var) pco_old__ = (oval);					\
	typeof(var) pco_new__ = (nval);					\
	switch (sizeof(var)) {						\
	case 1:								\
		asm("cmpxchgb %2, "__percpu_arg(1)			\
			    : "=a" (pco_ret__), "+m" (var)		\
			    : "q" (pco_new__), "0" (pco_old__)		\
			    : "memory");				\
		break;							\
	case 2:								\
		asm("cmpxchgw %2, "__percpu_arg(1)			\
			    : "=a" (pco_ret__), "+m" (var)		\
			    : "r" (pco_new__), "0" (pco_old__)		\
			    : "memory");				\
		break;							\
	case 4:								\
		asm("cmpxchgl %2, "__percpu_arg(1)			\
			    : "=a" (pco_ret__), "+m" (var)		\
			    : "r" (pco_new__), "0" (pco_old__)		\
			    : "memory");				\
		break;							\
	case 8:								\
		asm("cmpxchgq %2, "__percpu_arg(1)			\
			    : "=a" (pco_ret__), "+m" (var)		\
			    : "r" (pco_new__), "0" (pco_old__)		\
			    : "memory");				\
		break;							\
	default: __bad_percpu_size();					\
	}								\
	pco_ret__;							\
})
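
/*
 * Illustrative usage sketch (not part of the original file; IDLE and
 * BUSY are hypothetical states):
 *
 *	old = this_cpu_cmpxchg(state, IDLE, BUSY);
 *	if (old == IDLE)
 *		... we now own this CPU's slot ...
 *
 * The returned value is the previous contents, whether or not the
 * exchange happened.
 */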

/*
 * this_cpu_read() makes gcc load the percpu variable every time it is
 * accessed while this_cpu_read_stable() allows the value to be cached.
 * this_cpu_read_stable() is more efficient and can be used if its value
 * is guaranteed to be valid across cpus.  The current users include
 * get_current() and get_thread_info(), both of which are actually
 * per-thread variables implemented as per-cpu variables and thus
 * stable for the duration of the respective task.
 */
#define this_cpu_read_stable(var)	percpu_stable_op("mov", var)
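
/*
 * Illustrative sketch of such a user (it lives elsewhere in the tree,
 * shown here only as an example):
 *
 *	static __always_inline struct task_struct *get_current(void)
 *	{
 *		return this_cpu_read_stable(current_task);
 *	}
 */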

#define raw_cpu_read_1(pcp)		percpu_from_op("mov", pcp)
#define raw_cpu_read_2(pcp)		percpu_from_op("mov", pcp)
#define raw_cpu_read_4(pcp)		percpu_from_op("mov", pcp)

#define raw_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
#define raw_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
#define raw_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
#define raw_cpu_add_1(pcp, val)		percpu_add_op((pcp), val)
#define raw_cpu_add_2(pcp, val)		percpu_add_op((pcp), val)
#define raw_cpu_add_4(pcp, val)		percpu_add_op((pcp), val)
#define raw_cpu_and_1(pcp, val)		percpu_to_op("and", (pcp), val)
#define raw_cpu_and_2(pcp, val)		percpu_to_op("and", (pcp), val)
#define raw_cpu_and_4(pcp, val)		percpu_to_op("and", (pcp), val)
#define raw_cpu_or_1(pcp, val)		percpu_to_op("or", (pcp), val)
#define raw_cpu_or_2(pcp, val)		percpu_to_op("or", (pcp), val)
#define raw_cpu_or_4(pcp, val)		percpu_to_op("or", (pcp), val)
#define raw_cpu_xchg_1(pcp, val)	percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_2(pcp, val)	percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_4(pcp, val)	percpu_xchg_op(pcp, val)

#define this_cpu_read_1(pcp)		percpu_from_op("mov", pcp)
#define this_cpu_read_2(pcp)		percpu_from_op("mov", pcp)
#define this_cpu_read_4(pcp)		percpu_from_op("mov", pcp)
#define this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_or_1(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_or_2(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_or_4(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_xchg_1(pcp, nval)	percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)

#define raw_cpu_add_return_1(pcp, val)		percpu_add_return_op(pcp, val)
#define raw_cpu_add_return_2(pcp, val)		percpu_add_return_op(pcp, val)
#define raw_cpu_add_return_4(pcp, val)		percpu_add_return_op(pcp, val)
#define raw_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define raw_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define raw_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)

#define this_cpu_add_return_1(pcp, val)		percpu_add_return_op(pcp, val)
#define this_cpu_add_return_2(pcp, val)		percpu_add_return_op(pcp, val)
#define this_cpu_add_return_4(pcp, val)		percpu_add_return_op(pcp, val)
#define this_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)

#ifdef CONFIG_X86_CMPXCHG64
#define percpu_cmpxchg8b_double(pcp1, pcp2, o1, o2, n1, n2)		\
({									\
	bool __ret;							\
	typeof(pcp1) __o1 = (o1), __n1 = (n1);				\
	typeof(pcp2) __o2 = (o2), __n2 = (n2);				\
	asm volatile("cmpxchg8b "__percpu_arg(1)"\n\tsetz %0\n\t"	\
		    : "=a" (__ret), "+m" (pcp1), "+m" (pcp2), "+d" (__o2) \
		    : "b" (__n1), "c" (__n2), "a" (__o1));		\
	__ret;								\
})

#define raw_cpu_cmpxchg_double_4	percpu_cmpxchg8b_double
#define this_cpu_cmpxchg_double_4	percpu_cmpxchg8b_double
#endif /* CONFIG_X86_CMPXCHG64 */
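
/*
 * Illustrative note (not part of the original file): cmpxchg8b
 * compares the 8 bytes at the memory operand against %edx:%eax and, on
 * match, stores %ecx:%ebx; hence the "a"/"d" old pair and "b"/"c" new
 * pair above.  The two per-cpu words must be adjacent in memory for
 * the double-width compare-and-exchange to be sound.
 */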

/*
 * Per-cpu atomic 64-bit operations are only available under a 64-bit
 * kernel; 32-bit must fall back to the generic operations.
 */
#ifdef CONFIG_X86_64
#define raw_cpu_read_8(pcp)			percpu_from_op("mov", pcp)
#define raw_cpu_write_8(pcp, val)		percpu_to_op("mov", (pcp), val)
#define raw_cpu_add_8(pcp, val)			percpu_add_op((pcp), val)
#define raw_cpu_and_8(pcp, val)			percpu_to_op("and", (pcp), val)
#define raw_cpu_or_8(pcp, val)			percpu_to_op("or", (pcp), val)
#define raw_cpu_add_return_8(pcp, val)		percpu_add_return_op(pcp, val)
#define raw_cpu_xchg_8(pcp, nval)		percpu_xchg_op(pcp, nval)
#define raw_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)

#define this_cpu_read_8(pcp)			percpu_from_op("mov", pcp)
#define this_cpu_write_8(pcp, val)		percpu_to_op("mov", (pcp), val)
#define this_cpu_add_8(pcp, val)		percpu_add_op((pcp), val)
#define this_cpu_and_8(pcp, val)		percpu_to_op("and", (pcp), val)
#define this_cpu_or_8(pcp, val)			percpu_to_op("or", (pcp), val)
#define this_cpu_add_return_8(pcp, val)		percpu_add_return_op(pcp, val)
#define this_cpu_xchg_8(pcp, nval)		percpu_xchg_op(pcp, nval)
#define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)

/*
 * Pretty complex macro to generate the cmpxchg16b instruction.  The
 * instruction is not supported on early AMD64 processors, so we must
 * be able to emulate it in software.  The address used in the
 * cmpxchg16b instruction must be aligned to a 16-byte boundary.
 */
#define percpu_cmpxchg16b_double(pcp1, pcp2, o1, o2, n1, n2)		\
({									\
	bool __ret;							\
	typeof(pcp1) __o1 = (o1), __n1 = (n1);				\
	typeof(pcp2) __o2 = (o2), __n2 = (n2);				\
	alternative_io("leaq %P1,%%rsi\n\tcall this_cpu_cmpxchg16b_emu\n\t", \
		       "cmpxchg16b " __percpu_arg(1) "\n\tsetz %0\n\t",	\
		       X86_FEATURE_CX16,				\
		       ASM_OUTPUT2("=a" (__ret), "+m" (pcp1),		\
				   "+m" (pcp2), "+d" (__o2)),		\
		       "b" (__n1), "c" (__n2), "a" (__o1) : "rsi");	\
	__ret;								\
})

#define raw_cpu_cmpxchg_double_8	percpu_cmpxchg16b_double
#define this_cpu_cmpxchg_double_8	percpu_cmpxchg16b_double

#endif
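
/*
 * Illustrative note (not part of the original file): alternative_io()
 * patches this site at boot.  CPUs with X86_FEATURE_CX16 run the
 * inline cmpxchg16b; older ones keep the call to the
 * this_cpu_cmpxchg16b_emu software fallback, with %rsi carrying the
 * per-cpu address (hence the "rsi" clobber).
 */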

/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define x86_test_and_clear_bit_percpu(bit, var)			\
({									\
	int old__;							\
	asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0"		\
		     : "=r" (old__), "+m" (var)				\
		     : "dIr" (bit));					\
	old__;								\
})
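
/*
 * Illustrative note (not part of the original file): btr copies the
 * old bit into the carry flag; "sbbl %0,%0" then materializes CF as 0
 * or -1 in the result, so any non-zero return means the bit was set.
 */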

static __always_inline int x86_this_cpu_constant_test_bit(unsigned int nr,
                        const unsigned long __percpu *addr)
{
	unsigned long __percpu *a = (unsigned long *)addr + nr / BITS_PER_LONG;

#ifdef CONFIG_X86_64
	return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_8(*a)) != 0;
#else
	return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_4(*a)) != 0;
#endif
}

static inline int x86_this_cpu_variable_test_bit(int nr,
                        const unsigned long __percpu *addr)
{
	int oldbit;

	asm volatile("bt "__percpu_arg(2)",%1\n\t"
			"sbb %0,%0"
			: "=r" (oldbit)
			: "m" (*(unsigned long *)addr), "Ir" (nr));

	return oldbit;
}

#define x86_this_cpu_test_bit(nr, addr)			\
	(__builtin_constant_p((nr))			\
	 ? x86_this_cpu_constant_test_bit((nr), (addr))	\
	 : x86_this_cpu_variable_test_bit((nr), (addr)))
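
/*
 * Illustrative usage sketch (not part of the original file; my_flags
 * is a hypothetical per-cpu variable):
 *
 *	DECLARE_PER_CPU(unsigned long, my_flags);
 *
 *	if (x86_this_cpu_test_bit(3, &my_flags))
 *		...;
 *
 * A constant bit number folds to a mask test on a single per-cpu load;
 * a variable one goes through the bt instruction.
 */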


#include <asm-generic/percpu.h>

/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
 * variables that are initialized and accessed before there are per_cpu
 * areas allocated.
 */

#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
	DEFINE_PER_CPU(_type, _name) = _initvalue;			\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue;		\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name);				\
	extern __typeof__(_type) *_name##_early_ptr;		\
	extern __typeof__(_type)  _name##_early_map[]

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)		\
	DECLARE_PER_CPU_READ_MOSTLY(_type, _name);		\
	extern __typeof__(_type) *_name##_early_ptr;		\
	extern __typeof__(_type)  _name##_early_map[]

#define	early_per_cpu_ptr(_name) (_name##_early_ptr)
#define	early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define	early_per_cpu(_name, _cpu)				\
	*(early_per_cpu_ptr(_name) ?				\
		&early_per_cpu_ptr(_name)[_cpu] :		\
		&per_cpu(_name, _cpu))
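
/*
 * Illustrative usage sketch (not part of the original file;
 * cpu_to_node_map is a hypothetical early per-cpu variable):
 *
 *	DEFINE_EARLY_PER_CPU(u16, cpu_to_node_map, 0);
 *
 *	node = early_per_cpu(cpu_to_node_map, cpu);
 *
 * Before the per-cpu areas exist, accesses go through the static
 * _early_map[] array; once they are set up and _early_ptr is cleared,
 * the same expression reads the real per_cpu() variable.
 */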

#else	/* !CONFIG_SMP */
#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)		\
	DEFINE_PER_CPU(_type, _name) = _initvalue

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name)

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)		\
	DECLARE_PER_CPU_READ_MOSTLY(_type, _name)

#define	early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define	early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */

#endif	/* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */