/*
 * linux/percpu-defs.h - basic definitions for percpu areas
 *
 * DO NOT INCLUDE DIRECTLY OUTSIDE PERCPU IMPLEMENTATION PROPER.
 *
 * This file is separate from linux/percpu.h to avoid cyclic inclusion
 * dependency from arch header files.  Only to be included from
 * asm/percpu.h.
 *
 * This file includes macros necessary to declare percpu sections and
 * variables, and definitions of percpu accessors and operations.  It
 * should provide enough percpu features to arch header files even when
 * they can only include asm/percpu.h to avoid cyclic inclusion dependency.
 */

#ifndef _LINUX_PERCPU_DEFS_H
#define _LINUX_PERCPU_DEFS_H

#ifdef CONFIG_SMP

#ifdef MODULE
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_ALIGNED_SECTION ""
#else
#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
#endif
#define PER_CPU_FIRST_SECTION "..first"

#else

#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
#define PER_CPU_FIRST_SECTION ""

#endif

/*
 * Base implementations of per-CPU variable declarations and definitions, where
 * the section in which the variable is to be placed is provided by the
 * 'sec' argument.  This may be used to affect the parameters governing the
 * variable's storage.
 *
 * NOTE!  The sections for the DECLARE and for the DEFINE must match, lest
 * linkage errors occur due to the compiler generating the wrong code to
 * access that section.
 */
#define __PCPU_ATTRS(sec)						\
	__percpu __attribute__((section(PER_CPU_BASE_SECTION sec)))	\
	PER_CPU_ATTRIBUTES

#define __PCPU_DUMMY_ATTRS						\
	__attribute__((section(".discard"), unused))

/*
 * s390 and alpha modules require percpu variables to be defined as
 * weak to force the compiler to generate GOT based external
 * references for them.  This is necessary because percpu sections
 * will be located outside of the usually addressable area.
 *
 * This definition places the following two extra restrictions on
 * defining percpu variables:
 *
 * 1. The symbol must be globally unique, even static ones.
 * 2. Static percpu variables cannot be defined inside a function.
 *
 * Archs which need weak percpu definitions should define
 * ARCH_NEEDS_WEAK_PER_CPU in asm/percpu.h when necessary.
 *
 * To ensure that the generic code observes the above two
 * restrictions, the weak definition is used for all cases if
 * CONFIG_DEBUG_FORCE_WEAK_PER_CPU is set.
 */
#if defined(ARCH_NEEDS_WEAK_PER_CPU) || defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU)
/*
 * __pcpu_scope_* dummy variable is used to enforce scope.  It
 * receives the static modifier when it's used in front of
 * DEFINE_PER_CPU() and will trigger build failure if
 * DECLARE_PER_CPU() is used for the same variable.
 *
 * __pcpu_unique_* dummy variable is used to enforce symbol uniqueness
 * so that hidden weak symbol collisions, which would cause unrelated
 * variables to share the same address, can be detected during build.
 */
#define DECLARE_PER_CPU_SECTION(type, name, sec)			\
	extern __PCPU_DUMMY_ATTRS char __pcpu_scope_##name;		\
	extern __PCPU_ATTRS(sec) __typeof__(type) name

#define DEFINE_PER_CPU_SECTION(type, name, sec)				\
	__PCPU_DUMMY_ATTRS char __pcpu_scope_##name;			\
	extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name;		\
	__PCPU_DUMMY_ATTRS char __pcpu_unique_##name;			\
	extern __PCPU_ATTRS(sec) __typeof__(type) name;			\
	__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak			\
	__typeof__(type) name
#else
/*
 * Normal declaration and definition macros.
 */
#define DECLARE_PER_CPU_SECTION(type, name, sec)			\
	extern __PCPU_ATTRS(sec) __typeof__(type) name

#define DEFINE_PER_CPU_SECTION(type, name, sec)				\
	__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES			\
	__typeof__(type) name
#endif

/*
 * Variant on the per-CPU variable declaration/definition theme used for
 * ordinary per-CPU variables.
 */
#define DECLARE_PER_CPU(type, name)					\
	DECLARE_PER_CPU_SECTION(type, name, "")

#define DEFINE_PER_CPU(type, name)					\
	DEFINE_PER_CPU_SECTION(type, name, "")

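/*
 * Usage sketch (illustrative only, not part of the original header;
 * "nr_widgets" is a hypothetical variable).  A definition in a .c file:
 *
 *	DEFINE_PER_CPU(unsigned long, nr_widgets);
 *
 * with the matching declaration in a header seen by other files:
 *
 *	DECLARE_PER_CPU(unsigned long, nr_widgets);
 *
 * Per the NOTE above, both must name the same section - here the default
 * one - or the compiler generates the wrong access code.
 */
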
/*
 * Declaration/definition used for per-CPU variables that must come first in
 * the set of variables.
 */
#define DECLARE_PER_CPU_FIRST(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

#define DEFINE_PER_CPU_FIRST(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

/*
 * Declaration/definition used for per-CPU variables that must be cacheline
 * aligned under SMP conditions so that, whilst a particular instance of the
 * data corresponds to a particular CPU, inefficiencies due to direct access by
 * other CPUs are reduced by preventing the data from unnecessarily spanning
 * cachelines.
 *
 * An example of this would be statistical data, where each CPU's set of data
 * is updated by that CPU alone, but the data from across all CPUs is collated
 * by a CPU processing a read from a proc file.
 */
#define DECLARE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

#define DECLARE_PER_CPU_ALIGNED(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)	\
	____cacheline_aligned

#define DEFINE_PER_CPU_ALIGNED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)	\
	____cacheline_aligned

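/*
 * Illustrative sketch of the statistics pattern described above (names
 * hypothetical; for_each_possible_cpu() comes from linux/cpumask.h and
 * per_cpu() is defined later in this file):
 *
 *	DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, widget_events);
 *
 *	static unsigned long widget_events_sum(void)
 *	{
 *		unsigned long sum = 0;
 *		int cpu;
 *
 *		for_each_possible_cpu(cpu)
 *			sum += per_cpu(widget_events, cpu);
 *		return sum;
 *	}
 */
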
/*
 * Declaration/definition used for per-CPU variables that must be page aligned.
 */
#define DECLARE_PER_CPU_PAGE_ALIGNED(type, name)			\
	DECLARE_PER_CPU_SECTION(type, name, "..page_aligned")		\
	__aligned(PAGE_SIZE)

#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, "..page_aligned")		\
	__aligned(PAGE_SIZE)

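/*
 * Illustrative sketch (hypothetical names): page alignment suits per-CPU
 * objects that are used a whole page at a time, e.g. a per-CPU buffer:
 *
 *	struct my_page { char buf[PAGE_SIZE]; };
 *	DEFINE_PER_CPU_PAGE_ALIGNED(struct my_page, my_cpu_page);
 */
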
/*
 * Declaration/definition used for per-CPU variables that are mostly read
 * and rarely written.
 */
#define DECLARE_PER_CPU_READ_MOSTLY(type, name)			\
	DECLARE_PER_CPU_SECTION(type, name, "..read_mostly")

#define DEFINE_PER_CPU_READ_MOSTLY(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, "..read_mostly")

/*
 * Intermodule exports for per-CPU variables.  sparse forgets about
 * address space across EXPORT_SYMBOL(), so make EXPORT_SYMBOL() a
 * no-op when __CHECKER__ is defined.
 */
#ifndef __CHECKER__
#define EXPORT_PER_CPU_SYMBOL(var)	EXPORT_SYMBOL(var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var)	EXPORT_SYMBOL_GPL(var)
#else
#define EXPORT_PER_CPU_SYMBOL(var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var)
#endif

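/*
 * Illustrative sketch (hypothetical variable): pairing a definition with
 * an export so that modules can use the accessors below on it:
 *
 *	DEFINE_PER_CPU(int, my_shared_count);
 *	EXPORT_PER_CPU_SYMBOL(my_shared_count);
 */
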
/*
 * Accessors and operations.
 */
#ifndef __ASSEMBLY__

/*
 * __verify_pcpu_ptr() verifies @ptr is a percpu pointer without evaluating
 * @ptr and is invoked once before a percpu area is accessed by all
 * accessors and operations.  This is performed in the generic part of
 * percpu and arch overrides don't need to worry about it; however, if an
 * arch wants to implement an arch-specific percpu accessor or operation,
 * it may use __verify_pcpu_ptr() to verify the parameters.
 *
 * + 0 is required in order to convert the pointer type from a
 * potential array type to a pointer to a single item of the array.
 */
#define __verify_pcpu_ptr(ptr)						\
do {									\
	const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL;	\
	(void)__vpp_verify;						\
} while (0)

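/*
 * Illustrative sketch (hypothetical macro): an arch-specific accessor
 * would typically start the same way the generic ones below do:
 *
 *	#define my_arch_cpu_read(ptr)					\
 *	({								\
 *		__verify_pcpu_ptr(ptr);					\
 *		... arch-specific load from (ptr) ...			\
 *	})
 *
 * If @ptr lacks the __percpu address space annotation, the assignment to
 * the const void __percpu dummy above makes sparse complain.
 */
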
#ifdef CONFIG_SMP

/*
 * Add an offset to a pointer but keep the pointer as-is.  Use RELOC_HIDE()
 * to prevent the compiler from making incorrect assumptions about the
 * pointer value.  The weird cast keeps both GCC and sparse happy.
 */
#define SHIFT_PERCPU_PTR(__p, __offset)					\
	RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset))

#define per_cpu_ptr(ptr, cpu)						\
({									\
	__verify_pcpu_ptr(ptr);						\
	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)));			\
})

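/*
 * Illustrative sketch (hypothetical names; for_each_possible_cpu() comes
 * from linux/cpumask.h): resetting every CPU's instance of a variable:
 *
 *	DEFINE_PER_CPU(struct foo, foo_areas);
 *
 *	for_each_possible_cpu(cpu)
 *		memset(per_cpu_ptr(&foo_areas, cpu), 0, sizeof(struct foo));
 */
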
#define raw_cpu_ptr(ptr)						\
({									\
	__verify_pcpu_ptr(ptr);						\
	arch_raw_cpu_ptr(ptr);						\
})

#ifdef CONFIG_DEBUG_PREEMPT
#define this_cpu_ptr(ptr)						\
({									\
	__verify_pcpu_ptr(ptr);						\
	SHIFT_PERCPU_PTR(ptr, my_cpu_offset);				\
})
#else
#define this_cpu_ptr(ptr)	raw_cpu_ptr(ptr)
#endif

#else	/* CONFIG_SMP */

#define VERIFY_PERCPU_PTR(__p)						\
({									\
	__verify_pcpu_ptr(__p);						\
	(typeof(*(__p)) __kernel __force *)(__p);			\
})

#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
#define raw_cpu_ptr(ptr)	per_cpu_ptr(ptr, 0)
#define this_cpu_ptr(ptr)	raw_cpu_ptr(ptr)

#endif	/* CONFIG_SMP */

#define per_cpu(var, cpu)	(*per_cpu_ptr(&(var), cpu))

/*
 * Must be an lvalue.  Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 */
#define get_cpu_var(var)						\
(*({									\
	preempt_disable();						\
	this_cpu_ptr(&var);						\
}))

/*
 * The weird & is necessary because sparse considers (void)(var) to be
 * a direct dereference of percpu variable (var).
 */
#define put_cpu_var(var)						\
do {									\
	(void)&(var);							\
	preempt_enable();						\
} while (0)

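/*
 * Illustrative sketch (hypothetical variable): because get_cpu_var()
 * yields an lvalue with preemption disabled, the classic pattern is:
 *
 *	get_cpu_var(my_counter)++;
 *	put_cpu_var(my_counter);
 *
 * The task cannot migrate to another CPU between the two calls.
 */
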
#define get_cpu_ptr(var)						\
({									\
	preempt_disable();						\
	this_cpu_ptr(var);						\
})

#define put_cpu_ptr(var)						\
do {									\
	(void)(var);							\
	preempt_enable();						\
} while (0)

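/*
 * Illustrative sketch: the pointer variants suit dynamically allocated
 * per-CPU memory (alloc_percpu() lives in linux/percpu.h, not here;
 * struct foo is hypothetical):
 *
 *	struct foo __percpu *fp = alloc_percpu(struct foo);
 *	struct foo *p;
 *
 *	p = get_cpu_ptr(fp);		(preemption now disabled)
 *	p->count++;
 *	put_cpu_ptr(fp);		(preemption enabled again)
 */
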
/*
 * Branching macros that split an operation into a set of functions
 * called for the different scalar sizes of the objects handled.
 */

extern void __bad_size_call_parameter(void);

#ifdef CONFIG_DEBUG_PREEMPT
extern void __this_cpu_preempt_check(const char *op);
#else
static inline void __this_cpu_preempt_check(const char *op) { }
#endif

#define __pcpu_size_call_return(stem, variable)				\
({									\
	typeof(variable) pscr_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch (sizeof(variable)) {					\
	case 1: pscr_ret__ = stem##1(variable); break;			\
	case 2: pscr_ret__ = stem##2(variable); break;			\
	case 4: pscr_ret__ = stem##4(variable); break;			\
	case 8: pscr_ret__ = stem##8(variable); break;			\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr_ret__;							\
})

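/*
 * Expansion sketch: for a per-CPU "int x", this_cpu_read(x) below goes
 * through __pcpu_size_call_return(this_cpu_read_, x); sizeof(int) == 4 on
 * all supported arches, so case 4 selects this_cpu_read_4(x) - either an
 * arch-provided implementation or the generic fallback.
 */
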
#define __pcpu_size_call_return2(stem, variable, ...)			\
({									\
	typeof(variable) pscr2_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch (sizeof(variable)) {					\
	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr2_ret__;							\
})

/*
 * Special handling for cmpxchg_double.  cmpxchg_double is passed two
 * percpu variables.  The first has to be aligned to a double word
 * boundary and the second has to follow directly thereafter.
 * We enforce this on all architectures even if they don't support
 * a double cmpxchg instruction, since it's a cheap requirement, and it
 * avoids breaking the requirement for architectures with the instruction.
 */
#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...)		\
({									\
	bool pdcrb_ret__;						\
	__verify_pcpu_ptr(&(pcp1));					\
	BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2));			\
	VM_BUG_ON((unsigned long)(&(pcp1)) % (2 * sizeof(pcp1)));	\
	VM_BUG_ON((unsigned long)(&(pcp2)) !=				\
		  (unsigned long)(&(pcp1)) + sizeof(pcp1));		\
	switch (sizeof(pcp1)) {						\
	case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break;	\
	case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break;	\
	case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break;	\
	case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pdcrb_ret__;							\
})

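/*
 * Illustrative sketch (hypothetical names) of a layout satisfying the
 * checks above - two same-sized members, the pair aligned to twice their
 * size, the second directly after the first:
 *
 *	struct pair {
 *		long first;
 *		long second;
 *	} __aligned(2 * sizeof(long));
 *	DEFINE_PER_CPU(struct pair, my_pair);
 *
 *	ok = this_cpu_cmpxchg_double(my_pair.first, my_pair.second,
 *				     old1, old2, new1, new2);
 */
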
#define __pcpu_size_call(stem, variable, ...)				\
do {									\
	__verify_pcpu_ptr(&(variable));					\
	switch (sizeof(variable)) {					\
	case 1: stem##1(variable, __VA_ARGS__); break;			\
	case 2: stem##2(variable, __VA_ARGS__); break;			\
	case 4: stem##4(variable, __VA_ARGS__); break;			\
	case 8: stem##8(variable, __VA_ARGS__); break;			\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
} while (0)

/*
 * this_cpu operations (C) 2008-2013 Christoph Lameter
 *
 * Optimized manipulation for memory allocated through the per cpu
 * allocator or for addresses of per cpu variables.
 *
 * These operations guarantee exclusivity of access against other operations
 * on the *same* processor.  The assumption is that per cpu data is only
 * accessed by a single processor instance (the current one).
 *
 * The arch code can provide optimized implementations by defining macros
 * for certain scalar sizes, e.g. this_cpu_add_2() to provide per-cpu
 * atomic operations for 2 byte sized RMW actions.  If the arch code does
 * not provide operations for a scalar size then the fallback in the
 * generic code will be used.
 *
 * cmpxchg_double replaces two adjacent scalars at once.  The first two
 * parameters are per cpu variables which have to be of the same size.  A
 * truth value is returned to indicate success or failure (since a double
 * register result is difficult to handle).  There is very limited hardware
 * support for these operations, so only certain sizes may work.
 */

/*
 * Operations for contexts where we do not want to do any checks for
 * preemption.  Unless strictly necessary, always use [__]this_cpu_*()
 * instead.
 *
 * If there is no other protection through preempt disable and/or disabling
 * interrupts then one of these RMW operations can show unexpected behavior
 * because the execution thread was rescheduled on another processor or an
 * interrupt occurred and the same percpu variable was modified from the
 * interrupt context.
 */
#define raw_cpu_read(pcp)		__pcpu_size_call_return(raw_cpu_read_, pcp)
#define raw_cpu_write(pcp, val)		__pcpu_size_call(raw_cpu_write_, pcp, val)
#define raw_cpu_add(pcp, val)		__pcpu_size_call(raw_cpu_add_, pcp, val)
#define raw_cpu_and(pcp, val)		__pcpu_size_call(raw_cpu_and_, pcp, val)
#define raw_cpu_or(pcp, val)		__pcpu_size_call(raw_cpu_or_, pcp, val)
#define raw_cpu_add_return(pcp, val)	__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val)
#define raw_cpu_xchg(pcp, nval)		__pcpu_size_call_return2(raw_cpu_xchg_, pcp, nval)
#define raw_cpu_cmpxchg(pcp, oval, nval) \
	__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
#define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
	__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2)

#define raw_cpu_sub(pcp, val)		raw_cpu_add(pcp, -(val))
#define raw_cpu_inc(pcp)		raw_cpu_add(pcp, 1)
#define raw_cpu_dec(pcp)		raw_cpu_sub(pcp, 1)
#define raw_cpu_sub_return(pcp, val)	raw_cpu_add_return(pcp, -(typeof(pcp))(val))
#define raw_cpu_inc_return(pcp)		raw_cpu_add_return(pcp, 1)
#define raw_cpu_dec_return(pcp)		raw_cpu_add_return(pcp, -1)

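/*
 * Illustrative sketch (hypothetical counter): a raw op fits where the
 * caller already provides protection, or where an occasional lost update
 * is tolerable (e.g. loose statistics):
 *
 *	preempt_disable();
 *	raw_cpu_inc(nr_widgets);	(no debug checks, no protection)
 *	preempt_enable();
 */
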
/*
 * Operations for contexts that are safe from preemption/interrupts.  These
 * operations verify that preemption is disabled.
 */
#define __this_cpu_read(pcp)						\
({									\
	__this_cpu_preempt_check("read");				\
	raw_cpu_read(pcp);						\
})

#define __this_cpu_write(pcp, val)					\
({									\
	__this_cpu_preempt_check("write");				\
	raw_cpu_write(pcp, val);					\
})

#define __this_cpu_add(pcp, val)					\
({									\
	__this_cpu_preempt_check("add");				\
	raw_cpu_add(pcp, val);						\
})

#define __this_cpu_and(pcp, val)					\
({									\
	__this_cpu_preempt_check("and");				\
	raw_cpu_and(pcp, val);						\
})

#define __this_cpu_or(pcp, val)						\
({									\
	__this_cpu_preempt_check("or");					\
	raw_cpu_or(pcp, val);						\
})

#define __this_cpu_add_return(pcp, val)					\
({									\
	__this_cpu_preempt_check("add_return");				\
	raw_cpu_add_return(pcp, val);					\
})

#define __this_cpu_xchg(pcp, nval)					\
({									\
	__this_cpu_preempt_check("xchg");				\
	raw_cpu_xchg(pcp, nval);					\
})

#define __this_cpu_cmpxchg(pcp, oval, nval)				\
({									\
	__this_cpu_preempt_check("cmpxchg");				\
	raw_cpu_cmpxchg(pcp, oval, nval);				\
})

#define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
({	__this_cpu_preempt_check("cmpxchg_double");			\
	raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2);	\
})

#define __this_cpu_sub(pcp, val)	__this_cpu_add(pcp, -(typeof(pcp))(val))
#define __this_cpu_inc(pcp)		__this_cpu_add(pcp, 1)
#define __this_cpu_dec(pcp)		__this_cpu_sub(pcp, 1)
#define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define __this_cpu_inc_return(pcp)	__this_cpu_add_return(pcp, 1)
#define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)

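/*
 * Illustrative sketch (hypothetical counter): __this_cpu_*() documents -
 * and with CONFIG_DEBUG_PREEMPT verifies - that the caller has already
 * disabled preemption:
 *
 *	preempt_disable();
 *	__this_cpu_inc(nr_widgets);
 *	preempt_enable();
 */
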
/*
 * Operations with implied preemption/interrupt protection.  These
 * operations can be used without worrying about preemption or interrupts.
 */
#define this_cpu_read(pcp)		__pcpu_size_call_return(this_cpu_read_, pcp)
#define this_cpu_write(pcp, val)	__pcpu_size_call(this_cpu_write_, pcp, val)
#define this_cpu_add(pcp, val)		__pcpu_size_call(this_cpu_add_, pcp, val)
#define this_cpu_and(pcp, val)		__pcpu_size_call(this_cpu_and_, pcp, val)
#define this_cpu_or(pcp, val)		__pcpu_size_call(this_cpu_or_, pcp, val)
#define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
#define this_cpu_xchg(pcp, nval)	__pcpu_size_call_return2(this_cpu_xchg_, pcp, nval)
#define this_cpu_cmpxchg(pcp, oval, nval) \
	__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
#define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
	__pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2)

#define this_cpu_sub(pcp, val)		this_cpu_add(pcp, -(typeof(pcp))(val))
#define this_cpu_inc(pcp)		this_cpu_add(pcp, 1)
#define this_cpu_dec(pcp)		this_cpu_sub(pcp, 1)
#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)

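/*
 * Illustrative sketch (hypothetical counter): this_cpu_*() needs no
 * preparation by the caller; on arches like x86 it compiles down to a
 * single percpu-addressed instruction, elsewhere the generic fallback
 * disables interrupts around the RMW:
 *
 *	this_cpu_inc(nr_widgets);	(safe against preemption and irqs)
 */
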
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_PERCPU_DEFS_H */