Rev 6934 → Rev 7143 | x86 bitops.h

The two revisions differ only in that the bit helpers are promoted from static inline to static __always_inline; the listing below is Rev 7143.
#ifndef _ASM_X86_BITOPS_H
#define _ASM_X86_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 *
 * Note: inlines with more than a single statement should be marked
 * __always_inline to avoid problems with older gcc's inlining heuristics.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/alternative.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

#if BITS_PER_LONG == 32
# define _BITOPS_LONG_SHIFT 5
#elif BITS_PER_LONG == 64
# define _BITOPS_LONG_SHIFT 6
#else
# error "Unexpected BITS_PER_LONG"
#endif

#define BIT_64(n) (U64_C(1) << (n))

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
   versions. */
#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
#else
#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
#endif

#define ADDR BITOP_ADDR(addr)

/*
 * We do the locked ops that don't return the old value as
 * a mask operation on a byte.
 */
#define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK(nr) (1 << ((nr) & 7))
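
/*
 * Worked example (illustrative, not part of the original header): for a
 * constant nr = 35, CONST_MASK_ADDR(35, addr) addresses the byte at
 * (void *)addr + (35 >> 3) = addr + 4, and CONST_MASK(35) is
 * 1 << (35 & 7) = 1 << 3 = 0x08, so a single "orb $0x08" on that byte
 * sets bit 35 of the bitmap.
 */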

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __always_inline void
set_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "orb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr))
			: "memory");
	} else {
		asm volatile(LOCK_PREFIX "bts %1,%0"
			: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
	}
}

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}
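
/*
 * Illustrative sketch (not part of the original header): set_bit() when
 * other CPUs may touch the same word concurrently, __set_bit() when the
 * caller already has exclusive access; "flags" is a hypothetical bitmap.
 */
#if 0
static unsigned long flags[1];

static void set_bit_usage_sketch(void)
{
	set_bit(0, flags);	/* atomic: safe against concurrent updaters */
	__set_bit(1, flags);	/* non-atomic: needs external serialization */
}
#endif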

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static __always_inline void
clear_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "andb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)~CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX "btr %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and implies release semantics before the memory
 * operation. It can be used for an unlock.
 */
static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	barrier();
	clear_bit(nr, addr);
}

static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit() is non-atomic and implies release semantics before the memory
 * operation. It can be used for an unlock if no other CPUs can concurrently
 * modify other bits in the word.
 *
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads. Same principle as spin_unlock.
 */
static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	barrier();
	__clear_bit(nr, addr);
}

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __always_inline void change_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "xorb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX "btc %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}
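
/*
 * Illustrative note (not part of the original header): change_bit()
 * atomically flips a bit, e.g. toggling a shared flag from interrupt
 * and process context without extra locking; "state" is a hypothetical
 * bitmap:
 *
 *	change_bit(0, state);	// 0 -> 1 or 1 -> 0, atomically
 */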

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __always_inline int test_and_set_bit(long nr, volatile unsigned long *addr)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
}

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on x86.
 */
static __always_inline int
test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
	return test_and_set_bit(nr, addr);
}
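
/*
 * Illustrative lock/unlock sketch (an assumption, not from the original
 * header): a minimal bit spinlock built from test_and_set_bit_lock() and
 * clear_bit_unlock(); cpu_relax() is the kernel's busy-wait hint.
 */
#if 0
static unsigned long lock_word;

static void bit_lock_sketch(void)
{
	while (test_and_set_bit_lock(0, &lock_word))	/* acquire */
		cpu_relax();
	/* ... critical section ... */
	clear_bit_unlock(0, &lock_word);		/* release */
}
#endif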

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __always_inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
{
	int oldbit;

	asm("bts %2,%1\n\t"
	    "sbb %0,%0"
	    : "=r" (oldbit), ADDR
	    : "Ir" (nr));
	return oldbit;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __always_inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
}
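
/*
 * Illustrative sketch (an assumption, not from the original header):
 * test_and_clear_bit() is the usual way to consume a pending-event flag
 * exactly once even when several CPUs race; handle_event() and "pending"
 * are hypothetical.
 */
#if 0
static void consume_event_sketch(unsigned long *pending)
{
	if (test_and_clear_bit(0, pending))	/* only one racer sees 1 */
		handle_event();
}
#endif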

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 *
 * Note: the operation is performed atomically with respect to
 * the local CPU, but not other CPUs. Portable code should not
 * rely on this behaviour.
 * KVM relies on this behaviour on x86 for modifying memory that is also
 * accessed from a hypervisor on the same CPU if running in a VM: don't change
 * this without also updating arch/x86/kernel/kvm.c
 */
static __always_inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	int oldbit;

	asm volatile("btr %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR
		     : "Ir" (nr));
	return oldbit;
}

/* WARNING: non atomic and it can be reordered! */
static __always_inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
{
	int oldbit;

	asm volatile("btc %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR
		     : "Ir" (nr) : "memory");

	return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __always_inline int test_and_change_bit(long nr, volatile unsigned long *addr)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
}

static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr & (BITS_PER_LONG-1))) &
		(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}

static __always_inline int variable_test_bit(long nr, volatile const unsigned long *addr)
{
	int oldbit;

	asm volatile("bt %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit)
		     : "m" (*(unsigned long *)addr), "Ir" (nr));

	return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile unsigned long *addr);
#endif

#define test_bit(nr, addr)			\
	(__builtin_constant_p((nr))		\
	 ? constant_test_bit((nr), (addr))	\
	 : variable_test_bit((nr), (addr)))
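
/*
 * Illustrative note (not part of the original header): the dispatch is
 * resolved at compile time, e.g.
 *
 *	test_bit(3, map)	// constant nr -> constant_test_bit(),
 *				//   a plain C load and mask
 *	test_bit(i, map)	// variable nr -> variable_test_bit(),
 *				//   the "bt" instruction
 */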

/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __ffs(unsigned long word)
{
	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "rm" (word));
	return word;
}

/**
 * ffz - find first zero bit in word
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __always_inline unsigned long ffz(unsigned long word)
{
	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "r" (~word));
	return word;
}
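
/*
 * Worked values (illustrative): __ffs(0x60) == 5 (bit 5 is the lowest
 * set bit of 0110 0000b), and ffz(0x07) == 3 (bit 3 is the lowest clear
 * bit). Both are undefined when no such bit exists, hence the checks
 * against 0 and ~0UL noted above.
 */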

/*
 * __fls: find last set bit in word
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __fls(unsigned long word)
{
	asm("bsr %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
}

#undef ADDR

#ifdef __KERNEL__
/**
 * ffs - find first set bit in word
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from the other bitops.
 *
 * ffs(value) returns 0 if value is 0 or the position of the first
 * set bit if value is nonzero. The first (least significant) bit
 * is at position 1.
 */
static __always_inline int ffs(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before, except that the
	 * top 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsfl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsfl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "r" (-1));
#else
	asm("bsfl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}

/**
 * fls - find last set bit in word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffs, but returns the position of the most significant set bit.
 *
 * fls(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 32.
 */
static __always_inline int fls(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before, except that the
	 * top 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsrl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsrl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "rm" (-1));
#else
	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}
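
/*
 * Worked values (illustrative): ffs(0) == 0 and ffs(8) == 4, since bit 3
 * is set and positions are 1-based; fls(0) == 0, fls(8) == 4, and
 * fls(0x80000000) == 32.
 */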

/**
 * fls64 - find last set bit in a 64-bit word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 64.
 */
#ifdef CONFIG_X86_64
static __always_inline int fls64(__u64 x)
{
	int bitpos = -1;
	/*
	 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before.
	 */
	asm("bsrq %1,%q0"
	    : "+r" (bitpos)
	    : "rm" (x));
	return bitpos + 1;
}
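
/*
 * Worked values (illustrative): fls64(0) == 0 and fls64(1ULL << 63) == 64;
 * preloading bitpos with -1 is what makes the x == 0 case come out as 0
 * after the +1.
 */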
#else
#include <asm-generic/bitops/fls64.h>
#endif

#include <asm-generic/bitops/find.h>

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>

#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */
#endif /* _ASM_X86_BITOPS_H */