Subversion Repositories Kolibri OS

Rev 6934 → Rev 7143
Line 89... Line 89...
  *
  * Unlike set_bit(), this function is non-atomic and may be reordered.
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __set_bit(long nr, volatile unsigned long *addr)
+static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
 {
 	asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
 }
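The change above only swaps inline for __always_inline; the semantics in the comment stand: __set_bit() is the non-atomic variant of set_bit(). A minimal kernel-style usage sketch follows (not part of this diff; DECLARE_BITMAP, set_bit() and the <linux/...> includes are assumptions about the usual kernel environment):

/* Sketch only: use the non-atomic __set_bit() while the bitmap is still
 * private to one thread, and the atomic set_bit() once it is shared.
 */
#include <linux/types.h>	/* DECLARE_BITMAP (assumed environment) */
#include <linux/bitops.h>	/* set_bit(), __set_bit() */

static DECLARE_BITMAP(flags, 64);

static void init_flags(void)
{
	/* No other CPU can see 'flags' yet, so the cheaper non-atomic op is fine. */
	__set_bit(0, flags);
	__set_bit(5, flags);
}

static void publish_flag(int nr)
{
	/* Concurrent updaters may exist now: use the LOCK-prefixed atomic variant. */
	set_bit(nr, flags);
}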
Line 98... Line 98...
 
Line 126... Line 126...
  * @addr: Address to start counting from
  *
  * clear_bit() is atomic and implies release semantics before the memory
  * operation. It can be used for an unlock.
  */
-static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
+static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
 {
 	barrier();
 	clear_bit(nr, addr);
 }
Line 136... Line 136...
 
-static inline void __clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
 {
 	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
Line 140... Line 140...
 }
Line 149... Line 149...
  * modify other bits in the word.
  *
  * No memory barrier is required here, because x86 cannot reorder stores past
  * older loads. Same principle as spin_unlock.
  */
-static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
+static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
 {
 	barrier();
 	__clear_bit(nr, addr);
 }
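Both clear_bit_unlock() and __clear_bit_unlock() are described as release operations usable for an unlock. Below is a hedged sketch of a simple bit lock built from test_and_set_bit() (shown further down in this diff) and clear_bit_unlock(); the lock word, cpu_relax() and the include are assumptions about the surrounding kernel code, not part of this file:

#include <linux/bitops.h>

static unsigned long lock_word;		/* bit 0 is the lock bit (illustrative) */

static void my_bit_lock(void)
{
	/* test_and_set_bit() is atomic and implies a full barrier, so it
	 * serves as the acquire side of the lock.
	 */
	while (test_and_set_bit(0, &lock_word))
		cpu_relax();		/* assumed available via the usual kernel headers */
}

static void my_bit_unlock(void)
{
	/* Release: stores made while holding the lock are ordered before the
	 * bit is cleared. __clear_bit_unlock() would only be safe here if no
	 * other CPU concurrently modifies other bits of lock_word, per the
	 * comment above.
	 */
	clear_bit_unlock(0, &lock_word);
}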
Line 164... Line 164...
  *
  * Unlike change_bit(), this function is non-atomic and may be reordered.
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __change_bit(long nr, volatile unsigned long *addr)
+static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
 {
 	asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
 }
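__change_bit() complements (toggles) the addressed bit with a btc that is not LOCK-prefixed. As a plain-C illustration of what that computes, here is a small userspace model with a worked check (names are made up; this is not kernel code):

#include <assert.h>

#define MODEL_BITS_PER_LONG (8 * sizeof(unsigned long))

/* Model of the toggle: XOR the selected bit of the selected word. */
static void model_change_bit(long nr, unsigned long *addr)
{
	addr[nr / MODEL_BITS_PER_LONG] ^= 1UL << (nr % MODEL_BITS_PER_LONG);
}

int main(void)
{
	unsigned long map[4] = { 0 };

	model_change_bit(3, map);	/* sets bit 3            */
	model_change_bit(3, map);	/* toggles it back to 0  */
	model_change_bit(70, map);	/* lands in a later word */
	assert(map[0] == 0);
	assert(map[70 / MODEL_BITS_PER_LONG] == 1UL << (70 % MODEL_BITS_PER_LONG));
	return 0;
}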
Line 173... Line 173...
 
Line 178... Line 178...
  *
  * change_bit() is atomic and may not be reordered.
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void change_bit(long nr, volatile unsigned long *addr)
+static __always_inline void change_bit(long nr, volatile unsigned long *addr)
 {
 	if (IS_IMMEDIATE(nr)) {
 		asm volatile(LOCK_PREFIX "xorb %1,%0"
 			: CONST_MASK_ADDR(nr, addr)
 			: "iq" ((u8)CONST_MASK(nr)));
Line 199... Line 199...
  * @addr: Address to count from
  *
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
+static __always_inline int test_and_set_bit(long nr, volatile unsigned long *addr)
 {
 	GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
 }
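test_and_set_bit() atomically sets the bit and returns its previous value, which makes it a natural "do this exactly once" primitive. A kernel-style sketch (the flag word and helper are hypothetical, not from this file):

#include <linux/bitops.h>

static unsigned long init_done;

static void do_one_time_setup(void)
{
	/* hypothetical one-time work */
}

static void maybe_setup(void)
{
	/* Only the caller that observes the old value 0 runs the setup,
	 * even if several CPUs race into this path.
	 */
	if (!test_and_set_bit(0, &init_done))
		do_one_time_setup();
}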
Line 208... Line 208...
 
Line 226... Line 226...
  *
  * This operation is non-atomic and can be reordered.
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
+static __always_inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
 {
 	int oldbit;
Line 234... Line 234...
 
 	asm("bts %2,%1\n\t"
Line 245... Line 245...
  * @addr: Address to count from
  *
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
 	GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
 }
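test_and_clear_bit() atomically clears the bit and reports whether it was set, so a pending-work flag can be consumed exactly once. A kernel-style sketch (event names and handlers are hypothetical):

#include <linux/bitops.h>

#define EVENT_RX 0
#define EVENT_TX 1

static unsigned long pending;		/* set from another context with set_bit() */

static void handle_rx(void) { /* hypothetical */ }
static void handle_tx(void) { /* hypothetical */ }

static void drain_pending(void)
{
	if (test_and_clear_bit(EVENT_RX, &pending))
		handle_rx();
	if (test_and_clear_bit(EVENT_TX, &pending))
		handle_tx();
}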
Line 254... Line 254...
 
Line 266... Line 266...
  * rely on this behaviour.
  * KVM relies on this behaviour on x86 for modifying memory that is also
  * accessed from a hypervisor on the same CPU if running in a VM: don't change
  * this without also updating arch/x86/kernel/kvm.c
  */
-static inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
 	int oldbit;
Line 274... Line 274...
 
 	asm volatile("btr %2,%1\n\t"
Line 278... Line 278...
 		     : "Ir" (nr));
 	return oldbit;
 }
Line 281... Line 281...
 
 /* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
 {
Line 285... Line 285...
 	int oldbit;
 
Line 298... Line 298...
  * @addr: Address to count from
  *
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline int test_and_change_bit(long nr, volatile unsigned long *addr)
 {
 	GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
 }
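test_and_change_bit() atomically toggles the bit and returns the value it had before the toggle. A short hedged sketch: flipping a two-phase flag and learning which phase was active (all names are illustrative, not from this file):

#include <linux/bitops.h>

static unsigned long phase;		/* bit 0: 0 = phase A, 1 = phase B */

static int switch_phase(void)
{
	/* Returns the old phase: 0 means we just moved A -> B, 1 means B -> A. */
	return test_and_change_bit(0, &phase);
}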
Line 307... Line 307...
 
 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
 {
 	return ((1UL << (nr & (BITS_PER_LONG-1))) &
 		(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
Line 312... Line 312...
 }
 
-static inline int variable_test_bit(long nr, volatile const unsigned long *addr)
+static __always_inline int variable_test_bit(long nr, volatile const unsigned long *addr)
Line 315... Line 315...
 {
 	int oldbit;
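constant_test_bit() spells out the indexing arithmetic: the word is addr[nr >> _BITOPS_LONG_SHIFT] and the bit within it is nr & (BITS_PER_LONG-1); on a 64-bit build, nr = 71 selects word 1, bit 7. A userspace model (not kernel code) that checks the same arithmetic:

#include <assert.h>

#define MODEL_BITS_PER_LONG (8 * sizeof(unsigned long))

static int model_test_bit(long nr, const unsigned long *addr)
{
	return (addr[nr / MODEL_BITS_PER_LONG] >> (nr % MODEL_BITS_PER_LONG)) & 1;
}

int main(void)
{
	unsigned long map[4] = { 0 };

	map[71 / MODEL_BITS_PER_LONG] |= 1UL << (71 % MODEL_BITS_PER_LONG);
	assert(model_test_bit(71, map) == 1);
	assert(model_test_bit(7, map) == 0);
	return 0;
}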
Line 341... Line 341...
  * __ffs - find first set bit in word
  * @word: The word to search
  *
  * Undefined if no bit exists, so code should check against 0 first.
  */
-static inline unsigned long __ffs(unsigned long word)
+static __always_inline unsigned long __ffs(unsigned long word)
 {
 	asm("rep; bsf %1,%0"
 		: "=r" (word)
 		: "rm" (word));
 	return word;
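__ffs() returns the zero-based index of the lowest set bit (undefined for 0), which rep; bsf computes directly. A runnable userspace check using the equivalent count-trailing-zeros builtin (a stand-in for the asm version, not the kernel routine itself):

#include <assert.h>

static unsigned long model_ffs_low(unsigned long word)
{
	return (unsigned long)__builtin_ctzl(word);	/* word must be non-zero */
}

int main(void)
{
	assert(model_ffs_low(0x18UL) == 3);	/* 0b11000: lowest set bit is bit 3 */
	assert(model_ffs_low(0x1UL) == 0);
	return 0;
}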
Line 355... Line 355...
  * ffz - find first zero bit in word
  * @word: The word to search
  *
  * Undefined if no zero exists, so code should check against ~0UL first.
  */
-static inline unsigned long ffz(unsigned long word)
+static __always_inline unsigned long ffz(unsigned long word)
 {
 	asm("rep; bsf %1,%0"
 		: "=r" (word)
 		: "r" (~word));
 	return word;
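ffz() finds the first zero bit by running bsf on the complement of the word (undefined for ~0UL). The same identity, checked in userspace with the ctz builtin (again an illustrative stand-in, not the kernel routine):

#include <assert.h>

static unsigned long model_ffz(unsigned long word)
{
	return (unsigned long)__builtin_ctzl(~word);	/* word must not be ~0UL */
}

int main(void)
{
	assert(model_ffz(0x0fUL) == 4);		/* bits 0-3 set, bit 4 is the first zero */
	assert(model_ffz(0x0UL) == 0);
	return 0;
}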
Line 369... Line 369...
  * __fls: find last set bit in word
  * @word: The word to search
  *
  * Undefined if no set bit exists, so code should check against 0 first.
  */
-static inline unsigned long __fls(unsigned long word)
+static __always_inline unsigned long __fls(unsigned long word)
 {
 	asm("bsr %1,%0"
 	    : "=r" (word)
 	    : "rm" (word));
 	return word;
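__fls() returns the zero-based index of the highest set bit, which bsr computes (undefined for 0). A userspace check via the count-leading-zeros builtin, valid for both 32- and 64-bit longs (an illustrative model, not the kernel routine):

#include <assert.h>

#define MODEL_BITS_PER_LONG (8 * sizeof(unsigned long))

static unsigned long model_fls_high(unsigned long word)
{
	return MODEL_BITS_PER_LONG - 1 - (unsigned long)__builtin_clzl(word);	/* word != 0 */
}

int main(void)
{
	assert(model_fls_high(0x18UL) == 4);	/* 0b11000: highest set bit is bit 4 */
	assert(model_fls_high(0x1UL) == 0);
	return 0;
}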
Line 391... Line 391...
  *
  * ffs(value) returns 0 if value is 0 or the position of the first
  * set bit if value is nonzero. The first (least significant) bit
  * is at position 1.
  */
-static inline int ffs(int x)
+static __always_inline int ffs(int x)
 {
 	int r;
Line 399... Line 399...
 
 #ifdef CONFIG_X86_64
Line 432... Line 432...
  *
  * fls(value) returns 0 if value is 0 or the position of the last
  * set bit if value is nonzero. The last (most significant) bit is
  * at position 32.
  */
-static inline int fls(int x)
+static __always_inline int fls(int x)
 {
 	int r;
Line 440... Line 440...
 
 #ifdef CONFIG_X86_64