Subversion Repositories Kolibri OS

bitops.h, diff of Rev 5056 against Rev 5270. The two revisions differ only in the GENMASK()/GENMASK_ULL() definitions; that hunk is shown below with "-" (Rev 5056) and "+" (Rev 5270) markers. Everything else is common to both revisions.
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
#include <asm/types.h>

#ifdef	__KERNEL__
#define BIT(nr)         (1UL << (nr))
#define BIT_ULL(nr)		(1ULL << (nr))
#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
#define BIT_ULL_MASK(nr)	(1ULL << ((nr) % BITS_PER_LONG_LONG))
#define BIT_ULL_WORD(nr)	((nr) / BITS_PER_LONG_LONG)
#define BITS_PER_BYTE		8
#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
#endif
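BIT_MASK() and BIT_WORD() locate a bit inside a multiword bitmap: bit nr lives in word BIT_WORD(nr), at position nr % BITS_PER_LONG within that word. A standalone sketch of how a set_bit()-style helper would use them (demo code, not part of the header; BITS_PER_LONG is derived locally rather than taken from kernel headers):

#include <assert.h>

#define BITS_PER_LONG	(8 * (int)sizeof(unsigned long))
#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)

/* Non-atomic sketch of set_bit(): pick the word, then OR in the mask. */
static void demo_set_bit(int nr, unsigned long *map)
{
	map[BIT_WORD(nr)] |= BIT_MASK(nr);
}

int main(void)
{
	unsigned long map[2] = { 0, 0 };

	demo_set_bit(BITS_PER_LONG + 5, map);	/* word 1, bit 5 */
	assert(map[1] == (1UL << 5));
	return 0;
}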

/*
 * Create a contiguous bitmask starting at bit position @l and ending at
 * position @h. For example
 * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
 */
-#define GENMASK(h, l)		(((U32_C(1) << ((h) - (l) + 1)) - 1) << (l))
-#define GENMASK_ULL(h, l)	(((U64_C(1) << ((h) - (l) + 1)) - 1) << (l))
+#define GENMASK(h, l) \
+	(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+
+#define GENMASK_ULL(h, l) \
+	(((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
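The Rev 5270 form fixes a real defect, not just style: with the Rev 5056 definition, a full-width mask such as GENMASK(31, 0) on a 32-bit long evaluates 1 << 32, which is undefined behavior in C. The new expression never shifts by more than BITS_PER_LONG - 1. A standalone sketch of the semantics (demo only; BITS_PER_LONG is derived locally):

#include <assert.h>

#define BITS_PER_LONG	(8 * (int)sizeof(unsigned long))
#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

int main(void)
{
	assert(GENMASK(12, 4) == 0x1ff0UL);	/* bits 4..12 set */
	/* Full-width mask: well-defined here, UB with the old 1 << (h-l+1). */
	assert(GENMASK(BITS_PER_LONG - 1, 0) == ~0UL);
	return 0;
}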

extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);

/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope
 */
#include <asm/bitops.h>

#define for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size)); \
	     (bit) < (size); \
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

/* same as for_each_set_bit() but use bit as value to start with */
#define for_each_set_bit_from(bit, addr, size) \
	for ((bit) = find_next_bit((addr), (size), (bit));	\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

#define for_each_clear_bit(bit, addr, size) \
	for ((bit) = find_first_zero_bit((addr), (size));	\
	     (bit) < (size);					\
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))

/* same as for_each_clear_bit() but use bit as value to start with */
#define for_each_clear_bit_from(bit, addr, size) \
	for ((bit) = find_next_zero_bit((addr), (size), (bit));	\
	     (bit) < (size);					\
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
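Typical use of these iterators, sketched as a standalone program. The find_first_bit()/find_next_bit() stand-ins below cover only a demo single-word bitmap; the real ones come from asm/bitops.h and handle multiword bitmaps:

#include <stdio.h>

/* Demo-only stand-ins, valid for a one-word bitmap. */
static unsigned int find_next_bit(const unsigned long *addr,
				  unsigned int size, unsigned int off)
{
	while (off < size && !(addr[0] & (1UL << off)))
		off++;
	return off;
}

#define find_first_bit(addr, size)	find_next_bit(addr, size, 0)

#define for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size)); \
	     (bit) < (size); \
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

int main(void)
{
	unsigned long map = 0x29;	/* bits 0, 3 and 5 set */
	unsigned int bit;

	for_each_set_bit(bit, &map, 32)
		printf("bit %u is set\n", bit);	/* prints 0, 3, 5 */
	return 0;
}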

static __inline__ int get_bitmask_order(unsigned int count)
{
	int order;

	order = fls(count);
	return order;	/* We could be slightly more clever with -1 here... */
}

static __inline__ int get_count_order(unsigned int count)
{
	int order;

	order = fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}
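get_count_order() returns the exponent of the next power of two at or above count; get_bitmask_order() is simply fls(). A standalone check of the rounding behavior (the demo fls() stand-in assumes GCC's __builtin_clz; the kernel's comes from asm/bitops.h):

#include <assert.h>

/* Demo fls(): position of the highest set bit, 1-based; fls(0) == 0. */
static int demo_fls(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

static int get_count_order(unsigned int count)
{
	int order = demo_fls(count) - 1;

	if (count & (count - 1))	/* not a power of two: round up */
		order++;
	return order;
}

int main(void)
{
	assert(get_count_order(8) == 3);	/* 8 is exactly 2^3 */
	assert(get_count_order(9) == 4);	/* rounds up to 16 == 2^4 */
	return 0;
}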
74
 
77
 
75
static inline unsigned long hweight_long(unsigned long w)
78
static inline unsigned long hweight_long(unsigned long w)
76
{
79
{
77
	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
80
	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
78
}
81
}
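hweight32()/hweight64() return the Hamming weight (population count) of their argument, so hweight_long() just dispatches on the width of long. A quick standalone illustration, using the GCC builtin as a stand-in for the kernel implementation:

#include <assert.h>

int main(void)
{
	/* hweight32(0xf0f0) == 8: eight bits are set. */
	assert(__builtin_popcount(0xf0f0) == 8);
	return 0;
}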
79
 
82
 
80
/**
83
/**
81
 * rol64 - rotate a 64-bit value left
84
 * rol64 - rotate a 64-bit value left
82
 * @word: value to rotate
85
 * @word: value to rotate
83
 * @shift: bits to roll
86
 * @shift: bits to roll
84
 */
87
 */
85
static inline __u64 rol64(__u64 word, unsigned int shift)
88
static inline __u64 rol64(__u64 word, unsigned int shift)
86
{
89
{
87
	return (word << shift) | (word >> (64 - shift));
90
	return (word << shift) | (word >> (64 - shift));
88
}
91
}
89
 
92
 
90
/**
93
/**
91
 * ror64 - rotate a 64-bit value right
94
 * ror64 - rotate a 64-bit value right
92
 * @word: value to rotate
95
 * @word: value to rotate
93
 * @shift: bits to roll
96
 * @shift: bits to roll
94
 */
97
 */
95
static inline __u64 ror64(__u64 word, unsigned int shift)
98
static inline __u64 ror64(__u64 word, unsigned int shift)
96
{
99
{
97
	return (word >> shift) | (word << (64 - shift));
100
	return (word >> shift) | (word << (64 - shift));
98
}
101
}
99
 
102
 
100
/**
103
/**
101
 * rol32 - rotate a 32-bit value left
104
 * rol32 - rotate a 32-bit value left
102
 * @word: value to rotate
105
 * @word: value to rotate
103
 * @shift: bits to roll
106
 * @shift: bits to roll
104
 */
107
 */
105
static inline __u32 rol32(__u32 word, unsigned int shift)
108
static inline __u32 rol32(__u32 word, unsigned int shift)
106
{
109
{
107
	return (word << shift) | (word >> (32 - shift));
110
	return (word << shift) | (word >> (32 - shift));
108
}
111
}
109
 
112
 
110
/**
113
/**
111
 * ror32 - rotate a 32-bit value right
114
 * ror32 - rotate a 32-bit value right
112
 * @word: value to rotate
115
 * @word: value to rotate
113
 * @shift: bits to roll
116
 * @shift: bits to roll
114
 */
117
 */
115
static inline __u32 ror32(__u32 word, unsigned int shift)
118
static inline __u32 ror32(__u32 word, unsigned int shift)
116
{
119
{
117
	return (word >> shift) | (word << (32 - shift));
120
	return (word >> shift) | (word << (32 - shift));
118
}
121
}
119
 
122
 
120
/**
123
/**
121
 * rol16 - rotate a 16-bit value left
124
 * rol16 - rotate a 16-bit value left
122
 * @word: value to rotate
125
 * @word: value to rotate
123
 * @shift: bits to roll
126
 * @shift: bits to roll
124
 */
127
 */
125
static inline __u16 rol16(__u16 word, unsigned int shift)
128
static inline __u16 rol16(__u16 word, unsigned int shift)
126
{
129
{
127
	return (word << shift) | (word >> (16 - shift));
130
	return (word << shift) | (word >> (16 - shift));
128
}
131
}
129
 
132
 
130
/**
133
/**
131
 * ror16 - rotate a 16-bit value right
134
 * ror16 - rotate a 16-bit value right
132
 * @word: value to rotate
135
 * @word: value to rotate
133
 * @shift: bits to roll
136
 * @shift: bits to roll
134
 */
137
 */
135
static inline __u16 ror16(__u16 word, unsigned int shift)
138
static inline __u16 ror16(__u16 word, unsigned int shift)
136
{
139
{
137
	return (word >> shift) | (word << (16 - shift));
140
	return (word >> shift) | (word << (16 - shift));
138
}
141
}
139
 
142
 
140
/**
143
/**
141
 * rol8 - rotate an 8-bit value left
144
 * rol8 - rotate an 8-bit value left
142
 * @word: value to rotate
145
 * @word: value to rotate
143
 * @shift: bits to roll
146
 * @shift: bits to roll
144
 */
147
 */
145
static inline __u8 rol8(__u8 word, unsigned int shift)
148
static inline __u8 rol8(__u8 word, unsigned int shift)
146
{
149
{
147
	return (word << shift) | (word >> (8 - shift));
150
	return (word << shift) | (word >> (8 - shift));
148
}
151
}
149
 
152
 
150
/**
153
/**
151
 * ror8 - rotate an 8-bit value right
154
 * ror8 - rotate an 8-bit value right
152
 * @word: value to rotate
155
 * @word: value to rotate
153
 * @shift: bits to roll
156
 * @shift: bits to roll
154
 */
157
 */
155
static inline __u8 ror8(__u8 word, unsigned int shift)
158
static inline __u8 ror8(__u8 word, unsigned int shift)
156
{
159
{
157
	return (word >> shift) | (word << (8 - shift));
160
	return (word >> shift) | (word << (8 - shift));
158
}
161
}
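All eight rotate helpers follow the same pattern: the bits shifted out of one end are ORed back in at the other. Note they assume 0 < shift < width; shift == 0 would evaluate e.g. word >> 32 on a 32-bit operand, which C leaves undefined. A standalone check of the 32-bit case:

#include <assert.h>
#include <stdint.h>

static inline uint32_t rol32(uint32_t word, unsigned int shift)
{
	return (word << shift) | (word >> (32 - shift));
}

int main(void)
{
	/* The top bit wraps around to bit 0: 0x80000001 -> 0x00000003. */
	assert(rol32(0x80000001u, 1) == 0x00000003u);
	return 0;
}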

/**
 * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<32) to sign bit
 */
static inline __s32 sign_extend32(__u32 value, int index)
{
	__u8 shift = 31 - index;
	return (__s32)(value << shift) >> shift;
}
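sign_extend32() shifts the chosen sign bit up to bit 31, then relies on an arithmetic right shift to copy it back down across the upper bits (the kernel assumes signed right shift is arithmetic, which C leaves implementation-defined). A standalone example:

#include <assert.h>
#include <stdint.h>

static inline int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	/* With bit 7 as the sign bit, 0x80 reads as -128 and 0x7f as 127. */
	assert(sign_extend32(0x80, 7) == -128);
	assert(sign_extend32(0x7f, 7) == 127);
	return 0;
}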

static inline unsigned fls_long(unsigned long l)
{
	if (sizeof(l) == 4)
		return fls(l);
	return fls64(l);
}

/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64 bit arches this is a synonym for __ffs
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
	if (((u32)word) == 0UL)
		return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
	return __ffs((unsigned long)word);
}
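__ffs() returns the 0-based index of the lowest set bit, so on a 32-bit long __ffs64() falls back to searching the high word and adding 32 when the low 32 bits are empty. A standalone sketch (using GCC's __builtin_ctzll as a stand-in for __ffs; like the real helper, it is undefined for word == 0):

#include <assert.h>
#include <stdint.h>

/* Demo stand-in for __ffs64(). */
static unsigned long demo_ffs64(uint64_t word)
{
	return (unsigned long)__builtin_ctzll(word);
}

int main(void)
{
	assert(demo_ffs64(0x8) == 3);
	assert(demo_ffs64(1ULL << 32) == 32);	/* the high-word path above */
	return 0;
}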

#ifdef __KERNEL__

#ifndef set_mask_bits
#define set_mask_bits(ptr, _mask, _bits)	\
({								\
	const typeof(*ptr) mask = (_mask), bits = (_bits);	\
	typeof(*ptr) old, new;					\
								\
	do {							\
		old = ACCESS_ONCE(*ptr);			\
		new = (old & ~mask) | bits;			\
	} while (cmpxchg(ptr, old, new) != old);		\
								\
	new;							\
})
#endif
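set_mask_bits() is a lock-free read-modify-write: it recomputes (old & ~mask) | bits and retries until cmpxchg() confirms that no other CPU changed *ptr between the read and the swap. The core update, checked standalone without the atomicity:

#include <assert.h>

/* The non-atomic core of set_mask_bits(): clear the masked field, then
 * OR in the replacement bits. */
static unsigned long demo_update(unsigned long old, unsigned long mask,
				 unsigned long bits)
{
	return (old & ~mask) | bits;
}

int main(void)
{
	/* Replace the low nibble of 0xab with 0x5 -> 0xa5. */
	assert(demo_update(0xab, 0x0f, 0x05) == 0xa5);
	return 0;
}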

#ifndef find_last_bit
/**
 * find_last_bit - find the last set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the last set bit, or size.
 */
extern unsigned long find_last_bit(const unsigned long *addr,
				   unsigned long size);
#endif

#endif /* __KERNEL__ */
#endif