#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/types.h>
#include <asm/div64.h>

#if BITS_PER_LONG == 64

#define div64_long(x, y) div64_s64((x), (y))
#define div64_ul(x, y)   div64_u64((x), (y))

/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
 *
 * This is commonly provided by 32bit archs as an optimized 64bit divide.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}
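
/*
 * Example (illustrative, not part of the original header): splitting a
 * nanosecond count into whole seconds plus leftover nanoseconds, where
 * timestamp_ns is a hypothetical u64 and NSEC_PER_SEC fits in 32 bits:
 *
 *	u32 rem;
 *	u64 secs = div_u64_rem(timestamp_ns, NSEC_PER_SEC, &rem);
 */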

/**
 * div_s64_rem - signed 64bit divide with 32bit divisor with remainder
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 */
static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
	return dividend / divisor;
}

/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
	return dividend / divisor;
}

#elif BITS_PER_LONG == 32

#define div64_long(x, y) div_s64((x), (y))
#define div64_ul(x, y)   div_u64((x), (y))

#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	/* do_div() divides its first argument in place and returns the remainder */
	*remainder = do_div(dividend, divisor);
	return dividend;
}
#endif

#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64_rem
extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
#endif

#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif

#endif /* BITS_PER_LONG */

/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full
 * 64bit divide.
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
	u32 remainder;
	return div_u64_rem(dividend, divisor, &remainder);
}
#endif
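
/*
 * Example (illustrative): averaging a 64bit accumulator over a 32bit
 * sample count (total and nr_samples are hypothetical names):
 *
 *	u64 avg = div_u64(total, nr_samples);
 *
 * A plain "total / nr_samples" would promote the divisor to 64 bits and
 * pull in the compiler's full 64/64 division helper on 32bit archs;
 * div_u64() keeps it on the cheaper 64/32 path.
 */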

/**
 * div_s64 - signed 64bit divide with 32bit divisor
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
	s32 remainder;
	return div_s64_rem(dividend, divisor, &remainder);
}
#endif

u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);

static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	u32 ret = 0;

	while (dividend >= divisor) {
		/*
		 * The following asm() prevents the compiler from
		 * optimising this loop into a modulo operation.
		 */
		asm("" : "+rm"(dividend));

		dividend -= divisor;
		ret++;
	}

	*remainder = dividend;

	return ret;
}
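
/*
 * Example (illustrative): __iter_div_u64_rem() divides by repeated
 * subtraction, so it only makes sense when the quotient is known to be
 * very small, e.g. normalising a seconds/nanoseconds pair after adding
 * at most a few seconds worth of nanoseconds (hypothetical variables):
 *
 *	u64 ns_rem;
 *	sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns_rem);
 *	ns = ns_rem;
 */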

#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u32_shr */
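
/*
 * Example (illustrative): mul_u64_u32_shr() is the usual way to apply a
 * fixed-point fraction; with shift == 32 and a hypothetical weight_frac
 * scaled to 2^32, this computes load * weight_frac / 2^32 even when the
 * full product would not fit in 64 bits:
 *
 *	u64 scaled = mul_u64_u32_shr(load, weight_frac, 32);
 */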

#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u64_shr */

#else

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	u32 ah, al;
	u64 ret;

	/* Split a into 32bit halves: a = (ah << 32) + al */
	al = a;
	ah = a >> 32;

	ret = ((u64)al * mul) >> shift;
	if (ah)
		ret += ((u64)ah * mul) << (32 - shift);

	return ret;
}
#endif /* mul_u64_u32_shr */
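
/*
 * The fallback above relies on the identity (exact as long as
 * shift <= 32, since "32 - shift" must not go negative, and the final
 * result must fit in 64 bits):
 *
 *	(a * mul) >> shift
 *		= ((ah * 2^32 + al) * mul) >> shift
 *		= ((ah * mul) << (32 - shift)) + ((al * mul) >> shift)
 */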

#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
{
	union {
		u64 ll;
		struct {
#ifdef __BIG_ENDIAN
			u32 high, low;
#else
			u32 low, high;
#endif
		} l;
	} rl, rm, rn, rh, a0, b0;
	u64 c;

	a0.ll = a;
	b0.ll = b;

	rl.ll = (u64)a0.l.low * b0.l.low;
	rm.ll = (u64)a0.l.low * b0.l.high;
	rn.ll = (u64)a0.l.high * b0.l.low;
	rh.ll = (u64)a0.l.high * b0.l.high;

	/*
	 * Each of these lines computes a 64-bit intermediate result into "c",
	 * starting at bits 32-95.  The low 32-bits go into the result of the
	 * multiplication, the high 32-bits are carried into the next step.
	 */
	rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
	rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
	rh.l.high = (c >> 32) + rh.l.high;

	/*
	 * The 128-bit result of the multiplication is in rl.ll and rh.ll,
	 * shift it right and throw away the high part of the result.
	 */
	if (shift == 0)
		return rl.ll;
	if (shift < 64)
		return (rl.ll >> shift) | (rh.ll << (64 - shift));
	return rh.ll >> (shift & 63);
}
#endif /* mul_u64_u64_shr */
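
/*
 * The fallback above is schoolbook multiplication on 32bit digits: with
 * a = ah*2^32 + al and b = bh*2^32 + bl,
 *
 *	a * b = (ah*bh)<<64 + (al*bh + ah*bl)<<32 + al*bl
 *
 * rl/rm/rn/rh hold the four partial products, and the two summation
 * lines fold the middle terms into the 128-bit result, propagating the
 * carries out of bits 32-95 by hand.
 */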

#endif

#ifndef mul_u64_u32_div
static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
{
	union {
		u64 ll;
		struct {
#ifdef __BIG_ENDIAN
			u32 high, low;
#else
			u32 low, high;
#endif
		} l;
	} u, rl, rh;

	u.ll = a;
	rl.ll = (u64)u.l.low * mul;
	rh.ll = (u64)u.l.high * mul + rl.l.high;

	/* Bits 32-63 of the result will be in rh.l.low. */
	rl.l.high = do_div(rh.ll, divisor);

	/* Bits 0-31 of the result will be in rl.l.low. */
	do_div(rl.ll, divisor);

	rl.l.high = rh.l.low;
	return rl.ll;
}
#endif /* mul_u64_u32_div */
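
/*
 * Example (illustrative): rescaling a 64bit counter by a 32bit ratio
 * (cycles, rate_num and rate_den are hypothetical names), i.e.
 * cycles * rate_num / rate_den computed via a 96bit intermediate
 * product so the multiplication cannot overflow:
 *
 *	u64 scaled = mul_u64_u32_div(cycles, rate_num, rate_den);
 */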

#endif /* _LINUX_MATH64_H */