Subversion Repositories: Kolibri OS

--- Rev 1870
+++ Rev 3031
@@ -1,60 +1,66 @@
 #ifndef _ASM_X86_DIV64_H
 #define _ASM_X86_DIV64_H
 
 #ifdef CONFIG_X86_32
 
 #include <linux/types.h>
+#include <linux/log2.h>
 
 /*
  * do_div() is NOT a C function. It wants to return
  * two values (the quotient and the remainder), but
  * since that doesn't work very well in C, what it
  * does is:
  *
  * - modifies the 64-bit dividend _in_place_
  * - returns the 32-bit remainder
  *
  * This ends up being the most efficient "calling
  * convention" on x86.
  */
 #define do_div(n, base)						\
 ({								\
 	unsigned long __upper, __low, __high, __mod, __base;	\
 	__base = (base);					\
-	asm("":"=a" (__low), "=d" (__high) : "A" (n));		\
-	__upper = __high;					\
-	if (__high) {						\
-		__upper = __high % (__base);			\
-		__high = __high / (__base);			\
-	}							\
-	asm("divl %2":"=a" (__low), "=d" (__mod)		\
-	    : "rm" (__base), "0" (__low), "1" (__upper));	\
-	asm("":"=A" (n) : "a" (__low), "d" (__high));		\
+	if (__builtin_constant_p(__base) && is_power_of_2(__base)) { \
+		__mod = n & (__base - 1);			\
+		n >>= ilog2(__base);				\
+	} else {						\
+		asm("" : "=a" (__low), "=d" (__high) : "A" (n));\
+		__upper = __high;				\
+		if (__high) {					\
+			__upper = __high % (__base);		\
+			__high = __high / (__base);		\
+		}						\
+		asm("divl %2" : "=a" (__low), "=d" (__mod)	\
+			: "rm" (__base), "0" (__low), "1" (__upper));	\
+		asm("" : "=A" (n) : "a" (__low), "d" (__high));	\
+	}							\
 	__mod;							\
 })
 
 static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
 {
 	union {
 		u64 v64;
 		u32 v32[2];
 	} d = { dividend };
 	u32 upper;
 
 	upper = d.v32[1];
 	d.v32[1] = 0;
 	if (upper >= divisor) {
 		d.v32[1] = upper / divisor;
 		upper %= divisor;
 	}
 	asm ("divl %2" : "=a" (d.v32[0]), "=d" (*remainder) :
 		"rm" (divisor), "0" (d.v32[0]), "1" (upper));
 	return d.v64;
 }
 #define div_u64_rem	div_u64_rem
 
 #else
 # include <asm-generic/div64.h>
 #endif /* CONFIG_X86_32 */
 
 #endif /* _ASM_X86_DIV64_H */
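
The header's comment block describes do_div()'s calling convention only in words; the short sketch below shows how both helpers are typically invoked from C code. It is a minimal illustration, not code from this repository: the helper names and the nanosecond constant are made up, and it assumes the kernel-style u64/u32 types and that this header is reachable as <asm/div64.h>.

#include <asm/div64.h>	/* assumed include path for the header above */

/* Hypothetical helper: split a nanosecond count into seconds + remainder. */
static u64 ns_to_sec_rem(u64 ns, u32 *rem_ns)
{
	u64 sec = ns;

	/*
	 * do_div() is a macro, not a function: it overwrites "sec" with the
	 * 64-bit quotient and evaluates to the 32-bit remainder.
	 */
	*rem_ns = do_div(sec, 1000000000u);

	return sec;
}

/* The same split via div_u64_rem(): the quotient is returned, the remainder
 * is stored through the pointer, and the dividend argument is not modified. */
static u64 ns_to_sec_rem2(u64 ns, u32 *rem_ns)
{
	return div_u64_rem(ns, 1000000000u, rem_ns);
}

With the fast path added in Rev 3031, a call whose divisor is a compile-time power of two, e.g. do_div(bytes, 4096), reduces to a mask and a shift through is_power_of_2()/ilog2() instead of the divl-based sequence.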