#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/types.h>
#include <asm/div64.h>

#if BITS_PER_LONG == 64

#define div64_long(x,y) div64_s64((x),(y))
#define div64_ul(x, y) div64_u64((x), (y))

/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor and remainder
 *
 * This is commonly provided by 32bit archs as an optimized 64bit
 * divide.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

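/*
 * Illustrative example (not part of the kernel header; values are
 * arbitrary): split a nanosecond count into whole seconds plus the
 * leftover nanoseconds.
 *
 *	u32 rem;
 *	u64 secs = div_u64_rem(3999999999ULL, 1000000000U, &rem);
 *	// secs == 3, rem == 999999999
 */
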
/**
 * div_s64_rem - signed 64bit divide with 32bit divisor and remainder
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

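/*
 * Illustrative example (not part of the kernel header): C99 division
 * truncates toward zero, so the remainder takes the sign of the
 * dividend.
 *
 *	s32 rem;
 *	s64 q = div_s64_rem(-7, 2, &rem);
 *	// q == -3, rem == -1
 */
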
/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
	return dividend / divisor;
}

/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
	return dividend / divisor;
}

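/*
 * Illustrative example (not part of the kernel header; values are
 * arbitrary): the full 64-by-64 variants are for cases where the
 * divisor itself may not fit in 32 bits.
 *
 *	u64 chunks = div64_u64(10737418240ULL, 3145728ULL);
 *	// 10 GiB in 3 MiB chunks: chunks == 3413
 *	s64 q = div64_s64(-10LL, 3LL);	// q == -3
 */
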
#elif BITS_PER_LONG == 32

#define div64_long(x,y) div_s64((x),(y))
#define div64_ul(x, y) div_u64((x), (y))

#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = do_div(dividend, divisor);
	return dividend;
}
#endif

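/*
 * Illustrative note (added for clarity): do_div() divides in place -
 * it overwrites its first argument with the quotient and returns the
 * remainder, which is why the wrapper above returns "dividend" after
 * the call.
 *
 *	u64 n = 1000000007ULL;
 *	u32 r = do_div(n, 10);
 *	// now n == 100000000, r == 7
 */
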
#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif

#endif /* BITS_PER_LONG */

/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full
 * 64bit divide.
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
	u32 remainder;
	return div_u64_rem(dividend, divisor, &remainder);
}
#endif

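/*
 * Illustrative example (not part of the kernel header): the common
 * 64-by-32 case, e.g. scaling nanoseconds down to microseconds.
 *
 *	u64 us = div_u64(1234567ULL, 1000);
 *	// us == 1234
 */
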
/**
 * div_s64 - signed 64bit divide with 32bit divisor
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
	s32 remainder;
	return div_s64_rem(dividend, divisor, &remainder);
}
#endif

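/*
 * Illustrative example (not part of the kernel header): the signed
 * counterpart, truncating toward zero like div_s64_rem() above.
 *
 *	s64 q = div_s64(-1234567LL, 1000);
 *	// q == -1234
 */
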
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);

static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	u32 ret = 0;

	while (dividend >= divisor) {
		/* The following asm() prevents the compiler from
		   optimising this loop into a modulo operation. */
		asm("" : "+rm"(dividend));

		dividend -= divisor;
		ret++;
	}

	*remainder = dividend;

	return ret;
}

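/*
 * Illustrative example (not part of the kernel header): the iterative
 * variant only pays off when the quotient is expected to be small, so
 * that a few subtractions are cheaper than a full 64bit divide - e.g.
 * normalizing a nanosecond count that is barely above one second.
 *
 *	u64 rem;
 *	u32 secs = __iter_div_u64_rem(2100000000ULL, 1000000000U, &rem);
 *	// secs == 2, rem == 100000000
 */
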
#endif /* _LINUX_MATH64_H */