Rev 6082 | Rev 6293 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 6082 | Rev 6102 | ||
---|---|---|---|
1 | #ifndef _LINUX_KERNEL_H |
1 | #ifndef _LINUX_KERNEL_H |
2 | #define _LINUX_KERNEL_H |
2 | #define _LINUX_KERNEL_H |
3 | 3 | ||
4 | 4 | ||
5 | #include |
5 | #include |
6 | #include |
6 | #include |
7 | #include |
7 | #include |
8 | #include |
8 | #include |
9 | #include |
9 | #include |
10 | #include |
10 | #include |
11 | #include |
11 | #include |
12 | #include |
12 | #include |
13 | #include |
13 | #include |
14 | #include |
14 | #include |
15 | #include |
15 | #include |
16 | 16 | ||
17 | #define USHRT_MAX ((u16)(~0U)) |
17 | #define USHRT_MAX ((u16)(~0U)) |
18 | #define SHRT_MAX ((s16)(USHRT_MAX>>1)) |
18 | #define SHRT_MAX ((s16)(USHRT_MAX>>1)) |
19 | #define SHRT_MIN ((s16)(-SHRT_MAX - 1)) |
19 | #define SHRT_MIN ((s16)(-SHRT_MAX - 1)) |
20 | #define INT_MAX ((int)(~0U>>1)) |
20 | #define INT_MAX ((int)(~0U>>1)) |
21 | #define INT_MIN (-INT_MAX - 1) |
21 | #define INT_MIN (-INT_MAX - 1) |
22 | #define UINT_MAX (~0U) |
22 | #define UINT_MAX (~0U) |
23 | #define LONG_MAX ((long)(~0UL>>1)) |
23 | #define LONG_MAX ((long)(~0UL>>1)) |
24 | #define LONG_MIN (-LONG_MAX - 1) |
24 | #define LONG_MIN (-LONG_MAX - 1) |
25 | #define ULONG_MAX (~0UL) |
25 | #define ULONG_MAX (~0UL) |
26 | #define LLONG_MAX ((long long)(~0ULL>>1)) |
26 | #define LLONG_MAX ((long long)(~0ULL>>1)) |
27 | #define LLONG_MIN (-LLONG_MAX - 1) |
27 | #define LLONG_MIN (-LLONG_MAX - 1) |
28 | #define ULLONG_MAX (~0ULL) |
28 | #define ULLONG_MAX (~0ULL) |
29 | #define SIZE_MAX (~(size_t)0) |
29 | #define SIZE_MAX (~(size_t)0) |
30 | 30 | ||
31 | #define U8_MAX ((u8)~0U) |
31 | #define U8_MAX ((u8)~0U) |
32 | #define S8_MAX ((s8)(U8_MAX>>1)) |
32 | #define S8_MAX ((s8)(U8_MAX>>1)) |
33 | #define S8_MIN ((s8)(-S8_MAX - 1)) |
33 | #define S8_MIN ((s8)(-S8_MAX - 1)) |
34 | #define U16_MAX ((u16)~0U) |
34 | #define U16_MAX ((u16)~0U) |
35 | #define S16_MAX ((s16)(U16_MAX>>1)) |
35 | #define S16_MAX ((s16)(U16_MAX>>1)) |
36 | #define S16_MIN ((s16)(-S16_MAX - 1)) |
36 | #define S16_MIN ((s16)(-S16_MAX - 1)) |
37 | #define U32_MAX ((u32)~0U) |
37 | #define U32_MAX ((u32)~0U) |
38 | #define S32_MAX ((s32)(U32_MAX>>1)) |
38 | #define S32_MAX ((s32)(U32_MAX>>1)) |
39 | #define S32_MIN ((s32)(-S32_MAX - 1)) |
39 | #define S32_MIN ((s32)(-S32_MAX - 1)) |
40 | #define U64_MAX ((u64)~0ULL) |
40 | #define U64_MAX ((u64)~0ULL) |
41 | #define S64_MAX ((s64)(U64_MAX>>1)) |
41 | #define S64_MAX ((s64)(U64_MAX>>1)) |
42 | #define S64_MIN ((s64)(-S64_MAX - 1)) |
42 | #define S64_MIN ((s64)(-S64_MAX - 1)) |
43 | 43 | ||
44 | #define STACK_MAGIC 0xdeadbeef |
44 | #define STACK_MAGIC 0xdeadbeef |
45 | 45 | ||
46 | #define REPEAT_BYTE(x) ((~0ul / 0xff) * (x)) |
46 | #define REPEAT_BYTE(x) ((~0ul / 0xff) * (x)) |
47 | 47 | ||
48 | #define ALIGN(x, a) __ALIGN_KERNEL((x), (a)) |
48 | #define ALIGN(x, a) __ALIGN_KERNEL((x), (a)) |
49 | #define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask)) |
49 | #define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask)) |
50 | #define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) |
50 | #define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) |
51 | #define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) |
51 | #define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) |
52 | 52 | ||
53 | #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) |
53 | #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) |
54 | 54 | ||
/*
 * This looks more complex than it should be. But we need to
 * get the type for the ~ right in round_down (it needs to be
 * as wide as the result!), and we want to evaluate the macro
 * arguments just once each.
 */
/* Low-bit mask for power-of-two y, in x's type: (y - 1). */
#define __round_mask(x, y) ((__typeof__(x))((y)-1))
/* Round x up to the next multiple of y; y must be a power of two. */
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
/* Round x down to the previous multiple of y; y must be a power of two. */
#define round_down(x, y) ((x) & ~__round_mask(x, y))
64 | 64 | ||
65 | #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) |
65 | #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) |
66 | #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) |
66 | #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) |
67 | #define DIV_ROUND_UP_ULL(ll,d) \ |
67 | #define DIV_ROUND_UP_ULL(ll,d) \ |
68 | ({ unsigned long long _tmp = (ll)+(d)-1; do_div(_tmp, d); _tmp; }) |
68 | ({ unsigned long long _tmp = (ll)+(d)-1; do_div(_tmp, d); _tmp; }) |
69 | 69 | ||
70 | #if BITS_PER_LONG == 32 |
70 | #if BITS_PER_LONG == 32 |
71 | # define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d) |
71 | # define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d) |
72 | #else |
72 | #else |
73 | # define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d) |
73 | # define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d) |
74 | #endif |
74 | #endif |
75 | 75 | ||
/* The `const' in roundup() prevents gcc-3.3 from calling __divdi3 */
/*
 * roundup - round x up to the next multiple of y.
 * Works for any non-zero y (uses division, not masking), so y need
 * not be a power of two.  y is evaluated only once.
 */
#define roundup(x, y) (					\
{							\
	const typeof(y) __y = y;			\
	(((x) + (__y - 1)) / __y) * __y;		\
}							\
)
/*
 * rounddown - round x down to the previous multiple of y.
 * x is evaluated only once; y need not be a power of two.
 */
#define rounddown(x, y) (				\
{							\
	typeof(x) __x = (x);				\
	__x - (__x % (y));				\
}							\
)
89 | 89 | ||
/*
 * Divide positive or negative dividend by positive divisor and round
 * to closest integer. Result is undefined for negative divisors and
 * for negative dividends if the divisor variable type is unsigned.
 *
 * Rounding to nearest is done by biasing the dividend by half the
 * divisor before the truncating division: +(d/2) when the dividend is
 * non-negative (or either operand type is unsigned, in which case it
 * cannot be negative), -(d/2) when it is negative.
 */
#define DIV_ROUND_CLOSEST(x, divisor)(			\
{							\
	typeof(x) __x = x;				\
	typeof(divisor) __d = divisor;			\
	(((typeof(x))-1) > 0 ||				\
	 ((typeof(divisor))-1) > 0 || (__x) > 0) ?	\
		(((__x) + ((__d) / 2)) / (__d)) :	\
		(((__x) - ((__d) / 2)) / (__d));	\
}							\
)
105 | /* |
105 | /* |
106 | * Same as above but for u64 dividends. divisor must be a 32-bit |
106 | * Same as above but for u64 dividends. divisor must be a 32-bit |
107 | * number. |
107 | * number. |
108 | */ |
108 | */ |
109 | #define DIV_ROUND_CLOSEST_ULL(x, divisor)( \ |
109 | #define DIV_ROUND_CLOSEST_ULL(x, divisor)( \ |
110 | { \ |
110 | { \ |
111 | typeof(divisor) __d = divisor; \ |
111 | typeof(divisor) __d = divisor; \ |
112 | unsigned long long _tmp = (x) + (__d) / 2; \ |
112 | unsigned long long _tmp = (x) + (__d) / 2; \ |
113 | do_div(_tmp, __d); \ |
113 | do_div(_tmp, __d); \ |
114 | _tmp; \ |
114 | _tmp; \ |
115 | } \ |
115 | } \ |
116 | ) |
116 | ) |
117 | 117 | ||
/*
 * Multiplies an integer by a fraction, while avoiding unnecessary
 * overflow or loss of precision.
 *
 * Computes x * numer / denom as quot * numer + (rem * numer) / denom,
 * where quot = x / denom and rem = x % denom.  The only intermediate
 * product is rem * numer with rem < denom, which overflows far later
 * than the naive x * numer.
 */
#define mult_frac(x, numer, denom)(			\
{							\
	typeof(x) quot = (x) / (denom);			\
	typeof(x) rem  = (x) % (denom);			\
	(quot * (numer)) + ((rem * (numer)) / (denom)); \
}							\
)
129 | 129 | ||
130 | 130 | ||
131 | #define _RET_IP_ (unsigned long)__builtin_return_address(0) |
131 | #define _RET_IP_ (unsigned long)__builtin_return_address(0) |
132 | #define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; }) |
132 | #define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; }) |
133 | 133 | ||
134 | #ifdef CONFIG_LBDAF |
134 | #ifdef CONFIG_LBDAF |
135 | # include |
135 | # include |
136 | # define sector_div(a, b) do_div(a, b) |
136 | # define sector_div(a, b) do_div(a, b) |
137 | #else |
137 | #else |
138 | # define sector_div(n, b)( \ |
138 | # define sector_div(n, b)( \ |
139 | { \ |
139 | { \ |
140 | int _res; \ |
140 | int _res; \ |
141 | _res = (n) % (b); \ |
141 | _res = (n) % (b); \ |
142 | (n) /= (b); \ |
142 | (n) /= (b); \ |
143 | _res; \ |
143 | _res; \ |
144 | } \ |
144 | } \ |
145 | ) |
145 | ) |
146 | #endif |
146 | #endif |
147 | 147 | ||
/**
 * upper_32_bits - return bits 32-63 of a number
 * @n: the number we're accessing
 *
 * A basic shift-right of a 64- or 32-bit quantity.  Use this to suppress
 * the "right shift count >= width of type" warning when that quantity is
 * 32-bits.
 */
/* The shift is split into two 16-bit shifts so a 32-bit @n never sees a
 * single >= 32-bit shift (which would be undefined behavior and warn). */
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))

/**
 * lower_32_bits - return bits 0-31 of a number
 * @n: the number we're accessing
 */
#define lower_32_bits(n) ((u32)(n))
- | 163 | ||
- | 164 | struct completion; |
|
- | 165 | struct pt_regs; |
|
- | 166 | struct user; |
|
163 | 167 | ||
164 | #ifdef CONFIG_PREEMPT_VOLUNTARY |
168 | #ifdef CONFIG_PREEMPT_VOLUNTARY |
165 | extern int _cond_resched(void); |
169 | extern int _cond_resched(void); |
166 | # define might_resched() _cond_resched() |
170 | # define might_resched() _cond_resched() |
167 | #else |
171 | #else |
168 | # define might_resched() do { } while (0) |
172 | # define might_resched() do { } while (0) |
169 | #endif |
173 | #endif |
170 | 174 | ||
171 | #ifdef CONFIG_DEBUG_ATOMIC_SLEEP |
175 | #ifdef CONFIG_DEBUG_ATOMIC_SLEEP |
172 | void ___might_sleep(const char *file, int line, int preempt_offset); |
176 | void ___might_sleep(const char *file, int line, int preempt_offset); |
173 | void __might_sleep(const char *file, int line, int preempt_offset); |
177 | void __might_sleep(const char *file, int line, int preempt_offset); |
174 | /** |
178 | /** |
175 | * might_sleep - annotation for functions that can sleep |
179 | * might_sleep - annotation for functions that can sleep |
176 | * |
180 | * |
177 | * this macro will print a stack trace if it is executed in an atomic |
181 | * this macro will print a stack trace if it is executed in an atomic |
178 | * context (spinlock, irq-handler, ...). |
182 | * context (spinlock, irq-handler, ...). |
179 | * |
183 | * |
180 | * This is a useful debugging help to be able to catch problems early and not |
184 | * This is a useful debugging help to be able to catch problems early and not |
181 | * be bitten later when the calling function happens to sleep when it is not |
185 | * be bitten later when the calling function happens to sleep when it is not |
182 | * supposed to. |
186 | * supposed to. |
183 | */ |
187 | */ |
184 | # define might_sleep() \ |
188 | # define might_sleep() \ |
185 | do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) |
189 | do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) |
186 | # define sched_annotate_sleep() (current->task_state_change = 0) |
190 | # define sched_annotate_sleep() (current->task_state_change = 0) |
187 | #else |
191 | #else |
188 | static inline void ___might_sleep(const char *file, int line, |
192 | static inline void ___might_sleep(const char *file, int line, |
189 | int preempt_offset) { } |
193 | int preempt_offset) { } |
190 | static inline void __might_sleep(const char *file, int line, |
194 | static inline void __might_sleep(const char *file, int line, |
191 | int preempt_offset) { } |
195 | int preempt_offset) { } |
192 | # define might_sleep() do { might_resched(); } while (0) |
196 | # define might_sleep() do { might_resched(); } while (0) |
193 | # define sched_annotate_sleep() do { } while (0) |
197 | # define sched_annotate_sleep() do { } while (0) |
194 | #endif |
198 | #endif |
195 | 199 | ||
196 | #define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0) |
200 | #define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0) |
197 | 201 | ||
198 | /** |
202 | /** |
199 | * abs - return absolute value of an argument |
203 | * abs - return absolute value of an argument |
200 | * @x: the value. If it is unsigned type, it is converted to signed type first |
204 | * @x: the value. If it is unsigned type, it is converted to signed type first |
201 | * (s64, long or int depending on its size). |
205 | * (s64, long or int depending on its size). |
202 | * |
206 | * |
203 | * Return: an absolute value of x. If x is 64-bit, macro's return type is s64, |
207 | * Return: an absolute value of x. If x is 64-bit, macro's return type is s64, |
204 | * otherwise it is signed long. |
208 | * otherwise it is signed long. |
205 | */ |
209 | */ |
206 | #define abs(x) __builtin_choose_expr(sizeof(x) == sizeof(s64), ({ \ |
210 | #define abs(x) __builtin_choose_expr(sizeof(x) == sizeof(s64), ({ \ |
207 | s64 __x = (x); \ |
211 | s64 __x = (x); \ |
208 | (__x < 0) ? -__x : __x; \ |
212 | (__x < 0) ? -__x : __x; \ |
209 | }), ({ \ |
213 | }), ({ \ |
210 | long ret; \ |
214 | long ret; \ |
211 | if (sizeof(x) == sizeof(long)) { \ |
215 | if (sizeof(x) == sizeof(long)) { \ |
212 | long __x = (x); \ |
216 | long __x = (x); \ |
213 | ret = (__x < 0) ? -__x : __x; \ |
217 | ret = (__x < 0) ? -__x : __x; \ |
214 | } else { \ |
218 | } else { \ |
215 | int __x = (x); \ |
219 | int __x = (x); \ |
216 | ret = (__x < 0) ? -__x : __x; \ |
220 | ret = (__x < 0) ? -__x : __x; \ |
217 | } \ |
221 | } \ |
218 | ret; \ |
222 | ret; \ |
219 | })) |
223 | })) |
220 | 224 | ||
221 | /** |
225 | /** |
222 | * reciprocal_scale - "scale" a value into range [0, ep_ro) |
226 | * reciprocal_scale - "scale" a value into range [0, ep_ro) |
223 | * @val: value |
227 | * @val: value |
224 | * @ep_ro: right open interval endpoint |
228 | * @ep_ro: right open interval endpoint |
225 | * |
229 | * |
226 | * Perform a "reciprocal multiplication" in order to "scale" a value into |
230 | * Perform a "reciprocal multiplication" in order to "scale" a value into |
227 | * range [0, ep_ro), where the upper interval endpoint is right-open. |
231 | * range [0, ep_ro), where the upper interval endpoint is right-open. |
228 | * This is useful, e.g. for accessing a index of an array containing |
232 | * This is useful, e.g. for accessing a index of an array containing |
229 | * ep_ro elements, for example. Think of it as sort of modulus, only that |
233 | * ep_ro elements, for example. Think of it as sort of modulus, only that |
230 | * the result isn't that of modulo. ;) Note that if initial input is a |
234 | * the result isn't that of modulo. ;) Note that if initial input is a |
231 | * small value, then result will return 0. |
235 | * small value, then result will return 0. |
232 | * |
236 | * |
233 | * Return: a result based on val in interval [0, ep_ro). |
237 | * Return: a result based on val in interval [0, ep_ro). |
234 | */ |
238 | */ |
235 | static inline u32 reciprocal_scale(u32 val, u32 ep_ro) |
239 | static inline u32 reciprocal_scale(u32 val, u32 ep_ro) |
236 | { |
240 | { |
237 | return (u32)(((u64) val * ep_ro) >> 32); |
241 | return (u32)(((u64) val * ep_ro) >> 32); |
238 | } |
242 | } |
239 | 243 | ||
240 | #if defined(CONFIG_MMU) && \ |
244 | #if defined(CONFIG_MMU) && \ |
241 | (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)) |
245 | (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)) |
242 | #define might_fault() __might_fault(__FILE__, __LINE__) |
246 | #define might_fault() __might_fault(__FILE__, __LINE__) |
243 | void __might_fault(const char *file, int line); |
247 | void __might_fault(const char *file, int line); |
244 | #else |
248 | #else |
245 | static inline void might_fault(void) { } |
249 | static inline void might_fault(void) { } |
246 | #endif |
250 | #endif |
247 | 251 | ||
248 | #define KERN_EMERG "<0>" /* system is unusable */ |
252 | #define KERN_EMERG "<0>" /* system is unusable */ |
249 | #define KERN_ALERT "<1>" /* action must be taken immediately */ |
253 | #define KERN_ALERT "<1>" /* action must be taken immediately */ |
250 | #define KERN_CRIT "<2>" /* critical conditions */ |
254 | #define KERN_CRIT "<2>" /* critical conditions */ |
251 | #define KERN_ERR "<3>" /* error conditions */ |
255 | #define KERN_ERR "<3>" /* error conditions */ |
252 | #define KERN_WARNING "<4>" /* warning conditions */ |
256 | #define KERN_WARNING "<4>" /* warning conditions */ |
253 | #define KERN_NOTICE "<5>" /* normal but significant condition */ |
257 | #define KERN_NOTICE "<5>" /* normal but significant condition */ |
254 | #define KERN_INFO "<6>" /* informational */ |
258 | #define KERN_INFO "<6>" /* informational */ |
255 | #define KERN_DEBUG "<7>" /* debug-level messages */ |
259 | #define KERN_DEBUG "<7>" /* debug-level messages */ |
256 | extern unsigned long simple_strtoul(const char *,char **,unsigned int); |
260 | extern unsigned long simple_strtoul(const char *,char **,unsigned int); |
257 | extern long simple_strtol(const char *,char **,unsigned int); |
261 | extern long simple_strtol(const char *,char **,unsigned int); |
258 | extern unsigned long long simple_strtoull(const char *,char **,unsigned int); |
262 | extern unsigned long long simple_strtoull(const char *,char **,unsigned int); |
259 | extern long long simple_strtoll(const char *,char **,unsigned int); |
263 | extern long long simple_strtoll(const char *,char **,unsigned int); |
260 | 264 | ||
261 | extern int num_to_str(char *buf, int size, unsigned long long num); |
265 | extern int num_to_str(char *buf, int size, unsigned long long num); |
262 | 266 | ||
263 | /* lib/printf utilities */ |
267 | /* lib/printf utilities */ |
264 | 268 | ||
265 | extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...); |
269 | extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...); |
266 | extern __printf(2, 0) int vsprintf(char *buf, const char *, va_list); |
270 | extern __printf(2, 0) int vsprintf(char *buf, const char *, va_list); |
267 | extern __printf(3, 4) |
271 | extern __printf(3, 4) |
268 | int snprintf(char *buf, size_t size, const char *fmt, ...); |
272 | int snprintf(char *buf, size_t size, const char *fmt, ...); |
269 | extern __printf(3, 0) |
273 | extern __printf(3, 0) |
270 | int vsnprintf(char *buf, size_t size, const char *fmt, va_list args); |
274 | int vsnprintf(char *buf, size_t size, const char *fmt, va_list args); |
271 | extern __printf(3, 4) |
275 | extern __printf(3, 4) |
272 | int scnprintf(char *buf, size_t size, const char *fmt, ...); |
276 | int scnprintf(char *buf, size_t size, const char *fmt, ...); |
273 | extern __printf(3, 0) |
277 | extern __printf(3, 0) |
274 | int vscnprintf(char *buf, size_t size, const char *fmt, va_list args); |
278 | int vscnprintf(char *buf, size_t size, const char *fmt, va_list args); |
275 | extern __printf(2, 3) |
279 | extern __printf(2, 3) |
276 | char *kasprintf(gfp_t gfp, const char *fmt, ...); |
280 | char *kasprintf(gfp_t gfp, const char *fmt, ...); |
277 | extern __printf(2, 0) |
281 | extern __printf(2, 0) |
278 | char *kvasprintf(gfp_t gfp, const char *fmt, va_list args); |
282 | char *kvasprintf(gfp_t gfp, const char *fmt, va_list args); |
279 | extern __printf(2, 0) |
283 | extern __printf(2, 0) |
280 | const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args); |
284 | const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args); |
281 | 285 | ||
282 | extern __scanf(2, 3) |
286 | extern __scanf(2, 3) |
283 | int sscanf(const char *, const char *, ...); |
287 | int sscanf(const char *, const char *, ...); |
284 | extern __scanf(2, 0) |
288 | extern __scanf(2, 0) |
285 | int vsscanf(const char *, const char *, va_list); |
289 | int vsscanf(const char *, const char *, va_list); |
286 | extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */ |
290 | extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */ |
287 | enum lockdep_ok { |
291 | enum lockdep_ok { |
288 | LOCKDEP_STILL_OK, |
292 | LOCKDEP_STILL_OK, |
289 | LOCKDEP_NOW_UNRELIABLE |
293 | LOCKDEP_NOW_UNRELIABLE |
290 | }; |
294 | }; |
291 | extern void add_taint(unsigned flag, enum lockdep_ok); |
295 | extern void add_taint(unsigned flag, enum lockdep_ok); |
292 | extern int test_taint(unsigned flag); |
296 | extern int test_taint(unsigned flag); |
293 | extern unsigned long get_taint(void); |
297 | extern unsigned long get_taint(void); |
294 | extern int root_mountflags; |
298 | extern int root_mountflags; |
295 | 299 | ||
296 | extern bool early_boot_irqs_disabled; |
300 | extern bool early_boot_irqs_disabled; |
297 | 301 | ||
298 | /* Values used for system_state */ |
302 | /* Values used for system_state */ |
299 | extern enum system_states { |
303 | extern enum system_states { |
300 | SYSTEM_BOOTING, |
304 | SYSTEM_BOOTING, |
301 | SYSTEM_RUNNING, |
305 | SYSTEM_RUNNING, |
302 | SYSTEM_HALT, |
306 | SYSTEM_HALT, |
303 | SYSTEM_POWER_OFF, |
307 | SYSTEM_POWER_OFF, |
304 | SYSTEM_RESTART, |
308 | SYSTEM_RESTART, |
305 | } system_state; |
309 | } system_state; |
306 | 310 | ||
307 | #define TAINT_PROPRIETARY_MODULE 0 |
311 | #define TAINT_PROPRIETARY_MODULE 0 |
308 | #define TAINT_FORCED_MODULE 1 |
312 | #define TAINT_FORCED_MODULE 1 |
309 | #define TAINT_CPU_OUT_OF_SPEC 2 |
313 | #define TAINT_CPU_OUT_OF_SPEC 2 |
310 | #define TAINT_FORCED_RMMOD 3 |
314 | #define TAINT_FORCED_RMMOD 3 |
311 | #define TAINT_MACHINE_CHECK 4 |
315 | #define TAINT_MACHINE_CHECK 4 |
312 | #define TAINT_BAD_PAGE 5 |
316 | #define TAINT_BAD_PAGE 5 |
313 | #define TAINT_USER 6 |
317 | #define TAINT_USER 6 |
314 | #define TAINT_DIE 7 |
318 | #define TAINT_DIE 7 |
315 | #define TAINT_OVERRIDDEN_ACPI_TABLE 8 |
319 | #define TAINT_OVERRIDDEN_ACPI_TABLE 8 |
316 | #define TAINT_WARN 9 |
320 | #define TAINT_WARN 9 |
317 | #define TAINT_CRAP 10 |
321 | #define TAINT_CRAP 10 |
318 | #define TAINT_FIRMWARE_WORKAROUND 11 |
322 | #define TAINT_FIRMWARE_WORKAROUND 11 |
319 | #define TAINT_OOT_MODULE 12 |
323 | #define TAINT_OOT_MODULE 12 |
320 | #define TAINT_UNSIGNED_MODULE 13 |
324 | #define TAINT_UNSIGNED_MODULE 13 |
321 | #define TAINT_SOFTLOCKUP 14 |
325 | #define TAINT_SOFTLOCKUP 14 |
322 | #define TAINT_LIVEPATCH 15 |
326 | #define TAINT_LIVEPATCH 15 |
323 | 327 | ||
324 | extern const char hex_asc[]; |
328 | extern const char hex_asc[]; |
325 | #define hex_asc_lo(x) hex_asc[((x) & 0x0f)] |
329 | #define hex_asc_lo(x) hex_asc[((x) & 0x0f)] |
326 | #define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4] |
330 | #define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4] |
327 | 331 | ||
328 | static inline char *hex_byte_pack(char *buf, u8 byte) |
332 | static inline char *hex_byte_pack(char *buf, u8 byte) |
329 | { |
333 | { |
330 | *buf++ = hex_asc_hi(byte); |
334 | *buf++ = hex_asc_hi(byte); |
331 | *buf++ = hex_asc_lo(byte); |
335 | *buf++ = hex_asc_lo(byte); |
332 | return buf; |
336 | return buf; |
333 | } |
337 | } |
334 | 338 | ||
335 | extern const char hex_asc_upper[]; |
339 | extern const char hex_asc_upper[]; |
336 | #define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)] |
340 | #define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)] |
337 | #define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4] |
341 | #define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4] |
338 | 342 | ||
339 | static inline char *hex_byte_pack_upper(char *buf, u8 byte) |
343 | static inline char *hex_byte_pack_upper(char *buf, u8 byte) |
340 | { |
344 | { |
341 | *buf++ = hex_asc_upper_hi(byte); |
345 | *buf++ = hex_asc_upper_hi(byte); |
342 | *buf++ = hex_asc_upper_lo(byte); |
346 | *buf++ = hex_asc_upper_lo(byte); |
343 | return buf; |
347 | return buf; |
344 | } |
348 | } |
345 | 349 | ||
346 | extern int hex_to_bin(char ch); |
350 | extern int hex_to_bin(char ch); |
347 | extern int __must_check hex2bin(u8 *dst, const char *src, size_t count); |
351 | extern int __must_check hex2bin(u8 *dst, const char *src, size_t count); |
348 | extern char *bin2hex(char *dst, const void *src, size_t count); |
352 | extern char *bin2hex(char *dst, const void *src, size_t count); |
349 | 353 | ||
350 | bool mac_pton(const char *s, u8 *mac); |
354 | bool mac_pton(const char *s, u8 *mac); |
351 | 355 | ||
352 | /* |
356 | /* |
353 | * General tracing related utility functions - trace_printk(), |
357 | * General tracing related utility functions - trace_printk(), |
354 | * tracing_on/tracing_off and tracing_start()/tracing_stop |
358 | * tracing_on/tracing_off and tracing_start()/tracing_stop |
355 | * |
359 | * |
356 | * Use tracing_on/tracing_off when you want to quickly turn on or off |
360 | * Use tracing_on/tracing_off when you want to quickly turn on or off |
357 | * tracing. It simply enables or disables the recording of the trace events. |
361 | * tracing. It simply enables or disables the recording of the trace events. |
358 | * This also corresponds to the user space /sys/kernel/debug/tracing/tracing_on |
362 | * This also corresponds to the user space /sys/kernel/debug/tracing/tracing_on |
359 | * file, which gives a means for the kernel and userspace to interact. |
363 | * file, which gives a means for the kernel and userspace to interact. |
360 | * Place a tracing_off() in the kernel where you want tracing to end. |
364 | * Place a tracing_off() in the kernel where you want tracing to end. |
361 | * From user space, examine the trace, and then echo 1 > tracing_on |
365 | * From user space, examine the trace, and then echo 1 > tracing_on |
362 | * to continue tracing. |
366 | * to continue tracing. |
363 | * |
367 | * |
364 | * tracing_stop/tracing_start has slightly more overhead. It is used |
368 | * tracing_stop/tracing_start has slightly more overhead. It is used |
365 | * by things like suspend to ram where disabling the recording of the |
369 | * by things like suspend to ram where disabling the recording of the |
366 | * trace is not enough, but tracing must actually stop because things |
370 | * trace is not enough, but tracing must actually stop because things |
367 | * like calling smp_processor_id() may crash the system. |
371 | * like calling smp_processor_id() may crash the system. |
368 | * |
372 | * |
369 | * Most likely, you want to use tracing_on/tracing_off. |
373 | * Most likely, you want to use tracing_on/tracing_off. |
370 | */ |
374 | */ |
371 | 375 | ||
372 | enum ftrace_dump_mode { |
376 | enum ftrace_dump_mode { |
373 | DUMP_NONE, |
377 | DUMP_NONE, |
374 | DUMP_ALL, |
378 | DUMP_ALL, |
375 | DUMP_ORIG, |
379 | DUMP_ORIG, |
376 | }; |
380 | }; |
377 | 381 | ||
378 | #ifdef CONFIG_TRACING |
382 | #ifdef CONFIG_TRACING |
379 | void tracing_on(void); |
383 | void tracing_on(void); |
380 | void tracing_off(void); |
384 | void tracing_off(void); |
381 | int tracing_is_on(void); |
385 | int tracing_is_on(void); |
382 | void tracing_snapshot(void); |
386 | void tracing_snapshot(void); |
383 | void tracing_snapshot_alloc(void); |
387 | void tracing_snapshot_alloc(void); |
384 | 388 | ||
385 | extern void tracing_start(void); |
389 | extern void tracing_start(void); |
386 | extern void tracing_stop(void); |
390 | extern void tracing_stop(void); |
387 | 391 | ||
/*
 * Deliberately empty: never executed at run time.  It exists only so
 * that the __printf(1, 2) attribute makes the compiler type-check the
 * format string and arguments passed to trace_printk() (it is invoked
 * from __trace_printk_check_format() inside an `if (0)` branch).
 */
static inline __printf(1, 2)
void ____trace_printk_check_format(const char *fmt, ...)
{
}
392 | #define __trace_printk_check_format(fmt, args...) \ |
396 | #define __trace_printk_check_format(fmt, args...) \ |
393 | do { \ |
397 | do { \ |
394 | if (0) \ |
398 | if (0) \ |
395 | ____trace_printk_check_format(fmt, ##args); \ |
399 | ____trace_printk_check_format(fmt, ##args); \ |
396 | } while (0) |
400 | } while (0) |
397 | 401 | ||
398 | /** |
402 | /** |
399 | * trace_printk - printf formatting in the ftrace buffer |
403 | * trace_printk - printf formatting in the ftrace buffer |
400 | * @fmt: the printf format for printing |
404 | * @fmt: the printf format for printing |
401 | * |
405 | * |
402 | * Note: __trace_printk is an internal function for trace_printk and |
406 | * Note: __trace_printk is an internal function for trace_printk and |
403 | * the @ip is passed in via the trace_printk macro. |
407 | * the @ip is passed in via the trace_printk macro. |
404 | * |
408 | * |
405 | * This function allows a kernel developer to debug fast path sections |
409 | * This function allows a kernel developer to debug fast path sections |
406 | * that printk is not appropriate for. By scattering in various |
410 | * that printk is not appropriate for. By scattering in various |
407 | * printk like tracing in the code, a developer can quickly see |
411 | * printk like tracing in the code, a developer can quickly see |
408 | * where problems are occurring. |
412 | * where problems are occurring. |
409 | * |
413 | * |
410 | * This is intended as a debugging tool for the developer only. |
414 | * This is intended as a debugging tool for the developer only. |
411 | * Please refrain from leaving trace_printks scattered around in |
415 | * Please refrain from leaving trace_printks scattered around in |
412 | * your code. (Extra memory is used for special buffers that are |
416 | * your code. (Extra memory is used for special buffers that are |
413 | * allocated when trace_printk() is used) |
417 | * allocated when trace_printk() is used) |
414 | * |
418 | * |
415 | * A little optization trick is done here. If there's only one |
419 | * A little optization trick is done here. If there's only one |
416 | * argument, there's no need to scan the string for printf formats. |
420 | * argument, there's no need to scan the string for printf formats. |
417 | * The trace_puts() will suffice. But how can we take advantage of |
421 | * The trace_puts() will suffice. But how can we take advantage of |
418 | * using trace_puts() when trace_printk() has only one argument? |
422 | * using trace_puts() when trace_printk() has only one argument? |
419 | * By stringifying the args and checking the size we can tell |
423 | * By stringifying the args and checking the size we can tell |
420 | * whether or not there are args. __stringify((__VA_ARGS__)) will |
424 | * whether or not there are args. __stringify((__VA_ARGS__)) will |
421 | * turn into "()\0" with a size of 3 when there are no args, anything |
425 | * turn into "()\0" with a size of 3 when there are no args, anything |
422 | * else will be bigger. All we need to do is define a string to this, |
426 | * else will be bigger. All we need to do is define a string to this, |
423 | * and then take its size and compare to 3. If it's bigger, use |
427 | * and then take its size and compare to 3. If it's bigger, use |
424 | * do_trace_printk() otherwise, optimize it to trace_puts(). Then just |
428 | * do_trace_printk() otherwise, optimize it to trace_puts(). Then just |
425 | * let gcc optimize the rest. |
429 | * let gcc optimize the rest. |
426 | */ |
430 | */ |
427 | 431 | ||
/*
 * trace_printk() - see the kernel-doc comment above.
 *
 * If __VA_ARGS__ is empty, __stringify((__VA_ARGS__)) expands to the
 * string "()", whose sizeof is 3 (two characters plus the NUL); any
 * real argument makes it larger.  With arguments we need the full
 * do_trace_printk() formatting path; with none, the much cheaper
 * trace_puts() suffices and the dead branch is optimized away.
 */
#define trace_printk(fmt, ...)				\
do {							\
	char _______STR[] = __stringify((__VA_ARGS__));	\
	if (sizeof(_______STR) > 3)			\
		do_trace_printk(fmt, ##__VA_ARGS__);	\
	else						\
		trace_puts(fmt);			\
} while (0)
436 | 440 | ||
/*
 * do_trace_printk() - back end for trace_printk().
 *
 * When @fmt is a compile-time constant its pointer is recorded in the
 * dedicated "__trace_printk_fmt" ELF section so the tracer can resolve
 * the format string later, and the fast binary __trace_bprintk() path
 * is taken (only the raw arguments are stored at trace time).
 * A non-constant @fmt must be fully formatted at the call site via
 * __trace_printk().  __trace_printk_check_format() only exists to let
 * the compiler type-check @args against @fmt.
 */
#define do_trace_printk(fmt, args...)					\
do {									\
	static const char *trace_printk_fmt				\
		__attribute__((section("__trace_printk_fmt"))) =	\
		__builtin_constant_p(fmt) ? fmt : NULL;			\
									\
	__trace_printk_check_format(fmt, ##args);			\
									\
	if (__builtin_constant_p(fmt))					\
		__trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args);	\
	else								\
		__trace_printk(_THIS_IP_, fmt, ##args);			\
} while (0)
450 | 454 | ||
/* Binary trace output: stores the @fmt pointer plus raw arguments. */
extern __printf(2, 3)
int __trace_bprintk(unsigned long ip, const char *fmt, ...);

/* Formatted trace output: the string is rendered at the call site. */
extern __printf(2, 3)
int __trace_printk(unsigned long ip, const char *fmt, ...);
456 | 460 | ||
457 | /** |
461 | /** |
458 | * trace_puts - write a string into the ftrace buffer |
462 | * trace_puts - write a string into the ftrace buffer |
459 | * @str: the string to record |
463 | * @str: the string to record |
460 | * |
464 | * |
461 | * Note: __trace_bputs is an internal function for trace_puts and |
465 | * Note: __trace_bputs is an internal function for trace_puts and |
462 | * the @ip is passed in via the trace_puts macro. |
466 | * the @ip is passed in via the trace_puts macro. |
463 | * |
467 | * |
464 | * This is similar to trace_printk() but is made for those really fast |
468 | * This is similar to trace_printk() but is made for those really fast |
 * paths that a developer wants the least amount of "Heisenbug" effects,
466 | * where the processing of the print format is still too much. |
470 | * where the processing of the print format is still too much. |
467 | * |
471 | * |
468 | * This function allows a kernel developer to debug fast path sections |
472 | * This function allows a kernel developer to debug fast path sections |
469 | * that printk is not appropriate for. By scattering in various |
473 | * that printk is not appropriate for. By scattering in various |
470 | * printk like tracing in the code, a developer can quickly see |
474 | * printk like tracing in the code, a developer can quickly see |
471 | * where problems are occurring. |
475 | * where problems are occurring. |
472 | * |
476 | * |
473 | * This is intended as a debugging tool for the developer only. |
477 | * This is intended as a debugging tool for the developer only. |
474 | * Please refrain from leaving trace_puts scattered around in |
478 | * Please refrain from leaving trace_puts scattered around in |
475 | * your code. (Extra memory is used for special buffers that are |
479 | * your code. (Extra memory is used for special buffers that are |
476 | * allocated when trace_puts() is used) |
480 | * allocated when trace_puts() is used) |
477 | * |
481 | * |
478 | * Returns: 0 if nothing was written, positive # if string was. |
482 | * Returns: 0 if nothing was written, positive # if string was. |
479 | * (1 when __trace_bputs is used, strlen(str) when __trace_puts is used) |
483 | * (1 when __trace_bputs is used, strlen(str) when __trace_puts is used) |
480 | */ |
484 | */ |
481 | 485 | ||
/*
 * trace_puts() - implementation; see the kernel-doc comment above.
 *
 * A constant @str has its pointer stashed in the "__trace_printk_fmt"
 * section and is emitted through the cheap __trace_bputs() (pointer
 * only); a non-constant @str must be copied into the ring buffer by
 * __trace_puts() with an explicit length.
 */
#define trace_puts(str) ({					\
	static const char *trace_printk_fmt			\
		__attribute__((section("__trace_printk_fmt"))) =	\
		__builtin_constant_p(str) ? str : NULL;		\
								\
	if (__builtin_constant_p(str))				\
		__trace_bputs(_THIS_IP_, trace_printk_fmt);	\
	else							\
		__trace_puts(_THIS_IP_, str, strlen(str));	\
})
extern int __trace_bputs(unsigned long ip, const char *str);
extern int __trace_puts(unsigned long ip, const char *str, int size);
494 | 498 | ||
/* Record the current stack trace into the trace buffer, skipping @skip frames. */
extern void trace_dump_stack(int skip);
496 | 500 | ||
/*
 * The double __builtin_constant_p is because gcc will give us an error
 * if we try to allocate the static variable to fmt if it is not a
 * constant. Even with the outer if statement.
 *
 * va_list variant of trace_printk(): same constant-format fast path
 * (pointer recorded in the "__trace_printk_fmt" section), but the
 * arguments arrive pre-packed in @vargs.
 */
#define ftrace_vprintk(fmt, vargs)					\
do {									\
	if (__builtin_constant_p(fmt)) {				\
		static const char *trace_printk_fmt			\
		  __attribute__((section("__trace_printk_fmt"))) =	\
			__builtin_constant_p(fmt) ? fmt : NULL;		\
									\
		__ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs);	\
	} else								\
		__ftrace_vprintk(_THIS_IP_, fmt, vargs);		\
} while (0)
513 | 517 | ||
/* va_list back ends for ftrace_vprintk(); binary and formatted forms. */
extern __printf(2, 0) int
__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);

extern __printf(2, 0) int
__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);

/* Oops-style dump of the trace buffer to the console. */
extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
#else
/*
 * CONFIG_TRACING disabled: every tracing entry point collapses to a
 * no-op stub so callers need no #ifdefs of their own.  The printf-like
 * stubs keep their __printf attributes so format strings are still
 * type-checked even in non-tracing builds.
 */
static inline void tracing_start(void) { }
static inline void tracing_stop(void) { }
static inline void trace_dump_stack(int skip) { }

static inline void tracing_on(void) { }
static inline void tracing_off(void) { }
static inline int tracing_is_on(void) { return 0; }
static inline void tracing_snapshot(void) { }
static inline void tracing_snapshot_alloc(void) { }

static inline __printf(1, 2)
int trace_printk(const char *fmt, ...)
{
	return 0;
}
static __printf(1, 0) inline int
ftrace_vprintk(const char *fmt, va_list ap)
{
	return 0;
}
static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
#endif /* CONFIG_TRACING */
544 | 548 | ||
/*
 * min()/max()/clamp() macros that also do strict type-checking.
 * The "unnecessary" pointer comparison below exists only to make the
 * compiler warn when the two arguments have different types.
 * Each argument is evaluated exactly once.
 */
#define min(x, y) ({				\
	typeof(x) _min_a = (x);			\
	typeof(y) _min_b = (y);			\
	(void) (&_min_a == &_min_b);		\
	_min_a < _min_b ? _min_a : _min_b; })
555 | 559 | ||
/*
 * max() - larger of two values, with the same strict type check as
 * min(): the pointer comparison warns on mismatched argument types.
 * Each argument is evaluated exactly once.
 */
#define max(x, y) ({				\
	typeof(x) _max_a = (x);			\
	typeof(y) _max_b = (y);			\
	(void) (&_max_a == &_max_b);		\
	_max_a > _max_b ? _max_a : _max_b; })
561 | 565 | ||
/* 3-way min/max; the typeof cast keeps the strict type check when the
 * inner result is compared against z. */
#define min3(x, y, z) min((typeof(x))min(x, y), z)
#define max3(x, y, z) max((typeof(x))max(x, y), z)
564 | 568 | ||
/**
 * min_not_zero - return the minimum that is _not_ zero, unless both are zero
 * @x: value1
 * @y: value2
 *
 * If exactly one value is zero the other is returned; if both are zero
 * the result is zero.  Arguments are evaluated once.
 */
#define min_not_zero(x, y) ({ \
	typeof(x) __x = (x); \
	typeof(y) __y = (y); \
	__x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
574 | 578 | ||
/**
 * clamp - return a value clamped to a given range with strict typechecking
 * @val: current value
 * @lo: lowest allowable value
 * @hi: highest allowable value
 *
 * This macro does strict typechecking of lo/hi to make sure they are of the
 * same type as val. See the unnecessary pointer comparisons.
 * Equivalent to min(max(val, lo), hi).
 */
#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
585 | 589 | ||
/*
 * Relaxed variants: when the strict type checks of min()/max() get in
 * the way, these force both operands to an explicit @type first.
 * (Or avoid min/max/clamp entirely, of course.)
 */
#define min_t(type, x, y) ({			\
	type __lo_a = (x);			\
	type __lo_b = (y);			\
	__lo_a < __lo_b ? __lo_a : __lo_b; })

#define max_t(type, x, y) ({			\
	type __hi_a = (x);			\
	type __hi_b = (y);			\
	__hi_a > __hi_b ? __hi_a : __hi_b; })
601 | 605 | ||
/**
 * clamp_t - return a value clamped to a given range using a given type
 * @type: the type of variable to use
 * @val: current value
 * @lo: minimum allowable value
 * @hi: maximum allowable value
 *
 * This macro does no typechecking and uses temporary variables of type
 * 'type' to make all the comparisons.
 */
#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)
616 | #define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi) |
613 | 617 | ||
/**
 * clamp_val - return a value clamped to a given range using val's type
 * @val: current value
 * @lo: minimum allowable value
 * @hi: maximum allowable value
 *
 * This macro does no typechecking and uses temporary variables of whatever
 * type the input argument 'val' is. This is useful when val is an unsigned
 * type and min and max are literals that will otherwise be assigned a signed
 * integer type.
 */
#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
626 | 630 | ||
627 | 631 | ||
/*
 * swap - swap value of @a and @b
 *
 * Both operands must have compatible types (the temporary uses
 * typeof(a)); each operand is expanded twice, so avoid side effects.
 */
#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
633 | 637 | ||
/**
 * container_of - cast a member of a structure out to the containing structure
 * @ptr: the pointer to the member.
 * @type: the type of the container struct this is embedded in.
 * @member: the name of the member within the struct.
 *
 * The intermediate __mptr assignment makes the compiler verify that
 * @ptr really points to the member's type before the offset arithmetic.
 */
#define container_of(ptr, type, member) ({ \
	const typeof( ((type *)0)->member ) *__mptr = (ptr); \
	(type *)( (char *)__mptr - offsetof(type,member) );})
644 | 648 | ||
/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD: toggling that
 * option changes generated code everywhere, so defining a symbol in
 * this widely-included header forces a full recompile. */
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
#endif
649 | 653 | ||
/* Permissions on a sysfs file: you didn't miss the 0 prefix did you?
 * Evaluates to @perms itself; the BUILD_BUG_ON_ZERO terms are zero at
 * runtime but break the build when a sanity rule is violated. */
#define VERIFY_OCTAL_PERMISSIONS(perms)					\
	(BUILD_BUG_ON_ZERO((perms) < 0) +				\
	 BUILD_BUG_ON_ZERO((perms) > 0777) +				\
	 /* USER_READABLE >= GROUP_READABLE >= OTHER_READABLE */	\
	 BUILD_BUG_ON_ZERO((((perms) >> 6) & 4) < (((perms) >> 3) & 4)) + \
	 BUILD_BUG_ON_ZERO((((perms) >> 3) & 4) < ((perms) & 4)) +	\
	 /* USER_WRITABLE >= GROUP_WRITABLE */				\
	 BUILD_BUG_ON_ZERO((((perms) >> 6) & 2) < (((perms) >> 3) & 2)) + \
	 /* OTHER_WRITABLE?  Generally considered a bad idea. */	\
	 BUILD_BUG_ON_ZERO((perms) & 2) +				\
	 (perms))
662 | - | ||
663 | 666 | ||
void free (void *ptr);

typedef unsigned long pgprotval_t;

/* Kernel event record -- NOTE(review): presumably the KolibriOS event
 * structure delivered to threads; confirm field meanings against the
 * event subsystem. */
typedef struct
{
    u32 code;       /* event class/code */
    u32 data[5];    /* payload; meaning depends on code */
}kevent_t;

/* Event handle: {handle, euid} pair, also addressable as one raw u64. */
typedef union
{
    struct
    {
        u32 handle;
        u32 euid;
    };
    u64 raw;
}evhandle_t;
669 | 686 | ||
/* Minimal stand-in for the Linux struct file: a buffer described by an
 * array of page pointers. */
struct file
{
    struct page **pages;         /* physical memory backend */
    unsigned int count;          /* NOTE(review): page count -- confirm vs allocated */
    unsigned int allocated;      /* NOTE(review): capacity of pages[] -- confirm */
    void *vma;                   /* mapping cookie, opaque here */
};
677 | 694 | ||
/* Empty placeholder types so Linux-derived code compiles unchanged. */
struct vm_area_struct {};
struct address_space {};

/* No kernel debugger in this environment. */
#define in_dbg_master() (0)

/* Timer tick rate, in ticks per second. */
#define HZ 100
700 | #define HZ 100 |
685 | 701 | ||
struct tvec_base;

/* Linux-compatible timer descriptor. */
struct timer_list {
	struct list_head entry;		/* linkage on the pending-timer list */
	unsigned long expires;		/* expiry time, in jiffies */

	void (*function)(unsigned long);/* callback run at expiry */
	unsigned long data;		/* argument passed to the callback */
	u32 handle;			/* native timer handle; setup_timer()
					 * clears it to 0 -- NOTE(review):
					 * confirm 0 means "not armed" */
};
696 | 712 | ||
/*
 * setup_timer - initialise a timer_list before first use.
 *
 * Clears the native handle and records the callback and its argument.
 * @_timer is expanded three times, so pass a side-effect-free pointer.
 */
#define setup_timer(_timer, _fn, _data)		\
	do {					\
		(_timer)->handle = 0;		\
		(_timer)->data = (_data);	\
		(_timer)->function = (_fn);	\
	} while (0)
703 | 719 | ||
/* Deactivate a timer -- NOTE(review): return semantics presumably match
 * Linux del_timer (non-zero if it was pending); confirm in the impl. */
int del_timer(struct timer_list *timer);

/* No concurrent handler to wait for here: sync delete == plain delete. */
# define del_timer_sync(t) del_timer(t)
707 | 723 | ||
708 | 724 | ||
/*
 * Generate a single-instruction MMIO read accessor named @name.
 * @size is the mov suffix ("b"/"w"/"l"), @reg the asm output
 * constraint, @barrier an optional :"memory" clobber tail that stops
 * the compiler reordering the access against other memory operations.
 */
#define build_mmio_read(name, size, type, reg, barrier) \
static inline type name(const volatile void __iomem *addr) \
{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \
:"m" (*(volatile type __force *)addr) barrier); return ret; }

/* Generate the matching single-instruction MMIO write accessor. */
#define build_mmio_write(name, size, type, reg, barrier) \
static inline void name(type val, volatile void __iomem *addr) \
{ asm volatile("mov" size " %0,%1": :reg (val), \
"m" (*(volatile type __force *)addr) barrier); }
718 | 734 | ||
/* Ordered accessors: the "memory" clobber prevents compiler reordering. */
build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
build_mmio_read(readl, "l", unsigned int, "=r", :"memory")

/* Unordered (__-prefixed) accessors: no clobber, compiler may reorder. */
build_mmio_read(__readb, "b", unsigned char, "=q", )
build_mmio_read(__readw, "w", unsigned short, "=r", )
build_mmio_read(__readl, "l", unsigned int, "=r", )

build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
build_mmio_write(writew, "w", unsigned short, "r", :"memory")
build_mmio_write(writel, "l", unsigned int, "r", :"memory")

build_mmio_write(__writeb, "b", unsigned char, "q", )
build_mmio_write(__writew, "w", unsigned short, "r", )
build_mmio_write(__writel, "l", unsigned int, "r", )
734 | 750 | ||
/* "relaxed" reads map to the non-barrier variants on x86. */
#define readb_relaxed(a) __readb(a)
#define readw_relaxed(a) __readw(a)
#define readl_relaxed(a) __readl(a)

/* __raw_* accessors are the non-serialising forms. */
#define __raw_readb __readb
#define __raw_readw __readw
#define __raw_readl __readl

#define __raw_writeb __writeb
#define __raw_writew __writew
#define __raw_writel __writel
745 | 761 | ||
/*
 * Note: swap(a, b) is already defined earlier in this header; the
 * token-identical duplicate definition that used to live here was
 * removed (identical redefinition is legal C but redundant).
 */

/* MMIO write barrier: a compiler barrier suffices on x86. */
#define mmiowb() barrier()
751 | 767 | ||
/* dev_* logging shims: @dev is ignored; the message goes to printk()
 * with a severity word and the calling function's name prepended. */
#define dev_err(dev, format, arg...)            \
    printk("Error %s " format, __func__ , ## arg)

#define dev_warn(dev, format, arg...)           \
    printk("Warning %s " format, __func__ , ## arg)

#define dev_info(dev, format, arg...)           \
    printk("Info %s " format , __func__, ## arg)
760 | 776 | ||
/* Minimal struct page; see page_to_phys/alloc_page below: a
 * struct page pointer in this port is itself the physical address. */
struct page
{
    unsigned int addr;
};

/* The pointer value IS the physical address (cf. alloc_page()). */
#define page_to_phys(page)    ((dma_addr_t)(page))
767 | 783 | ||
/* Fault descriptor passed to ->fault handlers (Linux-compatible). */
struct vm_fault {
	unsigned int flags;		/* FAULT_FLAG_xxx flags */
	pgoff_t pgoff;			/* Logical page offset based on vma */
	void __user *virtual_address;	/* Faulting virtual address */

	struct page *page;		/* ->fault handlers should return a
					 * page here, unless VM_FAULT_NOPAGE
					 * is set (which is also implied by
					 * VM_FAULT_ERROR).
					 */
};

/* Flat array of per-page DMA addresses backing a buffer. */
struct pagelist {
	dma_addr_t *page;		/* array of page DMA addresses */
	unsigned int nents;		/* number of entries in page[] */
};
784 | 800 | ||
/* Page allocation shims: a struct page* is really a physical address,
 * so alloc/free go straight to the native page allocator. */
#define page_cache_release(page)        FreePage(page_to_phys(page))

#define alloc_page(gfp_mask)            (struct page*)AllocPage()

#define __free_page(page)               FreePage(page_to_phys(page))

/* Page refcounting is a no-op here. */
#define get_page(a)
#define put_page(a)

/* No IOMMU: the bus address is just physical address + offset. */
#define pci_map_page(dev, page, offset, size, direction) \
        (dma_addr_t)( (offset)+page_to_phys(page))

#define pci_unmap_page(dev, dma_address, size, direction)

/* NOTE(review): always expands to 0, whatever config symbol is asked. */
#define IS_ENABLED(a)  0
800 | 811 | ||
801 | 812 | ||
802 | 813 | ||
/* Maximum CPU frequency comes from the native GetCpuFreq(). */
#define cpufreq_quick_get_max(x) GetCpuFreq()

extern unsigned int tsc_khz;

/* Single-CPU shim: run @func on the current CPU only, ignore @wait,
 * and evaluate to 0 ("success"). */
#define on_each_cpu(func,info,wait)             \
	({                                      \
		func(info);                     \
		0;                              \
	})
822 | }) |
812 | 823 | ||
813 | 824 | ||
/*
 * Minimal __copy_to_user(): there is no separate user address space in
 * this environment, so the "copy" is a plain memory copy.
 * Compile-time-constant sizes of 1/2/4 (and 8 on 64-bit) become single
 * fixed-width stores (NOTE(review): proper alignment of @to is assumed);
 * everything else falls back to memcpy.  Always returns 0, i.e. "no
 * bytes left uncopied" -- this copy cannot fault.
 */
static inline __must_check long __copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		switch(n) {
		case 1:
			*(u8 __force *)to = *(u8 *)from;
			return 0;
		case 2:
			*(u16 __force *)to = *(u16 *)from;
			return 0;
		case 4:
			*(u32 __force *)to = *(u32 *)from;
			return 0;
#ifdef CONFIG_64BIT
		case 8:
			*(u64 __force *)to = *(u64 *)from;
			return 0;
#endif
		default:
			break;
		}
	}

	__builtin_memcpy((void __force *)to, from, n);
	return 0;
}
841 | 852 | ||
/* Highmem-style page mapping helpers (implemented elsewhere). */
void *kmap(struct page *page);
void *kmap_atomic(struct page *page);
void kunmap(struct page *page);
void kunmap_atomic(void *vaddr);

typedef u64 async_cookie_t;

/* iowrite32 is just an MMIO writel in this port. */
#define iowrite32(v, addr)      writel((v), (addr))
851 | 861 | ||
852 | #define __init |
862 | #define __init |
853 | 863 | ||
854 | #define CONFIG_PAGE_OFFSET 0 |
864 | #define CONFIG_PAGE_OFFSET 0 |
855 | 865 | ||
856 | #endif>>>>>>7>6>5>4>3>2>1>0>>>> |
866 | #endif>>>>>>7>6>5>4>3>2>1>0>>>> |