Subversion Repositories Kolibri OS

Rev

Rev 5272 | Rev 6102 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. #ifndef _LINUX_KERNEL_H
  2. #define _LINUX_KERNEL_H
  3.  
  4.  
  5. #include <stdarg.h>
  6. #include <linux/linkage.h>
  7. #include <linux/stddef.h>
  8. #include <linux/types.h>
  9. #include <linux/compiler.h>
  10. #include <linux/bitops.h>
  11. #include <linux/log2.h>
  12. #include <linux/typecheck.h>
  13. #include <linux/printk.h>
  14. #include <asm/byteorder.h>
  15. #include <uapi/linux/kernel.h>
  16.  
/* Classic <limits.h>-style bounds, built from all-ones bit patterns. */
#define USHRT_MAX       ((u16)(~0U))
#define SHRT_MAX        ((s16)(USHRT_MAX>>1))
#define SHRT_MIN        ((s16)(-SHRT_MAX - 1))
#define INT_MAX         ((int)(~0U>>1))
#define INT_MIN         (-INT_MAX - 1)
#define UINT_MAX        (~0U)
#define LONG_MAX        ((long)(~0UL>>1))
#define LONG_MIN        (-LONG_MAX - 1)
#define ULONG_MAX       (~0UL)
#define LLONG_MAX       ((long long)(~0ULL>>1))
#define LLONG_MIN       (-LLONG_MAX - 1)
#define ULLONG_MAX      (~0ULL)
#define SIZE_MAX        (~(size_t)0)

/* Same bounds for the kernel's fixed-width u8/s8 ... u64/s64 typedefs. */
#define U8_MAX          ((u8)~0U)
#define S8_MAX          ((s8)(U8_MAX>>1))
#define S8_MIN          ((s8)(-S8_MAX - 1))
#define U16_MAX         ((u16)~0U)
#define S16_MAX         ((s16)(U16_MAX>>1))
#define S16_MIN         ((s16)(-S16_MAX - 1))
#define U32_MAX         ((u32)~0U)
#define S32_MAX         ((s32)(U32_MAX>>1))
#define S32_MIN         ((s32)(-S32_MAX - 1))
#define U64_MAX         ((u64)~0ULL)
#define S64_MAX         ((s64)(U64_MAX>>1))
#define S64_MIN         ((s64)(-S64_MAX - 1))

/* Sentinel written at the end of a kernel stack to detect overruns. */
#define STACK_MAGIC     0xdeadbeef

/* Replicate byte value @x into every byte of an unsigned long. */
#define REPEAT_BYTE(x)  ((~0ul / 0xff) * (x))

/* Power-of-two alignment helpers; __ALIGN_KERNEL* come from uapi/linux/kernel.h. */
#define ALIGN(x, a)             __ALIGN_KERNEL((x), (a))
#define __ALIGN_MASK(x, mask)   __ALIGN_KERNEL_MASK((x), (mask))
#define PTR_ALIGN(p, a)         ((typeof(p))ALIGN((unsigned long)(p), (a)))
#define IS_ALIGNED(x, a)                (((x) & ((typeof(x))(a) - 1)) == 0)

/* Element count of a true array; __must_be_array() breaks the build on pointers. */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
  54.  
/*
 * This looks more complex than it should be. But we need to
 * get the type for the ~ right in round_down (it needs to be
 * as wide as the result!), and we want to evaluate the macro
 * arguments just once each.
 */
#define __round_mask(x, y) ((__typeof__(x))((y)-1))
/* round_up/round_down: @y must be a power of two (mask arithmetic). */
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define round_down(x, y) ((x) & ~__round_mask(x, y))

/* sizeof member @f of struct type @t, without needing an instance. */
#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
/* Integer division rounding the quotient up; works for any positive @d. */
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
/* 64-bit variant; routes through do_div() to avoid libgcc's __udivdi3. */
#define DIV_ROUND_UP_ULL(ll,d) \
        ({ unsigned long long _tmp = (ll)+(d)-1; do_div(_tmp, d); _tmp; })

#if BITS_PER_LONG == 32
# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d)
#else
# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d)
#endif

/* The `const' in roundup() prevents gcc-3.3 from calling __divdi3 */
/* roundup/rounddown: any @y, uses division (unlike round_up/round_down). */
#define roundup(x, y) (                                 \
{                                                       \
        const typeof(y) __y = y;                        \
        (((x) + (__y - 1)) / __y) * __y;                \
}                                                       \
)
#define rounddown(x, y) (                               \
{                                                       \
        typeof(x) __x = (x);                            \
        __x - (__x % (y));                              \
}                                                       \
)

/*
 * Divide positive or negative dividend by positive divisor and round
 * to closest integer. Result is undefined for negative divisors and
 * for negative dividends if the divisor variable type is unsigned.
 * (The (typeof(x))-1 > 0 tests detect unsigned operand types at
 * compile time so the negative branch is optimized away.)
 */
#define DIV_ROUND_CLOSEST(x, divisor)(                  \
{                                                       \
        typeof(x) __x = x;                              \
        typeof(divisor) __d = divisor;                  \
        (((typeof(x))-1) > 0 ||                         \
         ((typeof(divisor))-1) > 0 || (__x) > 0) ?      \
                (((__x) + ((__d) / 2)) / (__d)) :       \
                (((__x) - ((__d) / 2)) / (__d));        \
}                                                       \
)
/*
 * Same as above but for u64 dividends. divisor must be a 32-bit
 * number.
 */
#define DIV_ROUND_CLOSEST_ULL(x, divisor)(              \
{                                                       \
        typeof(divisor) __d = divisor;                  \
        unsigned long long _tmp = (x) + (__d) / 2;      \
        do_div(_tmp, __d);                              \
        _tmp;                                           \
}                                                       \
)

/*
 * Multiplies an integer by a fraction, while avoiding unnecessary
 * overflow or loss of precision.
 */
#define mult_frac(x, numer, denom)(                     \
{                                                       \
        typeof(x) quot = (x) / (denom);                 \
        typeof(x) rem  = (x) % (denom);                 \
        (quot * (numer)) + ((rem * (numer)) / (denom)); \
}                                                       \
)
  129.  
  130.  
/* Address the current function will return to (for trace annotations). */
#define _RET_IP_                (unsigned long)__builtin_return_address(0)
/* Address of the current instruction, via a GNU local label. */
#define _THIS_IP_  ({ __label__ __here; __here: (unsigned long)&&__here; })

#ifdef CONFIG_LBDAF
# include <asm/div64.h>
# define sector_div(a, b) do_div(a, b)
#else
/* Without large-block-device support, sector_t fits in native division.
 * Like do_div(): divides @n in place and yields the remainder. */
# define sector_div(n, b)( \
{ \
        int _res; \
        _res = (n) % (b); \
        (n) /= (b); \
        _res; \
} \
)
#endif

/**
 * upper_32_bits - return bits 32-63 of a number
 * @n: the number we're accessing
 *
 * A basic shift-right of a 64- or 32-bit quantity.  Use this to suppress
 * the "right shift count >= width of type" warning when that quantity is
 * 32-bits.
 */
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))

/**
 * lower_32_bits - return bits 0-31 of a number
 * @n: the number we're accessing
 */
#define lower_32_bits(n) ((u32)(n))
  163.  
/* Voluntary preemption point: a no-op unless CONFIG_PREEMPT_VOLUNTARY. */
#ifdef CONFIG_PREEMPT_VOLUNTARY
extern int _cond_resched(void);
# define might_resched() _cond_resched()
#else
# define might_resched() do { } while (0)
#endif

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
  void ___might_sleep(const char *file, int line, int preempt_offset);
  void __might_sleep(const char *file, int line, int preempt_offset);
/**
 * might_sleep - annotation for functions that can sleep
 *
 * this macro will print a stack trace if it is executed in an atomic
 * context (spinlock, irq-handler, ...).
 *
 * This is a useful debugging help to be able to catch problems early and not
 * be bitten later when the calling function happens to sleep when it is not
 * supposed to.
 */
# define might_sleep() \
        do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
# define sched_annotate_sleep() (current->task_state_change = 0)
#else
  /* Debug checks compiled out: keep only the voluntary reschedule point. */
  static inline void ___might_sleep(const char *file, int line,
                                   int preempt_offset) { }
  static inline void __might_sleep(const char *file, int line,
                                   int preempt_offset) { }
# define might_sleep() do { might_resched(); } while (0)
# define sched_annotate_sleep() do { } while (0)
#endif

/* Conditional form of might_sleep(); @cond is evaluated exactly once. */
#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
  197.  
/**
 * abs - return absolute value of an argument
 * @x: the value.  If it is unsigned type, it is converted to signed type first
 *   (s64, long or int depending on its size).
 *
 * Return: an absolute value of x.  If x is 64-bit, macro's return type is s64,
 *   otherwise it is signed long.
 *
 * NOTE: negating the most negative value of the chosen signed type
 * (e.g. S64_MIN) overflows, which is undefined behavior.
 */
#define abs(x) __builtin_choose_expr(sizeof(x) == sizeof(s64), ({       \
                s64 __x = (x);                                          \
                (__x < 0) ? -__x : __x;                                 \
        }), ({                                                          \
                long ret;                                               \
                if (sizeof(x) == sizeof(long)) {                        \
                        long __x = (x);                                 \
                        ret = (__x < 0) ? -__x : __x;                   \
                } else {                                                \
                        int __x = (x);                                  \
                        ret = (__x < 0) ? -__x : __x;                   \
                }                                                       \
                ret;                                                    \
        }))
  220.  
  221. /**
  222.  * reciprocal_scale - "scale" a value into range [0, ep_ro)
  223.  * @val: value
  224.  * @ep_ro: right open interval endpoint
  225.  *
  226.  * Perform a "reciprocal multiplication" in order to "scale" a value into
  227.  * range [0, ep_ro), where the upper interval endpoint is right-open.
  228.  * This is useful, e.g. for accessing a index of an array containing
  229.  * ep_ro elements, for example. Think of it as sort of modulus, only that
  230.  * the result isn't that of modulo. ;) Note that if initial input is a
  231.  * small value, then result will return 0.
  232.  *
  233.  * Return: a result based on val in interval [0, ep_ro).
  234.  */
  235. static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
  236. {
  237.         return (u32)(((u64) val * ep_ro) >> 32);
  238. }
  239.  
/* might_fault: annotate code that may take a page fault (user access).
 * Only checked when lock debugging or atomic-sleep debugging is on. */
#if defined(CONFIG_MMU) && \
        (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
#define might_fault() __might_fault(__FILE__, __LINE__)
void __might_fault(const char *file, int line);
#else
static inline void might_fault(void) { }
#endif
  247.  
/* printk() severity prefixes, most to least severe. */
#define KERN_EMERG      "<0>"   /* system is unusable                   */
#define KERN_ALERT      "<1>"   /* action must be taken immediately     */
#define KERN_CRIT       "<2>"   /* critical conditions                  */
#define KERN_ERR        "<3>"   /* error conditions                     */
#define KERN_WARNING    "<4>"   /* warning conditions                   */
#define KERN_NOTICE     "<5>"   /* normal but significant condition     */
#define KERN_INFO       "<6>"   /* informational                        */
#define KERN_DEBUG      "<7>"   /* debug-level messages                 */
  256. extern unsigned long simple_strtoul(const char *,char **,unsigned int);
  257. extern long simple_strtol(const char *,char **,unsigned int);
  258. extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
  259. extern long long simple_strtoll(const char *,char **,unsigned int);
  260.  
  261. extern int num_to_str(char *buf, int size, unsigned long long num);
  262.  
  263. /* lib/printf utilities */
  264.  
  265. extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...);
  266. extern __printf(2, 0) int vsprintf(char *buf, const char *, va_list);
  267. extern __printf(3, 4)
  268. int snprintf(char *buf, size_t size, const char *fmt, ...);
  269. extern __printf(3, 0)
  270. int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
  271. extern __printf(3, 4)
  272. int scnprintf(char *buf, size_t size, const char *fmt, ...);
  273. extern __printf(3, 0)
  274. int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
  275. extern __printf(2, 3)
  276. char *kasprintf(gfp_t gfp, const char *fmt, ...);
  277. extern __printf(2, 0)
  278. char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
  279. extern __printf(2, 0)
  280. const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args);
  281.  
  282. extern __scanf(2, 3)
  283. int sscanf(const char *, const char *, ...);
  284. extern __scanf(2, 0)
  285. int vsscanf(const char *, const char *, va_list);
  286. extern int oops_in_progress;            /* If set, an oops, panic(), BUG() or die() is in progress */
/* Passed to add_taint(): whether lockdep results remain trustworthy. */
enum lockdep_ok {
        LOCKDEP_STILL_OK,
        LOCKDEP_NOW_UNRELIABLE
};
  291. extern void add_taint(unsigned flag, enum lockdep_ok);
  292. extern int test_taint(unsigned flag);
  293. extern unsigned long get_taint(void);
  294. extern int root_mountflags;
  295.  
  296. extern bool early_boot_irqs_disabled;
  297.  
/* Values used for system_state */
extern enum system_states {
        SYSTEM_BOOTING,
        SYSTEM_RUNNING,
        SYSTEM_HALT,
        SYSTEM_POWER_OFF,
        SYSTEM_RESTART,
} system_state;

/* Taint flags: bit numbers for the kernel taint mask (see add_taint()). */
#define TAINT_PROPRIETARY_MODULE        0
#define TAINT_FORCED_MODULE             1
#define TAINT_CPU_OUT_OF_SPEC           2
#define TAINT_FORCED_RMMOD              3
#define TAINT_MACHINE_CHECK             4
#define TAINT_BAD_PAGE                  5
#define TAINT_USER                      6
#define TAINT_DIE                       7
#define TAINT_OVERRIDDEN_ACPI_TABLE     8
#define TAINT_WARN                      9
#define TAINT_CRAP                      10
#define TAINT_FIRMWARE_WORKAROUND       11
#define TAINT_OOT_MODULE                12
#define TAINT_UNSIGNED_MODULE           13
#define TAINT_SOFTLOCKUP                14
#define TAINT_LIVEPATCH                 15
  323.  
/* Nibble -> hex digit lookup via the hex_asc[] table (defined elsewhere). */
extern const char hex_asc[];
#define hex_asc_lo(x)   hex_asc[((x) & 0x0f)]
#define hex_asc_hi(x)   hex_asc[((x) & 0xf0) >> 4]
  327.  
  328. static inline char *hex_byte_pack(char *buf, u8 byte)
  329. {
  330.         *buf++ = hex_asc_hi(byte);
  331.         *buf++ = hex_asc_lo(byte);
  332.         return buf;
  333. }
  334.  
/* Same lookup, but against the hex_asc_upper[] table (defined elsewhere). */
extern const char hex_asc_upper[];
#define hex_asc_upper_lo(x)     hex_asc_upper[((x) & 0x0f)]
#define hex_asc_upper_hi(x)     hex_asc_upper[((x) & 0xf0) >> 4]
  338.  
  339. static inline char *hex_byte_pack_upper(char *buf, u8 byte)
  340. {
  341.         *buf++ = hex_asc_upper_hi(byte);
  342.         *buf++ = hex_asc_upper_lo(byte);
  343.         return buf;
  344. }
  345.  
/* hex <-> binary conversion helpers (implemented in lib/hexdump.c). */
extern int hex_to_bin(char ch);
extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
extern char *bin2hex(char *dst, const void *src, size_t count);

/* Parse a MAC address string into 6 bytes; returns false on bad input. */
bool mac_pton(const char *s, u8 *mac);
  351.  
  352. /*
  353.  * General tracing related utility functions - trace_printk(),
  354.  * tracing_on/tracing_off and tracing_start()/tracing_stop
  355.  *
  356.  * Use tracing_on/tracing_off when you want to quickly turn on or off
  357.  * tracing. It simply enables or disables the recording of the trace events.
  358.  * This also corresponds to the user space /sys/kernel/debug/tracing/tracing_on
  359.  * file, which gives a means for the kernel and userspace to interact.
  360.  * Place a tracing_off() in the kernel where you want tracing to end.
  361.  * From user space, examine the trace, and then echo 1 > tracing_on
  362.  * to continue tracing.
  363.  *
  364.  * tracing_stop/tracing_start has slightly more overhead. It is used
  365.  * by things like suspend to ram where disabling the recording of the
  366.  * trace is not enough, but tracing must actually stop because things
  367.  * like calling smp_processor_id() may crash the system.
  368.  *
  369.  * Most likely, you want to use tracing_on/tracing_off.
  370.  */
  371.  
/* How much of the trace buffers ftrace_dump() should emit. */
enum ftrace_dump_mode {
        DUMP_NONE,
        DUMP_ALL,
        DUMP_ORIG,
};
  377.  
#ifdef CONFIG_TRACING
void tracing_on(void);
void tracing_off(void);
int tracing_is_on(void);
void tracing_snapshot(void);
void tracing_snapshot_alloc(void);

extern void tracing_start(void);
extern void tracing_stop(void);

/* Empty __printf-attributed function: exists solely so gcc type-checks
 * trace_printk() format strings against their arguments. */
static inline __printf(1, 2)
void ____trace_printk_check_format(const char *fmt, ...)
{
}
/* The if (0) compiles the call (triggering format checking) but never runs it. */
#define __trace_printk_check_format(fmt, args...)                       \
do {                                                                    \
        if (0)                                                          \
                ____trace_printk_check_format(fmt, ##args);             \
} while (0)
  397.  
  398. /**
  399.  * trace_printk - printf formatting in the ftrace buffer
  400.  * @fmt: the printf format for printing
  401.  *
  402.  * Note: __trace_printk is an internal function for trace_printk and
  403.  *       the @ip is passed in via the trace_printk macro.
  404.  *
  405.  * This function allows a kernel developer to debug fast path sections
  406.  * that printk is not appropriate for. By scattering in various
  407.  * printk like tracing in the code, a developer can quickly see
  408.  * where problems are occurring.
  409.  *
  410.  * This is intended as a debugging tool for the developer only.
  411.  * Please refrain from leaving trace_printks scattered around in
  412.  * your code. (Extra memory is used for special buffers that are
  413.  * allocated when trace_printk() is used)
  414.  *
  415.  * A little optization trick is done here. If there's only one
  416.  * argument, there's no need to scan the string for printf formats.
  417.  * The trace_puts() will suffice. But how can we take advantage of
  418.  * using trace_puts() when trace_printk() has only one argument?
  419.  * By stringifying the args and checking the size we can tell
  420.  * whether or not there are args. __stringify((__VA_ARGS__)) will
  421.  * turn into "()\0" with a size of 3 when there are no args, anything
  422.  * else will be bigger. All we need to do is define a string to this,
  423.  * and then take its size and compare to 3. If it's bigger, use
  424.  * do_trace_printk() otherwise, optimize it to trace_puts(). Then just
  425.  * let gcc optimize the rest.
  426.  */
  427.  
/* See the comment above: stringified args of size 3 mean "()\0", i.e. no
 * varargs, so the cheaper trace_puts() path can be taken. */
#define trace_printk(fmt, ...)                          \
do {                                                    \
        char _______STR[] = __stringify((__VA_ARGS__)); \
        if (sizeof(_______STR) > 3)                     \
                do_trace_printk(fmt, ##__VA_ARGS__);    \
        else                                            \
                trace_puts(fmt);                        \
} while (0)

/* Constant formats are registered in the __trace_printk_fmt section and go
 * through the binary (bprintk) fast path; runtime formats fall back to
 * __trace_printk(). */
#define do_trace_printk(fmt, args...)                                   \
do {                                                                    \
        static const char *trace_printk_fmt                             \
                __attribute__((section("__trace_printk_fmt"))) =        \
                __builtin_constant_p(fmt) ? fmt : NULL;                 \
                                                                        \
        __trace_printk_check_format(fmt, ##args);                       \
                                                                        \
        if (__builtin_constant_p(fmt))                                  \
                __trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args);   \
        else                                                            \
                __trace_printk(_THIS_IP_, fmt, ##args);                 \
} while (0)
  450.  
  451. extern __printf(2, 3)
  452. int __trace_bprintk(unsigned long ip, const char *fmt, ...);
  453.  
  454. extern __printf(2, 3)
  455. int __trace_printk(unsigned long ip, const char *fmt, ...);
  456.  
  457. /**
  458.  * trace_puts - write a string into the ftrace buffer
  459.  * @str: the string to record
  460.  *
  461.  * Note: __trace_bputs is an internal function for trace_puts and
  462.  *       the @ip is passed in via the trace_puts macro.
  463.  *
  464.  * This is similar to trace_printk() but is made for those really fast
  465.  * paths that a developer wants the least amount of "Heisenbug" affects,
  466.  * where the processing of the print format is still too much.
  467.  *
  468.  * This function allows a kernel developer to debug fast path sections
  469.  * that printk is not appropriate for. By scattering in various
  470.  * printk like tracing in the code, a developer can quickly see
  471.  * where problems are occurring.
  472.  *
  473.  * This is intended as a debugging tool for the developer only.
  474.  * Please refrain from leaving trace_puts scattered around in
  475.  * your code. (Extra memory is used for special buffers that are
  476.  * allocated when trace_puts() is used)
  477.  *
  478.  * Returns: 0 if nothing was written, positive # if string was.
  479.  *  (1 when __trace_bputs is used, strlen(str) when __trace_puts is used)
  480.  */
  481.  
/* Constant strings use the registered-pointer fast path (__trace_bputs);
 * runtime strings are copied with their length via __trace_puts(). */
#define trace_puts(str) ({                                              \
        static const char *trace_printk_fmt                             \
                __attribute__((section("__trace_printk_fmt"))) =        \
                __builtin_constant_p(str) ? str : NULL;                 \
                                                                        \
        if (__builtin_constant_p(str))                                  \
                __trace_bputs(_THIS_IP_, trace_printk_fmt);             \
        else                                                            \
                __trace_puts(_THIS_IP_, str, strlen(str));              \
})
extern int __trace_bputs(unsigned long ip, const char *str);
extern int __trace_puts(unsigned long ip, const char *str, int size);
  494.  
  495. extern void trace_dump_stack(int skip);
  496.  
/*
 * The double __builtin_constant_p is because gcc will give us an error
 * if we try to allocate the static variable to fmt if it is not a
 * constant. Even with the outer if statement.
 */
/* va_list flavor of trace_printk(); same constant/runtime format split. */
#define ftrace_vprintk(fmt, vargs)                                      \
do {                                                                    \
        if (__builtin_constant_p(fmt)) {                                \
                static const char *trace_printk_fmt                     \
                  __attribute__((section("__trace_printk_fmt"))) =      \
                        __builtin_constant_p(fmt) ? fmt : NULL;         \
                                                                        \
                __ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs);  \
        } else                                                          \
                __ftrace_vprintk(_THIS_IP_, fmt, vargs);                \
} while (0)
  513.  
  514. extern __printf(2, 0) int
  515. __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
  516.  
  517. extern __printf(2, 0) int
  518. __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
  519.  
  520. extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
#else
/* CONFIG_TRACING disabled: every tracing hook degrades to a no-op. */
static inline void tracing_start(void) { }
static inline void tracing_stop(void) { }
static inline void trace_dump_stack(int skip) { }

static inline void tracing_on(void) { }
static inline void tracing_off(void) { }
static inline int tracing_is_on(void) { return 0; }
static inline void tracing_snapshot(void) { }
static inline void tracing_snapshot_alloc(void) { }

static inline __printf(1, 2)
int trace_printk(const char *fmt, ...)
{
        return 0;
}
static __printf(1, 0) inline int
ftrace_vprintk(const char *fmt, va_list ap)
{
        return 0;
}
static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
#endif /* CONFIG_TRACING */
  544.  
/*
 * min()/max()/clamp() macros that also do
 * strict type-checking.. See the
 * "unnecessary" pointer comparison.
 * (Comparing &_min1 == &_min2 makes gcc warn when x and y have
 * different types, without affecting the generated code.)
 */
#define min(x, y) ({                            \
        typeof(x) _min1 = (x);                  \
        typeof(y) _min2 = (y);                  \
        (void) (&_min1 == &_min2);              \
        _min1 < _min2 ? _min1 : _min2; })

#define max(x, y) ({                            \
        typeof(x) _max1 = (x);                  \
        typeof(y) _max2 = (y);                  \
        (void) (&_max1 == &_max2);              \
        _max1 > _max2 ? _max1 : _max2; })

/* Three-way variants; the inner result is cast to typeof(x) for the check. */
#define min3(x, y, z) min((typeof(x))min(x, y), z)
#define max3(x, y, z) max((typeof(x))max(x, y), z)

/**
 * min_not_zero - return the minimum that is _not_ zero, unless both are zero
 * @x: value1
 * @y: value2
 */
#define min_not_zero(x, y) ({                   \
        typeof(x) __x = (x);                    \
        typeof(y) __y = (y);                    \
        __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
  574.  
/**
 * clamp - return a value clamped to a given range with strict typechecking
 * @val: current value
 * @lo: lowest allowable value
 * @hi: highest allowable value
 *
 * This macro does strict typechecking of lo/hi to make sure they are of the
 * same type as val.  See the unnecessary pointer comparisons.
 */
#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)

/*
 * ..and if you can't take the strict
 * types, you can specify one yourself.
 *
 * Or not use min/max/clamp at all, of course.
 */
/* min_t/max_t: compare after converting both operands to @type. */
#define min_t(type, x, y) ({                    \
        type __min1 = (x);                      \
        type __min2 = (y);                      \
        __min1 < __min2 ? __min1: __min2; })

#define max_t(type, x, y) ({                    \
        type __max1 = (x);                      \
        type __max2 = (y);                      \
        __max1 > __max2 ? __max1: __max2; })

/**
 * clamp_t - return a value clamped to a given range using a given type
 * @type: the type of variable to use
 * @val: current value
 * @lo: minimum allowable value
 * @hi: maximum allowable value
 *
 * This macro does no typechecking and uses temporary variables of type
 * 'type' to make all the comparisons.
 */
#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)

/**
 * clamp_val - return a value clamped to a given range using val's type
 * @val: current value
 * @lo: minimum allowable value
 * @hi: maximum allowable value
 *
 * This macro does no typechecking and uses temporary variables of whatever
 * type the input argument 'val' is.  This is useful when val is an unsigned
 * type and min and max are literals that will otherwise be assigned a signed
 * integer type.
 */
#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
  626.  
  627.  
/*
 * swap - swap value of @a and @b
 * Note: @a and @b are each evaluated more than once; avoid side effects.
 */
#define swap(a, b) \
        do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
  633.  
/**
 * container_of - cast a member of a structure out to the containing structure
 * @ptr:        the pointer to the member.
 * @type:       the type of the container struct this is embedded in.
 * @member:     the name of the member within the struct.
 *
 * Works by subtracting the member's byte offset from @ptr; the typed
 * __mptr temporary makes gcc warn if @ptr doesn't match the member type.
 */
#define container_of(ptr, type, member) ({                      \
        const typeof( ((type *)0)->member ) *__mptr = (ptr);    \
        (type *)( (char *)__mptr - offsetof(type,member) );})
  644.  
  645. /* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
  646. #ifdef CONFIG_FTRACE_MCOUNT_RECORD
  647. # define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
  648. #endif
  649.  
/* Permissions on a sysfs file: you didn't miss the 0 prefix did you? */
/* Evaluates to @perms itself; each BUILD_BUG_ON_ZERO() term breaks the
 * build when a sanity rule below is violated. */
#define VERIFY_OCTAL_PERMISSIONS(perms)                                         \
        (BUILD_BUG_ON_ZERO((perms) < 0) +                                       \
         BUILD_BUG_ON_ZERO((perms) > 0777) +                                    \
         /* USER_READABLE >= GROUP_READABLE >= OTHER_READABLE */                \
         BUILD_BUG_ON_ZERO((((perms) >> 6) & 4) < (((perms) >> 3) & 4)) +       \
         BUILD_BUG_ON_ZERO((((perms) >> 3) & 4) < ((perms) & 4)) +              \
         /* USER_WRITABLE >= GROUP_WRITABLE */                                  \
         BUILD_BUG_ON_ZERO((((perms) >> 6) & 2) < (((perms) >> 3) & 2)) +       \
         /* OTHER_WRITABLE?  Generally considered a bad idea. */                \
         BUILD_BUG_ON_ZERO((perms) & 2) +                                       \
         (perms))
  662.  
  663.  
/* KolibriOS port shims: minimal stand-ins for Linux kernel types. */
void free (void *ptr);


typedef unsigned long   pgprotval_t;


/* Simplified file object: just a page array, not the Linux struct file. */
struct file
{
    struct page  **pages;         /* physical memory backend */
    unsigned int   count;
    unsigned int   allocated;
    void           *vma;
};

/* Empty placeholders so code referencing these types still compiles. */
struct vm_area_struct {};
struct address_space {};
  680.  
  681.  
/* No kernel debugger in this port. */
#define in_dbg_master() (0)

/* Timer tick rate (ticks per second) for this port. */
#define HZ 100

struct tvec_base;

/* Simplified kernel timer; @handle is a KolibriOS-specific timer id. */
struct timer_list {
         struct list_head entry;
         unsigned long expires;

         void (*function)(unsigned long);
         unsigned long data;
         u32  handle;
};

/* Initialize callback fields; evaluates _timer more than once. */
#define setup_timer(_timer, _fn, _data)                                 \
        do {                                                            \
                (_timer)->function = (_fn);                             \
                (_timer)->data = (_data);                               \
                (_timer)->handle = 0;                                   \
        } while (0)

int del_timer(struct timer_list *timer);

/* Single-threaded port: the synchronous variant is the plain one. */
# define del_timer_sync(t)              del_timer(t)
  707.  
  708.  
/*
 * Generate a static inline MMIO load accessor.
 *   name    - function name to emit
 *   size    - x86 operand-size suffix for mov ("b"/"w"/"l")
 *   type    - C type read from the device
 *   reg     - output register constraint ("=q" for byte, "=r" otherwise)
 *   barrier - optional trailing clobber list (e.g. :"memory"), may be empty
 */
#define build_mmio_read(name, size, type, reg, barrier)     \
static inline type name(const volatile void __iomem *addr)  \
{ type ret; asm volatile("mov" size " %1,%0":reg (ret)      \
:"m" (*(volatile type __force *)addr) barrier); return ret; }

/* Counterpart generator for MMIO store accessors; same parameters. */
#define build_mmio_write(name, size, type, reg, barrier) \
static inline void name(type val, volatile void __iomem *addr) \
{ asm volatile("mov" size " %0,%1": :reg (val), \
"m" (*(volatile type __force *)addr) barrier); }
  718.  
/* Ordered MMIO loads: the "memory" clobber stops the compiler reordering
 * other memory accesses around them. */
build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
build_mmio_read(readl, "l", unsigned int, "=r", :"memory")

/* Relaxed variants: no compiler barrier. */
build_mmio_read(__readb, "b", unsigned char, "=q", )
build_mmio_read(__readw, "w", unsigned short, "=r", )
build_mmio_read(__readl, "l", unsigned int, "=r", )

/* Ordered MMIO stores. */
build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
build_mmio_write(writew, "w", unsigned short, "r", :"memory")
build_mmio_write(writel, "l", unsigned int, "r", :"memory")

/* Relaxed MMIO stores. */
build_mmio_write(__writeb, "b", unsigned char, "q", )
build_mmio_write(__writew, "w", unsigned short, "r", )
build_mmio_write(__writel, "l", unsigned int, "r", )

/* Linux's relaxed/raw accessor names all map to the barrier-free versions. */
#define readb_relaxed(a) __readb(a)
#define readw_relaxed(a) __readw(a)
#define readl_relaxed(a) __readl(a)
#define __raw_readb __readb
#define __raw_readw __readw
#define __raw_readl __readl

#define __raw_writeb __writeb
#define __raw_writew __writew
#define __raw_writel __writel
  745.  
  746. #define swap(a, b) \
  747.         do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
  748.  
  749.  
/* No posted-write ordering hardware to flush; a compiler barrier suffices. */
#define mmiowb() barrier()

/* dev_* logging: the device argument is ignored; messages go to printk
 * prefixed with a severity word and the calling function's name. */
#define dev_err(dev, format, arg...)            \
        printk("Error %s " format, __func__ , ## arg)

#define dev_warn(dev, format, arg...)            \
        printk("Warning %s " format, __func__ , ## arg)

#define dev_info(dev, format, arg...)       \
        printk("Info %s " format , __func__, ## arg)
  760.  
/* Degenerate page descriptor: just an address, no flags or refcount. */
struct page
{
    unsigned int addr;
};

/* Converts the descriptor POINTER itself (not ->addr) to a bus address --
 * NOTE(review): callers apparently use struct page* and dma_addr_t
 * interchangeably; verify before changing either representation. */
#define page_to_phys(page)    ((dma_addr_t)(page))
  767.  
/* Argument block handed to ->fault handlers (subset of the Linux original). */
struct vm_fault {
    unsigned int flags;             /* FAULT_FLAG_xxx flags */
    pgoff_t pgoff;                  /* Logical page offset based on vma */
    void __user *virtual_address;   /* Faulting virtual address */

    struct page *page;              /* ->fault handlers should return a
                                     * page here, unless VM_FAULT_NOPAGE
                                     * is set (which is also implied by
                                     * VM_FAULT_ERROR).
                                     */
};
  779.  
/* Flat array of per-page bus addresses describing a buffer's backing store. */
struct pagelist {
    dma_addr_t    *page;          /* array of page bus addresses */
    unsigned int   nents;         /* number of entries in page[] */
};
  784.  
  785. #define page_cache_release(page)        FreePage(page_to_phys(page))
  786.  
  787. #define alloc_page(gfp_mask) (struct page*)AllocPage()
  788.  
  789. #define __free_page(page) FreePage(page_to_phys(page))
  790.  
  791. #define get_page(a)
  792. #define put_page(a)
  793.  
  794. #define pci_map_page(dev, page, offset, size, direction) \
  795.         (dma_addr_t)( (offset)+page_to_phys(page))
  796.  
  797. #define pci_unmap_page(dev, dma_address, size, direction)
  798.  
  799. #define IS_ENABLED(a)  0
  800.  
  801.  
  802.  
/* Report the maximum CPU frequency via the native KolibriOS helper; the
 * cpu argument is ignored. */
#define cpufreq_quick_get_max(x) GetCpuFreq()

/* TSC frequency in kHz; defined elsewhere. */
extern unsigned int tsc_khz;

/* Uniprocessor stand-in: run func once on the local CPU and yield 0
 * (success), using a GNU statement expression. */
#define on_each_cpu(func,info,wait)             \
        ({                                      \
                func(info);                     \
                0;                              \
        })
  812.  
  813.  
  814. static inline __must_check long __copy_to_user(void __user *to,
  815.         const void *from, unsigned long n)
  816. {
  817.     if (__builtin_constant_p(n)) {
  818.         switch(n) {
  819.         case 1:
  820.             *(u8 __force *)to = *(u8 *)from;
  821.             return 0;
  822.         case 2:
  823.             *(u16 __force *)to = *(u16 *)from;
  824.             return 0;
  825.         case 4:
  826.             *(u32 __force *)to = *(u32 *)from;
  827.             return 0;
  828. #ifdef CONFIG_64BIT
  829.         case 8:
  830.             *(u64 __force *)to = *(u64 *)from;
  831.             return 0;
  832. #endif
  833.         default:
  834.             break;
  835.         }
  836.     }
  837.  
  838.     __builtin_memcpy((void __force *)to, from, n);
  839.     return 0;
  840. }
  841.  
/* Highmem mapping hooks; implemented elsewhere.  Presumably they just
 * translate the page to its linear address -- TODO confirm. */
void *kmap(struct page *page);
void *kmap_atomic(struct page *page);
void kunmap(struct page *page);
void kunmap_atomic(void *vaddr);

/* Opaque cookie type used by the (stubbed) async function-call API. */
typedef u64 async_cookie_t;

/* I/O-port style accessor mapped directly onto the MMIO dword store. */
#define iowrite32(v, addr)      writel((v), (addr))


/* No discardable init section on this target; expands to nothing. */
#define __init

/* Kernel image mapped at 0; see CONFIG_PAGE_OFFSET use in ported code. */
#define CONFIG_PAGE_OFFSET 0
  856. #endif
  857.