Subversion Repositories Kolibri OS


#ifndef _LINUX_KERNEL_H
#define _LINUX_KERNEL_H

/*
 * 'kernel.h' contains some often-used function prototypes etc
 */

#ifdef __KERNEL__

#include <stdarg.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/bitops.h>

#include <linux/typecheck.h>

#define __init

#define USHRT_MAX       ((u16)(~0U))
#define SHRT_MAX        ((s16)(USHRT_MAX>>1))
#define SHRT_MIN        ((s16)(-SHRT_MAX - 1))
#define INT_MAX     ((int)(~0U>>1))
#define INT_MIN     (-INT_MAX - 1)
#define UINT_MAX    (~0U)
#define LONG_MAX    ((long)(~0UL>>1))
#define LONG_MIN    (-LONG_MAX - 1)
#define ULONG_MAX   (~0UL)
#define LLONG_MAX   ((long long)(~0ULL>>1))
#define LLONG_MIN   (-LLONG_MAX - 1)
#define ULLONG_MAX  (~0ULL)
#define SIZE_MAX        (~(size_t)0)

#define ALIGN(x,a)      __ALIGN_MASK(x,(typeof(x))(a)-1)
#define __ALIGN_MASK(x,mask)    (((x)+(mask))&~(mask))
#define PTR_ALIGN(p, a)     ((typeof(p))ALIGN((unsigned long)(p), (a)))
#define IS_ALIGNED(x, a)        (((x) & ((typeof(x))(a) - 1)) == 0)

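/*
 * Illustrative usage (hypothetical values, not part of the original
 * header): ALIGN() rounds a value up to a power-of-two boundary and
 * IS_ALIGNED() tests for one.
 *
 *      unsigned long len = 100;
 *      len = ALIGN(len, 64);           -> len == 128
 *      IS_ALIGNED(len, 64)             -> 1 (true)
 *      IS_ALIGNED(100, 64)             -> 0 (false)
 */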
/* The `const' in roundup() prevents gcc-3.3 from calling __divdi3 */
#define roundup(x, y) (                                 \
{                                                       \
        const typeof(y) __y = y;                        \
        (((x) + (__y - 1)) / __y) * __y;                \
}                                                       \
)

#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
#define DIV_ROUND_CLOSEST(x, divisor)(                  \
{                                                       \
         typeof(divisor) __divisor = divisor;            \
         (((x) + ((__divisor) / 2)) / (__divisor));      \
}                                                       \
)

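/*
 * Illustrative usage (hypothetical values, not part of the original
 * header): all three helpers are meant for non-negative integers.
 *
 *      DIV_ROUND_UP(10, 4)        -> 3
 *      DIV_ROUND_CLOSEST(10, 4)   -> 3   (10/4 == 2.5, rounds up)
 *      roundup(10, 4)             -> 12  (next multiple of 4)
 */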
/**
 * upper_32_bits - return bits 32-63 of a number
 * @n: the number we're accessing
 *
 * A basic shift-right of a 64- or 32-bit quantity.  Use this to suppress
 * the "right shift count >= width of type" warning when that quantity is
 * 32-bits.
 */
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))

/**
 * lower_32_bits - return bits 0-31 of a number
 * @n: the number we're accessing
 */
#define lower_32_bits(n) ((u32)(n))

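/*
 * Illustrative usage (hypothetical 64-bit value, not part of the
 * original header): split a u64 for a pair of 32-bit register writes.
 *
 *      u64 addr = 0x123456789ULL;
 *      upper_32_bits(addr)     -> 0x00000001
 *      lower_32_bits(addr)     -> 0x23456789
 */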
#define KERN_EMERG      "<0>"   /* system is unusable                   */
#define KERN_ALERT      "<1>"   /* action must be taken immediately     */
#define KERN_CRIT       "<2>"   /* critical conditions                  */
#define KERN_ERR        "<3>"   /* error conditions                     */
#define KERN_WARNING    "<4>"   /* warning conditions                   */
#define KERN_NOTICE     "<5>"   /* normal but significant condition     */
#define KERN_INFO       "<6>"   /* informational                        */
#define KERN_DEBUG      "<7>"   /* debug-level messages                 */
extern const char hex_asc[];
#define hex_asc_lo(x)   hex_asc[((x) & 0x0f)]
#define hex_asc_hi(x)   hex_asc[((x) & 0xf0) >> 4]

static inline char *pack_hex_byte(char *buf, u8 byte)
{
        *buf++ = hex_asc_hi(byte);
        *buf++ = hex_asc_lo(byte);
        return buf;
}

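/*
 * Illustrative usage (hypothetical buffer, not part of the original
 * header): pack_hex_byte() returns the advanced pointer, so bytes can
 * be emitted back to back.
 *
 *      char buf[5], *p = buf;
 *      p = pack_hex_byte(p, 0xAB);     buffer now holds "ab"
 *      p = pack_hex_byte(p, 0xCD);     buffer now holds "abcd"
 *      *p = '\0';
 */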
enum {
    DUMP_PREFIX_NONE,
    DUMP_PREFIX_ADDRESS,
    DUMP_PREFIX_OFFSET
};

int hex_to_bin(char ch);
int hex2bin(u8 *dst, const char *src, size_t count);

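/*
 * Illustrative usage (hypothetical MAC string, not part of the original
 * header; these helpers are only declared here): hex2bin() converts
 * `count' destination bytes worth of ASCII hex digits, hex_to_bin()
 * converts a single digit or returns -1.
 *
 *      u8 mac[6];
 *      hex2bin(mac, "0013d4c2a6f0", 6);   mac == {00,13,d4,c2,a6,f0}
 *      hex_to_bin('f')                    -> 15
 *      hex_to_bin('g')                    -> -1
 */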

//int printk(const char *fmt, ...);

#define printk(fmt, arg...)    dbgprintf(fmt , ##arg)


/*
 * min()/max()/clamp() macros that also do
 * strict type-checking.. See the
 * "unnecessary" pointer comparison.
 */
#define min(x, y) ({                \
    typeof(x) _min1 = (x);          \
    typeof(y) _min2 = (y);          \
    (void) (&_min1 == &_min2);      \
    _min1 < _min2 ? _min1 : _min2; })

#define max(x, y) ({                \
    typeof(x) _max1 = (x);          \
    typeof(y) _max2 = (y);          \
    (void) (&_max1 == &_max2);      \
    _max1 > _max2 ? _max1 : _max2; })

#define min3(x, y, z) ({                        \
        typeof(x) _min1 = (x);                  \
        typeof(y) _min2 = (y);                  \
        typeof(z) _min3 = (z);                  \
        (void) (&_min1 == &_min2);              \
        (void) (&_min1 == &_min3);              \
        _min1 < _min2 ? (_min1 < _min3 ? _min1 : _min3) : \
                (_min2 < _min3 ? _min2 : _min3); })

#define max3(x, y, z) ({                        \
        typeof(x) _max1 = (x);                  \
        typeof(y) _max2 = (y);                  \
        typeof(z) _max3 = (z);                  \
        (void) (&_max1 == &_max2);              \
        (void) (&_max1 == &_max3);              \
        _max1 > _max2 ? (_max1 > _max3 ? _max1 : _max3) : \
                (_max2 > _max3 ? _max2 : _max3); })

/**
 * min_not_zero - return the minimum that is _not_ zero, unless both are zero
 * @x: value1
 * @y: value2
 */
#define min_not_zero(x, y) ({                   \
        typeof(x) __x = (x);                    \
        typeof(y) __y = (y);                    \
        __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })

/**
 * clamp - return a value clamped to a given range with strict typechecking
 * @val: current value
 * @min: minimum allowable value
 * @max: maximum allowable value
 *
 * This macro does strict typechecking of min/max to make sure they are of the
 * same type as val.  See the unnecessary pointer comparisons.
 */
#define clamp(val, min, max) ({                 \
        typeof(val) __val = (val);              \
        typeof(min) __min = (min);              \
        typeof(max) __max = (max);              \
        (void) (&__val == &__min);              \
        (void) (&__val == &__max);              \
        __val = __val < __min ? __min: __val;   \
        __val > __max ? __max: __val; })

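/*
 * Illustrative usage (hypothetical values, not part of the original
 * header): the dummy pointer comparison makes the compiler warn when
 * the two arguments have different types.
 *
 *      int a = 5, b = 9;
 *      min(a, b)               -> 5
 *      clamp(a, 0, 7)          -> 5
 *      clamp(12, 0, 7)         -> 7
 *      min(a, 9UL)             warns: comparison of distinct pointer types
 */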
/*
 * ..and if you can't take the strict
 * types, you can specify one yourself.
 *
 * Or not use min/max/clamp at all, of course.
 */
#define min_t(type, x, y) ({            \
    type __min1 = (x);          \
    type __min2 = (y);          \
    __min1 < __min2 ? __min1: __min2; })

#define max_t(type, x, y) ({            \
    type __max1 = (x);          \
    type __max2 = (y);          \
    __max1 > __max2 ? __max1: __max2; })

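/*
 * Illustrative usage (hypothetical values, not part of the original
 * header): the _t variants convert both arguments to one explicit type
 * before comparing, so no type-mismatch warning is produced.
 *
 *      min_t(u32, -1, 10)      -> 10   (-1 converts to 0xffffffff)
 *      max_t(int, -1, 10)      -> 10
 */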
/**
 * container_of - cast a member of a structure out to the containing structure
 * @ptr:    the pointer to the member.
 * @type:   the type of the container struct this is embedded in.
 * @member: the name of the member within the struct.
 *
 */
#define container_of(ptr, type, member) ({          \
    const typeof( ((type *)0)->member ) *__mptr = (ptr);    \
    (type *)( (char *)__mptr - offsetof(type,member) );})

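/*
 * Illustrative usage (hypothetical structure, not part of the original
 * header): recover the enclosing object from a pointer to one of its
 * members.
 *
 *      struct demo {
 *              int              id;
 *              struct list_head link;
 *      };
 *
 *      given a struct list_head *l that points at demo::link:
 *      struct demo *d = container_of(l, struct demo, link);
 */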

static inline void *kcalloc(size_t n, size_t size, uint32_t flags)
{
        if (n != 0 && size > ULONG_MAX / n)
                return NULL;
        return kzalloc(n * size, 0);
}

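/*
 * Illustrative usage (hypothetical sizes, not part of the original
 * header): the n * size product is checked for overflow before the
 * zeroed allocation is made.
 *
 *      u32 *tbl = kcalloc(256, sizeof(*tbl), 0);   1 KiB, zero-filled
 *      kcalloc(0x40000000, 8, 0)                   -> NULL (would overflow)
 */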

void free (void *ptr);

#endif /* __KERNEL__ */

typedef unsigned long   pgprotval_t;

typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;

struct file
{
    struct page  **pages;         /* physical memory backend */
    unsigned int   count;
    unsigned int   allocated;
    void           *vma;
};

struct vm_area_struct {};
struct address_space {};

struct device
{
    struct device   *parent;
    void            *driver_data;
};

static inline void dev_set_drvdata(struct device *dev, void *data)
{
    dev->driver_data = data;
}

static inline void *dev_get_drvdata(struct device *dev)
{
    return dev->driver_data;
}

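/*
 * Illustrative usage (hypothetical driver state, not part of the
 * original header): stash and retrieve per-device private data.
 *
 *      struct my_priv { int irq; } priv;
 *      struct device *dev;                     assumed valid device
 *      dev_set_drvdata(dev, &priv);
 *      struct my_priv *p = dev_get_drvdata(dev);
 */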
#define preempt_disable()       do { } while (0)
#define preempt_enable_no_resched() do { } while (0)
#define preempt_enable()        do { } while (0)
#define preempt_check_resched()     do { } while (0)

#define preempt_disable_notrace()       do { } while (0)
#define preempt_enable_no_resched_notrace() do { } while (0)
#define preempt_enable_notrace()        do { } while (0)

#define in_dbg_master() (0)

#define HZ 100

#define time_after(a,b)         \
        (typecheck(unsigned long, a) && \
        typecheck(unsigned long, b) && \
        ((long)(b) - (long)(a) < 0))

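/*
 * Illustrative usage (hypothetical tick values, not part of the
 * original header): with HZ == 100 one tick is 10 ms, and the signed
 * subtraction keeps the test correct across counter wrap-around.
 *
 *      unsigned long now, deadline;            current tick count, assumed
 *      deadline = now + 3 * HZ;                roughly 3 seconds later
 *      if (time_after(now, deadline))
 *              handle_timeout();               hypothetical handler
 */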
struct tvec_base;

struct timer_list {
         struct list_head entry;
         unsigned long expires;

         void (*function)(unsigned long);
         unsigned long data;

//         struct tvec_base *base;
};

struct timespec {
    long tv_sec;                 /* seconds */
    long tv_nsec;                /* nanoseconds */
};


#define build_mmio_read(name, size, type, reg, barrier)     \
static inline type name(const volatile void __iomem *addr)  \
{ type ret; asm volatile("mov" size " %1,%0":reg (ret)      \
:"m" (*(volatile type __force *)addr) barrier); return ret; }

#define build_mmio_write(name, size, type, reg, barrier) \
static inline void name(type val, volatile void __iomem *addr) \
{ asm volatile("mov" size " %0,%1": :reg (val), \
"m" (*(volatile type __force *)addr) barrier); }

build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
build_mmio_read(readl, "l", unsigned int, "=r", :"memory")

build_mmio_read(__readb, "b", unsigned char, "=q", )
build_mmio_read(__readw, "w", unsigned short, "=r", )
build_mmio_read(__readl, "l", unsigned int, "=r", )

build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
build_mmio_write(writew, "w", unsigned short, "r", :"memory")
build_mmio_write(writel, "l", unsigned int, "r", :"memory")

build_mmio_write(__writeb, "b", unsigned char, "q", )
build_mmio_write(__writew, "w", unsigned short, "r", )
build_mmio_write(__writel, "l", unsigned int, "r", )

#define readb_relaxed(a) __readb(a)
#define readw_relaxed(a) __readw(a)
#define readl_relaxed(a) __readl(a)
#define __raw_readb __readb
#define __raw_readw __readw
#define __raw_readl __readl

#define __raw_writeb __writeb
#define __raw_writew __writew
#define __raw_writel __writel

static inline __u64 readq(const volatile void __iomem *addr)
{
        const volatile u32 __iomem *p = addr;
        u32 low, high;

        low = readl(p);
        high = readl(p + 1);

        return low + ((u64)high << 32);
}

static inline void writeq(__u64 val, volatile void __iomem *addr)
{
        writel(val, addr);
        writel(val >> 32, addr+4);
}

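/*
 * Illustrative usage (hypothetical mapped register block, not part of
 * the original header): the generated accessors take an already mapped
 * __iomem address; readq()/writeq() are split into two 32-bit accesses,
 * low word first.
 *
 *      void __iomem *mmio;                     assumed already mapped
 *      u32 status = readl(mmio + 0x10);
 *      writel(status | 1, mmio + 0x10);
 *      u64 wide = readq(mmio + 0x20);
 */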
#define swap(a, b) \
        do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

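/*
 * Illustrative usage (hypothetical values, not part of the original
 * header):
 *
 *      int a = 1, b = 2;
 *      swap(a, b);             now a == 2 and b == 1
 */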

#define mmiowb() barrier()

#define dev_err(dev, format, arg...)            \
        printk("Error %s " format, __func__ , ## arg)

#define dev_warn(dev, format, arg...)            \
        printk("Warning %s " format, __func__ , ## arg)

#define dev_info(dev, format, arg...)       \
        printk("Info %s " format , __func__, ## arg)

//#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
#define BUILD_BUG_ON(condition)

struct page
{
    unsigned int addr;
};

#define page_to_phys(page)    ((dma_addr_t)(page))

struct vm_fault {
    unsigned int flags;             /* FAULT_FLAG_xxx flags */
    pgoff_t pgoff;                  /* Logical page offset based on vma */
    void __user *virtual_address;   /* Faulting virtual address */

    struct page *page;              /* ->fault handlers should return a
                                     * page here, unless VM_FAULT_NOPAGE
                                     * is set (which is also implied by
                                     * VM_FAULT_ERROR).
                                     */
};

struct pagelist {
    dma_addr_t    *page;
    unsigned int   nents;
};

#define page_cache_release(page)        FreePage(page_to_phys(page))

#define alloc_page(gfp_mask) (struct page*)AllocPage()

#define __free_page(page) FreePage(page_to_phys(page))

#define get_page(a)
#define put_page(a)
#define set_pages_uc(a,b)
#define set_pages_wb(a,b)

#define pci_map_page(dev, page, offset, size, direction) \
        (dma_addr_t)( (offset)+page_to_phys(page))

#define pci_unmap_page(dev, dma_address, size, direction)

#define GFP_TEMPORARY  0
#define __GFP_NOWARN   0
#define __GFP_NORETRY  0
#define GFP_NOWAIT     0

#define IS_ENABLED(a)  0


#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

#define RCU_INIT_POINTER(p, v) \
        do { \
                p = (typeof(*v) __force __rcu *)(v); \
        } while (0)


#define rcu_dereference_raw(p)  ({ \
                                typeof(p) _________p1 = ACCESS_ONCE(p); \
                                (_________p1); \
                                })
#define rcu_assign_pointer(p, v) \
        ({ \
                if (!__builtin_constant_p(v) || \
                    ((v) != NULL)) \
                (p) = (v); \
        })

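/*
 * Illustrative usage (hypothetical pointers, not part of the original
 * header): in this port the RCU helpers reduce to plain assignments
 * and volatile-qualified loads.
 *
 *      struct demo *global_ptr;
 *      struct demo *snap = rcu_dereference_raw(global_ptr);
 *      rcu_assign_pointer(global_ptr, new_demo);   new_demo is hypothetical
 */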
#endif