Subversion Repositories Kolibri OS

#ifndef _LINUX_KERNEL_H
#define _LINUX_KERNEL_H

/*
 * 'kernel.h' contains some often-used function prototypes etc
 */

#ifdef __KERNEL__

#include <stdarg.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/bitops.h>

#include <linux/typecheck.h>

#define __init

#define USHRT_MAX       ((u16)(~0U))
#define SHRT_MAX        ((s16)(USHRT_MAX>>1))
#define SHRT_MIN        ((s16)(-SHRT_MAX - 1))
#define INT_MAX     ((int)(~0U>>1))
#define INT_MIN     (-INT_MAX - 1)
#define UINT_MAX    (~0U)
#define LONG_MAX    ((long)(~0UL>>1))
#define LONG_MIN    (-LONG_MAX - 1)
#define ULONG_MAX   (~0UL)
#define LLONG_MAX   ((long long)(~0ULL>>1))
#define LLONG_MIN   (-LLONG_MAX - 1)
#define ULLONG_MAX  (~0ULL)
#define SIZE_MAX        (~(size_t)0)

#define ALIGN(x,a)      __ALIGN_MASK(x,(typeof(x))(a)-1)
#define __ALIGN_MASK(x,mask)    (((x)+(mask))&~(mask))
#define PTR_ALIGN(p, a)     ((typeof(p))ALIGN((unsigned long)(p), (a)))
#define IS_ALIGNED(x, a)        (((x) & ((typeof(x))(a) - 1)) == 0)
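
/*
 * Illustrative sketch (added in this edit, not part of the original header):
 * how the alignment helpers behave for concrete values.  The function name
 * and the values are made up for documentation only; ALIGN()/IS_ALIGNED()
 * expect a power-of-two alignment.
 */
static inline unsigned long __example_align_usage(void)
{
        unsigned long addr = 0x1003;

        addr = ALIGN(addr, 16);             /* rounds up to 0x1010 */
        /* IS_ALIGNED(addr, 16) is now 1; PTR_ALIGN() does the same
         * for pointers via an unsigned long cast. */
        return addr + IS_ALIGNED(addr, 16);
}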


#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)

/* The `const' in roundup() prevents gcc-3.3 from calling __divdi3 */
#define roundup(x, y) (                                 \
{                                                       \
        const typeof(y) __y = y;                        \
        (((x) + (__y - 1)) / __y) * __y;                \
}                                                       \
)

#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
#define DIV_ROUND_CLOSEST(x, divisor)(                  \
{                                                       \
         typeof(divisor) __divisor = divisor;           \
         (((x) + ((__divisor) / 2)) / (__divisor));     \
}                                                       \
)
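
/*
 * Illustrative sketch (added in this edit, not part of the original header):
 * the rounding helpers for a few concrete values.  round_up() needs a
 * power-of-two step; roundup() and the DIV_ROUND_*() macros take any
 * positive divisor.
 */
static inline int __example_rounding_usage(void)
{
        int a = round_up(10, 8);           /* 16                          */
        int b = roundup(10, 6);            /* 12, the next multiple of 6  */
        int c = DIV_ROUND_UP(10, 4);       /* 3                           */
        int d = DIV_ROUND_CLOSEST(10, 4);  /* 3: 2.5 rounds up            */

        return a + b + c + d;
}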

/**
 * upper_32_bits - return bits 32-63 of a number
 * @n: the number we're accessing
 *
 * A basic shift-right of a 64- or 32-bit quantity.  Use this to suppress
 * the "right shift count >= width of type" warning when that quantity is
 * 32-bits.
 */
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))

/**
 * lower_32_bits - return bits 0-31 of a number
 * @n: the number we're accessing
 */
#define lower_32_bits(n) ((u32)(n))
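
/*
 * Illustrative sketch (added in this edit, not part of the original header):
 * splitting a 64-bit value into the two 32-bit halves that hardware
 * registers usually want.  The parameter name is made up.
 */
static inline u32 __example_split64_usage(u64 dma_addr)
{
        u32 hi = upper_32_bits(dma_addr);   /* bits 32-63 */
        u32 lo = lower_32_bits(dma_addr);   /* bits 0-31  */

        /* the double 16-bit shift in upper_32_bits() avoids a
         * "shift count >= width of type" warning when the argument
         * happens to be a 32-bit type. */
        return hi ^ lo;
}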

#define KERN_EMERG      "<0>"   /* system is unusable                   */
#define KERN_ALERT      "<1>"   /* action must be taken immediately     */
#define KERN_CRIT       "<2>"   /* critical conditions                  */
#define KERN_ERR        "<3>"   /* error conditions                     */
#define KERN_WARNING    "<4>"   /* warning conditions                   */
#define KERN_NOTICE     "<5>"   /* normal but significant condition     */
#define KERN_INFO       "<6>"   /* informational                        */
#define KERN_DEBUG      "<7>"   /* debug-level messages                 */
extern const char hex_asc[];
#define hex_asc_lo(x)   hex_asc[((x) & 0x0f)]
#define hex_asc_hi(x)   hex_asc[((x) & 0xf0) >> 4]

static inline char *pack_hex_byte(char *buf, u8 byte)
{
        *buf++ = hex_asc_hi(byte);
        *buf++ = hex_asc_lo(byte);
        return buf;
}
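
/*
 * Illustrative sketch (added in this edit, not part of the original header):
 * pack_hex_byte() emits two ASCII hex digits and returns the advanced
 * buffer pointer, so calls chain naturally.  The buffer handling below is
 * made up; the caller must provide at least five bytes here.
 */
static inline void __example_pack_hex_usage(char *out)
{
        char *p = out;

        p = pack_hex_byte(p, 0xde);   /* writes 'd', 'e' */
        p = pack_hex_byte(p, 0xad);   /* writes 'a', 'd' */
        *p = '\0';                    /* out now holds "dead" */
}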

enum {
    DUMP_PREFIX_NONE,
    DUMP_PREFIX_ADDRESS,
    DUMP_PREFIX_OFFSET
};

int hex_to_bin(char ch);
int hex2bin(u8 *dst, const char *src, size_t count);


//int printk(const char *fmt, ...);

#define printk(fmt, arg...)    dbgprintf(fmt , ##arg)


/*
 * min()/max()/clamp() macros that also do
 * strict type-checking.. See the
 * "unnecessary" pointer comparison.
 */
#define min(x, y) ({                \
    typeof(x) _min1 = (x);          \
    typeof(y) _min2 = (y);          \
    (void) (&_min1 == &_min2);      \
    _min1 < _min2 ? _min1 : _min2; })

#define max(x, y) ({                \
    typeof(x) _max1 = (x);          \
    typeof(y) _max2 = (y);          \
    (void) (&_max1 == &_max2);      \
    _max1 > _max2 ? _max1 : _max2; })
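
/*
 * Illustrative note (added in this edit, not part of the original header):
 * the (void) (&_min1 == &_min2) line is what implements the strict type
 * check.  Comparing the addresses of two locals of different types makes
 * gcc emit a "comparison of distinct pointer types" warning, while the
 * comparison result itself is thrown away.  A made-up usage sketch:
 */
static inline int __example_min_max_usage(void)
{
        int a = 3, b = 7;

        /* min(a, 7UL) would still compile, but gcc would warn about the
         * distinct pointer types; the min_t()/max_t() variants further
         * down are the quiet way around that. */
        return min(a, b) + max(a, b);   /* 3 + 7 */
}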

#define min3(x, y, z) ({                        \
        typeof(x) _min1 = (x);                  \
        typeof(y) _min2 = (y);                  \
        typeof(z) _min3 = (z);                  \
        (void) (&_min1 == &_min2);              \
        (void) (&_min1 == &_min3);              \
        _min1 < _min2 ? (_min1 < _min3 ? _min1 : _min3) : \
                (_min2 < _min3 ? _min2 : _min3); })

#define max3(x, y, z) ({                        \
        typeof(x) _max1 = (x);                  \
        typeof(y) _max2 = (y);                  \
        typeof(z) _max3 = (z);                  \
        (void) (&_max1 == &_max2);              \
        (void) (&_max1 == &_max3);              \
        _max1 > _max2 ? (_max1 > _max3 ? _max1 : _max3) : \
                (_max2 > _max3 ? _max2 : _max3); })

/**
 * min_not_zero - return the minimum that is _not_ zero, unless both are zero
 * @x: value1
 * @y: value2
 */
#define min_not_zero(x, y) ({                   \
        typeof(x) __x = (x);                    \
        typeof(y) __y = (y);                    \
        __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })

/**
 * clamp - return a value clamped to a given range with strict typechecking
 * @val: current value
 * @min: minimum allowable value
 * @max: maximum allowable value
 *
 * This macro does strict typechecking of min/max to make sure they are of the
 * same type as val.  See the unnecessary pointer comparisons.
 */
#define clamp(val, min, max) ({                 \
        typeof(val) __val = (val);              \
        typeof(min) __min = (min);              \
        typeof(max) __max = (max);              \
        (void) (&__val == &__min);              \
        (void) (&__val == &__max);              \
        __val = __val < __min ? __min: __val;   \
        __val > __max ? __max: __val; })
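
/*
 * Illustrative sketch (added in this edit, not part of the original header):
 * clamp() forces a value into the [min, max] range.  The brightness name
 * and the 0..255 range are made up for the example.
 */
static inline int __example_clamp_usage(int requested_brightness)
{
        /* anything below 0 becomes 0, anything above 255 becomes 255 */
        return clamp(requested_brightness, 0, 255);
}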

/*
 * ..and if you can't take the strict
 * types, you can specify one yourself.
 *
 * Or not use min/max/clamp at all, of course.
 */
#define min_t(type, x, y) ({            \
    type __min1 = (x);          \
    type __min2 = (y);          \
    __min1 < __min2 ? __min1: __min2; })

#define max_t(type, x, y) ({            \
    type __max1 = (x);          \
    type __max2 = (y);          \
    __max1 > __max2 ? __max1: __max2; })

/**
 * container_of - cast a member of a structure out to the containing structure
 * @ptr:    the pointer to the member.
 * @type:   the type of the container struct this is embedded in.
 * @member: the name of the member within the struct.
 *
 */
#define container_of(ptr, type, member) ({          \
    const typeof( ((type *)0)->member ) *__mptr = (ptr);    \
    (type *)( (char *)__mptr - offsetof(type,member) );})
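
/*
 * Illustrative sketch (added in this edit, not part of the original header):
 * container_of() recovers the enclosing structure from a pointer to one of
 * its members.  Both structures below are invented purely for the example.
 */
struct __example_item {
        unsigned long payload;
};

struct __example_wrapper {
        int id;
        struct __example_item item;
};

static inline struct __example_wrapper *
__example_container_of_usage(struct __example_item *p)
{
        /* only valid if p really points at the 'item' member of a
         * struct __example_wrapper */
        return container_of(p, struct __example_wrapper, item);
}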


static inline void *kcalloc(size_t n, size_t size, uint32_t flags)
{
        if (n != 0 && size > ULONG_MAX / n)
                return NULL;
        return kzalloc(n * size, 0);
}
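
/*
 * Illustrative note (added in this edit, not part of the original header):
 * the n != 0 && size > ULONG_MAX / n test rejects requests whose n * size
 * product would overflow before anything is passed to kzalloc().  The
 * helper below is made up.
 */
static inline void *__example_kcalloc_usage(unsigned int nelem)
{
        /* e.g. kcalloc(0x40000000, 16, 0) would overflow the 32-bit
         * multiplication, so the guard above makes it return NULL instead
         * of handing back a too-short buffer. */
        return kcalloc(nelem, 16, 0);
}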


void free (void *ptr);

#endif /* __KERNEL__ */

typedef unsigned long   pgprotval_t;

typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;

struct file
{
    struct page  **pages;         /* physical memory backend */
    unsigned int   count;
    unsigned int   allocated;
    void           *vma;
};

struct vm_area_struct {};
struct address_space {};

struct device
{
    struct device   *parent;
    void            *driver_data;
};

static inline void dev_set_drvdata(struct device *dev, void *data)
{
    dev->driver_data = data;
}

static inline void *dev_get_drvdata(struct device *dev)
{
    return dev->driver_data;
}

#define preempt_disable()       do { } while (0)
#define preempt_enable_no_resched() do { } while (0)
#define preempt_enable()        do { } while (0)
#define preempt_check_resched()     do { } while (0)

#define preempt_disable_notrace()       do { } while (0)
#define preempt_enable_no_resched_notrace() do { } while (0)
#define preempt_enable_notrace()        do { } while (0)

#define in_dbg_master() (0)

#define HZ 100

#define time_after(a,b)         \
        (typecheck(unsigned long, a) && \
        typecheck(unsigned long, b) && \
        ((long)(b) - (long)(a) < 0))
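
/*
 * Illustrative sketch (added in this edit, not part of the original header):
 * time_after() compares jiffy-style counters, and the signed subtraction
 * keeps the result correct across counter wrap-around.  The parameter
 * names are made up.
 */
static inline int __example_time_after_usage(unsigned long now,
                                             unsigned long deadline)
{
        /* with HZ == 100, a deadline of start + HZ means "one second
         * after start" */
        return time_after(now, deadline);   /* 1 once the deadline passed */
}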

struct tvec_base;

struct timer_list {
         struct list_head entry;
         unsigned long expires;

         void (*function)(unsigned long);
         unsigned long data;

//         struct tvec_base *base;
};

struct timespec {
    long tv_sec;                 /* seconds */
    long tv_nsec;                /* nanoseconds */
};


#define build_mmio_read(name, size, type, reg, barrier)     \
static inline type name(const volatile void __iomem *addr)  \
{ type ret; asm volatile("mov" size " %1,%0":reg (ret)      \
:"m" (*(volatile type __force *)addr) barrier); return ret; }

#define build_mmio_write(name, size, type, reg, barrier) \
static inline void name(type val, volatile void __iomem *addr) \
{ asm volatile("mov" size " %0,%1": :reg (val), \
"m" (*(volatile type __force *)addr) barrier); }

build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
build_mmio_read(readl, "l", unsigned int, "=r", :"memory")

build_mmio_read(__readb, "b", unsigned char, "=q", )
build_mmio_read(__readw, "w", unsigned short, "=r", )
build_mmio_read(__readl, "l", unsigned int, "=r", )

build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
build_mmio_write(writew, "w", unsigned short, "r", :"memory")
build_mmio_write(writel, "l", unsigned int, "r", :"memory")

build_mmio_write(__writeb, "b", unsigned char, "q", )
build_mmio_write(__writew, "w", unsigned short, "r", )
build_mmio_write(__writel, "l", unsigned int, "r", )

#define readb_relaxed(a) __readb(a)
#define readw_relaxed(a) __readw(a)
#define readl_relaxed(a) __readl(a)
#define __raw_readb __readb
#define __raw_readw __readw
#define __raw_readl __readl

#define __raw_writeb __writeb
#define __raw_writew __writew
#define __raw_writel __writel

static inline __u64 readq(const volatile void __iomem *addr)
{
        const volatile u32 __iomem *p = addr;
        u32 low, high;

        low = readl(p);
        high = readl(p + 1);

        return low + ((u64)high << 32);
}

static inline void writeq(__u64 val, volatile void __iomem *addr)
{
        writel(val, addr);
        writel(val >> 32, addr+4);
}
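
/*
 * Illustrative sketch (added in this edit, not part of the original header):
 * the generated readl()/writel() helpers issue single mov instructions
 * against a mapped register window, and readq()/writeq() split a 64-bit
 * access into two 32-bit ones, low dword first.  The register offsets and
 * the enable bit below are made up.
 */
static inline void __example_mmio_usage(void __iomem *regs)
{
        u32 status;
        u64 fence;

        status = readl(regs + 0x10);        /* 32-bit read with "memory" clobber */
        writel(status | 1, regs + 0x10);    /* set an imaginary enable bit */

        fence = readq(regs + 0x20);         /* two readl()s, low then high */
        writeq(fence + 1, regs + 0x20);     /* two writel()s */
}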

#define swap(a, b) \
        do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)


#define mmiowb() barrier()

#define dev_err(dev, format, arg...)            \
        printk("Error %s " format, __func__ , ## arg)

#define dev_warn(dev, format, arg...)            \
        printk("Warning %s " format, __func__ , ## arg)

#define dev_info(dev, format, arg...)       \
        printk("Info %s " format , __func__, ## arg)

//#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
#define BUILD_BUG_ON(condition)

struct page
{
    unsigned int addr;
};

#define page_to_phys(page)    ((dma_addr_t)(page))

struct vm_fault {
    unsigned int flags;             /* FAULT_FLAG_xxx flags */
    pgoff_t pgoff;                  /* Logical page offset based on vma */
    void __user *virtual_address;   /* Faulting virtual address */

    struct page *page;              /* ->fault handlers should return a
                                     * page here, unless VM_FAULT_NOPAGE
                                     * is set (which is also implied by
                                     * VM_FAULT_ERROR).
                                     */
};

struct pagelist {
    dma_addr_t    *page;
    unsigned int   nents;
};

#define page_cache_release(page)        FreePage(page_to_phys(page))

#define alloc_page(gfp_mask) (struct page*)AllocPage()

#define __free_page(page) FreePage(page_to_phys(page))

#define get_page(a)
#define put_page(a)
#define set_pages_uc(a,b)
#define set_pages_wb(a,b)

#define pci_map_page(dev, page, offset, size, direction) \
        (dma_addr_t)( (offset)+page_to_phys(page))

#define pci_unmap_page(dev, dma_address, size, direction)

#define GFP_TEMPORARY  0
#define __GFP_NOWARN   0
#define __GFP_NORETRY  0
#define GFP_NOWAIT     0

#define IS_ENABLED(a)  0


#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

#define RCU_INIT_POINTER(p, v) \
        do { \
                p = (typeof(*v) __force __rcu *)(v); \
        } while (0)


#define rcu_dereference_raw(p)  ({ \
                                typeof(p) _________p1 = ACCESS_ONCE(p); \
                                (_________p1); \
                                })
#define rcu_assign_pointer(p, v) \
        ({ \
                if (!__builtin_constant_p(v) || \
                    ((v) != NULL)) \
                (p) = (v); \
        })


unsigned int hweight16(unsigned int w);

#define cpufreq_quick_get_max(x) GetCpuFreq()

extern unsigned int tsc_khz;

#define on_each_cpu(func,info,wait)             \
        ({                                      \
                func(info);                     \
                0;                              \
        })


#endif