#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/linkage.h>
#include <linux/list.h>

/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 *
 * The hardirq count could in theory be the same as the number of
 * interrupts in the system, but we run all interrupt handlers with
 * interrupts disabled, so we cannot have nesting interrupts. However,
 * a few palaeontologic drivers re-enable interrupts in the handler,
 * so we need more than one bit here.
 *
 *         PREEMPT_MASK:        0x000000ff
 *         SOFTIRQ_MASK:        0x0000ff00
 *         HARDIRQ_MASK:        0x000f0000
 *             NMI_MASK:        0x00100000
 * PREEMPT_NEED_RESCHED:        0x80000000
 */
#define PREEMPT_BITS    8
#define SOFTIRQ_BITS    8
#define HARDIRQ_BITS    4
#define NMI_BITS        1

#define PREEMPT_SHIFT   0
#define SOFTIRQ_SHIFT   (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT   (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT       (HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)   ((1UL << (x))-1)

#define PREEMPT_MASK    (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK    (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK    (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK        (__IRQ_MASK(NMI_BITS)     << NMI_SHIFT)

#define PREEMPT_OFFSET  (1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET  (1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET  (1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET      (1UL << NMI_SHIFT)
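
/*
 * Illustrative sketch, not part of the original header: a self-check
 * showing that the shift/mask arithmetic above reproduces the layout
 * table in the comment. Guarded out with #if 0; the function name is a
 * hypothetical example, and in-kernel code would normally use
 * BUILD_BUG_ON() rather than assert().
 */
#if 0
#include <assert.h>
static void preempt_layout_selfcheck(void)
{
        assert(PREEMPT_MASK == 0x000000ffUL);   /* bits 0-7   */
        assert(SOFTIRQ_MASK == 0x0000ff00UL);   /* bits 8-15  */
        assert(HARDIRQ_MASK == 0x000f0000UL);   /* bits 16-19 */
        assert(NMI_MASK     == 0x00100000UL);   /* bit 20     */
}
#endif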

#define SOFTIRQ_DISABLE_OFFSET  (2 * SOFTIRQ_OFFSET)

/* We use the MSB mostly because it's available */
#define PREEMPT_NEED_RESCHED    0x80000000

/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
#include <asm/preempt.h>

#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
#define irq_count()     (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
                                 | NMI_MASK))

/*
 * Are we doing bottom half or hardware interrupt processing?
 *
 * in_irq()             - hardware interrupt context
 * in_softirq()         - serving a softirq or have bottom halves disabled
 * in_interrupt()       - hardirq, softirq, or NMI context
 * in_serving_softirq() - currently processing a softirq
 */
#define in_irq()                (hardirq_count())
#define in_softirq()            (softirq_count())
#define in_interrupt()          (irq_count())
#define in_serving_softirq()    (softirq_count() & SOFTIRQ_OFFSET)

/*
 * Are we in NMI context?
 */
#define in_nmi()        (preempt_count() & NMI_MASK)
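
/*
 * Illustrative sketch, not part of the original header: a common use of
 * the context tests above is choosing an allocation mode that will not
 * sleep in interrupt context. The function name is hypothetical; the
 * gfp flags are the standard kernel ones. Guarded out with #if 0.
 */
#if 0
static gfp_t example_gfp_for_context(void)
{
        /* hardirq, softirq, and NMI contexts must not sleep */
        return in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
}
#endif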

/*
 * The preempt_count offset after preempt_disable();
 */
#if defined(CONFIG_PREEMPT_COUNT)
# define PREEMPT_DISABLE_OFFSET PREEMPT_OFFSET
#else
# define PREEMPT_DISABLE_OFFSET 0
#endif

/*
 * The preempt_count offset after spin_lock()
 */
#define PREEMPT_LOCK_OFFSET     PREEMPT_DISABLE_OFFSET

/*
 * The preempt_count offset needed for things like:
 *
 *  spin_lock_bh()
 *
 * which needs to disable both preemption (CONFIG_PREEMPT_COUNT) and
 * softirqs, such that unlock sequences of:
 *
 *  spin_unlock();
 *  local_bh_enable();
 *
 * work as expected.
 */
#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)
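
/*
 * Illustrative sketch, not part of the original header: the split unlock
 * sequence described above, written out as a caller would use it. The
 * function name and the data it protects are hypothetical. Guarded out
 * with #if 0.
 */
#if 0
static void example_split_unlock(spinlock_t *lock)
{
        spin_lock_bh(lock);     /* disables softirqs and preemption */
        /* ... touch data shared with softirq context ... */
        spin_unlock(lock);      /* releases the lock, preemption count drops */
        local_bh_enable();      /* re-enables softirqs, may run pending ones */
}
#endif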

/*
 * Are we running in atomic context?  WARNING: this macro cannot
 * always detect atomic context; in particular, it cannot know about
 * held spinlocks in non-preemptible kernels.  Thus it should not be
 * used in the general case to determine whether sleeping is possible.
 * Do not use in_atomic() in driver code.
 */
#define in_atomic()     (preempt_count() != 0)

/*
 * Check whether we were atomic before we did preempt_disable():
 * (used by the scheduler)
 */
#define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET)

#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
#define preempt_count_dec_and_test() \
        ({ preempt_count_sub(1); should_resched(0); })
#else
#define preempt_count_add(val)  __preempt_count_add(val)
#define preempt_count_sub(val)  __preempt_count_sub(val)
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif

#define __preempt_count_inc() __preempt_count_add(1)
#define __preempt_count_dec() __preempt_count_sub(1)

#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)

#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
do { \
        preempt_count_inc(); \
        barrier(); \
} while (0)

#define sched_preempt_enable_no_resched() \
do { \
        barrier(); \
        preempt_count_dec(); \
} while (0)

#define preempt_enable_no_resched() sched_preempt_enable_no_resched()

#define preemptible()   (preempt_count() == 0 && !irqs_disabled())

#ifdef CONFIG_PREEMPT
#define preempt_enable() \
do { \
        barrier(); \
        if (unlikely(preempt_count_dec_and_test())) \
                __preempt_schedule(); \
} while (0)

#define preempt_enable_notrace() \
do { \
        barrier(); \
        if (unlikely(__preempt_count_dec_and_test())) \
                __preempt_schedule_notrace(); \
} while (0)

#define preempt_check_resched() \
do { \
        if (should_resched(0)) \
                __preempt_schedule(); \
} while (0)

#else /* !CONFIG_PREEMPT */
#define preempt_enable() \
do { \
        barrier(); \
        preempt_count_dec(); \
} while (0)

#define preempt_enable_notrace() \
do { \
        barrier(); \
        __preempt_count_dec(); \
} while (0)

#define preempt_check_resched() do { } while (0)
#endif /* CONFIG_PREEMPT */

#define preempt_disable_notrace() \
do { \
        __preempt_count_inc(); \
        barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
        barrier(); \
        __preempt_count_dec(); \
} while (0)

#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that things like get_user()/put_user(), which can
 * fault and schedule, do not migrate into our preempt-protected region.
 */
#define preempt_disable()                       barrier()
#define sched_preempt_enable_no_resched()       barrier()
#define preempt_enable_no_resched()             barrier()
#define preempt_enable()                        barrier()
#define preempt_check_resched()                 do { } while (0)

#define preempt_disable_notrace()               barrier()
#define preempt_enable_no_resched_notrace()     barrier()
#define preempt_enable_notrace()                barrier()
#define preemptible()                           0

#endif /* CONFIG_PREEMPT_COUNT */
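
/*
 * Illustrative sketch, not part of the original header: the canonical
 * preempt_disable()/preempt_enable() pairing around a short per-CPU
 * critical section. The function and per-CPU variable names are
 * hypothetical examples. Guarded out with #if 0.
 */
#if 0
static void example_percpu_update(void)
{
        preempt_disable();                      /* no migration off this CPU from here on */
        __this_cpu_inc(example_counter);        /* safe per-CPU access */
        preempt_enable();                       /* may reschedule if a resched is pending */
}
#endif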

#ifdef MODULE
/*
 * Modules have no business playing preemption tricks.
 */
#undef sched_preempt_enable_no_resched
#undef preempt_enable_no_resched
#undef preempt_enable_no_resched_notrace
#undef preempt_check_resched
#endif

#define preempt_set_need_resched() \
do { \
        set_preempt_need_resched(); \
} while (0)
#define preempt_fold_need_resched() \
do { \
        if (tif_need_resched()) \
                set_preempt_need_resched(); \
} while (0)

#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Note that sched_in and sched_out are called in different contexts:
 * sched_out is called with the rq lock held and interrupts disabled,
 * while sched_in is called without the rq lock and with interrupts
 * enabled. This difference is intentional and depended upon by its users.
 */
struct preempt_ops {
        void (*sched_in)(struct preempt_notifier *notifier, int cpu);
        void (*sched_out)(struct preempt_notifier *notifier,
                          struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
        struct hlist_node link;
        struct preempt_ops *ops;
};

void preempt_notifier_inc(void);
void preempt_notifier_dec(void);
void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

static inline void preempt_notifier_init(struct preempt_notifier *notifier,
                                     struct preempt_ops *ops)
{
        INIT_HLIST_NODE(&notifier->link);
        notifier->ops = ops;
}
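
/*
 * Illustrative sketch, not part of the original header: installing a
 * preemption notifier using the declarations above. The callback names,
 * the example_ops/example_notifier objects, and example_install() are
 * hypothetical. Guarded out with #if 0.
 */
#if 0
static void example_sched_in(struct preempt_notifier *notifier, int cpu)
{
        /* about to run again; called without rq lock, interrupts enabled */
}

static void example_sched_out(struct preempt_notifier *notifier,
                              struct task_struct *next)
{
        /* just preempted; called with rq lock held, interrupts disabled */
}

static struct preempt_ops example_ops = {
        .sched_in  = example_sched_in,
        .sched_out = example_sched_out,
};

static struct preempt_notifier example_notifier;

static void example_install(void)
{
        preempt_notifier_init(&example_notifier, &example_ops);
        preempt_notifier_register(&example_notifier);
}
#endif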

#endif

#endif /* __LINUX_PREEMPT_H */