Subversion Repositories Kolibri OS

Rev

Rev 5270 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. #ifndef _X86_IRQFLAGS_H_
  2. #define _X86_IRQFLAGS_H_
  3.  
  4. #include <asm/processor-flags.h>
  5.  
  6. #ifndef __ASSEMBLY__
  7. /*
  8.  * Interrupt control:
  9.  */
  10.  
/*
 * Read the current value of the EFLAGS register.
 *
 * There is no mov-from-EFLAGS instruction, so the flags are pushed
 * onto the stack with "pushf" and popped into the output operand.
 * The "memory" clobber keeps the compiler from reordering memory
 * accesses across the flags read.
 */
static inline unsigned long native_save_fl(void)
{
        unsigned long flags;

        /*
         * "=rm" is safe here, because "pop" adjusts the stack before
         * it evaluates its effective address -- this is part of the
         * documented behavior of the "pop" instruction.
         */
        asm volatile("# __raw_save_flags\n\t"
                     "pushf ; pop %0"
                     : "=rm" (flags)
                     : /* no input */
                     : "memory");

        return flags;
}
  28.  
/*
 * Write @flags back into the EFLAGS register via "push ; popf".
 *
 * "cc" is clobbered because popf reloads the arithmetic flags; the
 * "memory" clobber orders the (possible) interrupt-enable state
 * change against surrounding memory accesses.
 */
static inline void native_restore_fl(unsigned long flags)
{
        asm volatile("push %0 ; popf"
                     : /* no output */
                     :"g" (flags)
                     :"memory", "cc");
}
  36.  
/*
 * Mask maskable interrupts with "cli" (clears EFLAGS.IF).  The
 * "memory" clobber acts as a compiler barrier so accesses are not
 * moved out of the interrupts-disabled region.
 */
static inline void native_irq_disable(void)
{
        asm volatile("cli": : :"memory");
}
  41.  
/*
 * Unmask maskable interrupts with "sti" (sets EFLAGS.IF).  The
 * "memory" clobber acts as a compiler barrier so accesses are not
 * moved into or out of the protected region.
 */
static inline void native_irq_enable(void)
{
        asm volatile("sti": : :"memory");
}
  46.  
/*
 * Enable interrupts and halt in a single asm statement.  Keeping
 * "sti" immediately adjacent to "hlt" means no interrupt can be
 * taken between the two instructions (see the arch_safe_halt()
 * comment below: sti takes one instruction cycle to complete), so
 * a pending wakeup interrupt cannot be lost before the halt.
 */
static inline void native_safe_halt(void)
{
        asm volatile("sti; hlt": : :"memory");
}
  51.  
/*
 * Halt the CPU without touching the interrupt flag.  Intended for
 * use when interrupts are already enabled, or to stop the CPU for
 * good (see halt() below).
 */
static inline void native_halt(void)
{
        asm volatile("hlt": : :"memory");
}
  56.  
  57. #endif
  58.  
  59. #ifdef CONFIG_PARAVIRT
  60. #include <asm/paravirt.h>
  61. #else
  62. #ifndef __ASSEMBLY__
  63. #include <linux/types.h>
  64.  
  65. static inline notrace unsigned long arch_local_save_flags(void)
  66. {
  67.         return native_save_fl();
  68. }
  69.  
/*
 * Restore a flags word previously obtained from
 * arch_local_save_flags()/arch_local_irq_save().  Non-paravirt
 * build: maps straight onto the native push/popf.
 */
static inline notrace void arch_local_irq_restore(unsigned long flags)
{
        native_restore_fl(flags);
}
  74.  
/* Disable interrupts on this CPU (native "cli"). */
static inline notrace void arch_local_irq_disable(void)
{
        native_irq_disable();
}
  79.  
/* Enable interrupts on this CPU (native "sti"). */
static inline notrace void arch_local_irq_enable(void)
{
        native_irq_enable();
}
  84.  
/*
 * Used in the idle loop; sti takes one instruction cycle
 * to complete:
 *
 * (so the combined "sti; hlt" in native_safe_halt() cannot be
 * interrupted between enabling interrupts and halting)
 */
static inline void arch_safe_halt(void)
{
        native_safe_halt();
}
  93.  
/*
 * Used when interrupts are already enabled or to
 * shutdown the processor:
 *
 * (plain "hlt" -- does not modify the interrupt flag, so with
 * interrupts disabled this stops the CPU permanently)
 */
static inline void halt(void)
{
        native_halt();
}
  102.  
  103. /*
  104.  * For spinlocks, etc:
  105.  */
  106. static inline notrace unsigned long arch_local_irq_save(void)
  107. {
  108.         unsigned long flags = arch_local_save_flags();
  109.         arch_local_irq_disable();
  110.         return flags;
  111. }
  112. #else
  113.  
/*
 * Assembly-side primitives for the !CONFIG_PARAVIRT build: each
 * macro expands directly to the native instruction(s).
 */
#define ENABLE_INTERRUPTS(x)    sti
#define DISABLE_INTERRUPTS(x)   cli

#ifdef CONFIG_X86_64
#define SWAPGS  swapgs
/*
 * Currently paravirt can't handle swapgs nicely when we
 * don't have a stack we can rely on (such as a user space
 * stack).  So we either find a way around these or just fault
 * and emulate if a guest tries to call swapgs directly.
 *
 * Either way, this is a good way to document that we don't
 * have a reliable stack. x86_64 only.
 */
#define SWAPGS_UNSAFE_STACK     swapgs

/* Native build: no hypervisor-mangled exception frame to fix up. */
#define PARAVIRT_ADJUST_EXCEPTION_FRAME /*  */

#define INTERRUPT_RETURN        jmp native_iret
/* Return to 64-bit user mode: back to user GS, then sysret. */
#define USERGS_SYSRET64                         \
        swapgs;                                 \
        sysretq;
/* Return to 32-bit user mode: back to user GS, then 32-bit sysret. */
#define USERGS_SYSRET32                         \
        swapgs;                                 \
        sysretl

#else
/* 32-bit: plain iret / sti+sysexit; no GS swapping needed. */
#define INTERRUPT_RETURN                iret
#define ENABLE_INTERRUPTS_SYSEXIT       sti; sysexit
#define GET_CR0_INTO_EAX                movl %cr0, %eax
#endif
  145.  
  146.  
  147. #endif /* __ASSEMBLY__ */
  148. #endif /* CONFIG_PARAVIRT */
  149.  
  150. #ifndef __ASSEMBLY__
  151. static inline int arch_irqs_disabled_flags(unsigned long flags)
  152. {
  153.         return !(flags & X86_EFLAGS_IF);
  154. }
  155.  
  156. static inline int arch_irqs_disabled(void)
  157. {
  158.         unsigned long flags = arch_local_save_flags();
  159.  
  160.         return arch_irqs_disabled_flags(flags);
  161. }
  162. #endif /* !__ASSEMBLY__ */
  163.  
  164. #ifdef __ASSEMBLY__
/* Hooks for irq-state tracing from assembly; no-ops when disabled. */
#ifdef CONFIG_TRACE_IRQFLAGS
#  define TRACE_IRQS_ON         call trace_hardirqs_on_thunk;
#  define TRACE_IRQS_OFF        call trace_hardirqs_off_thunk;
#else
#  define TRACE_IRQS_ON
#  define TRACE_IRQS_OFF
#endif
/* Lockdep system-call-exit hooks; no-ops when lock debugging is off. */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#  ifdef CONFIG_X86_64
#    define LOCKDEP_SYS_EXIT            call lockdep_sys_exit_thunk
/*
 * Called with interrupts off: note the transition for the tracer,
 * enable interrupts around the lockdep call, then restore the
 * irqs-off state.
 */
#    define LOCKDEP_SYS_EXIT_IRQ \
        TRACE_IRQS_ON; \
        sti; \
        call lockdep_sys_exit_thunk; \
        cli; \
        TRACE_IRQS_OFF;
#  else
/* 32-bit: save the call-clobbered registers around the C call. */
#    define LOCKDEP_SYS_EXIT \
        pushl %eax;                             \
        pushl %ecx;                             \
        pushl %edx;                             \
        call lockdep_sys_exit;                  \
        popl %edx;                              \
        popl %ecx;                              \
        popl %eax;
#    define LOCKDEP_SYS_EXIT_IRQ
#  endif
#else
#  define LOCKDEP_SYS_EXIT
#  define LOCKDEP_SYS_EXIT_IRQ
#endif
  196. #endif /* __ASSEMBLY__ */
  197.  
  198. #endif
  199.