irqflags.h

#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__

/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
#define __cpuidle __attribute__((__section__(".cpuidle.text")))

/*
 * Interrupt control:
 */

static inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	/*
	 * "=rm" is safe here, because "pop" adjusts the stack before
	 * it evaluates its effective address -- this is part of the
	 * documented behavior of the "pop" instruction.
	 */
	asm volatile("# __raw_save_flags\n\t"
		     "pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");

	return flags;
}
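/*
 * Write the given value back into EFLAGS.  The "cc" clobber is needed
 * because popf rewrites the arithmetic flags along with IF.
 */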
static inline void native_restore_fl(unsigned long flags)
{
	asm volatile("push %0 ; popf"
		     : /* no output */
		     :"g" (flags)
		     :"memory", "cc");
}
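/*
 * The "memory" clobber below acts as a compiler barrier: it keeps
 * memory accesses from being reordered across the point where
 * interrupts are disabled or re-enabled.
 */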
static inline void native_irq_disable(void)
{
	asm volatile("cli": : :"memory");
}

static inline void native_irq_enable(void)
{
	asm volatile("sti": : :"memory");
}
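/*
 * "sti" keeps interrupts inhibited until the instruction following it
 * has completed (the STI interrupt shadow), so the sti; hlt pair below
 * cannot be split by an interrupt: a wakeup interrupt is only taken
 * once hlt executes, and therefore terminates the halt.
 */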
static inline __cpuidle void native_safe_halt(void)
{
	asm volatile("sti; hlt": : :"memory");
}

static inline __cpuidle void native_halt(void)
{
	asm volatile("hlt": : :"memory");
}

#endif
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__
#include <linux/types.h>
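/*
 * These wrappers are notrace to keep them out of the function tracer;
 * tracing code itself manipulates interrupt state and would otherwise
 * recurse through them.
 */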
static inline notrace unsigned long arch_local_save_flags(void)
{
	return native_save_fl();
}

static inline notrace void arch_local_irq_restore(unsigned long flags)
{
	native_restore_fl(flags);
}

static inline notrace void arch_local_irq_disable(void)
{
	native_irq_disable();
}

static inline notrace void arch_local_irq_enable(void)
{
	native_irq_enable();
}

/*
 * Used in the idle loop; sti takes one instruction cycle
 * to complete:
 */
static inline __cpuidle void arch_safe_halt(void)
{
	native_safe_halt();
}
/*
 * Used when interrupts are already enabled or to
 * shut down the processor:
 */
static inline __cpuidle void halt(void)
{
	native_halt();
}
/*
 * For spinlocks, etc:
 */
static inline notrace unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();

	arch_local_irq_disable();

	return flags;
}
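/*
 * A minimal usage sketch (not part of this header): the pair brackets
 * a short critical section that must not race with local interrupts:
 *
 *	unsigned long flags;
 *
 *	flags = arch_local_irq_save();
 *	... touch data shared with an interrupt handler ...
 *	arch_local_irq_restore(flags);
 */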
#else

#define ENABLE_INTERRUPTS(x)	sti
#define DISABLE_INTERRUPTS(x)	cli
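/*
 * The (x) argument carries the clobber list used by the paravirt
 * versions of these macros; the native expansions above ignore it.
 */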
#ifdef CONFIG_X86_64
#define SWAPGS	swapgs
/*
 * Currently paravirt can't handle swapgs nicely when we
 * don't have a stack we can rely on (such as a user space
 * stack).  So we either find a way around these or just fault
 * and emulate if a guest tries to call swapgs directly.
 *
 * Either way, this is a good way to document that we don't
 * have a reliable stack. x86_64 only.
 */
#define SWAPGS_UNSAFE_STACK	swapgs

#define PARAVIRT_ADJUST_EXCEPTION_FRAME	/*  */

#define INTERRUPT_RETURN	jmp native_iret
#define USERGS_SYSRET64				\
	swapgs;					\
	sysretq;
#define USERGS_SYSRET32				\
	swapgs;					\
	sysretl

#else
#define INTERRUPT_RETURN		iret
#define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
#define GET_CR0_INTO_EAX		movl %cr0, %eax
#endif

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */
#ifndef __ASSEMBLY__
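/*
 * X86_EFLAGS_IF is the interrupt enable flag, bit 9 of EFLAGS;
 * interrupts are disabled when it is clear.
 */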
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
	return !(flags & X86_EFLAGS_IF);
}

static inline int arch_irqs_disabled(void)
{
	unsigned long flags = arch_local_save_flags();

	return arch_irqs_disabled_flags(flags);
}
#endif /* !__ASSEMBLY__ */
#ifdef __ASSEMBLY__
#ifdef CONFIG_TRACE_IRQFLAGS
#  define TRACE_IRQS_ON		call trace_hardirqs_on_thunk;
#  define TRACE_IRQS_OFF	call trace_hardirqs_off_thunk;
#else
#  define TRACE_IRQS_ON
#  define TRACE_IRQS_OFF
#endif
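/*
 * The thunk variants save and restore the caller-clobbered registers
 * around the C tracing call, so they are safe to invoke from entry
 * assembly without spilling registers at each call site.
 */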
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#  ifdef CONFIG_X86_64
#    define LOCKDEP_SYS_EXIT		call lockdep_sys_exit_thunk
#    define LOCKDEP_SYS_EXIT_IRQ	\
	TRACE_IRQS_ON; \
	sti; \
	call lockdep_sys_exit_thunk; \
	cli; \
	TRACE_IRQS_OFF;
#  else
#    define LOCKDEP_SYS_EXIT \
	pushl %eax;				\
	pushl %ecx;				\
	pushl %edx;				\
	call lockdep_sys_exit;			\
	popl %edx;				\
	popl %ecx;				\
	popl %eax;
#    define LOCKDEP_SYS_EXIT_IRQ
#  endif
#else
#  define LOCKDEP_SYS_EXIT
#  define LOCKDEP_SYS_EXIT_IRQ
#endif
#endif /* __ASSEMBLY__ */

#endif