irqflags.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__

#include <asm/nospec-branch.h>

/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
#define __cpuidle __section(".cpuidle.text")

/*
 * Interrupt control:
 */

/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
extern inline unsigned long native_save_fl(void);
extern __always_inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	/*
	 * "=rm" is safe here, because "pop" adjusts the stack before
	 * it evaluates its effective address -- this is part of the
	 * documented behavior of the "pop" instruction.
	 */
	asm volatile("# __raw_save_flags\n\t"
		     "pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");

	return flags;
}
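
/*
 * Illustrative sketch (not part of this header, name hypothetical): the saved
 * EFLAGS value is normally only inspected for the IF bit, using X86_EFLAGS_IF
 * from <asm/processor-flags.h>; arch_irqs_disabled_flags() below does exactly
 * that.
 */
#if 0
static bool example_irqs_were_on(void)
{
	/* Non-zero iff interrupts were enabled when the flags were sampled. */
	return native_save_fl() & X86_EFLAGS_IF;
}
#endif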
static __always_inline void native_irq_disable(void)
{
	asm volatile("cli": : :"memory");
}

static __always_inline void native_irq_enable(void)
{
	asm volatile("sti": : :"memory");
}
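
/*
 * Note: "sti; hlt" below is kept as a single asm statement on purpose. STI
 * only takes effect after the following instruction, so the CPU halts with
 * no window in which a wakeup interrupt could be delivered before the HLT
 * and the wakeup lost.
 */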
static inline __cpuidle void native_safe_halt(void)
{
	mds_idle_clear_cpu_buffers();
	asm volatile("sti; hlt": : :"memory");
}

static inline __cpuidle void native_halt(void)
{
	mds_idle_clear_cpu_buffers();
	asm volatile("hlt": : :"memory");
}
#endif

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__
#include <linux/types.h>

static __always_inline unsigned long arch_local_save_flags(void)
{
	return native_save_fl();
}

static __always_inline void arch_local_irq_disable(void)
{
	native_irq_disable();
}

static __always_inline void arch_local_irq_enable(void)
{
	native_irq_enable();
}

/*
 * Used in the idle loop; sti takes one instruction cycle
 * to complete:
 */
static inline __cpuidle void arch_safe_halt(void)
{
	native_safe_halt();
}
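
/*
 * Illustrative sketch (not part of this header, names hypothetical): the
 * shape of an idle-style caller. Interrupts are disabled before checking for
 * pending work, and arch_safe_halt() re-enables them atomically with the
 * halt, so a wakeup cannot slip in between the check and the HLT.
 */
#if 0
static void example_idle_wait(bool (*work_pending)(void))
{
	arch_local_irq_disable();
	if (!work_pending())
		arch_safe_halt();	/* sti; hlt -- returns with IRQs on */
	else
		arch_local_irq_enable();
}
#endif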
/*
 * Used when interrupts are already enabled or to
 * shutdown the processor:
 */
static inline __cpuidle void halt(void)
{
	native_halt();
}

/*
 * For spinlocks, etc:
 */
static __always_inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();

	arch_local_irq_disable();

	return flags;
}
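
/*
 * Illustrative sketch (not part of this header, names hypothetical): the
 * usual pairing of arch_local_irq_save()/arch_local_irq_restore(). Generic
 * kernel code would normally go through local_irq_save()/local_irq_restore()
 * rather than calling the arch_ helpers directly.
 */
#if 0
static void example_critical_section(void)
{
	unsigned long flags;

	flags = arch_local_irq_save();	/* save IF, then disable IRQs */
	/* ... touch data that must not race with this CPU's interrupts ... */
	arch_local_irq_restore(flags);	/* re-enable only if IF was set */
}
#endif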
#else

#ifdef CONFIG_X86_64
#ifdef CONFIG_DEBUG_ENTRY
#define SAVE_FLAGS	pushfq; popq %rax
#endif
#endif

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT_XXL */

#ifndef __ASSEMBLY__
static __always_inline int arch_irqs_disabled_flags(unsigned long flags)
{
	return !(flags & X86_EFLAGS_IF);
}

static __always_inline int arch_irqs_disabled(void)
{
	unsigned long flags = arch_local_save_flags();

	return arch_irqs_disabled_flags(flags);
}
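
/*
 * Illustrative sketch (not part of this header, name hypothetical): code that
 * must run with interrupts off can assert that with arch_irqs_disabled();
 * generic code would typically use irqs_disabled() or lockdep instead.
 */
#if 0
static void example_assert_irqs_off(void)
{
	WARN_ON_ONCE(!arch_irqs_disabled());
}
#endif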
static __always_inline void arch_local_irq_restore(unsigned long flags)
{
	if (!arch_irqs_disabled_flags(flags))
		arch_local_irq_enable();
}
#endif /* !__ASSEMBLY__ */

#endif /* _X86_IRQFLAGS_H_ */