- /* SPDX-License-Identifier: GPL-2.0 */
- #ifndef _X86_IRQFLAGS_H_
- #define _X86_IRQFLAGS_H_
- #include <asm/processor-flags.h>
- #ifndef __ASSEMBLY__
- #include <asm/nospec-branch.h>
- /* Provide __cpuidle; we can't safely include <linux/cpu.h> */
- #define __cpuidle __section(".cpuidle.text")
- /*
- * Interrupt control:
- */
/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
extern inline unsigned long native_save_fl(void);
/*
 * Read the raw EFLAGS register via pushf/pop.
 *
 * NOTE(review): declared "extern inline" (rather than static) presumably so
 * an out-of-line copy with a real symbol exists for the paravirt machinery
 * selected below (CONFIG_PARAVIRT_XXL) — confirm against the paravirt code.
 */
extern __always_inline unsigned long native_save_fl(void)
{
	unsigned long flags;
	/*
	 * "=rm" is safe here, because "pop" adjusts the stack before
	 * it evaluates its effective address -- this is part of the
	 * documented behavior of the "pop" instruction.
	 */
	asm volatile("# __raw_save_flags\n\t"
		     "pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");
	return flags;
}
/*
 * Mask maskable interrupts on the local CPU (clear EFLAGS.IF via "cli").
 * The "memory" clobber is a compiler barrier: accesses must not be
 * reordered across the interrupt-state change.
 */
static __always_inline void native_irq_disable(void)
{
	asm volatile("cli": : :"memory");
}
/*
 * Unmask maskable interrupts on the local CPU (set EFLAGS.IF via "sti").
 * The "memory" clobber keeps the compiler from moving accesses across
 * the point where interrupts become deliverable.
 */
static __always_inline void native_irq_enable(void)
{
	asm volatile("sti": : :"memory");
}
/*
 * Atomically enable interrupts and halt.  "sti" keeps interrupts
 * masked for one further instruction (the STI interrupt shadow), so
 * "hlt" is guaranteed to execute before any interrupt is delivered —
 * a wakeup interrupt cannot slip in between the two and be lost.
 */
static inline __cpuidle void native_safe_halt(void)
{
	/* Clear CPU buffers before idling (MDS mitigation, nospec-branch.h). */
	mds_idle_clear_cpu_buffers();
	asm volatile("sti; hlt": : :"memory");
}
/*
 * Halt the CPU with "hlt", leaving EFLAGS.IF untouched.  If the caller
 * has interrupts disabled, no maskable interrupt can end the halt.
 */
static inline __cpuidle void native_halt(void)
{
	/* Clear CPU buffers before idling (MDS mitigation, nospec-branch.h). */
	mds_idle_clear_cpu_buffers();
	asm volatile("hlt": : :"memory");
}
- #endif
- #ifdef CONFIG_PARAVIRT_XXL
- #include <asm/paravirt.h>
- #else
- #ifndef __ASSEMBLY__
- #include <linux/types.h>
/*
 * Return the current EFLAGS value (native, non-paravirt build).
 * The result is suitable for arch_irqs_disabled_flags() and
 * arch_local_irq_restore().
 */
static __always_inline unsigned long arch_local_save_flags(void)
{
	return native_save_fl();
}
/* Disable local interrupts (native, non-paravirt build). */
static __always_inline void arch_local_irq_disable(void)
{
	native_irq_disable();
}
/* Enable local interrupts (native, non-paravirt build). */
static __always_inline void arch_local_irq_enable(void)
{
	native_irq_enable();
}
/*
 * Used in the idle loop.  The "sti" inside native_safe_halt() leaves
 * interrupts masked for exactly one more instruction, so the following
 * "hlt" cannot miss a wakeup interrupt:
 */
static inline __cpuidle void arch_safe_halt(void)
{
	native_safe_halt();
}
/*
 * Used when interrupts are already enabled or to
 * shutdown the processor:
 */
static inline __cpuidle void halt(void)
{
	native_halt();
}
/*
 * For spinlocks, etc: snapshot EFLAGS, then disable interrupts.
 * Flags must be read *before* the cli so the snapshot reflects the
 * caller's interrupt state; the returned value is later handed to
 * arch_local_irq_restore().
 */
static __always_inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();
	arch_local_irq_disable();
	return flags;
}
- #else
- #ifdef CONFIG_X86_64
- #ifdef CONFIG_DEBUG_ENTRY
- #define SAVE_FLAGS pushfq; popq %rax
- #endif
- #endif
- #endif /* __ASSEMBLY__ */
- #endif /* CONFIG_PARAVIRT_XXL */
- #ifndef __ASSEMBLY__
- static __always_inline int arch_irqs_disabled_flags(unsigned long flags)
- {
- return !(flags & X86_EFLAGS_IF);
- }
- static __always_inline int arch_irqs_disabled(void)
- {
- unsigned long flags = arch_local_save_flags();
- return arch_irqs_disabled_flags(flags);
- }
- static __always_inline void arch_local_irq_restore(unsigned long flags)
- {
- if (!arch_irqs_disabled_flags(flags))
- arch_local_irq_enable();
- }
- #endif /* !__ASSEMBLY__ */
- #endif
|