irqflags.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_IRQFLAGS_H
#define __ASM_IRQFLAGS_H

#include <asm/alternative.h>
#include <asm/barrier.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>

/*
 * AArch64 has flags for masking: Debug, Asynchronous (SError), Interrupts and
 * FIQ exceptions, in the 'daif' register. We mask and unmask them in 'daif'
 * order:
 * Masking debug exceptions causes all other exceptions to be masked too.
 * Masking SError masks IRQ/FIQ, but not debug exceptions. IRQ and FIQ are
 * always masked and unmasked together, and have no side effects for other
 * flags. Keeping to this order makes it easier for entry.S to know which
 * exceptions should be unmasked.
 */
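
/*
 * Editor's note (not in the upstream header): in PSTATE, the DAIF bits sit
 * at D = bit 9, A = bit 8, I = bit 7, F = bit 6 (PSR_D_BIT 0x200, PSR_A_BIT
 * 0x100, PSR_I_BIT 0x080, PSR_F_BIT 0x040 in <asm/ptrace.h>). The immediate
 * taken by 'msr daifset'/'msr daifclr' is a 4-bit D:A:I:F mask, so the "#3"
 * used below touches I and F together, which is how IRQ and FIQ stay masked
 * and unmasked as a pair.
 */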

/*
 * CPU interrupt mask handling.
 */
static inline void arch_local_irq_enable(void)
{
	if (system_has_prio_mask_debugging()) {
		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);

		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
	}

	asm volatile(ALTERNATIVE(
		"msr daifclr, #3 // arch_local_irq_enable",
		__msr_s(SYS_ICC_PMR_EL1, "%0"),
		ARM64_HAS_IRQ_PRIO_MASKING)
		:
		: "r" ((unsigned long) GIC_PRIO_IRQON)
		: "memory");

	pmr_sync();
}
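
/*
 * Editor's note: when ARM64_HAS_IRQ_PRIO_MASKING is patched in, the helpers
 * above and below stop toggling PSTATE.I/F and instead write the GIC
 * priority mask register, with GIC_PRIO_IRQON/GIC_PRIO_IRQOFF as the only
 * two values they expect to see; the WARN_ON_ONCE() under
 * system_has_prio_mask_debugging() catches any caller that left another
 * priority in ICC_PMR_EL1. On the enable path, pmr_sync() provides the
 * synchronization (a DSB where the GIC needs one, as I read <asm/barrier.h>)
 * so the lowered mask takes effect promptly.
 */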

static inline void arch_local_irq_disable(void)
{
	if (system_has_prio_mask_debugging()) {
		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);

		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
	}

	asm volatile(ALTERNATIVE(
		"msr daifset, #3 // arch_local_irq_disable",
		__msr_s(SYS_ICC_PMR_EL1, "%0"),
		ARM64_HAS_IRQ_PRIO_MASKING)
		:
		: "r" ((unsigned long) GIC_PRIO_IRQOFF)
		: "memory");
}
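
/*
 * Editor's note: unlike the enable path, no pmr_sync() follows the write
 * here; the upstream code simply omits it. My reading is that raising the
 * mask to GIC_PRIO_IRQOFF may settle lazily: an interrupt that squeezes in
 * before the new mask is observed is indistinguishable from one taken just
 * before arch_local_irq_disable() was called.
 */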

/*
 * Save the current interrupt enable state.
 */
static inline unsigned long arch_local_save_flags(void)
{
	unsigned long flags;

	asm volatile(ALTERNATIVE(
		"mrs %0, daif",
		__mrs_s("%0", SYS_ICC_PMR_EL1),
		ARM64_HAS_IRQ_PRIO_MASKING)
		: "=&r" (flags)
		:
		: "memory");

	return flags;
}
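
/*
 * Editor's note: the value returned here is opaque. Depending on which
 * alternative was patched in, it is either a DAIF snapshot or the current
 * ICC_PMR_EL1 priority, so it must only be fed back into the matching
 * helpers (arch_irqs_disabled_flags(), arch_local_irq_restore()) and never
 * interpreted directly by callers.
 */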

static inline int arch_irqs_disabled_flags(unsigned long flags)
{
	int res;

	asm volatile(ALTERNATIVE(
		"and %w0, %w1, #" __stringify(PSR_I_BIT),
		"eor %w0, %w1, #" __stringify(GIC_PRIO_IRQON),
		ARM64_HAS_IRQ_PRIO_MASKING)
		: "=&r" (res)
		: "r" ((int) flags)
		: "memory");

	return res;
}
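
/*
 * Editor's note, a worked reading of the two alternatives: the DAIF variant
 * isolates PSR_I_BIT (0x80), which is non-zero exactly when IRQs are masked;
 * the PMR variant XORs with GIC_PRIO_IRQON, which is zero only when
 * flags == GIC_PRIO_IRQON, so any masking priority also reports non-zero.
 * Either way the function returns "true" precisely when interrupts are
 * disabled.
 */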

static inline int arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags;

	flags = arch_local_save_flags();

	/*
	 * There are too many states with IRQs disabled, just keep the current
	 * state if interrupts are already disabled/masked.
	 */
	if (!arch_irqs_disabled_flags(flags))
		arch_local_irq_disable();

	return flags;
}
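
/*
 * Illustrative usage (editorial; callers normally reach these primitives
 * via the generic wrappers in <linux/irqflags.h> rather than calling them
 * directly):
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		// arch_local_irq_save() underneath
 *	// ... critical section, IRQs masked on this CPU ...
 *	local_irq_restore(flags);	// arch_local_irq_restore(flags)
 */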

/*
 * restore saved IRQ state
 */
static inline void arch_local_irq_restore(unsigned long flags)
{
	asm volatile(ALTERNATIVE(
		"msr daif, %0",
		__msr_s(SYS_ICC_PMR_EL1, "%0"),
		ARM64_HAS_IRQ_PRIO_MASKING)
		:
		: "r" (flags)
		: "memory");

	pmr_sync();
}
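
/*
 * Editor's note: restore is the round-trip partner of the save helpers
 * above; it writes the opaque flags back to whichever register they came
 * from (DAIF or ICC_PMR_EL1). The trailing pmr_sync() again covers the
 * unmasking direction on the priority-masking path, since a restore may
 * lower the mask just like arch_local_irq_enable() does.
 */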

#endif /* __ASM_IRQFLAGS_H */