/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1999 Cort Dougan <[email protected]>
 */
#ifndef _ASM_POWERPC_HW_IRQ_H
#define _ASM_POWERPC_HW_IRQ_H

#ifdef __KERNEL__

#include <linux/errno.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/processor.h>

#ifdef CONFIG_PPC64

/*
 * PACA flags in paca->irq_happened.
 *
 * These bits are set when interrupts occur while soft-disabled
 * and allow a proper replay.
 *
 * PACA_IRQ_HARD_DIS is set whenever we hard disable. It is almost
 * always in sync with the MSR[EE] state, except:
 * - A window in interrupt entry, where hardware disables MSR[EE] and that
 *   must be "reconciled" with the soft mask state.
 * - NMI interrupts that hit in awkward places, until they fix the state.
 * - When local irqs are being enabled and state is being fixed up.
 * - When returning from an interrupt there are some windows where this
 *   can become out of sync, but gets fixed before the RFI or before
 *   executing the next user instruction (see arch/powerpc/kernel/interrupt.c).
 */
#define PACA_IRQ_HARD_DIS	0x01
#define PACA_IRQ_DBELL		0x02
#define PACA_IRQ_EE		0x04
#define PACA_IRQ_DEC		0x08 /* Or FIT */
#define PACA_IRQ_HMI		0x10
#define PACA_IRQ_PMI		0x20
#define PACA_IRQ_REPLAYING	0x40
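
/*
 * Illustrative sketch (not part of the original header): a soft-masked
 * interrupt path typically records the event for later replay roughly like
 *
 *	local_paca->irq_happened |= PACA_IRQ_DEC;
 *
 * and returns with the exception untaken; the bit is consumed when the
 * soft mask is lifted and the interrupt gets replayed.
 */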

/*
 * Some soft-masked interrupts must be hard masked until they are replayed
 * (e.g., because the soft-masked handler does not clear the exception).
 * Interrupt replay itself must remain hard masked too.
 */
#ifdef CONFIG_PPC_BOOK3S
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE|PACA_IRQ_PMI|PACA_IRQ_REPLAYING)
#else
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE|PACA_IRQ_REPLAYING)
#endif

#endif /* CONFIG_PPC64 */

/*
 * flags for paca->irq_soft_mask
 */
#define IRQS_ENABLED		0
#define IRQS_DISABLED		1 /* local_irq_disable() interrupts */
#define IRQS_PMI_DISABLED	2
#define IRQS_ALL_DISABLED	(IRQS_DISABLED | IRQS_PMI_DISABLED)
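
/*
 * Illustrative only: IRQS_DISABLED is what local_irq_disable() sets,
 * while perf code can additionally mask PMIs, e.g.
 *
 *	irq_soft_mask_set(IRQS_DISABLED | IRQS_PMI_DISABLED);
 *
 * Any non-zero mask must include IRQS_DISABLED (see irq_soft_mask_set()).
 */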

#ifndef __ASSEMBLY__

static inline void __hard_irq_enable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
		wrtee(MSR_EE);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EIE);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(MSR_EE | MSR_RI, 1);
	else
		mtmsr(mfmsr() | MSR_EE);
}

static inline void __hard_irq_disable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(MSR_RI, 1);
	else
		mtmsr(mfmsr() & ~MSR_EE);
}

static inline void __hard_EE_RI_disable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_NRI);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(0, 1);
	else
		mtmsr(mfmsr() & ~(MSR_EE | MSR_RI));
}

static inline void __hard_RI_enable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
		return;

	if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(MSR_RI, 1);
	else
		mtmsr(mfmsr() | MSR_RI);
}
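
/*
 * Illustrative note (not in the original): the __hard_* helpers above
 * toggle MSR bits directly and do not touch the soft mask, e.g.
 *
 *	__hard_irq_disable();	// MSR[EE] = 0, soft mask unchanged
 *
 * Callers normally want the local_irq_*() / hard_irq_disable() wrappers
 * below, which keep the paca state consistent.
 */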

#ifdef CONFIG_PPC64
#include <asm/paca.h>

static inline notrace unsigned long irq_soft_mask_return(void)
{
	unsigned long flags;

	asm volatile(
		"lbz %0,%1(13)"
		: "=r" (flags)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)));

	return flags;
}

/*
 * The "memory" clobber acts as both a compiler barrier
 * for the critical section and as a clobber because
 * we changed paca->irq_soft_mask
 */
static inline notrace void irq_soft_mask_set(unsigned long mask)
{
	/*
	 * The irq mask must always include the STD bit if any are set,
	 * and interrupts don't get replayed until the standard
	 * interrupt (local_irq_disable()) is unmasked.
	 *
	 * Other masks must only provide additional masking beyond
	 * the standard, and they are also not replayed until the
	 * standard interrupt becomes unmasked.
	 *
	 * This could be changed, but it will require partial
	 * unmasks to be replayed, among other things. For now, take
	 * the simple approach.
	 */
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON(mask && !(mask & IRQS_DISABLED));

	asm volatile(
		"stb %0,%1(13)"
		:
		: "r" (mask),
		  "i" (offsetof(struct paca_struct, irq_soft_mask))
		: "memory");
}
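
/*
 * Illustrative only: the lbz/stb via r13 above are equivalent in effect to
 *
 *	local_paca->irq_soft_mask = mask;
 *
 * but kept as a single byte load/store, apparently so this very hot,
 * notrace path stays one instruction; r13 always points at this CPU's paca.
 */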

static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
{
	unsigned long flags = irq_soft_mask_return();

	irq_soft_mask_set(mask);

	return flags;
}

static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
{
	unsigned long flags = irq_soft_mask_return();

	irq_soft_mask_set(flags | mask);

	return flags;
}

static inline notrace unsigned long irq_soft_mask_andc_return(unsigned long mask)
{
	unsigned long flags = irq_soft_mask_return();

	irq_soft_mask_set(flags & ~mask);

	return flags;
}

static inline unsigned long arch_local_save_flags(void)
{
	return irq_soft_mask_return();
}

static inline void arch_local_irq_disable(void)
{
	irq_soft_mask_set(IRQS_DISABLED);
}

extern void arch_local_irq_restore(unsigned long);

static inline void arch_local_irq_enable(void)
{
	arch_local_irq_restore(IRQS_ENABLED);
}

static inline unsigned long arch_local_irq_save(void)
{
	return irq_soft_mask_or_return(IRQS_DISABLED);
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return flags & IRQS_DISABLED;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}
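
/*
 * Illustrative sketch of how the generic local_irq_save()/restore() pair
 * maps onto the accessors above on 64-bit:
 *
 *	unsigned long flags;
 *
 *	flags = arch_local_irq_save();	// soft mask |= IRQS_DISABLED
 *	// critical section: interrupts soft-masked, not hard-disabled
 *	arch_local_irq_restore(flags);	// may replay anything that arrived
 */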

static inline void set_pmi_irq_pending(void)
{
	/*
	 * Invoked from PMU callback functions to set PMI bit in the paca.
	 * This has to be called with irqs disabled (via hard_irq_disable()).
	 */
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(mfmsr() & MSR_EE);

	get_paca()->irq_happened |= PACA_IRQ_PMI;
}

static inline void clear_pmi_irq_pending(void)
{
	/*
	 * Invoked from PMU callback functions to clear the pending PMI bit
	 * in the paca.
	 */
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(mfmsr() & MSR_EE);

	get_paca()->irq_happened &= ~PACA_IRQ_PMI;
}

static inline bool pmi_irq_pending(void)
{
	/*
	 * Invoked from PMU callback functions to check if there is a pending
	 * PMI bit in the paca.
	 */
	if (get_paca()->irq_happened & PACA_IRQ_PMI)
		return true;

	return false;
}
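
/*
 * Illustrative PMU-callback pattern (hypothetical caller, not from this
 * file): with interrupts hard-disabled, a PMU driver might do
 *
 *	if (pmi_irq_pending())
 *		clear_pmi_irq_pending();	// PMI handled right here
 *
 * so that a stale PACA_IRQ_PMI does not get replayed later.
 */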

#ifdef CONFIG_PPC_BOOK3S
/*
 * To support disabling and enabling of irqs together with PMIs, a pair of
 * new macros, powerpc_local_irq_pmu_save() and powerpc_local_irq_pmu_restore(),
 * is added. These macros are implemented using the generic linux
 * local_irq_* code from include/linux/irqflags.h.
 */
#define raw_local_irq_pmu_save(flags)					\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = irq_soft_mask_or_return(IRQS_DISABLED |		\
				IRQS_PMI_DISABLED);			\
	} while (0)

#define raw_local_irq_pmu_restore(flags)				\
	do {								\
		typecheck(unsigned long, flags);			\
		arch_local_irq_restore(flags);				\
	} while (0)

#ifdef CONFIG_TRACE_IRQFLAGS
#define powerpc_local_irq_pmu_save(flags)			\
	do {							\
		raw_local_irq_pmu_save(flags);			\
		if (!raw_irqs_disabled_flags(flags))		\
			trace_hardirqs_off();			\
	} while (0)
#define powerpc_local_irq_pmu_restore(flags)			\
	do {							\
		if (!raw_irqs_disabled_flags(flags))		\
			trace_hardirqs_on();			\
		raw_local_irq_pmu_restore(flags);		\
	} while (0)
#else
#define powerpc_local_irq_pmu_save(flags)			\
	do {							\
		raw_local_irq_pmu_save(flags);			\
	} while (0)
#define powerpc_local_irq_pmu_restore(flags)			\
	do {							\
		raw_local_irq_pmu_restore(flags);		\
	} while (0)
#endif /* CONFIG_TRACE_IRQFLAGS */
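
/*
 * Illustrative usage (hypothetical caller): Book3S perf code can bracket
 * PMU register updates so that no PMI interleaves with them:
 *
 *	unsigned long flags;
 *
 *	powerpc_local_irq_pmu_save(flags);	// masks IRQs and PMIs
 *	// ... update MMCR/PMC state ...
 *	powerpc_local_irq_pmu_restore(flags);
 */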

#endif /* CONFIG_PPC_BOOK3S */

#define hard_irq_disable()	do {					\
	unsigned long flags;						\
	__hard_irq_disable();						\
	flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED);		\
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;			\
	if (!arch_irqs_disabled_flags(flags)) {				\
		asm volatile("std%X0 %1,%0" : "=m" (local_paca->saved_r1) \
			     : "r" (current_stack_pointer));		\
		trace_hardirqs_off();					\
	}								\
} while (0)
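
/*
 * Illustrative contrast (not in the original): on 64-bit,
 * local_irq_disable() only sets the soft mask and leaves MSR[EE] on,
 * whereas
 *
 *	hard_irq_disable();	// soft-mask all + MSR[EE]=0 + HARD_DIS
 *
 * is needed before code that cannot tolerate even a masked interrupt
 * being taken, e.g. entering idle.
 */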

static inline bool __lazy_irq_pending(u8 irq_happened)
{
	return !!(irq_happened & ~PACA_IRQ_HARD_DIS);
}

/*
 * Check if a lazy IRQ is pending. Should be called with IRQs hard disabled.
 */
static inline bool lazy_irq_pending(void)
{
	return __lazy_irq_pending(get_paca()->irq_happened);
}

/*
 * Check if a lazy IRQ is pending, with no debugging checks.
 * Should be called with IRQs hard disabled.
 * For use in RI disabled code or other constrained situations.
 */
static inline bool lazy_irq_pending_nocheck(void)
{
	return __lazy_irq_pending(local_paca->irq_happened);
}
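
/*
 * Illustrative idle-entry pattern (hypothetical caller): hard disable,
 * then bail out if anything is already pending:
 *
 *	hard_irq_disable();
 *	if (lazy_irq_pending())
 *		return false;	// don't sleep, go replay instead
 *	// safe to stop the CPU now
 */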

bool power_pmu_wants_prompt_pmi(void);

/*
 * This is called by asynchronous interrupts to check whether to
 * conditionally re-enable hard interrupts after having cleared
 * the source of the interrupt. They are kept disabled if there
 * is a different soft-masked interrupt pending that requires hard
 * masking.
 */
static inline bool should_hard_irq_enable(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
		WARN_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
		WARN_ON(!(get_paca()->irq_happened & PACA_IRQ_HARD_DIS));
		WARN_ON(mfmsr() & MSR_EE);
	}

	if (!IS_ENABLED(CONFIG_PERF_EVENTS))
		return false;
	/*
	 * If the PMU is not running, there is not much reason to enable
	 * MSR[EE] in irq handlers because any interrupts would just be
	 * soft-masked.
	 *
	 * TODO: Add test for 64e
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
		if (!power_pmu_wants_prompt_pmi())
			return false;
		/*
		 * If PMIs are disabled then IRQs should be disabled as well,
		 * so we shouldn't see this condition, check for it just in
		 * case because we are about to enable PMIs.
		 */
		if (WARN_ON_ONCE(regs->softe & IRQS_PMI_DISABLED))
			return false;
	}

	if (get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)
		return false;

	return true;
}

/*
 * Do the hard enabling, only call this if should_hard_irq_enable is true.
 * This allows PMI interrupts to profile irq handlers.
 */
static inline void do_hard_irq_enable(void)
{
	/*
	 * Async interrupts come in with IRQS_ALL_DISABLED,
	 * PACA_IRQ_HARD_DIS, and MSR[EE]=0.
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		irq_soft_mask_andc_return(IRQS_PMI_DISABLED);
	get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
	__hard_irq_enable();
}
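
/*
 * Illustrative handler pattern (hypothetical caller): an async interrupt
 * handler, after quiescing its source, may let PMIs profile the rest:
 *
 *	if (should_hard_irq_enable(regs))
 *		do_hard_irq_enable();
 *	// ... bulk of the handling runs with MSR[EE] on ...
 */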

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return (regs->softe & IRQS_DISABLED);
}

extern bool prep_irq_for_idle(void);
extern bool prep_irq_for_idle_irqsoff(void);
extern void irq_set_pending_from_srr1(unsigned long srr1);

#define fini_irq_for_idle_irqsoff() trace_hardirqs_off();

extern void force_external_irq_replay(void);

static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
{
	regs->softe = val;
}
#else /* CONFIG_PPC64 */

static inline notrace unsigned long irq_soft_mask_return(void)
{
	return 0;
}

static inline unsigned long arch_local_save_flags(void)
{
	return mfmsr();
}

static inline void arch_local_irq_restore(unsigned long flags)
{
	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(flags);
	else
		mtmsr(flags);
}

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();

	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else
		mtmsr(flags & ~MSR_EE);

	return flags;
}

static inline void arch_local_irq_disable(void)
{
	__hard_irq_disable();
}

static inline void arch_local_irq_enable(void)
{
	__hard_irq_enable();
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return (flags & MSR_EE) == 0;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}
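
/*
 * Illustrative note (not in the original): on 32-bit there is no soft
 * mask, so the flags word is simply the saved MSR and
 *
 *	arch_irqs_disabled_flags(mfmsr())
 *
 * just tests MSR[EE]; disabling really does clear MSR[EE] in hardware.
 */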

#define hard_irq_disable()		arch_local_irq_disable()

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return !(regs->msr & MSR_EE);
}

static __always_inline bool should_hard_irq_enable(struct pt_regs *regs)
{
	return false;
}

static inline void do_hard_irq_enable(void)
{
	BUILD_BUG();
}

static inline void clear_pmi_irq_pending(void) { }
static inline void set_pmi_irq_pending(void) { }
static inline bool pmi_irq_pending(void) { return false; }

static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
{
}
#endif /* CONFIG_PPC64 */

static inline unsigned long mtmsr_isync_irqsafe(unsigned long msr)
{
#ifdef CONFIG_PPC64
	if (arch_irqs_disabled()) {
		/*
		 * With soft-masking, MSR[EE] can change from 1 to 0
		 * asynchronously when irqs are disabled, and we don't want to
		 * set MSR[EE] back to 1 here if that has happened. A race-free
		 * way to do this is to ensure EE is already 0. Another way it
		 * could be done is with a RESTART_TABLE handler, but that's
		 * probably overkill here.
		 */
		msr &= ~MSR_EE;
		mtmsr_isync(msr);
		irq_soft_mask_set(IRQS_ALL_DISABLED);
		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
	} else
#endif
		mtmsr_isync(msr);

	return msr;
}
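
/*
 * Illustrative only: a caller flipping, say, MSR[FP] can use the return
 * value to learn what was actually written:
 *
 *	msr = mtmsr_isync_irqsafe(mfmsr() | MSR_FP);
 *
 * If irqs were soft-disabled, EE stays off in the written value.
 */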

#define ARCH_IRQ_INIT_FLAGS	IRQ_NOREQUEST

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HW_IRQ_H */