// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Derived from arch/i386/kernel/irq.c
 *   Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 *   Copyright (C) 1995-1996 Gary Thomas ([email protected])
 * Updated and modified by Cort Dougan <[email protected]>
 *   Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 *   Copyright (C) 1996 Paul Mackerras ([email protected])
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 */

#undef DEBUG

#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>
#include <linux/static_call.h>
#include <linux/uaccess.h>
#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/hw_irq.h>
#include <asm/softirq_stack.h>
#include <asm/ppc_asm.h>
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#include <asm/dbell.h>
#include <asm/trace.h>
#include <asm/cpu_has_feature.h>
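
/*
 * distribute_irqs selects whether platform interrupt controller setup
 * spreads device interrupts across CPUs; it is cleared by the
 * "noirqdistrib" boot option handled by setup_noirqdistrib() at the
 * bottom of this file.
 */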
int distribute_irqs = 1;

static inline void next_interrupt(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
		WARN_ON(!(local_paca->irq_happened & PACA_IRQ_HARD_DIS));
		WARN_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
	}

	/*
	 * We are responding to the next interrupt, so interrupt-off
	 * latencies should be reset here.
	 */
	lockdep_hardirq_exit();
	trace_hardirqs_on();
	trace_hardirqs_off();
	lockdep_hardirq_enter();
}
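
/*
 * Note: this is a plain (non-atomic) read-modify-write of irq_happened.
 * It is only safe because callers run with interrupts hard disabled
 * (MSR[EE] clear, as asserted in __replay_soft_interrupts), so no
 * interrupt can set new bits between the load and the store.
 */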
static inline bool irq_happened_test_and_clear(u8 irq)
{
	if (local_paca->irq_happened & irq) {
		local_paca->irq_happened &= ~irq;
		return true;
	}
	return false;
}

static void __replay_soft_interrupts(void)
{
	struct pt_regs regs;

	/*
	 * We use local_paca rather than get_paca() to avoid all the
	 * debug_smp_processor_id() business in this low level function.
	 */

	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
		WARN_ON_ONCE(mfmsr() & MSR_EE);
		WARN_ON(!(local_paca->irq_happened & PACA_IRQ_HARD_DIS));
		WARN_ON(local_paca->irq_happened & PACA_IRQ_REPLAYING);
	}

	/*
	 * PACA_IRQ_REPLAYING prevents interrupt handlers from enabling
	 * MSR[EE] to get PMIs, which can result in more IRQs becoming
	 * pending.
	 */
	local_paca->irq_happened |= PACA_IRQ_REPLAYING;

	ppc_save_regs(&regs);
	regs.softe = IRQS_ENABLED;
	regs.msr |= MSR_EE;
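
	/*
	 * The handlers below are given this synthetic pt_regs, captured
	 * from the current context but with softe and MSR[EE] adjusted so
	 * the frame looks like a normal interrupt taken with IRQs enabled.
	 */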

	/*
	 * Force the delivery of pending soft-disabled interrupts on PS3.
	 * Any HV call will have this side effect.
	 */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		u64 tmp, tmp2;
		lv1_get_version_info(&tmp, &tmp2);
	}

	/*
	 * Check if a Hypervisor Maintenance interrupt happened.
	 * This is a higher priority interrupt than the others, so
	 * replay it first.
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S) &&
	    irq_happened_test_and_clear(PACA_IRQ_HMI)) {
		regs.trap = INTERRUPT_HMI;
		handle_hmi_exception(&regs);
		next_interrupt(&regs);
	}

	if (irq_happened_test_and_clear(PACA_IRQ_DEC)) {
		regs.trap = INTERRUPT_DECREMENTER;
		timer_interrupt(&regs);
		next_interrupt(&regs);
	}

	if (irq_happened_test_and_clear(PACA_IRQ_EE)) {
		regs.trap = INTERRUPT_EXTERNAL;
		do_IRQ(&regs);
		next_interrupt(&regs);
	}

	if (IS_ENABLED(CONFIG_PPC_DOORBELL) &&
	    irq_happened_test_and_clear(PACA_IRQ_DBELL)) {
		regs.trap = INTERRUPT_DOORBELL;
		doorbell_exception(&regs);
		next_interrupt(&regs);
	}

	/* Book3E does not support soft-masking PMI interrupts */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S) &&
	    irq_happened_test_and_clear(PACA_IRQ_PMI)) {
		regs.trap = INTERRUPT_PERFMON;
		performance_monitor_exception(&regs);
		next_interrupt(&regs);
	}

	local_paca->irq_happened &= ~PACA_IRQ_REPLAYING;
}

void replay_soft_interrupts(void)
{
	irq_enter(); /* See comment in arch_local_irq_restore */
	__replay_soft_interrupts();
	irq_exit();
}

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_KUAP)
static inline void replay_soft_interrupts_irqrestore(void)
{
	unsigned long kuap_state = get_kuap();

	/*
	 * Check if anything calls local_irq_enable/restore() when KUAP is
	 * disabled (user access enabled). We handle that case here by saving
	 * and re-locking AMR but we shouldn't get here in the first place,
	 * hence the warning.
	 */
	kuap_assert_locked();

	if (kuap_state != AMR_KUAP_BLOCKED)
		set_kuap(AMR_KUAP_BLOCKED);

	__replay_soft_interrupts();

	if (kuap_state != AMR_KUAP_BLOCKED)
		set_kuap(kuap_state);
}
#else
#define replay_soft_interrupts_irqrestore() __replay_soft_interrupts()
#endif

notrace void arch_local_irq_restore(unsigned long mask)
{
	unsigned char irq_happened;

	/* Write the new soft-enabled value if it is a disable */
	if (mask) {
		irq_soft_mask_set(mask);
		return;
	}

	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
		WARN_ON_ONCE(in_nmi());
		WARN_ON_ONCE(in_hardirq());
		WARN_ON_ONCE(local_paca->irq_happened & PACA_IRQ_REPLAYING);
	}

again:
	/*
	 * After the stb, interrupts are unmasked and there are no interrupts
	 * pending replay. The restart sequence makes this atomic with
	 * respect to soft-masked interrupts. If this was just a simple code
	 * sequence, a soft-masked interrupt could become pending right after
	 * the comparison and before the stb.
	 *
	 * This allows interrupts to be unmasked without hard disabling, and
	 * also without new hard interrupts coming in ahead of pending ones.
	 */
	asm_volatile_goto(
	"1:				\n"
	"	lbz	9,%0(13)	\n"
	"	cmpwi	9,0		\n"
	"	bne	%l[happened]	\n"
	"	stb	9,%1(13)	\n"
	"2:				\n"
		RESTART_TABLE(1b, 2b, 1b)
	: : "i" (offsetof(struct paca_struct, irq_happened)),
	    "i" (offsetof(struct paca_struct, irq_soft_mask))
	: "cr0", "r9"
	: happened);
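
	/*
	 * If a soft-masked interrupt is taken while NIP is between labels
	 * 1 and 2 above, the masked-interrupt return path consults the
	 * restart table and resumes execution at label 1 (the third
	 * RESTART_TABLE argument), re-running the test-and-store so a
	 * newly pending irq_happened bit cannot be missed.
	 */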

	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(!(mfmsr() & MSR_EE));

	/*
	 * If we came here from the replay below, we might have a preempt
	 * pending (due to preempt_enable_no_resched()). Have to check now.
	 */
	preempt_check_resched();

	return;

happened:
	irq_happened = READ_ONCE(local_paca->irq_happened);
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(!irq_happened);

	if (irq_happened == PACA_IRQ_HARD_DIS) {
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
			WARN_ON_ONCE(mfmsr() & MSR_EE);
		irq_soft_mask_set(IRQS_ENABLED);
		local_paca->irq_happened = 0;
		__hard_irq_enable();
		preempt_check_resched();
		return;
	}

	/* Have interrupts to replay, need to hard disable first */
	if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
			if (!(mfmsr() & MSR_EE)) {
				/*
				 * An interrupt could have come in and cleared
				 * MSR[EE] and set IRQ_HARD_DIS, so check
				 * IRQ_HARD_DIS again and warn if it is still
				 * clear.
				 */
				irq_happened = READ_ONCE(local_paca->irq_happened);
				WARN_ON_ONCE(!(irq_happened & PACA_IRQ_HARD_DIS));
			}
		}
		__hard_irq_disable();
		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
	} else {
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
			if (WARN_ON_ONCE(mfmsr() & MSR_EE))
				__hard_irq_disable();
		}
	}

	/*
	 * Disable preempt here, so that the below preempt_enable will
	 * perform resched if required (a replayed interrupt may set
	 * need_resched).
	 */
	preempt_disable();
	irq_soft_mask_set(IRQS_ALL_DISABLED);
	trace_hardirqs_off();

	/*
	 * Now enter interrupt context. The interrupt handlers themselves
	 * also call irq_enter/exit (which is okay, they can nest). But call
	 * it here now to hold off softirqs until the below irq_exit(). If
	 * we allowed replayed handlers to run softirqs, that enables irqs,
	 * which must replay interrupts, which recurses in here and makes
	 * things more complicated. The recursion is limited to 2, and it can
	 * be made to work, but it's complicated.
	 *
	 * local_bh_disable cannot be used here because interrupts taken in
	 * idle are not in the right context (RCU, tick, etc) to run softirqs
	 * so irq_enter must be called.
	 */
	irq_enter();

	replay_soft_interrupts_irqrestore();

	irq_exit();

	if (unlikely(local_paca->irq_happened != PACA_IRQ_HARD_DIS)) {
		/*
		 * The softirq processing in irq_exit() may enable interrupts
		 * temporarily, which can result in MSR[EE] being enabled and
		 * more irqs becoming pending. Go around again if that happens.
		 */
		trace_hardirqs_on();
		preempt_enable_no_resched();
		goto again;
	}

	trace_hardirqs_on();
	irq_soft_mask_set(IRQS_ENABLED);
	local_paca->irq_happened = 0;
	__hard_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);
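
/*
 * Summary of the lazy (soft) interrupt masking scheme implemented above:
 * local_irq_disable() only sets the soft mask in the PACA and leaves
 * MSR[EE] alone. If a hard interrupt arrives while soft-masked, the low
 * level entry code records it in paca->irq_happened, hard disables by
 * clearing MSR[EE], and returns. arch_local_irq_restore(0) then notices
 * the pending bits via the irq_happened test above and replays those
 * interrupts before really unmasking.
 */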

/*
 * This is a helper to use when about to go into idle low-power
 * when the latter has the side effect of re-enabling interrupts
 * (such as calling H_CEDE under pHyp).
 *
 * You call this function with interrupts soft-disabled (this is
 * already the case when ppc_md.power_save is called). The function
 * will return whether to enter power save or just return.
 *
 * In the former case, it will have notified lockdep of interrupts
 * being re-enabled and generally sanitized the lazy irq state,
 * and in the latter case it will leave with interrupts hard
 * disabled and marked as such, so the local_irq_enable() call
 * in arch_cpu_idle() will properly re-enable everything.
 */
bool prep_irq_for_idle(void)
{
	/*
	 * First we need to hard disable to ensure no interrupt
	 * occurs before we effectively enter the low power state
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/*
	 * If anything happened while we were soft-disabled,
	 * we return now and do not enter the low power state.
	 */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep we are about to re-enable */
	trace_hardirqs_on();

	/*
	 * Mark interrupts as soft-enabled and clear the
	 * PACA_IRQ_HARD_DIS from the pending mask since we
	 * are about to hard enable as well as a side effect
	 * of entering the low power state.
	 */
	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
	irq_soft_mask_set(IRQS_ENABLED);

	/* Tell the caller to enter the low power state */
	return true;
}

#ifdef CONFIG_PPC_BOOK3S
/*
 * This is for idle sequences that return with IRQs off, but the
 * idle state itself wakes on interrupt. Tell the irq tracer that
 * IRQs are enabled for the duration of idle so it does not get long
 * off times. Must be paired with fini_irq_for_idle_irqsoff.
 */
bool prep_irq_for_idle_irqsoff(void)
{
	WARN_ON(!irqs_disabled());

	/*
	 * First we need to hard disable to ensure no interrupt
	 * occurs before we effectively enter the low power state
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/*
	 * If anything happened while we were soft-disabled,
	 * we return now and do not enter the low power state.
	 */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep we are about to re-enable */
	trace_hardirqs_on();

	return true;
}

/*
 * Take the SRR1 wakeup reason, index into this table to find the
 * appropriate irq_happened bit.
 *
 * System reset exceptions taken in idle state also come through here,
 * but they are NMI interrupts so do not need to wait for IRQs to be
 * restored, and should be taken as early as practical. These are marked
 * with 0xff in the table. The Power ISA specifies 0100b as the system
 * reset interrupt reason.
 */
#define IRQ_SYSTEM_RESET	0xff

static const u8 srr1_to_lazyirq[0x10] = {
	0, 0, 0,
	PACA_IRQ_DBELL,
	IRQ_SYSTEM_RESET,
	PACA_IRQ_DBELL,
	PACA_IRQ_DEC,
	0,
	PACA_IRQ_EE,
	PACA_IRQ_EE,
	PACA_IRQ_HMI,
	0, 0, 0, 0, 0 };
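
/*
 * For reference, the indices above correspond to SRR1[42:45] wake
 * reasons: 0x3 and 0x5 doorbell, 0x4 system reset, 0x6 decrementer,
 * 0x8 and 0x9 external interrupt, 0xa HMI; all other values map to 0
 * (no lazy interrupt to set pending).
 */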

void replay_system_reset(void)
{
	struct pt_regs regs;

	ppc_save_regs(&regs);
	regs.trap = 0x100;
	get_paca()->in_nmi = 1;
	system_reset_exception(&regs);
	get_paca()->in_nmi = 0;
}
EXPORT_SYMBOL_GPL(replay_system_reset);
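
/*
 * Called on wakeup from idle with the SRR1 wake reason: either take a
 * system reset immediately (it is an NMI) or convert the reason into
 * the corresponding paca->irq_happened bit for later replay.
 */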
void irq_set_pending_from_srr1(unsigned long srr1)
{
	unsigned int idx = (srr1 & SRR1_WAKEMASK_P8) >> 18;
	u8 reason = srr1_to_lazyirq[idx];

	/*
	 * Take the system reset now, which is immediately after registers
	 * are restored from idle. It's an NMI, so interrupts need not be
	 * re-enabled before it is taken.
	 */
	if (unlikely(reason == IRQ_SYSTEM_RESET)) {
		replay_system_reset();
		return;
	}

	if (reason == PACA_IRQ_DBELL) {
		/*
		 * When doorbell triggers a system reset wakeup, the message
		 * is not cleared, so if the doorbell interrupt is replayed
		 * and the IPI handled, the doorbell interrupt would still
		 * fire when EE is enabled.
		 *
		 * To avoid taking the superfluous doorbell interrupt,
		 * execute a msgclr here before the interrupt is replayed.
		 */
		ppc_msgclr(PPC_DBELL_MSGTYPE);
	}

	/*
	 * The 0 index (SRR1[42:45]=b0000) must always evaluate to 0,
	 * so this can be called unconditionally with the SRR1 wake
	 * reason as returned by the idle code, which uses 0 to mean no
	 * interrupt.
	 *
	 * If a future CPU was to designate this as an interrupt reason,
	 * then a new index for no interrupt must be assigned.
	 */
	local_paca->irq_happened |= reason;
}
#endif /* CONFIG_PPC_BOOK3S */

/*
 * Force a replay of the external interrupt handler on this CPU.
 */
void force_external_irq_replay(void)
{
	/*
	 * This must only be called with interrupts soft-disabled,
	 * the replay will happen when re-enabling.
	 */
	WARN_ON(!arch_irqs_disabled());

	/*
	 * Interrupts must always be hard disabled before irq_happened is
	 * modified (to prevent lost update in case of interrupt between
	 * load and store).
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/* Indicate in the PACA that we have an interrupt to replay */
	local_paca->irq_happened |= PACA_IRQ_EE;
}

static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}
__setup("noirqdistrib", setup_noirqdistrib);