/* handle.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
  4. * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
  5. *
  6. * This file contains the core interrupt handling code. Detailed
  7. * information is available in Documentation/core-api/genericirq.rst
  8. *
  9. */
  10. #include <linux/irq.h>
  11. #include <linux/random.h>
  12. #include <linux/sched.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/kernel_stat.h>
  15. #include <asm/irq_regs.h>
  16. #include <trace/events/irq.h>
  17. #include "internals.h"
  18. #ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
  19. void (*handle_arch_irq)(struct pt_regs *) __ro_after_init;
  20. #endif
/**
 * handle_bad_irq - handle spurious and unhandled irqs
 * @desc:	description of the interrupt
 *
 * Handles spurious and unhandled IRQ's. It also prints a debug message.
 */
void handle_bad_irq(struct irq_desc *desc)
{
	unsigned int irq = irq_desc_get_irq(desc);

	print_irq_desc(irq, desc);
	/* Account the bad interrupt in this CPU's interrupt statistics */
	kstat_incr_irqs_this_cpu(desc);
	/* Let the architecture code acknowledge the spurious interrupt */
	ack_bad_irq(irq);
}
EXPORT_SYMBOL_GPL(handle_bad_irq);
/*
 * Special, empty irq handler: always reports the interrupt as not
 * handled (IRQ_NONE) and does nothing else.
 */
irqreturn_t no_action(int cpl, void *dev_id)
{
	return IRQ_NONE;
}
EXPORT_SYMBOL_GPL(no_action);
  43. static void warn_no_thread(unsigned int irq, struct irqaction *action)
  44. {
  45. if (test_and_set_bit(IRQTF_WARNED, &action->thread_flags))
  46. return;
  47. printk(KERN_WARNING "IRQ %d device %s returned IRQ_WAKE_THREAD "
  48. "but no thread function available.", irq, action->name);
  49. }
/*
 * __irq_wake_thread - wake the irq thread for an action from hard irq
 * context.
 * @desc:	interrupt descriptor the action belongs to
 * @action:	the irqaction whose thread should be woken
 *
 * Marks the thread runnable via IRQTF_RUNTHREAD, records the action in
 * desc->threads_oneshot, bumps desc->threads_active and wakes the
 * handler thread. See the block comment below for why the lockless
 * threads_oneshot update is safe.
 */
void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
{
	/*
	 * In case the thread crashed and was killed we just pretend that
	 * we handled the interrupt. The hardirq handler has disabled the
	 * device interrupt, so no irq storm is lurking.
	 */
	if (action->thread->flags & PF_EXITING)
		return;

	/*
	 * Wake up the handler thread for this action. If the
	 * RUNTHREAD bit is already set, nothing to do.
	 */
	if (test_and_set_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		return;

	/*
	 * It's safe to OR the mask lockless here. We have only two
	 * places which write to threads_oneshot: This code and the
	 * irq thread.
	 *
	 * This code is the hard irq context and can never run on two
	 * cpus in parallel. If it ever does we have more serious
	 * problems than this bitmask.
	 *
	 * The irq threads of this irq which clear their "running" bit
	 * in threads_oneshot are serialized via desc->lock against
	 * each other and they are serialized against this code by
	 * IRQS_INPROGRESS.
	 *
	 * Hard irq handler:
	 *
	 *	spin_lock(desc->lock);
	 *	desc->state |= IRQS_INPROGRESS;
	 *	spin_unlock(desc->lock);
	 *	set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
	 *	desc->threads_oneshot |= mask;
	 *	spin_lock(desc->lock);
	 *	desc->state &= ~IRQS_INPROGRESS;
	 *	spin_unlock(desc->lock);
	 *
	 * irq thread:
	 *
	 * again:
	 *	spin_lock(desc->lock);
	 *	if (desc->state & IRQS_INPROGRESS) {
	 *		spin_unlock(desc->lock);
	 *		while(desc->state & IRQS_INPROGRESS)
	 *			cpu_relax();
	 *		goto again;
	 *	}
	 *	if (!test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
	 *		desc->threads_oneshot &= ~mask;
	 *	spin_unlock(desc->lock);
	 *
	 * So either the thread waits for us to clear IRQS_INPROGRESS
	 * or we are waiting in the flow handler for desc->lock to be
	 * released before we reach this point. The thread also checks
	 * IRQTF_RUNTHREAD under desc->lock. If set it leaves
	 * threads_oneshot untouched and runs the thread another time.
	 */
	desc->threads_oneshot |= action->thread_mask;

	/*
	 * We increment the threads_active counter in case we wake up
	 * the irq thread. The irq thread decrements the counter when
	 * it returns from the handler or in the exit path and wakes
	 * up waiters which are stuck in synchronize_irq() when the
	 * active count becomes zero. synchronize_irq() is serialized
	 * against this code (hard irq handler) via IRQS_INPROGRESS
	 * like the finalize_oneshot() code. See comment above.
	 */
	atomic_inc(&desc->threads_active);
	wake_up_process(action->thread);
}
  123. irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc)
  124. {
  125. irqreturn_t retval = IRQ_NONE;
  126. unsigned int irq = desc->irq_data.irq;
  127. struct irqaction *action;
  128. record_irq_time(desc);
  129. for_each_action_of_desc(desc, action) {
  130. irqreturn_t res;
  131. /*
  132. * If this IRQ would be threaded under force_irqthreads, mark it so.
  133. */
  134. if (irq_settings_can_thread(desc) &&
  135. !(action->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)))
  136. lockdep_hardirq_threaded();
  137. trace_irq_handler_entry(irq, action);
  138. res = action->handler(irq, action->dev_id);
  139. trace_irq_handler_exit(irq, action, res);
  140. if (WARN_ONCE(!irqs_disabled(),"irq %u handler %pS enabled interrupts\n",
  141. irq, action->handler))
  142. local_irq_disable();
  143. switch (res) {
  144. case IRQ_WAKE_THREAD:
  145. /*
  146. * Catch drivers which return WAKE_THREAD but
  147. * did not set up a thread function
  148. */
  149. if (unlikely(!action->thread_fn)) {
  150. warn_no_thread(irq, action);
  151. break;
  152. }
  153. __irq_wake_thread(desc, action);
  154. break;
  155. default:
  156. break;
  157. }
  158. retval |= res;
  159. }
  160. return retval;
  161. }
  162. irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
  163. {
  164. irqreturn_t retval;
  165. retval = __handle_irq_event_percpu(desc);
  166. add_interrupt_randomness(desc->irq_data.irq);
  167. if (!irq_settings_no_debug(desc))
  168. note_interrupt(desc, retval);
  169. return retval;
  170. }
/*
 * handle_irq_event - handle one interrupt event for a descriptor.
 * @desc:	interrupt descriptor
 *
 * Called with desc->lock held (it is unlocked below before the
 * handlers run and reacquired afterwards). Clears IRQS_PENDING and
 * keeps IRQD_IRQ_INPROGRESS set for the whole time the handlers
 * execute without the lock. Returns the combined handler result.
 */
irqreturn_t handle_irq_event(struct irq_desc *desc)
{
	irqreturn_t ret;

	desc->istate &= ~IRQS_PENDING;
	/* Mark in-progress before dropping the lock so others can see it */
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock(&desc->lock);

	ret = handle_irq_event_percpu(desc);

	raw_spin_lock(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	return ret;
}
  182. #ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
/*
 * set_handle_irq - install the root interrupt handler for the
 * architecture. Only the first caller succeeds; subsequent calls
 * return -EBUSY and leave the installed handler untouched.
 */
int __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
{
	/* Refuse to overwrite an already installed handler */
	if (handle_arch_irq)
		return -EBUSY;

	handle_arch_irq = handle_irq;
	return 0;
}
/**
 * generic_handle_arch_irq - root irq handler for architectures which do no
 *                           entry accounting themselves
 * @regs: Register file coming from the low-level handling code
 */
asmlinkage void noinstr generic_handle_arch_irq(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	irq_enter();
	/* Save the previous irq regs pointer; restored after dispatch */
	old_regs = set_irq_regs(regs);
	handle_arch_irq(regs);
	set_irq_regs(old_regs);
	irq_exit();
}
  204. #endif