/*
 * hardirq.h — hard-interrupt / NMI context entry-exit helpers.
 * (Recovered from a web paste of include/linux/hardirq.h; the pasted
 * line-number artifacts have been removed below.)
 */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef LINUX_HARDIRQ_H
  3. #define LINUX_HARDIRQ_H
  4. #include <linux/context_tracking_state.h>
  5. #include <linux/preempt.h>
  6. #include <linux/lockdep.h>
  7. #include <linux/ftrace_irq.h>
  8. #include <linux/sched.h>
  9. #include <linux/vtime.h>
  10. #include <asm/hardirq.h>
  11. extern void synchronize_irq(unsigned int irq);
  12. extern bool synchronize_hardirq(unsigned int irq);
  13. #ifdef CONFIG_NO_HZ_FULL
  14. void __rcu_irq_enter_check_tick(void);
  15. #else
  16. static inline void __rcu_irq_enter_check_tick(void) { }
  17. #endif
  18. static __always_inline void rcu_irq_enter_check_tick(void)
  19. {
  20. if (context_tracking_enabled())
  21. __rcu_irq_enter_check_tick();
  22. }
  23. /*
  24. * It is safe to do non-atomic ops on ->hardirq_context,
  25. * because NMI handlers may not preempt and the ops are
  26. * always balanced, so the interrupted value of ->hardirq_context
  27. * will always be restored.
  28. */
  29. #define __irq_enter() \
  30. do { \
  31. preempt_count_add(HARDIRQ_OFFSET); \
  32. lockdep_hardirq_enter(); \
  33. account_hardirq_enter(current); \
  34. } while (0)
  35. /*
  36. * Like __irq_enter() without time accounting for fast
  37. * interrupts, e.g. reschedule IPI where time accounting
  38. * is more expensive than the actual interrupt.
  39. */
  40. #define __irq_enter_raw() \
  41. do { \
  42. preempt_count_add(HARDIRQ_OFFSET); \
  43. lockdep_hardirq_enter(); \
  44. } while (0)
  45. /*
  46. * Enter irq context (on NO_HZ, update jiffies):
  47. */
  48. void irq_enter(void);
  49. /*
  50. * Like irq_enter(), but RCU is already watching.
  51. */
  52. void irq_enter_rcu(void);
  53. /*
  54. * Exit irq context without processing softirqs:
  55. */
  56. #define __irq_exit() \
  57. do { \
  58. account_hardirq_exit(current); \
  59. lockdep_hardirq_exit(); \
  60. preempt_count_sub(HARDIRQ_OFFSET); \
  61. } while (0)
  62. /*
  63. * Like __irq_exit() without time accounting
  64. */
  65. #define __irq_exit_raw() \
  66. do { \
  67. lockdep_hardirq_exit(); \
  68. preempt_count_sub(HARDIRQ_OFFSET); \
  69. } while (0)
  70. /*
  71. * Exit irq context and process softirqs if needed:
  72. */
  73. void irq_exit(void);
  74. /*
  75. * Like irq_exit(), but return with RCU watching.
  76. */
  77. void irq_exit_rcu(void);
  78. #ifndef arch_nmi_enter
  79. #define arch_nmi_enter() do { } while (0)
  80. #define arch_nmi_exit() do { } while (0)
  81. #endif
  82. /*
  83. * NMI vs Tracing
  84. * --------------
  85. *
  86. * We must not land in a tracer until (or after) we've changed preempt_count
  87. * such that in_nmi() becomes true. To that effect all NMI C entry points must
  88. * be marked 'notrace' and call nmi_enter() as soon as possible.
  89. */
  90. /*
  91. * nmi_enter() can nest up to 15 times; see NMI_BITS.
  92. */
  93. #define __nmi_enter() \
  94. do { \
  95. lockdep_off(); \
  96. arch_nmi_enter(); \
  97. BUG_ON(in_nmi() == NMI_MASK); \
  98. __preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
  99. } while (0)
  100. #define nmi_enter() \
  101. do { \
  102. __nmi_enter(); \
  103. lockdep_hardirq_enter(); \
  104. ct_nmi_enter(); \
  105. instrumentation_begin(); \
  106. ftrace_nmi_enter(); \
  107. instrumentation_end(); \
  108. } while (0)
  109. #define __nmi_exit() \
  110. do { \
  111. BUG_ON(!in_nmi()); \
  112. __preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
  113. arch_nmi_exit(); \
  114. lockdep_on(); \
  115. } while (0)
  116. #define nmi_exit() \
  117. do { \
  118. instrumentation_begin(); \
  119. ftrace_nmi_exit(); \
  120. instrumentation_end(); \
  121. ct_nmi_exit(); \
  122. lockdep_hardirq_exit(); \
  123. __nmi_exit(); \
  124. } while (0)
  125. #endif /* LINUX_HARDIRQ_H */