/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/context_tracking_state.h>
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <linux/sched.h>
#include <linux/vtime.h>
#include <asm/hardirq.h>

extern void synchronize_irq(unsigned int irq);
extern bool synchronize_hardirq(unsigned int irq);

#ifdef CONFIG_NO_HZ_FULL
void __rcu_irq_enter_check_tick(void);
#else
static inline void __rcu_irq_enter_check_tick(void) { }
#endif

static __always_inline void rcu_irq_enter_check_tick(void)
{
	if (context_tracking_enabled())
		__rcu_irq_enter_check_tick();
}
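
/*
 * Illustrative sketch, not part of this header: an entry path that runs
 * with RCU already watching can call the wrapper above to keep the
 * NO_HZ_FULL tick honest; it is a no-op unless context tracking is
 * enabled. The entry point name below is hypothetical.
 *
 *	void my_irqentry(struct pt_regs *regs)
 *	{
 *		rcu_irq_enter_check_tick();
 *		...
 *	}
 */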

/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
#define __irq_enter()					\
	do {						\
		preempt_count_add(HARDIRQ_OFFSET);	\
		lockdep_hardirq_enter();		\
		account_hardirq_enter(current);		\
	} while (0)

/*
 * Like __irq_enter() without time accounting for fast
 * interrupts, e.g. reschedule IPI where time accounting
 * is more expensive than the actual interrupt.
 */
#define __irq_enter_raw()				\
	do {						\
		preempt_count_add(HARDIRQ_OFFSET);	\
		lockdep_hardirq_enter();		\
	} while (0)
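
/*
 * Minimal usage sketch, illustrative only: these raw helpers are normally
 * reached via irq_enter()/irq_exit() and friends, but a cheap IPI path
 * could pair them directly to skip time accounting. Both function names
 * below are hypothetical; __irq_exit_raw() is defined further down.
 *
 *	static void my_resched_ipi(void)
 *	{
 *		__irq_enter_raw();
 *		my_scheduler_ipi_work();
 *		__irq_exit_raw();
 *	}
 */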

/*
 * Enter irq context (on NO_HZ, update jiffies):
 */
void irq_enter(void);

/*
 * Like irq_enter(), but RCU is already watching.
 */
void irq_enter_rcu(void);

/*
 * Exit irq context without processing softirqs:
 */
#define __irq_exit()					\
	do {						\
		account_hardirq_exit(current);		\
		lockdep_hardirq_exit();			\
		preempt_count_sub(HARDIRQ_OFFSET);	\
	} while (0)

/*
 * Like __irq_exit() without time accounting
 */
#define __irq_exit_raw()				\
	do {						\
		lockdep_hardirq_exit();			\
		preempt_count_sub(HARDIRQ_OFFSET);	\
	} while (0)

/*
 * Exit irq context and process softirqs if needed:
 */
void irq_exit(void);

/*
 * Like irq_exit(), but return with RCU watching.
 */
void irq_exit_rcu(void);
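
/*
 * Typical pairing, shown as a hypothetical sketch: arch interrupt entry
 * code brackets handler dispatch with irq_enter()/irq_exit() so that
 * preempt_count, lockdep state, time accounting and softirq processing
 * all stay balanced. Neither function below exists in this header.
 *
 *	void my_arch_handle_irq(struct pt_regs *regs)
 *	{
 *		irq_enter();
 *		my_dispatch_irq(regs);
 *		irq_exit();
 *	}
 */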

#ifndef arch_nmi_enter
#define arch_nmi_enter()	do { } while (0)
#define arch_nmi_exit()		do { } while (0)
#endif

/*
 * NMI vs Tracing
 * --------------
 *
 * We must not land in a tracer until (or after) we've changed preempt_count
 * such that in_nmi() becomes true. To that effect all NMI C entry points must
 * be marked 'notrace' and call nmi_enter() as soon as possible.
 */

/*
 * nmi_enter() can nest up to 15 times; see NMI_BITS.
 */
#define __nmi_enter()						\
	do {							\
		lockdep_off();					\
		arch_nmi_enter();				\
		BUG_ON(in_nmi() == NMI_MASK);			\
		__preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET);	\
	} while (0)

#define nmi_enter()						\
	do {							\
		__nmi_enter();					\
		lockdep_hardirq_enter();			\
		ct_nmi_enter();					\
		instrumentation_begin();			\
		ftrace_nmi_enter();				\
		instrumentation_end();				\
	} while (0)

#define __nmi_exit()						\
	do {							\
		BUG_ON(!in_nmi());				\
		__preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET);	\
		arch_nmi_exit();				\
		lockdep_on();					\
	} while (0)

#define nmi_exit()						\
	do {							\
		instrumentation_begin();			\
		ftrace_nmi_exit();				\
		instrumentation_end();				\
		ct_nmi_exit();					\
		lockdep_hardirq_exit();				\
		__nmi_exit();					\
	} while (0)
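
/*
 * Hypothetical NMI entry sketch, following the "NMI vs Tracing" note
 * above: the C entry point is marked notrace and calls nmi_enter()
 * before anything traceable can run, then nmi_exit() last. The handler
 * names are made up for illustration.
 *
 *	notrace void my_nmi_handler(struct pt_regs *regs)
 *	{
 *		nmi_enter();
 *		my_handle_nmi(regs);
 *		nmi_exit();
 *	}
 */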

#endif /* LINUX_HARDIRQ_H */