context_tracking.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CONTEXT_TRACKING_H
#define _LINUX_CONTEXT_TRACKING_H

#include <linux/sched.h>
#include <linux/vtime.h>
#include <linux/context_tracking_state.h>
#include <linux/instrumentation.h>

#include <asm/ptrace.h>

#ifdef CONFIG_CONTEXT_TRACKING_USER
extern void ct_cpu_track_user(int cpu);

/* Called with interrupts disabled. */
extern void __ct_user_enter(enum ctx_state state);
extern void __ct_user_exit(enum ctx_state state);

extern void ct_user_enter(enum ctx_state state);
extern void ct_user_exit(enum ctx_state state);
extern void user_enter_callable(void);
extern void user_exit_callable(void);
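
/*
 * user_enter()/user_exit() tell context tracking that the CPU is entering or
 * returning from user space.  Both are no-ops when context tracking is
 * disabled.
 */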
static inline void user_enter(void)
{
	if (context_tracking_enabled())
		ct_user_enter(CONTEXT_USER);
}

static inline void user_exit(void)
{
	if (context_tracking_enabled())
		ct_user_exit(CONTEXT_USER);
}

/* Called with interrupts disabled. */
static __always_inline void user_enter_irqoff(void)
{
	if (context_tracking_enabled())
		__ct_user_enter(CONTEXT_USER);
}

static __always_inline void user_exit_irqoff(void)
{
	if (context_tracking_enabled())
		__ct_user_exit(CONTEXT_USER);
}
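
/*
 * exception_enter() records the context the exception interrupted and, if it
 * was not the kernel, switches context tracking back to kernel mode.  The
 * previous state is returned so exception_exit() can restore it.  It returns
 * 0 and does nothing when CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK is set
 * or context tracking is disabled.
 */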
static inline enum ctx_state exception_enter(void)
{
	enum ctx_state prev_ctx;

	if (IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) ||
	    !context_tracking_enabled())
		return 0;

	prev_ctx = __ct_state();
	if (prev_ctx != CONTEXT_KERNEL)
		ct_user_exit(prev_ctx);

	return prev_ctx;
}
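
/*
 * exception_exit() restores the state saved by exception_enter(): if the
 * exception interrupted a non-kernel context, re-enter it.
 */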
static inline void exception_exit(enum ctx_state prev_ctx)
{
	if (!IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) &&
	    context_tracking_enabled()) {
		if (prev_ctx != CONTEXT_KERNEL)
			ct_user_enter(prev_ctx);
	}
}
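
/*
 * context_tracking_guest_enter()/context_tracking_guest_exit() mark the CPU
 * as entering or leaving guest mode.  The enter helper returns whether
 * context tracking is active on this CPU.
 */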
static __always_inline bool context_tracking_guest_enter(void)
{
	if (context_tracking_enabled())
		__ct_user_enter(CONTEXT_GUEST);

	return context_tracking_enabled_this_cpu();
}

static __always_inline void context_tracking_guest_exit(void)
{
	if (context_tracking_enabled())
		__ct_user_exit(CONTEXT_GUEST);
}
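
/* Warn about @cond only when context tracking is enabled. */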
#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))

#else
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline void user_enter_irqoff(void) { }
static inline void user_exit_irqoff(void) { }
static inline int exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
static inline int ct_state(void) { return -1; }
static inline int __ct_state(void) { return -1; }
static __always_inline bool context_tracking_guest_enter(void) { return false; }
static inline void context_tracking_guest_exit(void) { }
#define CT_WARN_ON(cond) do { } while (0)
#endif /* !CONFIG_CONTEXT_TRACKING_USER */

#ifdef CONFIG_CONTEXT_TRACKING_USER_FORCE
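/*
 * context_tracking_init() sets up user context tracking at boot when
 * CONFIG_CONTEXT_TRACKING_USER_FORCE is set; otherwise it is a stub.
 */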
extern void context_tracking_init(void);
#else
static inline void context_tracking_init(void) { }
#endif /* CONFIG_CONTEXT_TRACKING_USER_FORCE */

#ifdef CONFIG_CONTEXT_TRACKING_IDLE
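/*
 * ct_idle_enter()/ct_idle_exit() inform the context tracking core (and thus
 * RCU) that the CPU is entering or leaving the idle loop.
 */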
extern void ct_idle_enter(void);
extern void ct_idle_exit(void);

/*
 * Is the current CPU in an extended quiescent state?
 *
 * No ordering, as we are sampling CPU-local information.
 */
static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
{
	return !(arch_atomic_read(this_cpu_ptr(&context_tracking.state)) & RCU_DYNTICKS_IDX);
}

/*
 * Increment the current CPU's context_tracking structure's ->state field
 * with ordering.  Return the new value.
 */
static __always_inline unsigned long ct_state_inc(int incby)
{
	return arch_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state));
}
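
/*
 * warn_rcu_enter()/warn_rcu_exit() bracket warning code that may run while
 * the CPU is in an RCU extended quiescent state: if so, temporarily flip the
 * state so RCU is watching while the warning is reported, then restore it in
 * warn_rcu_exit().
 */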
static __always_inline bool warn_rcu_enter(void)
{
	bool ret = false;

	/*
	 * Horrible hack to shut up recursive "RCU isn't watching" failures,
	 * since lots of the actual reporting also relies on RCU.
	 */
	preempt_disable_notrace();
	if (rcu_dynticks_curr_cpu_in_eqs()) {
		ret = true;
		ct_state_inc(RCU_DYNTICKS_IDX);
	}

	return ret;
}
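
/* Undo the state flip done by warn_rcu_enter() and re-enable preemption. */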
static __always_inline void warn_rcu_exit(bool rcu)
{
	if (rcu)
		ct_state_inc(RCU_DYNTICKS_IDX);
	preempt_enable_notrace();
}

#else
static inline void ct_idle_enter(void) { }
static inline void ct_idle_exit(void) { }

static __always_inline bool warn_rcu_enter(void) { return false; }
static __always_inline void warn_rcu_exit(bool rcu) { }
#endif /* !CONFIG_CONTEXT_TRACKING_IDLE */

#endif /* _LINUX_CONTEXT_TRACKING_H */