context_tracking_state.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CONTEXT_TRACKING_STATE_H
#define _LINUX_CONTEXT_TRACKING_STATE_H

#include <linux/percpu.h>
#include <linux/static_key.h>
#include <linux/context_tracking_irq.h>

/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
#define DYNTICK_IRQ_NONIDLE ((LONG_MAX / 2) + 1)

enum ctx_state {
        CONTEXT_DISABLED = -1,  /* returned by ct_state() if unknown */
        CONTEXT_KERNEL   = 0,
        CONTEXT_IDLE     = 1,
        CONTEXT_USER     = 2,
        CONTEXT_GUEST    = 3,
        CONTEXT_MAX      = 4,
};

/* Even value for idle, else odd. */
#define RCU_DYNTICKS_IDX CONTEXT_MAX

#define CT_STATE_MASK (CONTEXT_MAX - 1)
#define CT_DYNTICKS_MASK (~CT_STATE_MASK)
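
/*
 * Layout of the state word (illustrative): the low log2(CONTEXT_MAX) bits
 * hold the current ctx_state, and the remaining upper bits form the RCU
 * dynticks counter, which is always advanced in steps of RCU_DYNTICKS_IDX
 * so the two fields never interfere. For example (illustrative values):
 *
 *      int s = (2 * RCU_DYNTICKS_IDX) | CONTEXT_USER;
 *
 *      s & CT_STATE_MASK;      -> CONTEXT_USER
 *      s & CT_DYNTICKS_MASK;   -> 2 * RCU_DYNTICKS_IDX
 */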

struct context_tracking {
#ifdef CONFIG_CONTEXT_TRACKING_USER
        /*
         * When active is false, probes are unset in order
         * to minimize overhead: TIF flags are cleared
         * and calls to user_enter/exit are ignored. This
         * may be further optimized using static keys.
         */
        bool active;
        int recursion;  /* Guards against recursive enter/exit calls. */
#endif
#ifdef CONFIG_CONTEXT_TRACKING
        atomic_t state;
#endif
#ifdef CONFIG_CONTEXT_TRACKING_IDLE
        long dynticks_nesting;          /* Track process nesting level. */
        long dynticks_nmi_nesting;      /* Track irq/NMI nesting level. */
#endif
};

#ifdef CONFIG_CONTEXT_TRACKING
DECLARE_PER_CPU(struct context_tracking, context_tracking);
#endif
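
/* The per-CPU instance itself is defined in kernel/context_tracking.c. */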

#ifdef CONFIG_CONTEXT_TRACKING_USER
/*
 * Raw read of the current CPU's state bits. arch_atomic_read() is the
 * non-instrumented variant, which keeps this usable from noinstr code;
 * callers must keep the task on this CPU (ct_state() below does so with
 * preempt_disable()).
 */
static __always_inline int __ct_state(void)
{
        return arch_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_STATE_MASK;
}
#endif

#ifdef CONFIG_CONTEXT_TRACKING_IDLE
static __always_inline int ct_dynticks(void)
{
        return atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_DYNTICKS_MASK;
}

static __always_inline int ct_dynticks_cpu(int cpu)
{
        struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);

        return atomic_read(&ct->state) & CT_DYNTICKS_MASK;
}
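
/*
 * As ct_dynticks_cpu(), but with acquire ordering: remote readers (such as
 * the RCU grace-period machinery) are guaranteed that accesses issued after
 * this read are not reordered before it.
 */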
static __always_inline int ct_dynticks_cpu_acquire(int cpu)
{
        struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);

        return atomic_read_acquire(&ct->state) & CT_DYNTICKS_MASK;
}

static __always_inline long ct_dynticks_nesting(void)
{
        return __this_cpu_read(context_tracking.dynticks_nesting);
}

static __always_inline long ct_dynticks_nesting_cpu(int cpu)
{
        struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);

        return ct->dynticks_nesting;
}

static __always_inline long ct_dynticks_nmi_nesting(void)
{
        return __this_cpu_read(context_tracking.dynticks_nmi_nesting);
}

static __always_inline long ct_dynticks_nmi_nesting_cpu(int cpu)
{
        struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);

        return ct->dynticks_nmi_nesting;
}
#endif /* #ifdef CONFIG_CONTEXT_TRACKING_IDLE */
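
/*
 * Illustrative caller, simplified from rcu_dynticks_in_eqs() in
 * kernel/rcu/tree.c (not part of this header): RCU treats a CPU as being
 * in an extended quiescent state when its snapshotted dynticks count is
 * even in units of RCU_DYNTICKS_IDX:
 *
 *      static bool cpu_in_eqs(int cpu)
 *      {
 *              return !(ct_dynticks_cpu_acquire(cpu) & RCU_DYNTICKS_IDX);
 *      }
 */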

#ifdef CONFIG_CONTEXT_TRACKING_USER
extern struct static_key_false context_tracking_key;

static __always_inline bool context_tracking_enabled(void)
{
        return static_branch_unlikely(&context_tracking_key);
}
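
/*
 * context_tracking_key is flipped on from kernel/context_tracking.c once
 * user context tracking is activated on at least one CPU (e.g. via
 * nohz_full=), so the common disabled case costs only a patched-out branch.
 */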

static __always_inline bool context_tracking_enabled_cpu(int cpu)
{
        return context_tracking_enabled() && per_cpu(context_tracking.active, cpu);
}

static inline bool context_tracking_enabled_this_cpu(void)
{
        return context_tracking_enabled() && __this_cpu_read(context_tracking.active);
}

/**
 * ct_state() - return the current context tracking state if known
 *
 * Returns the current CPU's context tracking state if context tracking
 * is enabled. If context tracking is disabled, returns
 * CONTEXT_DISABLED. This should be used primarily for debugging.
 */
static __always_inline int ct_state(void)
{
        int ret;

        if (!context_tracking_enabled())
                return CONTEXT_DISABLED;

        preempt_disable();
        ret = __ct_state();
        preempt_enable();

        return ret;
}
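
/*
 * Example (illustrative): sanity-checking that a code path is running in
 * kernel context, in the spirit of the CT_WARN_ON() helper from
 * <linux/context_tracking.h>:
 *
 *      CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
 */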
#else
static __always_inline bool context_tracking_enabled(void) { return false; }
static __always_inline bool context_tracking_enabled_cpu(int cpu) { return false; }
static __always_inline bool context_tracking_enabled_this_cpu(void) { return false; }
#endif /* CONFIG_CONTEXT_TRACKING_USER */

#endif