/* trace_recursion.h — Linux kernel header (scraped copy; viewer line numbers removed) */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef _LINUX_TRACE_RECURSION_H
  3. #define _LINUX_TRACE_RECURSION_H
  4. #include <linux/interrupt.h>
  5. #include <linux/sched.h>
  6. #ifdef CONFIG_TRACING
  7. /* Only current can touch trace_recursion */
  8. /*
  9. * For function tracing recursion:
  10. * The order of these bits are important.
  11. *
  12. * When function tracing occurs, the following steps are made:
  13. * If arch does not support a ftrace feature:
  14. * call internal function (uses INTERNAL bits) which calls...
  15. * The function callback, which can use the FTRACE bits to
  16. * check for recursion.
  17. */
enum {
	/*
	 * Function recursion bits: one bit per execution context
	 * (NMI, IRQ, SOFTIRQ, NORMAL — see the TRACE_CTX_* enum below),
	 * used by function callbacks via ftrace_test_recursion_trylock().
	 */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,
	/*
	 * Lets a single nested event through when an interrupt arrives
	 * before preempt_count reflects the new context (see
	 * trace_test_and_set_recursion()).
	 */
	TRACE_FTRACE_TRANSITION_BIT,

	/* Internal use recursion bits (same per-context layout as above) */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,
	TRACE_INTERNAL_TRANSITION_BIT,

	TRACE_BRANCH_BIT,
	/*
	 * Abuse of the trace_recursion.
	 * As we need a way to maintain state if we are tracing the function
	 * graph in irq because we want to trace a particular function that
	 * was called in irq context but we have irq tracing off. Since this
	 * can only be modified by current, we can reuse trace_recursion.
	 */
	TRACE_IRQ_BIT,

	/* Set if the function is in the set_graph_function file */
	TRACE_GRAPH_BIT,

	/*
	 * In the very unlikely case that an interrupt came in
	 * at a start of graph tracing, and we want to trace
	 * the function in that interrupt, the depth can be greater
	 * than zero, because of the preempted start of a previous
	 * trace. In an even more unlikely case, depth could be 2
	 * if a softirq interrupted the start of graph tracing,
	 * followed by an interrupt preempting a start of graph
	 * tracing in the softirq, and depth can even be 3
	 * if an NMI came in at the start of an interrupt function
	 * that preempted a softirq start of a function that
	 * preempted normal context!!!! Luckily, it can't be
	 * greater than 3, so the next two bits are a mask
	 * of what the depth is when we set TRACE_GRAPH_BIT.
	 */
	TRACE_GRAPH_DEPTH_START_BIT,
	TRACE_GRAPH_DEPTH_END_BIT,

	/*
	 * To implement set_graph_notrace, if this bit is set, we ignore
	 * function graph tracing of called functions, until the return
	 * function is called to clear it.
	 */
	TRACE_GRAPH_NOTRACE_BIT,

	/* Used to prevent recursion recording from recursing. */
	TRACE_RECORD_RECURSION_BIT,
};
/* Only current can touch trace_recursion, so no atomics are needed */
#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))

/*
 * Read the graph-trace start depth (0-3) stored in the two bits at
 * TRACE_GRAPH_DEPTH_START_BIT.
 */
#define trace_recursion_depth() \
	(((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)

/*
 * Store the graph-trace start depth. The value is masked with 3: per the
 * comment on TRACE_GRAPH_DEPTH_START_BIT, the depth cannot exceed 3.
 */
#define trace_recursion_set_depth(depth) \
	do {								\
		current->trace_recursion &=				\
			~(3 << TRACE_GRAPH_DEPTH_START_BIT);		\
		current->trace_recursion |=				\
			((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT;	\
	} while (0)
/* Number of context bits reserved per recursion group */
#define TRACE_CONTEXT_BITS	4

/* First bit of the function-callback recursion group */
#define TRACE_FTRACE_START	TRACE_FTRACE_BIT

/* First bit of the internal recursion group */
#define TRACE_LIST_START	TRACE_INTERNAL_BIT

/* Mask covering both recursion groups' context bits */
#define TRACE_CONTEXT_MASK	((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
/*
 * Used for setting context
 *  NMI     = 0
 *  IRQ     = 1
 *  SOFTIRQ = 2
 *  NORMAL  = 3
 *
 * These are offsets into a recursion group (added to TRACE_FTRACE_START
 * or TRACE_LIST_START to select the actual trace_recursion bit).
 */
enum {
	TRACE_CTX_NMI,
	TRACE_CTX_IRQ,
	TRACE_CTX_SOFTIRQ,
	TRACE_CTX_NORMAL,
	TRACE_CTX_TRANSITION,
};
  98. static __always_inline int trace_get_context_bit(void)
  99. {
  100. unsigned char bit = interrupt_context_level();
  101. return TRACE_CTX_NORMAL - bit;
  102. }
#ifdef CONFIG_FTRACE_RECORD_RECURSION
extern void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip);

/*
 * Record a detected recursion. TRACE_RECORD_RECURSION_BIT guards the call
 * so the recording path itself cannot re-enter this macro.
 */
# define do_ftrace_record_recursion(ip, pip)				\
	do {								\
		if (!trace_recursion_test(TRACE_RECORD_RECURSION_BIT)) { \
			trace_recursion_set(TRACE_RECORD_RECURSION_BIT); \
			ftrace_record_recursion(ip, pip);		\
			trace_recursion_clear(TRACE_RECORD_RECURSION_BIT); \
		}							\
	} while (0)
#else
/* Recursion recording compiled out: no-op */
# define do_ftrace_record_recursion(ip, pip)	do { } while (0)
#endif
/*
 * Preemption is promised to be disabled when return bit >= 0.
 *
 * Returns the bit that was set (to be passed to trace_clear_recursion()),
 * or -1 if the same context already holds its recursion bit and the
 * transition bit is also taken (i.e. a real recursion).
 */
static __always_inline int trace_test_and_set_recursion(unsigned long ip, unsigned long pip,
							int start)
{
	unsigned int val = READ_ONCE(current->trace_recursion);
	int bit;

	/* Pick the bit for the current context (NMI/IRQ/SOFTIRQ/NORMAL) */
	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit))) {
		/*
		 * If an interrupt occurs during a trace, and another trace
		 * happens in that interrupt but before the preempt_count is
		 * updated to reflect the new interrupt context, then this
		 * will think a recursion occurred, and the event will be dropped.
		 * Let a single instance happen via the TRANSITION_BIT to
		 * not drop those events.
		 */
		bit = TRACE_CTX_TRANSITION + start;
		if (val & (1 << bit)) {
			/* Genuine recursion: record it (if enabled) and bail */
			do_ftrace_record_recursion(ip, pip);
			return -1;
		}
	}
	/* Only current modifies trace_recursion, so a plain store suffices */
	val |= 1 << bit;
	current->trace_recursion = val;

	/* Compiler barrier: the bit must be set before the traced code runs */
	barrier();

	preempt_disable_notrace();

	return bit;
}
/*
 * Preemption will be enabled (if it was previously enabled).
 *
 * @bit: the value returned by a successful trace_test_and_set_recursion().
 */
static __always_inline void trace_clear_recursion(int bit)
{
	preempt_enable_notrace();

	/* Compiler barrier: keep the bit held until the callback is done */
	barrier();
	trace_recursion_clear(bit);
}
  155. /**
  156. * ftrace_test_recursion_trylock - tests for recursion in same context
  157. *
  158. * Use this for ftrace callbacks. This will detect if the function
  159. * tracing recursed in the same context (normal vs interrupt),
  160. *
  161. * Returns: -1 if a recursion happened.
  162. * >= 0 if no recursion.
  163. */
  164. static __always_inline int ftrace_test_recursion_trylock(unsigned long ip,
  165. unsigned long parent_ip)
  166. {
  167. return trace_test_and_set_recursion(ip, parent_ip, TRACE_FTRACE_START);
  168. }
/**
 * ftrace_test_recursion_unlock - called when function callback is complete
 * @bit: The return of a successful ftrace_test_recursion_trylock()
 *
 * This is used at the end of a ftrace callback.
 */
static __always_inline void ftrace_test_recursion_unlock(int bit)
{
	trace_clear_recursion(bit);
}
  179. #endif /* CONFIG_TRACING */
  180. #endif /* _LINUX_TRACE_RECURSION_H */