/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion, adapted for tracing.
 *
 * Copyright (C) 2020 Paul E. McKenney.
 */

#ifndef __LINUX_RCUPDATE_TRACE_H
#define __LINUX_RCUPDATE_TRACE_H

#include <linux/sched.h>
#include <linux/rcupdate.h>

extern struct lockdep_map rcu_trace_lock_map;

#ifdef CONFIG_DEBUG_LOCK_ALLOC

static inline int rcu_read_lock_trace_held(void)
{
	return lock_is_held(&rcu_trace_lock_map);
}

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

static inline int rcu_read_lock_trace_held(void)
{
	return 1;
}

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
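
/*
 * Example (an illustrative sketch, not part of this header's API): code
 * that must run within an RCU-trace reader can assert that via lockdep,
 * for instance:
 *
 *	RCU_LOCKDEP_WARN(!rcu_read_lock_trace_held(),
 *			 "expected rcu_read_lock_trace() to be held");
 *
 * Without CONFIG_DEBUG_LOCK_ALLOC this check is a no-op, because
 * rcu_read_lock_trace_held() then unconditionally returns 1.
 */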

#ifdef CONFIG_TASKS_TRACE_RCU

void rcu_read_unlock_trace_special(struct task_struct *t);

/**
 * rcu_read_lock_trace - mark beginning of RCU-trace read-side critical section
 *
 * When synchronize_rcu_tasks_trace() is invoked by one task, then that
 * task is guaranteed to block until all other tasks exit their read-side
 * critical sections.  Similarly, if call_rcu_tasks_trace() is invoked on one
 * task while other tasks are within RCU read-side critical sections,
 * invocation of the corresponding RCU callback is deferred until after
 * all the other tasks exit their critical sections.
 *
 * For more details, please see the documentation for rcu_read_lock().
 */
static inline void rcu_read_lock_trace(void)
{
	struct task_struct *t = current;

	WRITE_ONCE(t->trc_reader_nesting, READ_ONCE(t->trc_reader_nesting) + 1);
	barrier();
	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
	    t->trc_reader_special.b.need_mb)
		smp_mb(); // Pairs with update-side barriers
	rcu_lock_acquire(&rcu_trace_lock_map);
}

/**
 * rcu_read_unlock_trace - mark end of RCU-trace read-side critical section
 *
 * Pairs with a preceding call to rcu_read_lock_trace(), and nesting is
 * allowed.  Invoking rcu_read_unlock_trace() when there is no matching
 * rcu_read_lock_trace() is verboten, and will result in lockdep complaints.
 *
 * For more details, please see the documentation for rcu_read_unlock().
 */
static inline void rcu_read_unlock_trace(void)
{
	int nesting;
	struct task_struct *t = current;

	rcu_lock_release(&rcu_trace_lock_map);
	nesting = READ_ONCE(t->trc_reader_nesting) - 1;
	barrier(); // Critical section before disabling.
	// Disable IPI-based setting of .need_qs.
	WRITE_ONCE(t->trc_reader_nesting, INT_MIN + nesting);
	if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) {
		WRITE_ONCE(t->trc_reader_nesting, nesting);
		return; // We assume shallow reader nesting.
	}
	WARN_ON_ONCE(nesting != 0);
	rcu_read_unlock_trace_special(t);
}
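
/*
 * Example reader (an illustrative sketch, not part of this header; the
 * pointer "gp_ptr" and the do_something_with() helper are hypothetical):
 *
 *	rcu_read_lock_trace();
 *	p = rcu_dereference(gp_ptr);
 *	if (p)
 *		do_something_with(p);
 *	rcu_read_unlock_trace();
 *
 * Any object reached via gp_ptr within this critical section is guaranteed
 * not to be freed by the update-side primitives declared below until the
 * reader reaches rcu_read_unlock_trace().
 */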

void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
void synchronize_rcu_tasks_trace(void);
void rcu_barrier_tasks_trace(void);
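
/*
 * Example updater (an illustrative sketch, not part of this header;
 * "gp_ptr", "gp_lock", and the "old"/"newp" objects are hypothetical):
 * after unpublishing the old object, wait for all pre-existing RCU-trace
 * readers before freeing it:
 *
 *	old = rcu_dereference_protected(gp_ptr, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp_ptr, newp);
 *	synchronize_rcu_tasks_trace();
 *	kfree(old);
 *
 * call_rcu_tasks_trace() may be used instead to defer the free to a
 * callback, and rcu_barrier_tasks_trace() waits for all such pending
 * callbacks to finish, for example before module unload.
 */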

#else

/*
 * The BPF JIT forms these addresses even when it doesn't call these
 * functions, so provide definitions that result in runtime errors.
 */
static inline void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func) { BUG(); }
static inline void rcu_read_lock_trace(void) { BUG(); }
static inline void rcu_read_unlock_trace(void) { BUG(); }

#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */

#endif /* __LINUX_RCUPDATE_TRACE_H */