/* rtmutex_common.h */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. /*
  3. * RT Mutexes: blocking mutual exclusion locks with PI support
  4. *
  5. * started by Ingo Molnar and Thomas Gleixner:
  6. *
  7. * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <[email protected]>
  8. * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <[email protected]>
  9. *
  10. * This file contains the private data structure and API definitions.
  11. */
  12. #ifndef __KERNEL_RTMUTEX_COMMON_H
  13. #define __KERNEL_RTMUTEX_COMMON_H
  14. #include <linux/debug_locks.h>
  15. #include <linux/rtmutex.h>
  16. #include <linux/sched/wake_q.h>
/*
 * This is the control structure for tasks blocked on a rt_mutex,
 * which is allocated on the kernel stack of the blocked task.
 *
 * @tree_entry:		pi node to enqueue into the mutex waiters tree
 * @pi_tree_entry:	pi node to enqueue into the mutex owner waiters tree
 * @task:		task reference to the blocked task
 * @lock:		Pointer to the rt_mutex on which the waiter blocks
 * @wake_state:		Wakeup state to use (TASK_NORMAL or TASK_RTLOCK_WAIT)
 * @prio:		Priority of the waiter
 * @deadline:		Deadline of the waiter if applicable
 * @ww_ctx:		WW context pointer
 */
struct rt_mutex_waiter {
	struct rb_node		tree_entry;
	struct rb_node		pi_tree_entry;
	struct task_struct	*task;
	struct rt_mutex_base	*lock;
	unsigned int		wake_state;
	int			prio;
	u64			deadline;
	struct ww_acquire_ctx	*ww_ctx;
};
/**
 * rt_wake_q_head - Wrapper around regular wake_q_head to support
 *		    "sleeping" spinlocks on RT
 * @head:		The regular wake_q_head for sleeping lock variants
 * @rtlock_task:	Task pointer for RT lock (spin/rwlock) wakeups
 */
struct rt_wake_q_head {
	struct wake_q_head	head;
	struct task_struct	*rtlock_task;
};
/* Declare and initialize a struct rt_wake_q_head on the stack. */
#define DEFINE_RT_WAKE_Q(name)						\
	struct rt_wake_q_head name = {					\
		.head		= WAKE_Q_HEAD_INITIALIZER(name.head),	\
		.rtlock_task	= NULL,					\
	}
/*
 * PI-futex support (proxy locking functions, etc.):
 *
 * The proxy functions acquire/release a lock on behalf of another task;
 * the futex variants are the rtmutex primitives used by the PI-futex code.
 */
extern void rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
				       struct task_struct *proxy_owner);
extern void rt_mutex_proxy_unlock(struct rt_mutex_base *lock);
extern int __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
				       struct rt_mutex_waiter *waiter,
				       struct task_struct *task);
extern int rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
				     struct rt_mutex_waiter *waiter,
				     struct task_struct *task);
extern int rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
				    struct hrtimer_sleeper *to,
				    struct rt_mutex_waiter *waiter);
extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
					struct rt_mutex_waiter *waiter);
extern int rt_mutex_futex_trylock(struct rt_mutex_base *l);
extern int __rt_mutex_futex_trylock(struct rt_mutex_base *l);
extern void rt_mutex_futex_unlock(struct rt_mutex_base *lock);
extern bool __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
				    struct rt_wake_q_head *wqh);
extern void rt_mutex_postunlock(struct rt_wake_q_head *wqh);
  78. /*
  79. * Must be guarded because this header is included from rcu/tree_plugin.h
  80. * unconditionally.
  81. */
  82. #ifdef CONFIG_RT_MUTEXES
  83. static inline int rt_mutex_has_waiters(struct rt_mutex_base *lock)
  84. {
  85. return !RB_EMPTY_ROOT(&lock->waiters.rb_root);
  86. }
  87. /*
  88. * Lockless speculative check whether @waiter is still the top waiter on
  89. * @lock. This is solely comparing pointers and not derefencing the
  90. * leftmost entry which might be about to vanish.
  91. */
  92. static inline bool rt_mutex_waiter_is_top_waiter(struct rt_mutex_base *lock,
  93. struct rt_mutex_waiter *waiter)
  94. {
  95. struct rb_node *leftmost = rb_first_cached(&lock->waiters);
  96. return rb_entry(leftmost, struct rt_mutex_waiter, tree_entry) == waiter;
  97. }
  98. static inline struct rt_mutex_waiter *rt_mutex_top_waiter(struct rt_mutex_base *lock)
  99. {
  100. struct rb_node *leftmost = rb_first_cached(&lock->waiters);
  101. struct rt_mutex_waiter *w = NULL;
  102. if (leftmost) {
  103. w = rb_entry(leftmost, struct rt_mutex_waiter, tree_entry);
  104. BUG_ON(w->lock != lock);
  105. }
  106. return w;
  107. }
  108. static inline int task_has_pi_waiters(struct task_struct *p)
  109. {
  110. return !RB_EMPTY_ROOT(&p->pi_waiters.rb_root);
  111. }
  112. static inline struct rt_mutex_waiter *task_top_pi_waiter(struct task_struct *p)
  113. {
  114. return rb_entry(p->pi_waiters.rb_leftmost, struct rt_mutex_waiter,
  115. pi_tree_entry);
  116. }
  117. #define RT_MUTEX_HAS_WAITERS 1UL
  118. static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
  119. {
  120. unsigned long owner = (unsigned long) READ_ONCE(lock->owner);
  121. return (struct task_struct *) (owner & ~RT_MUTEX_HAS_WAITERS);
  122. }
/*
 * Constants for rt mutex functions which have a selectable deadlock
 * detection.
 *
 * RT_MUTEX_MIN_CHAINWALK:	Stops the lock chain walk when there are
 *				no further PI adjustments to be made.
 *
 * RT_MUTEX_FULL_CHAINWALK:	Invoke deadlock detection with a full
 *				walk of the lock chain.
 */
enum rtmutex_chainwalk {
	RT_MUTEX_MIN_CHAINWALK,
	RT_MUTEX_FULL_CHAINWALK,
};
  137. static inline void __rt_mutex_base_init(struct rt_mutex_base *lock)
  138. {
  139. raw_spin_lock_init(&lock->wait_lock);
  140. lock->waiters = RB_ROOT_CACHED;
  141. lock->owner = NULL;
  142. }
  143. /* Debug functions */
  144. static inline void debug_rt_mutex_unlock(struct rt_mutex_base *lock)
  145. {
  146. if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
  147. DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current);
  148. }
  149. static inline void debug_rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
  150. {
  151. if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
  152. DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock));
  153. }
  154. static inline void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
  155. {
  156. if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
  157. memset(waiter, 0x11, sizeof(*waiter));
  158. }
  159. static inline void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
  160. {
  161. if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
  162. memset(waiter, 0x22, sizeof(*waiter));
  163. }
  164. static inline void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
  165. {
  166. debug_rt_mutex_init_waiter(waiter);
  167. RB_CLEAR_NODE(&waiter->pi_tree_entry);
  168. RB_CLEAR_NODE(&waiter->tree_entry);
  169. waiter->wake_state = TASK_NORMAL;
  170. waiter->task = NULL;
  171. }
/* Like rt_mutex_init_waiter(), but wakeups use the TASK_RTLOCK_WAIT state. */
static inline void rt_mutex_init_rtlock_waiter(struct rt_mutex_waiter *waiter)
{
	rt_mutex_init_waiter(waiter);
	waiter->wake_state = TASK_RTLOCK_WAIT;
}
#else /* CONFIG_RT_MUTEXES */
/* Used in rcu/tree_plugin.h */
/* !CONFIG_RT_MUTEXES stub: locks never report an owner. */
static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
{
	return NULL;
}
#endif /* !CONFIG_RT_MUTEXES */
  184. #endif