ww_mutex.h

/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef WW_RT

#define MUTEX		mutex
#define MUTEX_WAITER	mutex_waiter
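
/*
 * The helpers below give the ww_mutex code a single way to walk and modify
 * the set of waiters, independent of the underlying lock type: in the
 * !WW_RT case that set is the plain mutex wait_list, in the WW_RT case it
 * is the rtmutex waiter rb-tree further down.
 */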
static inline struct mutex_waiter *
__ww_waiter_first(struct mutex *lock)
{
	struct mutex_waiter *w;

	w = list_first_entry(&lock->wait_list, struct mutex_waiter, list);
	if (list_entry_is_head(w, &lock->wait_list, list))
		return NULL;

	return w;
}

static inline struct mutex_waiter *
__ww_waiter_next(struct mutex *lock, struct mutex_waiter *w)
{
	w = list_next_entry(w, list);
	if (list_entry_is_head(w, &lock->wait_list, list))
		return NULL;

	return w;
}

static inline struct mutex_waiter *
__ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w)
{
	w = list_prev_entry(w, list);
	if (list_entry_is_head(w, &lock->wait_list, list))
		return NULL;

	return w;
}

static inline struct mutex_waiter *
__ww_waiter_last(struct mutex *lock)
{
	struct mutex_waiter *w;

	w = list_last_entry(&lock->wait_list, struct mutex_waiter, list);
	if (list_entry_is_head(w, &lock->wait_list, list))
		return NULL;

	return w;
}
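
/*
 * Queue @waiter on @lock's wait_list: immediately in front of @pos when a
 * position is given, otherwise at the tail of the list (list_add_tail()
 * semantics of __mutex_add_waiter()).
 */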
static inline void
__ww_waiter_add(struct mutex *lock, struct mutex_waiter *waiter, struct mutex_waiter *pos)
{
	struct list_head *p = &lock->wait_list;

	if (pos)
		p = &pos->list;

	__mutex_add_waiter(lock, waiter, p);
}

static inline struct task_struct *
__ww_mutex_owner(struct mutex *lock)
{
	return __mutex_owner(lock);
}

static inline bool
__ww_mutex_has_waiters(struct mutex *lock)
{
	return atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS;
}

static inline void lock_wait_lock(struct mutex *lock)
{
	raw_spin_lock(&lock->wait_lock);
}

static inline void unlock_wait_lock(struct mutex *lock)
{
	raw_spin_unlock(&lock->wait_lock);
}

static inline void lockdep_assert_wait_lock_held(struct mutex *lock)
{
	lockdep_assert_held(&lock->wait_lock);
}

#else /* WW_RT */

#define MUTEX		rt_mutex
#define MUTEX_WAITER	rt_mutex_waiter
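
/*
 * The WW_RT variants of the same helpers: here the waiters live in the
 * rtmutex rb-tree (kept ordered by the rtmutex code itself), so iteration
 * uses rb_first()/rb_next() and friends instead of list walking.
 */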
static inline struct rt_mutex_waiter *
__ww_waiter_first(struct rt_mutex *lock)
{
	struct rb_node *n = rb_first(&lock->rtmutex.waiters.rb_root);
	if (!n)
		return NULL;
	return rb_entry(n, struct rt_mutex_waiter, tree_entry);
}

static inline struct rt_mutex_waiter *
__ww_waiter_next(struct rt_mutex *lock, struct rt_mutex_waiter *w)
{
	struct rb_node *n = rb_next(&w->tree_entry);
	if (!n)
		return NULL;
	return rb_entry(n, struct rt_mutex_waiter, tree_entry);
}

static inline struct rt_mutex_waiter *
__ww_waiter_prev(struct rt_mutex *lock, struct rt_mutex_waiter *w)
{
	struct rb_node *n = rb_prev(&w->tree_entry);
	if (!n)
		return NULL;
	return rb_entry(n, struct rt_mutex_waiter, tree_entry);
}

static inline struct rt_mutex_waiter *
__ww_waiter_last(struct rt_mutex *lock)
{
	struct rb_node *n = rb_last(&lock->rtmutex.waiters.rb_root);
	if (!n)
		return NULL;
	return rb_entry(n, struct rt_mutex_waiter, tree_entry);
}

static inline void
__ww_waiter_add(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, struct rt_mutex_waiter *pos)
{
	/* RT unconditionally adds the waiter first and then removes it on error */
}

static inline struct task_struct *
__ww_mutex_owner(struct rt_mutex *lock)
{
	return rt_mutex_owner(&lock->rtmutex);
}

static inline bool
__ww_mutex_has_waiters(struct rt_mutex *lock)
{
	return rt_mutex_has_waiters(&lock->rtmutex);
}

static inline void lock_wait_lock(struct rt_mutex *lock)
{
	raw_spin_lock(&lock->rtmutex.wait_lock);
}

static inline void unlock_wait_lock(struct rt_mutex *lock)
{
	raw_spin_unlock(&lock->rtmutex.wait_lock);
}

static inline void lockdep_assert_wait_lock_held(struct rt_mutex *lock)
{
	lockdep_assert_held(&lock->rtmutex.wait_lock);
}

#endif /* WW_RT */

/*
 * Wait-Die:
 *   A newer transaction is killed when:
 *     it (the new transaction) makes a request for a lock being held
 *     by an older transaction.
 *
 * Wound-Wait:
 *   A newer transaction is wounded when:
 *     an older transaction makes a request for a lock being held by
 *     the newer transaction.
 */
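
/*
 * Caller-side sketch (not part of this file) of how the two algorithms
 * above surface through the public ww_mutex API: the losing ("killed" or
 * "wounded") context gets -EDEADLK, releases everything it already holds,
 * waits for the contended lock with ww_mutex_lock_slow() and then retries.
 * The class name and the fixed two-lock shape below are illustrative only;
 * a real caller may have to loop until no more -EDEADLK is returned.
 *
 *	static DEFINE_WW_CLASS(my_ww_class);
 *
 *	static int lock_both(struct ww_mutex *a, struct ww_mutex *b)
 *	{
 *		struct ww_acquire_ctx ctx;
 *		int err;
 *
 *		ww_acquire_init(&ctx, &my_ww_class);
 *
 *		err = ww_mutex_lock(a, &ctx);
 *		if (!err) {
 *			err = ww_mutex_lock(b, &ctx);
 *			if (err == -EDEADLK) {
 *				ww_mutex_unlock(a);
 *				ww_mutex_lock_slow(b, &ctx);
 *				err = ww_mutex_lock(a, &ctx);
 *			}
 *		}
 *		if (!err)
 *			ww_acquire_done(&ctx);
 *
 *		return err;
 *	}
 *
 * The caller is expected to unlock both locks with ww_mutex_unlock() and
 * call ww_acquire_fini(&ctx) when done with them; unwinding of partially
 * held locks on error is omitted here for brevity.
 */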

/*
 * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
 * it.
 */
static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
#ifdef DEBUG_WW_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
	ww->ctx = ww_ctx;
}

/*
 * Determine if @a is 'less' than @b. IOW, either @a is a lower priority task
 * or, when of equal priority, a younger transaction than @b.
 *
 * Depending on the algorithm, @a will either need to wait for @b, or die.
 */
static inline bool
__ww_ctx_less(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
/*
 * Can only do the RT prio for WW_RT, because task->prio isn't stable due to PI,
 * so the wait_list ordering will go wobbly. rt_mutex re-queues the waiter and
 * isn't affected by this.
 */
#ifdef WW_RT
	/* kernel prio; less is more */
	int a_prio = a->task->prio;
	int b_prio = b->task->prio;

	if (rt_prio(a_prio) || rt_prio(b_prio)) {

		if (a_prio > b_prio)
			return true;

		if (a_prio < b_prio)
			return false;

		/* equal static prio */

		if (dl_prio(a_prio)) {
			if (dl_time_before(b->task->dl.deadline,
					   a->task->dl.deadline))
				return true;

			if (dl_time_before(a->task->dl.deadline,
					   b->task->dl.deadline))
				return false;
		}

		/* equal prio */
	}
#endif

	/* FIFO order tie break -- bigger is younger */
	return (signed long)(a->stamp - b->stamp) > 0;
}
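
/*
 * Note on the stamp tie-break above: the subtraction is done in unsigned
 * arithmetic and then interpreted as signed, so the comparison stays
 * correct across counter wrap-around. E.g. with a->stamp == 1 (taken just
 * after the wrap) and b->stamp == ULONG_MAX, 1 - ULONG_MAX is 2 as a
 * signed long, so @a is still correctly treated as the younger context.
 */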

/*
 * Wait-Die; wake a lesser waiter context (when locks held) such that it can
 * die.
 *
 * Among waiters with context, only the first one can have other locks acquired
 * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
 * __ww_mutex_check_kill() wake any but the earliest context.
 */
static bool
__ww_mutex_die(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
	       struct ww_acquire_ctx *ww_ctx)
{
	if (!ww_ctx->is_wait_die)
		return false;

	if (waiter->ww_ctx->acquired > 0 && __ww_ctx_less(waiter->ww_ctx, ww_ctx)) {
#ifndef WW_RT
		debug_mutex_wake_waiter(lock, waiter);
#endif
		wake_up_process(waiter->task);
	}

	return true;
}

/*
 * Wound-Wait; wound a lesser @hold_ctx if it holds the lock.
 *
 * Wound the lock holder if there are waiters with more important transactions
 * than the lock holder's. Even if multiple waiters may wound the lock holder,
 * it's sufficient that only one does.
 */
static bool __ww_mutex_wound(struct MUTEX *lock,
			     struct ww_acquire_ctx *ww_ctx,
			     struct ww_acquire_ctx *hold_ctx)
{
	struct task_struct *owner = __ww_mutex_owner(lock);

	lockdep_assert_wait_lock_held(lock);

	/*
	 * Possible through __ww_mutex_add_waiter() when we race with
	 * ww_mutex_set_context_fastpath(). In that case we'll get here again
	 * through __ww_mutex_check_waiters().
	 */
	if (!hold_ctx)
		return false;

	/*
	 * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
	 * it cannot go away because we'll have FLAG_WAITERS set and hold
	 * wait_lock.
	 */
	if (!owner)
		return false;

	if (ww_ctx->acquired > 0 && __ww_ctx_less(hold_ctx, ww_ctx)) {
		hold_ctx->wounded = 1;

		/*
		 * wake_up_process() paired with set_current_state()
		 * inserts sufficient barriers to make sure @owner either sees
		 * it's wounded in __ww_mutex_check_kill() or has a
		 * wakeup pending to re-read the wounded state.
		 */
		if (owner != current)
			wake_up_process(owner);

		return true;
	}

	return false;
}

/*
 * We just acquired @lock under @ww_ctx, if there are more important contexts
 * waiting behind us on the wait-list, check if they need to die, or wound us.
 *
 * See __ww_mutex_add_waiter() for the list-order construction; basically the
 * list is ordered by stamp, smallest (oldest) first.
 *
 * This relies on never mixing wait-die/wound-wait on the same wait-list;
 * which is currently ensured by that being a ww_class property.
 *
 * The current task must not be on the wait list.
 */
static void
__ww_mutex_check_waiters(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
{
	struct MUTEX_WAITER *cur;

	lockdep_assert_wait_lock_held(lock);

	for (cur = __ww_waiter_first(lock); cur;
	     cur = __ww_waiter_next(lock, cur)) {

		if (!cur->ww_ctx)
			continue;

		if (__ww_mutex_die(lock, cur, ww_ctx) ||
		    __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
			break;
	}
}

/*
 * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
 * and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_acquired(lock, ctx);

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the WAITERS check is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or they will acquire wait_lock, add themselves
	 * to the waiter list and sleep.
	 */
	smp_mb(); /* See comments above and below. */

	/*
	 * [W] ww->ctx = ctx	      [W] MUTEX_FLAG_WAITERS
	 *     MB		          MB
	 * [R] MUTEX_FLAG_WAITERS     [R] ww->ctx
	 *
	 * The memory barrier above pairs with the memory barrier in
	 * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
	 * and/or !empty list.
	 */
	if (likely(!__ww_mutex_has_waiters(&lock->base)))
		return;

	/*
	 * Uh oh, we raced in fastpath, check if any of the waiters need to
	 * die or wound us.
	 */
	lock_wait_lock(&lock->base);
	__ww_mutex_check_waiters(&lock->base, ctx);
	unlock_wait_lock(&lock->base);
}
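
/*
 * Kill the current acquisition attempt: if @ww_ctx already holds other
 * ww_mutexes (acquired > 0) it must back off, so record the contended lock
 * for the debug code and return -EDEADLK to the caller. A context that
 * holds nothing yet can simply keep waiting, so return 0.
 */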
static __always_inline int
__ww_mutex_kill(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
{
	if (ww_ctx->acquired > 0) {
#ifdef DEBUG_WW_MUTEXES
		struct ww_mutex *ww;

		ww = container_of(lock, struct ww_mutex, base);
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
		ww_ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}

/*
 * Check the wound condition for the current lock acquire.
 *
 * Wound-Wait: If we're wounded, kill ourselves.
 *
 * Wait-Die: If we're trying to acquire a lock already held by an older
 *           context, kill ourselves.
 *
 * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
 * look at waiters before us in the wait-list.
 */
static inline int
__ww_mutex_check_kill(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
		      struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
	struct MUTEX_WAITER *cur;

	if (ctx->acquired == 0)
		return 0;

	if (!ctx->is_wait_die) {
		if (ctx->wounded)
			return __ww_mutex_kill(lock, ctx);

		return 0;
	}

	if (hold_ctx && __ww_ctx_less(ctx, hold_ctx))
		return __ww_mutex_kill(lock, ctx);

	/*
	 * If there is a waiter in front of us that has a context, then its
	 * stamp is earlier than ours and we must kill ourselves.
	 */
	for (cur = __ww_waiter_prev(lock, waiter); cur;
	     cur = __ww_waiter_prev(lock, cur)) {

		if (!cur->ww_ctx)
			continue;

		return __ww_mutex_kill(lock, ctx);
	}

	return 0;
}

/*
 * Add @waiter to the wait-list, keep the wait-list ordered by stamp, smallest
 * first. Such that older contexts are preferred to acquire the lock over
 * younger contexts.
 *
 * Waiters without context are interspersed in FIFO order.
 *
 * Furthermore, for Wait-Die kill ourselves immediately when possible (there
 * are older contexts already waiting) to avoid unnecessary waiting and for
 * Wound-Wait ensure we wound the owning context when it is younger.
 */
static inline int
__ww_mutex_add_waiter(struct MUTEX_WAITER *waiter,
		      struct MUTEX *lock,
		      struct ww_acquire_ctx *ww_ctx)
{
	struct MUTEX_WAITER *cur, *pos = NULL;
	bool is_wait_die;

	if (!ww_ctx) {
		__ww_waiter_add(lock, waiter, NULL);
		return 0;
	}

	is_wait_die = ww_ctx->is_wait_die;

	/*
	 * Add the waiter before the first waiter with a higher stamp.
	 * Waiters without a context are skipped to avoid starving
	 * them. Wait-Die waiters may die here. Wound-Wait waiters
	 * never die here, but they are sorted in stamp order and
	 * may wound the lock holder.
	 */
	for (cur = __ww_waiter_last(lock); cur;
	     cur = __ww_waiter_prev(lock, cur)) {

		if (!cur->ww_ctx)
			continue;

		if (__ww_ctx_less(ww_ctx, cur->ww_ctx)) {
			/*
			 * Wait-Die: if we find an older context waiting, there
			 * is no point in queueing behind it, as we'd have to
			 * die the moment it would acquire the lock.
			 */
			if (is_wait_die) {
				int ret = __ww_mutex_kill(lock, ww_ctx);

				if (ret)
					return ret;
			}

			break;
		}

		pos = cur;

		/* Wait-Die: ensure younger waiters die. */
		__ww_mutex_die(lock, cur, ww_ctx);
	}

	__ww_waiter_add(lock, waiter, pos);

	/*
	 * Wound-Wait: if we're blocking on a mutex owned by a younger context,
	 * wound that such that we might proceed.
	 */
	if (!is_wait_die) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);

		/*
		 * See ww_mutex_set_context_fastpath(). Orders setting
		 * MUTEX_FLAG_WAITERS vs the ww->ctx load,
		 * such that either we or the fastpath will wound @ww->ctx.
		 */
		smp_mb();
		__ww_mutex_wound(lock, ww_ctx, ww->ctx);
	}

	return 0;
}
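
/*
 * Undo the ww_ctx association made in ww_mutex_lock_acquired(): drop the
 * lock's context pointer and decrement the number of locks the context
 * tracks as held. lock->ctx is NULL when the lock was taken without an
 * acquire context, in which case there is nothing to do.
 */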
static inline void __ww_mutex_unlock(struct ww_mutex *lock)
{
	if (lock->ctx) {
#ifdef DEBUG_WW_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}
}