  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. #include <linux/sched/signal.h>
  3. #include "futex.h"
  4. #include "../locking/rtmutex_common.h"
  5. /*
  6. * On PREEMPT_RT, the hash bucket lock is a 'sleeping' spinlock with an
  7. * underlying rtmutex. The task which is about to be requeued could have
  8. * just woken up (timeout, signal). After the wake up the task has to
  9. * acquire hash bucket lock, which is held by the requeue code. As a task
  10. * can only be blocked on _ONE_ rtmutex at a time, the proxy lock blocking
  11. * and the hash bucket lock blocking would collide and corrupt state.
  12. *
  13. * On !PREEMPT_RT this is not a problem and everything could be serialized
  14. * on hash bucket lock, but aside of having the benefit of common code,
  15. * this allows to avoid doing the requeue when the task is already on the
  16. * way out and taking the hash bucket lock of the original uaddr1 when the
  17. * requeue has been completed.
  18. *
  19. * The following state transitions are valid:
  20. *
  21. * On the waiter side:
  22. * Q_REQUEUE_PI_NONE -> Q_REQUEUE_PI_IGNORE
  23. * Q_REQUEUE_PI_IN_PROGRESS -> Q_REQUEUE_PI_WAIT
  24. *
  25. * On the requeue side:
 * Q_REQUEUE_PI_NONE -> Q_REQUEUE_PI_IN_PROGRESS
  27. * Q_REQUEUE_PI_IN_PROGRESS -> Q_REQUEUE_PI_DONE/LOCKED
  28. * Q_REQUEUE_PI_IN_PROGRESS -> Q_REQUEUE_PI_NONE (requeue failed)
  29. * Q_REQUEUE_PI_WAIT -> Q_REQUEUE_PI_DONE/LOCKED
  30. * Q_REQUEUE_PI_WAIT -> Q_REQUEUE_PI_IGNORE (requeue failed)
  31. *
  32. * The requeue side ignores a waiter with state Q_REQUEUE_PI_IGNORE as this
  33. * signals that the waiter is already on the way out. It also means that
  34. * the waiter is still on the 'wait' futex, i.e. uaddr1.
  35. *
  36. * The waiter side signals early wakeup to the requeue side either through
  37. * setting state to Q_REQUEUE_PI_IGNORE or to Q_REQUEUE_PI_WAIT depending
  38. * on the current state. In case of Q_REQUEUE_PI_IGNORE it can immediately
  39. * proceed to take the hash bucket lock of uaddr1. If it set state to WAIT,
  40. * which means the wakeup is interleaving with a requeue in progress it has
  41. * to wait for the requeue side to change the state. Either to DONE/LOCKED
  42. * or to IGNORE. DONE/LOCKED means the waiter q is now on the uaddr2 futex
  43. * and either blocked (DONE) or has acquired it (LOCKED). IGNORE is set by
  44. * the requeue side when the requeue attempt failed via deadlock detection
  45. * and therefore the waiter q is still on the uaddr1 futex.
  46. */
/*
 * Waiter <-> requeue handshake states, stored in futex_q::requeue_state.
 * See the state transition description above for the valid transitions.
 *
 * Q_REQUEUE_PI_LOCKED must immediately follow Q_REQUEUE_PI_DONE:
 * futex_requeue_pi_complete() computes "Q_REQUEUE_PI_DONE + locked".
 */
enum {
	Q_REQUEUE_PI_NONE = 0,		/* No requeue operation in flight */
	Q_REQUEUE_PI_IGNORE,		/* Waiter is on the way out; requeue must skip it */
	Q_REQUEUE_PI_IN_PROGRESS,	/* Requeue side is operating on this waiter */
	Q_REQUEUE_PI_WAIT,		/* Early wakeup interleaved; waiter waits for completion */
	Q_REQUEUE_PI_DONE,		/* Requeued; waiter is blocked on the PI futex */
	Q_REQUEUE_PI_LOCKED,		/* Requeue acquired the PI futex for the waiter */
};
/*
 * Default initializer for an on-stack futex_q: matches any wakeup bitset
 * and starts with no requeue operation in flight.
 */
const struct futex_q futex_q_init = {
	/* list gets initialized in futex_queue()*/
	.key		= FUTEX_KEY_INIT,
	.bitset		= FUTEX_BITSET_MATCH_ANY,
	.requeue_state	= ATOMIC_INIT(Q_REQUEUE_PI_NONE),
};
  61. /**
  62. * requeue_futex() - Requeue a futex_q from one hb to another
  63. * @q: the futex_q to requeue
  64. * @hb1: the source hash_bucket
  65. * @hb2: the target hash_bucket
  66. * @key2: the new key for the requeued futex_q
  67. */
  68. static inline
  69. void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
  70. struct futex_hash_bucket *hb2, union futex_key *key2)
  71. {
  72. /*
  73. * If key1 and key2 hash to the same bucket, no need to
  74. * requeue.
  75. */
  76. if (likely(&hb1->chain != &hb2->chain)) {
  77. plist_del(&q->list, &hb1->chain);
  78. futex_hb_waiters_dec(hb1);
  79. futex_hb_waiters_inc(hb2);
  80. plist_add(&q->list, &hb2->chain);
  81. q->lock_ptr = &hb2->lock;
  82. }
  83. q->key = *key2;
  84. }
/*
 * Prepare @q for a requeue to a PI futex and attach @pi_state to it.
 *
 * Transitions requeue_state from NONE to IN_PROGRESS unless an early
 * wakeup already set IGNORE, in which case the requeue side must skip
 * this waiter and leave its pi_state untouched.
 *
 * Return: true if the waiter can be requeued, false if it is on the way out.
 */
static inline bool futex_requeue_pi_prepare(struct futex_q *q,
					    struct futex_pi_state *pi_state)
{
	int old, new;

	/*
	 * Set state to Q_REQUEUE_PI_IN_PROGRESS unless an early wakeup has
	 * already set Q_REQUEUE_PI_IGNORE to signal that requeue should
	 * ignore the waiter.
	 */
	old = atomic_read_acquire(&q->requeue_state);
	do {
		if (old == Q_REQUEUE_PI_IGNORE)
			return false;

		/*
		 * futex_proxy_trylock_atomic() might have set it to
		 * IN_PROGRESS and an interleaved early wake to WAIT.
		 *
		 * It was considered to have an extra state for that
		 * trylock, but that would just add more conditionals
		 * all over the place for a dubious value.
		 */
		if (old != Q_REQUEUE_PI_NONE)
			break;
		new = Q_REQUEUE_PI_IN_PROGRESS;
	} while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));

	q->pi_state = pi_state;
	return true;
}
/*
 * Mark the requeue of @q as finished.
 *
 * @locked encodes the outcome:
 *   1  - the requeue acquired the PI futex for the waiter (-> LOCKED)
 *   0  - the waiter has been queued on the rtmutex (-> DONE)
 *   <0 - the requeue attempt failed (deadlock detection); rewind the
 *        state to NONE, or to IGNORE if an early wakeup interleaved.
 *
 * On PREEMPT_RT a waiter that interleaved (WAIT) sleeps in rcuwait and
 * must be woken here; on !PREEMPT_RT it spins on the state instead.
 */
static inline void futex_requeue_pi_complete(struct futex_q *q, int locked)
{
	int old, new;

	old = atomic_read_acquire(&q->requeue_state);
	do {
		if (old == Q_REQUEUE_PI_IGNORE)
			return;

		if (locked >= 0) {
			/* Requeue succeeded. Set DONE or LOCKED */
			WARN_ON_ONCE(old != Q_REQUEUE_PI_IN_PROGRESS &&
				     old != Q_REQUEUE_PI_WAIT);
			new = Q_REQUEUE_PI_DONE + locked;
		} else if (old == Q_REQUEUE_PI_IN_PROGRESS) {
			/* Deadlock, no early wakeup interleave */
			new = Q_REQUEUE_PI_NONE;
		} else {
			/* Deadlock, early wakeup interleave. */
			WARN_ON_ONCE(old != Q_REQUEUE_PI_WAIT);
			new = Q_REQUEUE_PI_IGNORE;
		}
	} while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));

#ifdef CONFIG_PREEMPT_RT
	/* If the waiter interleaved with the requeue let it know */
	if (unlikely(old == Q_REQUEUE_PI_WAIT))
		rcuwait_wake_up(&q->requeue_wait);
#endif
}
/*
 * Synchronize an early wakeup of @q with a concurrent requeue.
 *
 * Called on the waiter side after returning from schedule(). Tells the
 * requeue side to either ignore the waiter (IGNORE) or, if a requeue is
 * already in flight, waits until the requeue side reached a final state.
 *
 * Return: the final requeue state; nothing modifies q->requeue_state
 * after this function returns.
 */
static inline int futex_requeue_pi_wakeup_sync(struct futex_q *q)
{
	int old, new;

	old = atomic_read_acquire(&q->requeue_state);
	do {
		/* Is requeue done already? */
		if (old >= Q_REQUEUE_PI_DONE)
			return old;

		/*
		 * If not done, then tell the requeue code to either ignore
		 * the waiter or to wake it up once the requeue is done.
		 */
		new = Q_REQUEUE_PI_WAIT;
		if (old == Q_REQUEUE_PI_NONE)
			new = Q_REQUEUE_PI_IGNORE;
	} while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));

	/* If the requeue was in progress, wait for it to complete */
	if (old == Q_REQUEUE_PI_IN_PROGRESS) {
#ifdef CONFIG_PREEMPT_RT
		rcuwait_wait_event(&q->requeue_wait,
				   atomic_read(&q->requeue_state) != Q_REQUEUE_PI_WAIT,
				   TASK_UNINTERRUPTIBLE);
#else
		(void)atomic_cond_read_relaxed(&q->requeue_state, VAL != Q_REQUEUE_PI_WAIT);
#endif
	}

	/*
	 * Requeue is now either prohibited or complete. Reread state
	 * because during the wait above it might have changed. Nothing
	 * will modify q->requeue_state after this point.
	 */
	return atomic_read(&q->requeue_state);
}
/**
 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
 * @q:		the futex_q
 * @key:	the key of the requeue target futex
 * @hb:		the hash_bucket of the requeue target futex
 *
 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
 * target futex if it is uncontended or via a lock steal.
 *
 * 1) Set @q::key to the requeue target futex key so the waiter can detect
 *    the wakeup on the right futex.
 *
 * 2) Dequeue @q from the hash bucket.
 *
 * 3) Set @q::rt_waiter to NULL so the woken up task can detect atomic lock
 *    acquisition.
 *
 * 4) Set the q->lock_ptr to the requeue target hb->lock for the case that
 *    the waiter has to fixup the pi state.
 *
 * 5) Complete the requeue state so the waiter can make progress. After
 *    this point the waiter task can return from the syscall immediately in
 *    case that the pi state does not have to be fixed up.
 *
 * 6) Wake the waiter task.
 *
 * Must be called with both q->lock_ptr and hb->lock held.
 *
 * NOTE: the numbered steps above are order-sensitive; do not reorder the
 * statements below.
 */
static inline
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
			   struct futex_hash_bucket *hb)
{
	q->key = *key;

	__futex_unqueue(q);

	WARN_ON(!q->rt_waiter);
	q->rt_waiter = NULL;

	q->lock_ptr = &hb->lock;

	/* Signal locked state to the waiter */
	futex_requeue_pi_complete(q, 1);
	wake_up_state(q->task, TASK_NORMAL);
}
/**
 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
 * @pifutex:		the user address of the to futex
 * @hb1:		the from futex hash bucket, must be locked by the caller
 * @hb2:		the to futex hash bucket, must be locked by the caller
 * @key1:		the from futex key
 * @key2:		the to futex key
 * @ps:			address to store the pi_state pointer
 * @exiting:		Pointer to store the task pointer of the owner task
 *			which is in the middle of exiting
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Try and get the lock on behalf of the top waiter if we can do it atomically.
 * Wake the top waiter if we succeed. If the caller specified set_waiters,
 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
 * hb1 and hb2 must be held by the caller.
 *
 * @exiting is only set when the return value is -EBUSY. If so, this holds
 * a refcount on the exiting task on return and the caller needs to drop it
 * after waiting for the exit to complete.
 *
 * Return:
 *  -  0 - failed to acquire the lock atomically;
 *  - >0 - acquired the lock, return value is vpid of the top_waiter
 *  - <0 - error
 */
static int
futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1,
			   struct futex_hash_bucket *hb2, union futex_key *key1,
			   union futex_key *key2, struct futex_pi_state **ps,
			   struct task_struct **exiting, int set_waiters)
{
	struct futex_q *top_waiter = NULL;
	u32 curval;
	int ret;

	if (futex_get_value_locked(&curval, pifutex))
		return -EFAULT;

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	/*
	 * Find the top_waiter and determine if there are additional waiters.
	 * If the caller intends to requeue more than 1 waiter to pifutex,
	 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
	 * as we have means to handle the possible fault. If not, don't set
	 * the bit unnecessarily as it will force the subsequent unlock to enter
	 * the kernel.
	 */
	top_waiter = futex_top_waiter(hb1, key1);

	/* There are no waiters, nothing for us to do. */
	if (!top_waiter)
		return 0;

	/*
	 * Ensure that this is a waiter sitting in futex_wait_requeue_pi()
	 * and waiting on the 'waitqueue' futex which is always !PI.
	 */
	if (!top_waiter->rt_waiter || top_waiter->pi_state)
		return -EINVAL;

	/* Ensure we requeue to the expected futex. */
	if (!futex_match(top_waiter->requeue_pi_key, key2))
		return -EINVAL;

	/* Ensure that this does not race against an early wakeup */
	if (!futex_requeue_pi_prepare(top_waiter, NULL))
		return -EAGAIN;

	/*
	 * Try to take the lock for top_waiter and set the FUTEX_WAITERS bit
	 * in the contended case or if @set_waiters is true.
	 *
	 * In the contended case PI state is attached to the lock owner. If
	 * the user space lock can be acquired then PI state is attached to
	 * the new owner (@top_waiter->task) when @set_waiters is true.
	 */
	ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
				   exiting, set_waiters);
	if (ret == 1) {
		/*
		 * Lock was acquired in user space and PI state was
		 * attached to @top_waiter->task. That means state is fully
		 * consistent and the waiter can return to user space
		 * immediately after the wakeup.
		 */
		requeue_pi_wake_futex(top_waiter, key2, hb2);
	} else if (ret < 0) {
		/* Rewind top_waiter::requeue_state */
		futex_requeue_pi_complete(top_waiter, ret);
	} else {
		/*
		 * futex_lock_pi_atomic() did not acquire the user space
		 * futex, but managed to establish the proxy lock and pi
		 * state. top_waiter::requeue_state cannot be fixed up here
		 * because the waiter is not enqueued on the rtmutex
		 * yet. This is handled at the callsite depending on the
		 * result of rt_mutex_start_proxy_lock() which is
		 * guaranteed to be reached with this function returning 0.
		 */
	}

	return ret;
}
/**
 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
 * @uaddr1:	source futex user address
 * @flags:	futex flags (FLAGS_SHARED, etc.)
 * @uaddr2:	target futex user address
 * @nr_wake:	number of waiters to wake (must be 1 for requeue_pi)
 * @nr_requeue:	number of waiters to requeue (0-INT_MAX)
 * @cmpval:	@uaddr1 expected value (or %NULL)
 * @requeue_pi:	if we are attempting to requeue from a non-pi futex to a
 *		pi futex (pi to pi requeue is not supported)
 *
 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
 * uaddr2 atomically on behalf of the top waiter.
 *
 * Return:
 *  - >=0 - on success, the number of tasks requeued or woken;
 *  -  <0 - on error
 */
int futex_requeue(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
		  int nr_wake, int nr_requeue, u32 *cmpval, int requeue_pi)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	int task_count = 0, ret;
	struct futex_pi_state *pi_state = NULL;
	struct futex_hash_bucket *hb1, *hb2;
	struct futex_q *this, *next;
	DEFINE_WAKE_Q(wake_q);

	if (nr_wake < 0 || nr_requeue < 0)
		return -EINVAL;

	/*
	 * When PI not supported: return -ENOSYS if requeue_pi is true,
	 * consequently the compiler knows requeue_pi is always false past
	 * this point which will optimize away all the conditional code
	 * further down.
	 */
	if (!IS_ENABLED(CONFIG_FUTEX_PI) && requeue_pi)
		return -ENOSYS;

	if (requeue_pi) {
		/*
		 * Requeue PI only works on two distinct uaddrs. This
		 * check is only valid for private futexes. See below.
		 */
		if (uaddr1 == uaddr2)
			return -EINVAL;

		/*
		 * futex_requeue() allows the caller to define the number
		 * of waiters to wake up via the @nr_wake argument. With
		 * REQUEUE_PI, waking up more than one waiter is creating
		 * more problems than it solves. Waking up a waiter makes
		 * only sense if the PI futex @uaddr2 is uncontended as
		 * this allows the requeue code to acquire the futex
		 * @uaddr2 before waking the waiter. The waiter can then
		 * return to user space without further action. A secondary
		 * wakeup would just make the futex_wait_requeue_pi()
		 * handling more complex, because that code would have to
		 * look up pi_state and do more or less all the handling
		 * which the requeue code has to do for the to be requeued
		 * waiters. So restrict the number of waiters to wake to
		 * one, and only wake it up when the PI futex is
		 * uncontended. Otherwise requeue it and let the unlock of
		 * the PI futex handle the wakeup.
		 *
		 * All REQUEUE_PI users, e.g. pthread_cond_signal() and
		 * pthread_cond_broadcast() must use nr_wake=1.
		 */
		if (nr_wake != 1)
			return -EINVAL;

		/*
		 * requeue_pi requires a pi_state, try to allocate it now
		 * without any locks in case it fails.
		 */
		if (refill_pi_state_cache())
			return -ENOMEM;
	}

retry:
	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ);
	if (unlikely(ret != 0))
		return ret;
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
			    requeue_pi ? FUTEX_WRITE : FUTEX_READ);
	if (unlikely(ret != 0))
		return ret;

	/*
	 * The check above which compares uaddrs is not sufficient for
	 * shared futexes. We need to compare the keys:
	 */
	if (requeue_pi && futex_match(&key1, &key2))
		return -EINVAL;

	hb1 = futex_hash(&key1);
	hb2 = futex_hash(&key2);

retry_private:
	futex_hb_waiters_inc(hb2);
	double_lock_hb(hb1, hb2);

	if (likely(cmpval != NULL)) {
		u32 curval;

		ret = futex_get_value_locked(&curval, uaddr1);

		if (unlikely(ret)) {
			/* Fault: drop the locks, fault the page in and retry */
			double_unlock_hb(hb1, hb2);
			futex_hb_waiters_dec(hb2);

			ret = get_user(curval, uaddr1);
			if (ret)
				return ret;

			if (!(flags & FLAGS_SHARED))
				goto retry_private;

			goto retry;
		}
		if (curval != *cmpval) {
			ret = -EAGAIN;
			goto out_unlock;
		}
	}

	if (requeue_pi) {
		struct task_struct *exiting = NULL;

		/*
		 * Attempt to acquire uaddr2 and wake the top waiter. If we
		 * intend to requeue waiters, force setting the FUTEX_WAITERS
		 * bit. We force this here where we are able to easily handle
		 * faults rather in the requeue loop below.
		 *
		 * Updates topwaiter::requeue_state if a top waiter exists.
		 */
		ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
						 &key2, &pi_state,
						 &exiting, nr_requeue);

		/*
		 * At this point the top_waiter has either taken uaddr2 or
		 * is waiting on it. In both cases pi_state has been
		 * established and an initial refcount on it. In case of an
		 * error there's nothing.
		 *
		 * The top waiter's requeue_state is up to date:
		 *
		 * - If the lock was acquired atomically (ret == 1), then
		 *   the state is Q_REQUEUE_PI_LOCKED.
		 *
		 *   The top waiter has been dequeued and woken up and can
		 *   return to user space immediately. The kernel/user
		 *   space state is consistent. In case that there must be
		 *   more waiters requeued the WAITERS bit in the user
		 *   space futex is set so the top waiter task has to go
		 *   into the syscall slowpath to unlock the futex. This
		 *   will block until this requeue operation has been
		 *   completed and the hash bucket locks have been
		 *   dropped.
		 *
		 * - If the trylock failed with an error (ret < 0) then
		 *   the state is either Q_REQUEUE_PI_NONE, i.e. "nothing
		 *   happened", or Q_REQUEUE_PI_IGNORE when there was an
		 *   interleaved early wakeup.
		 *
		 * - If the trylock did not succeed (ret == 0) then the
		 *   state is either Q_REQUEUE_PI_IN_PROGRESS or
		 *   Q_REQUEUE_PI_WAIT if an early wakeup interleaved.
		 *   This will be cleaned up in the loop below, which
		 *   cannot fail because futex_proxy_trylock_atomic() did
		 *   the same sanity checks for requeue_pi as the loop
		 *   below does.
		 */
		switch (ret) {
		case 0:
			/* We hold a reference on the pi state. */
			break;

		case 1:
			/*
			 * futex_proxy_trylock_atomic() acquired the user space
			 * futex. Adjust task_count.
			 */
			task_count++;
			ret = 0;
			break;

		/*
		 * If the above failed, then pi_state is NULL and
		 * waiter::requeue_state is correct.
		 */
		case -EFAULT:
			double_unlock_hb(hb1, hb2);
			futex_hb_waiters_dec(hb2);
			ret = fault_in_user_writeable(uaddr2);
			if (!ret)
				goto retry;
			return ret;
		case -EBUSY:
		case -EAGAIN:
			/*
			 * Two reasons for this:
			 * - EBUSY: Owner is exiting and we just wait for the
			 *   exit to complete.
			 * - EAGAIN: The user space value changed.
			 */
			double_unlock_hb(hb1, hb2);
			futex_hb_waiters_dec(hb2);
			/*
			 * Handle the case where the owner is in the middle of
			 * exiting. Wait for the exit to complete otherwise
			 * this task might loop forever, aka. live lock.
			 */
			wait_for_owner_exiting(ret, exiting);
			cond_resched();
			goto retry;
		default:
			goto out_unlock;
		}
	}

	/*
	 * Wake/requeue loop: walk the waiters queued on uaddr1's bucket.
	 * task_count counts both woken and requeued waiters; stop once
	 * nr_wake + nr_requeue tasks have been handled.
	 */
	plist_for_each_entry_safe(this, next, &hb1->chain, list) {
		if (task_count - nr_wake >= nr_requeue)
			break;

		if (!futex_match(&this->key, &key1))
			continue;

		/*
		 * FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI should always
		 * be paired with each other and no other futex ops.
		 *
		 * We should never be requeueing a futex_q with a pi_state,
		 * which is awaiting a futex_unlock_pi().
		 */
		if ((requeue_pi && !this->rt_waiter) ||
		    (!requeue_pi && this->rt_waiter) ||
		    this->pi_state) {
			ret = -EINVAL;
			break;
		}

		/* Plain futexes just wake or requeue and are done */
		if (!requeue_pi) {
			if (++task_count <= nr_wake)
				futex_wake_mark(&wake_q, this);
			else
				requeue_futex(this, hb1, hb2, &key2);
			continue;
		}

		/* Ensure we requeue to the expected futex for requeue_pi. */
		if (!futex_match(this->requeue_pi_key, &key2)) {
			ret = -EINVAL;
			break;
		}

		/*
		 * Requeue nr_requeue waiters and possibly one more in the case
		 * of requeue_pi if we couldn't acquire the lock atomically.
		 *
		 * Prepare the waiter to take the rt_mutex. Take a refcount
		 * on the pi_state and store the pointer in the futex_q
		 * object of the waiter.
		 */
		get_pi_state(pi_state);

		/* Don't requeue when the waiter is already on the way out. */
		if (!futex_requeue_pi_prepare(this, pi_state)) {
			/*
			 * Early woken waiter signaled that it is on the
			 * way out. Drop the pi_state reference and try the
			 * next waiter. @this->pi_state is still NULL.
			 */
			put_pi_state(pi_state);
			continue;
		}

		ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
						this->rt_waiter,
						this->task);

		if (ret == 1) {
			/*
			 * We got the lock. We do neither drop the refcount
			 * on pi_state nor clear this->pi_state because the
			 * waiter needs the pi_state for cleaning up the
			 * user space value. It will drop the refcount
			 * after doing so. this::requeue_state is updated
			 * in the wakeup as well.
			 */
			requeue_pi_wake_futex(this, &key2, hb2);
			task_count++;
		} else if (!ret) {
			/* Waiter is queued, move it to hb2 */
			requeue_futex(this, hb1, hb2, &key2);
			futex_requeue_pi_complete(this, 0);
			task_count++;
		} else {
			/*
			 * rt_mutex_start_proxy_lock() detected a potential
			 * deadlock when we tried to queue that waiter.
			 * Drop the pi_state reference which we took above
			 * and remove the pointer to the state from the
			 * waiters futex_q object.
			 */
			this->pi_state = NULL;
			put_pi_state(pi_state);
			futex_requeue_pi_complete(this, ret);
			/*
			 * We stop queueing more waiters and let user space
			 * deal with the mess.
			 */
			break;
		}
	}

	/*
	 * We took an extra initial reference to the pi_state in
	 * futex_proxy_trylock_atomic(). We need to drop it here again.
	 */
	put_pi_state(pi_state);

out_unlock:
	double_unlock_hb(hb1, hb2);
	wake_up_q(&wake_q);
	futex_hb_waiters_dec(hb2);
	return ret ? ret : task_count;
}
  612. /**
  613. * handle_early_requeue_pi_wakeup() - Handle early wakeup on the initial futex
  614. * @hb: the hash_bucket futex_q was original enqueued on
  615. * @q: the futex_q woken while waiting to be requeued
  616. * @timeout: the timeout associated with the wait (NULL if none)
  617. *
  618. * Determine the cause for the early wakeup.
  619. *
  620. * Return:
  621. * -EWOULDBLOCK or -ETIMEDOUT or -ERESTARTNOINTR
  622. */
  623. static inline
  624. int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
  625. struct futex_q *q,
  626. struct hrtimer_sleeper *timeout)
  627. {
  628. int ret;
  629. /*
  630. * With the hb lock held, we avoid races while we process the wakeup.
  631. * We only need to hold hb (and not hb2) to ensure atomicity as the
  632. * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
  633. * It can't be requeued from uaddr2 to something else since we don't
  634. * support a PI aware source futex for requeue.
  635. */
  636. WARN_ON_ONCE(&hb->lock != q->lock_ptr);
  637. /*
  638. * We were woken prior to requeue by a timeout or a signal.
  639. * Unqueue the futex_q and determine which it was.
  640. */
  641. plist_del(&q->list, &hb->chain);
  642. futex_hb_waiters_dec(hb);
  643. /* Handle spurious wakeups gracefully */
  644. ret = -EWOULDBLOCK;
  645. if (timeout && !timeout->task)
  646. ret = -ETIMEDOUT;
  647. else if (signal_pending(current))
  648. ret = -ERESTARTNOINTR;
  649. return ret;
  650. }
/**
 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
 * @uaddr:	the futex we initially wait on (non-pi)
 * @flags:	futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
 *		the same type, no requeueing from private to shared, etc.
 * @val:	the expected value of uaddr
 * @abs_time:	absolute timeout
 * @bitset:	32 bit wakeup bitset set by userspace, defaults to all
 * @uaddr2:	the pi futex we will take prior to returning to user-space
 *
 * The caller will wait on uaddr and will be requeued by futex_requeue() to
 * uaddr2 which must be PI aware and unique from uaddr. Normal wakeup will wake
 * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
 * userspace. This ensures the rt_mutex maintains an owner when it has waiters;
 * without one, the pi logic would not know which task to boost/deboost, if
 * there was a need to.
 *
 * We call schedule in futex_wait_queue() when we enqueue and return there
 * via the following--
 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
 * 2) wakeup on uaddr2 after a requeue
 * 3) signal
 * 4) timeout
 *
 * If 3, cleanup and return -ERESTARTNOINTR.
 *
 * If 2, we may then block on trying to take the rt_mutex and return via:
 * 5) successful lock
 * 6) signal
 * 7) timeout
 * 8) other lock acquisition failure
 *
 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
 *
 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
 *
 * Return:
 *  - 0 - On success;
 *  - <0 - On error
 */
int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
			  u32 val, ktime_t *abs_time, u32 bitset,
			  u32 __user *uaddr2)
{
	struct hrtimer_sleeper timeout, *to;
	struct rt_mutex_waiter rt_waiter;
	struct futex_hash_bucket *hb;
	union futex_key key2 = FUTEX_KEY_INIT;
	struct futex_q q = futex_q_init;
	struct rt_mutex_base *pi_mutex;
	int res, ret;

	if (!IS_ENABLED(CONFIG_FUTEX_PI))
		return -ENOSYS;

	if (uaddr == uaddr2)
		return -EINVAL;

	if (!bitset)
		return -EINVAL;

	to = futex_setup_timer(abs_time, &timeout, flags,
			       current->timer_slack_ns);

	/*
	 * The waiter is allocated on our stack, manipulated by the requeue
	 * code while we sleep on uaddr.
	 */
	rt_mutex_init_waiter(&rt_waiter);

	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE);
	if (unlikely(ret != 0))
		goto out;

	q.bitset = bitset;
	q.rt_waiter = &rt_waiter;
	q.requeue_pi_key = &key2;

	/*
	 * Prepare to wait on uaddr. On success, it holds hb->lock and q
	 * is initialized.
	 */
	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
	if (ret)
		goto out;

	/*
	 * The check above which compares uaddrs is not sufficient for
	 * shared futexes. We need to compare the keys:
	 */
	if (futex_match(&q.key, &key2)) {
		futex_q_unlock(hb);
		ret = -EINVAL;
		goto out;
	}

	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
	futex_wait_queue(hb, &q, to);

	/*
	 * Synchronize with a possibly concurrent requeue and act on the
	 * final, stable requeue state.
	 */
	switch (futex_requeue_pi_wakeup_sync(&q)) {
	case Q_REQUEUE_PI_IGNORE:
		/* The waiter is still on uaddr1 */
		spin_lock(&hb->lock);
		ret = handle_early_requeue_pi_wakeup(hb, &q, to);
		spin_unlock(&hb->lock);
		break;

	case Q_REQUEUE_PI_LOCKED:
		/* The requeue acquired the lock */
		if (q.pi_state && (q.pi_state->owner != current)) {
			spin_lock(q.lock_ptr);
			ret = fixup_pi_owner(uaddr2, &q, true);
			/*
			 * Drop the reference to the pi state which the
			 * requeue_pi() code acquired for us.
			 */
			put_pi_state(q.pi_state);
			spin_unlock(q.lock_ptr);
			/*
			 * Adjust the return value. It's either -EFAULT or
			 * success (1) but the caller expects 0 for success.
			 */
			ret = ret < 0 ? ret : 0;
		}
		break;

	case Q_REQUEUE_PI_DONE:
		/* Requeue completed. Current is 'pi_blocked_on' the rtmutex */
		pi_mutex = &q.pi_state->pi_mutex;
		ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);

		/* Current is not longer pi_blocked_on */
		spin_lock(q.lock_ptr);
		if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
			ret = 0;

		debug_rt_mutex_free_waiter(&rt_waiter);
		/*
		 * Fixup the pi_state owner and possibly acquire the lock if we
		 * haven't already.
		 */
		res = fixup_pi_owner(uaddr2, &q, !ret);
		/*
		 * If fixup_pi_owner() returned an error, propagate that. If it
		 * acquired the lock, clear -ETIMEDOUT or -EINTR.
		 */
		if (res)
			ret = (res < 0) ? res : 0;

		futex_unqueue_pi(&q);
		spin_unlock(q.lock_ptr);

		if (ret == -EINTR) {
			/*
			 * We've already been requeued, but cannot restart
			 * by calling futex_lock_pi() directly. We could
			 * restart this syscall, but it would detect that
			 * the user space "val" changed and return
			 * -EWOULDBLOCK. Save the overhead of the restart
			 * and return -EWOULDBLOCK directly.
			 */
			ret = -EWOULDBLOCK;
		}
		break;
	default:
		BUG();
	}

out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret;
}