// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <[email protected]>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/locking/mutex-design.rst.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>
#undef CREATE_TRACE_POINTS
#include <trace/hooks/dtask.h>

#ifndef CONFIG_PREEMPT_RT
#include "mutex.h"

#ifdef CONFIG_DEBUG_MUTEXES
# define MUTEX_WARN_ON(cond) DEBUG_LOCKS_WARN_ON(cond)
#else
# define MUTEX_WARN_ON(cond)
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	raw_spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif
	trace_android_vh_mutex_init(lock);

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);

/*
 * @owner: contains: 'struct task_struct *' to the current lock owner,
 * NULL means not owned. Since task_struct pointers are aligned to
 * at least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 * Bit2 indicates handoff has been done and we're waiting for pickup.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04

#define MUTEX_FLAGS		0x07

/*
 * Internal helper function; C doesn't allow us to hide it :/
 *
 * DO NOT USE (outside of mutex code).
 */
static inline struct task_struct *__mutex_owner(struct mutex *lock)
{
	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
}

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

bool mutex_is_locked(struct mutex *lock)
{
	return __mutex_owner(lock) != NULL;
}
EXPORT_SYMBOL(mutex_is_locked);

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}

/*
 * Returns: __mutex_owner(lock) on failure or NULL on success.
 */
static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (flags & MUTEX_FLAG_PICKUP) {
				if (task != curr)
					break;
				flags &= ~MUTEX_FLAG_PICKUP;
			} else if (handoff) {
				if (flags & MUTEX_FLAG_HANDOFF)
					break;
				flags |= MUTEX_FLAG_HANDOFF;
			} else {
				break;
			}
		} else {
			MUTEX_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
			task = curr;
		}

		if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
			if (task == curr)
				return NULL;
			break;
		}
	}

	return __owner_task(owner);
}

/*
 * Trylock or set HANDOFF
 */
static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
{
	return !__mutex_trylock_common(lock, handoff);
}

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_common(lock, false);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;
	unsigned long zero = 0UL;

	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr)) {
		trace_android_vh_record_mutex_lock_starttime(current, jiffies);
		return true;
	}

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Add @waiter to a given location in the lock wait_list and set the
 * FLAG_WAITERS flag if it's the first waiter.
 */
static void
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
		   struct list_head *list)
{
	bool already_on_list = false;

	debug_mutex_add_waiter(lock, waiter, current);

	trace_android_vh_alter_mutex_list_add(lock, waiter, list, &already_on_list);
	if (!already_on_list)
		list_add_tail(&waiter->list, list);
	if (__mutex_waiter_is_first(lock, waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}

static void
__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
	list_del(&waiter->list);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_remove_waiter(lock, waiter, current);
}

/*
 * Give up ownership to a specific task; when @task = NULL, this is equivalent
 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
 * WAITERS. Provides RELEASE semantics like a regular unlock;
 * __mutex_trylock() provides the matching ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long new;

		MUTEX_WARN_ON(__owner_task(owner) != current);
		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))
			break;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif
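
/*
 * Editor's illustrative sketch (not part of the upstream file): a minimal
 * caller of the mutex_lock()/mutex_unlock() API documented above.
 * DEFINE_MUTEX() is the real static initializer from <linux/mutex.h>; the
 * example_stats_lock/example_stats_count names and the example_stats_inc()
 * function are hypothetical, used only for illustration.
 */
#if 0	/* usage sketch, not built */
static DEFINE_MUTEX(example_stats_lock);
static unsigned long example_stats_count;

static void example_stats_inc(void)
{
	mutex_lock(&example_stats_lock);	/* may sleep; process context only */
	example_stats_count++;			/* critical section */
	mutex_unlock(&example_stats_lock);	/* must be released by the same task */
}
#endif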

#include "ww_mutex.h"

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	return __mutex_trylock_common(lock, false);
}

static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
			    struct mutex_waiter *waiter)
{
	struct ww_mutex *ww;

	ww = container_of(lock, struct ww_mutex, base);

	/*
	 * If ww->ctx is set, its contents are undefined; only by
	 * acquiring wait_lock is there a guarantee that they are
	 * not invalid when read.
	 *
	 * As such, when deadlock detection needs to be
	 * performed, the optimistic spinning cannot be done.
	 *
	 * Check this in every inner iteration because we may
	 * be racing against another thread's ww_mutex_lock.
	 */
	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
		return false;

	/*
	 * If we aren't on the wait list yet, cancel the spin
	 * if there are waiters. We want to avoid stealing the
	 * lock from a waiter with an earlier stamp, since the
	 * other thread may already own a lock that we also
	 * need.
	 */
	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
		return false;

	/*
	 * Similarly, stop spinning if we are no longer the
	 * first waiter.
	 */
	if (waiter && !__mutex_waiter_is_first(lock, waiter))
		return false;

	return true;
}

/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
	bool ret = true;
	int cnt = 0;
	bool time_out = false;

	lockdep_assert_preemption_disabled();

	while (__mutex_owner(lock) == owner) {
		trace_android_vh_mutex_opt_spin_start(lock, &time_out, &cnt);
		if (time_out) {
			ret = false;
			break;
		}

		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking lock->owner still matches owner. We already
		 * disabled preemption, which is equivalent to an RCU
		 * read-side critical section in the optimistic spinning
		 * code, so the task_struct won't go away during the
		 * spinning period.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted to detect lock holder preemption issue.
		 */
		if (!owner_on_cpu(owner) || need_resched()) {
			ret = false;
			break;
		}

		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
			ret = false;
			break;
		}

		cpu_relax();
	}

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	lockdep_assert_preemption_disabled();

	if (need_resched())
		return 0;

	/*
	 * We already disabled preemption, which is equivalent to an RCU
	 * read-side critical section in the optimistic spinning code.
	 * Thus the task_struct won't go away during the spinning period.
	 */
	owner = __mutex_owner(lock);
	if (owner)
		retval = owner_on_cpu(owner);

	trace_android_vh_mutex_can_spin_on_owner(lock, &retval);

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take a
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	trace_android_vh_mutex_opt_spin_finish(lock, true);
	return true;

fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	trace_android_vh_mutex_opt_spin_finish(lock, false);
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock)) {
		trace_android_vh_record_mutex_lock_starttime(current, 0);
		return;
	}
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
	trace_android_vh_record_mutex_lock_starttime(current, 0);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	__ww_mutex_unlock(lock);
	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);
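
/*
 * Editor's illustrative sketch (not part of the upstream file): the usual
 * wound/wait acquire-context pattern built on ww_mutex_lock(),
 * ww_mutex_lock_slow() and ww_mutex_unlock(). DEFINE_WW_CLASS() and the
 * ww_acquire_init/done/fini() calls are the real w/w mutex API from
 * <linux/ww_mutex.h>; the example_buf type and example_lock_pair()
 * function are hypothetical names used only for illustration.
 */
#if 0	/* usage sketch, not built */
static DEFINE_WW_CLASS(example_ww_class);

struct example_buf {
	struct ww_mutex lock;
	/* ... */
};

/* Acquire both buffers deadlock-free, regardless of the caller's order. */
static int example_lock_pair(struct example_buf *a, struct example_buf *b)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &example_ww_class);
retry:
	ret = ww_mutex_lock(&a->lock, &ctx);	/* first lock can't hit -EDEADLK */
	if (ret)
		goto out_fini;

	ret = ww_mutex_lock(&b->lock, &ctx);
	if (ret == -EDEADLK) {
		/* Back off: drop a, sleep on the contended lock, try again. */
		ww_mutex_unlock(&a->lock);
		ww_mutex_lock_slow(&b->lock, &ctx);
		ret = ww_mutex_lock(&a->lock, &ctx);
		if (ret) {
			/* Wounded again while holding b: restart from scratch. */
			ww_mutex_unlock(&b->lock);
			goto retry;
		}
	} else if (ret) {
		goto out_unlock_a;
	}

	ww_acquire_done(&ctx);		/* all locks for this context are held */

	/* ... operate on a and b ... */

	ww_mutex_unlock(&b->lock);
out_unlock_a:
	ww_mutex_unlock(&a->lock);
out_fini:
	ww_acquire_fini(&ctx);
	return ret;
}
#endif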

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct mutex_waiter waiter;
	struct ww_mutex *ww;
	int ret;

	if (!use_ww_ctx)
		ww_ctx = NULL;

	might_sleep();

	MUTEX_WARN_ON(lock->magic != lock);

	ww = container_of(lock, struct ww_mutex, base);
	if (ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
		nest_lock = &ww_ctx->dep_map;
#endif
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		trace_contention_end(lock, 0);
		trace_android_vh_record_mutex_lock_starttime(current, jiffies);
		preempt_enable();
		return 0;
	}

	raw_spin_lock(&lock->wait_lock);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {
		if (ww_ctx)
			__ww_mutex_check_waiters(lock, ww_ctx);

		goto skip_wait;
	}

	debug_mutex_lock_common(lock, &waiter);
	waiter.task = current;
	if (use_ww_ctx)
		waiter.ww_ctx = ww_ctx;

	lock_contended(&lock->dep_map, ip);

	if (!use_ww_ctx) {
		/* add waiting tasks to the end of the waitqueue (FIFO): */
		__mutex_add_waiter(lock, &waiter, &lock->wait_list);
	} else {
		/*
		 * Add in stamp order, waking up waiters that must kill
		 * themselves.
		 */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
		if (ret)
			goto err_early_kill;
	}

	trace_android_vh_mutex_wait_start(lock);
	set_current_state(state);
	trace_contention_begin(lock, LCB_F_MUTEX);
	for (;;) {
		bool first;

		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us, do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock))
			goto acquired;

		/*
		 * Check for signals and kill conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (signal_pending_state(state, current)) {
			ret = -EINTR;
			goto err;
		}

		if (ww_ctx) {
			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
			if (ret)
				goto err;
		}

		raw_spin_unlock(&lock->wait_lock);
		schedule_preempt_disabled();

		first = __mutex_waiter_is_first(lock, &waiter);

		set_current_state(state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if (__mutex_trylock_or_handoff(lock, first))
			break;

		if (first) {
			trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
			if (mutex_optimistic_spin(lock, ww_ctx, &waiter))
				break;
			trace_contention_begin(lock, LCB_F_MUTEX);
		}

		raw_spin_lock(&lock->wait_lock);
	}
	raw_spin_lock(&lock->wait_lock);
acquired:
	__set_current_state(TASK_RUNNING);
	trace_android_vh_mutex_wait_finish(lock);

	if (ww_ctx) {
		/*
		 * Wound-Wait; we stole the lock (!first_waiter), check the
		 * waiters as anyone might want to wound us.
		 */
		if (!ww_ctx->is_wait_die &&
		    !__mutex_waiter_is_first(lock, &waiter))
			__ww_mutex_check_waiters(lock, ww_ctx);
	}

	__mutex_remove_waiter(lock, &waiter);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);
	trace_contention_end(lock, 0);

	if (ww_ctx)
		ww_mutex_lock_acquired(ww, ww_ctx);

	raw_spin_unlock(&lock->wait_lock);
	preempt_enable();
	trace_android_vh_record_mutex_lock_starttime(current, jiffies);
	return 0;

err:
	__set_current_state(TASK_RUNNING);
	trace_android_vh_mutex_wait_finish(lock);
	__mutex_remove_waiter(lock, &waiter);
err_early_kill:
	trace_contention_end(lock, ret);
	raw_spin_unlock(&lock->wait_lock);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, ip);
	preempt_enable();
	return ret;
}

static int __sched
__mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
		unsigned long ip, struct ww_acquire_ctx *ww_ctx)
{
	return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
}

/**
 * ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire context
 * @ww: mutex to lock
 * @ww_ctx: optional w/w acquire context
 *
 * Trylocks a mutex with the optional acquire context; no deadlock detection is
 * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
 *
 * Unlike ww_mutex_lock, no deadlock handling is performed. However, if @ww_ctx is
 * specified, -EALREADY handling may happen in calls to ww_mutex_trylock.
 *
 * A mutex acquired with this function must be released with ww_mutex_unlock.
 */
int ww_mutex_trylock(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
	if (!ww_ctx)
		return mutex_trylock(&ww->base);

	MUTEX_WARN_ON(ww->base.magic != &ww->base);

	/*
	 * Reset the wounded flag after a kill. No other process can
	 * race and wound us here, since they can't have a valid owner
	 * pointer if we don't have any locks held.
	 */
	if (ww_ctx->acquired == 0)
		ww_ctx->wounded = 0;

	if (__mutex_trylock(&ww->base)) {
		ww_mutex_set_context_fastpath(ww, ww_ctx);
		mutex_acquire_nest(&ww->base.dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(ww_mutex_trylock);
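
/*
 * Editor's illustrative sketch (not part of the upstream file): opportunistic
 * ww_mutex_trylock() under an existing acquire context, falling back to the
 * blocking ww_mutex_lock() path on contention. The example_try_then_lock()
 * name is hypothetical.
 */
#if 0	/* usage sketch, not built */
static int example_try_then_lock(struct ww_mutex *lock,
				 struct ww_acquire_ctx *ctx)
{
	if (ww_mutex_trylock(lock, ctx))	/* 1: acquired, 0: contended */
		return 0;

	/* May return -EDEADLK; the caller is then expected to back off. */
	return ww_mutex_lock(lock, ctx);
}
#endif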

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
			      0, _RET_IP_, ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
			      0, _RET_IP_, ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	DEFINE_WAKE_Q(wake_q);
	unsigned long owner;

	mutex_release(&lock->dep_map, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF, in that case we must not clear the owner field,
	 * but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		MUTEX_WARN_ON(__owner_task(owner) != current);
		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}
	}

	raw_spin_lock(&lock->wait_lock);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	trace_android_vh_mutex_unlock_slowpath(lock);
	raw_spin_unlock(&lock->wait_lock);

	wake_up_q(&wake_q);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal is delivered while the
 * process is sleeping, this function will return without acquiring the
 * mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * signal arrived.
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_interruptible);
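
/*
 * Editor's illustrative sketch (not part of the upstream file): the common
 * driver pattern around mutex_lock_interruptible(), restarting the syscall
 * when a signal interrupts the wait. The example_dev type and the
 * example_ioctl_op() name are hypothetical.
 */
#if 0	/* usage sketch, not built */
struct example_dev {
	struct mutex io_lock;
	/* ... */
};

static long example_ioctl_op(struct example_dev *dev)
{
	if (mutex_lock_interruptible(&dev->io_lock))
		return -ERESTARTSYS;	/* got -EINTR: let the signal be handled */

	/* ... do the operation under dev->io_lock ... */

	mutex_unlock(&dev->io_lock);
	return 0;
}
#endif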

/**
 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal which will be fatal to
 * the current process is delivered while the process is sleeping, this
 * function will return without acquiring the mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * fatal signal arrived.
 */
int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

/**
 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). While the task is waiting for this
 * mutex, it will be accounted as being in the IO wait state by the
 * scheduler.
 *
 * Context: Process context.
 */
void __sched mutex_lock_io(struct mutex *lock)
{
	int token;

	token = io_schedule_prepare();
	mutex_lock(lock);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io);
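
/*
 * Editor's illustrative sketch (not part of the upstream file): taking a
 * mutex that is typically held across storage I/O, so time spent sleeping
 * on it is accounted as iowait via mutex_lock_io(). The example_cache type
 * and example_cache_flush() name are hypothetical.
 */
#if 0	/* usage sketch, not built */
struct example_cache {
	struct mutex flush_lock;
	/* ... */
};

static void example_cache_flush(struct example_cache *c)
{
	mutex_lock_io(&c->flush_lock);	/* waiting here counts as iowait */
	/* ... write back dirty data ... */
	mutex_unlock(&c->flush_lock);
}
#endif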

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
}

#endif

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked;

	MUTEX_WARN_ON(lock->magic != lock);

	locked = __mutex_trylock(lock);
	if (locked) {
		trace_android_vh_record_mutex_lock_starttime(current, jiffies);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);
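
/*
 * Editor's illustrative sketch (not part of the upstream file): using
 * mutex_trylock() to skip work when the lock is contended instead of
 * sleeping. Note the spin_trylock()-style return value documented above:
 * 1 means acquired, 0 means contended. The example_gc names are
 * hypothetical.
 */
#if 0	/* usage sketch, not built */
static DEFINE_MUTEX(example_gc_lock);

static void example_gc(void)
{
	if (!mutex_trylock(&example_gc_lock))
		return;			/* someone else is already collecting */

	/* ... do the periodic cleanup ... */

	mutex_unlock(&example_gc_lock);
}
#endif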

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
#endif /* !CONFIG_PREEMPT_RT */

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
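
/*
 * Editor's illustrative sketch (not part of the upstream file): the classic
 * "drop a reference, tear down under the lock only if it hit zero" pattern
 * that atomic_dec_and_mutex_lock() exists for. The example_obj type,
 * example_table_lock and example_put() are hypothetical names.
 */
#if 0	/* usage sketch, not built */
struct example_obj {
	atomic_t refcount;
	/* ... fields protected by example_table_lock ... */
};

static DEFINE_MUTEX(example_table_lock);

static void example_put(struct example_obj *obj)
{
	if (!atomic_dec_and_mutex_lock(&obj->refcount, &example_table_lock))
		return;		/* refcount still > 0, lock was not taken */

	/* refcount hit 0 and example_table_lock is now held */
	/* ... unlink obj from the table and free it ... */
	mutex_unlock(&example_table_lock);
}
#endif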