rtmutex_api.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * rtmutex API
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <trace/hooks/dtask.h>	/* android_vh_record_rtmutex_lock_starttime() vendor hook (assumed header) */

#define RT_MUTEX_BUILD_MUTEX
#include "rtmutex.c"

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;
/*
 * Debug aware fast / slowpath lock, trylock, unlock
 *
 * The atomic acquire/release ops are compiled away when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
						  unsigned int state,
						  struct lockdep_map *nest_lock,
						  unsigned int subclass)
{
	int ret;

	might_sleep();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, _RET_IP_);
	ret = __rt_mutex_lock(&lock->rtmutex, state);
	if (ret)
		mutex_release(&lock->dep_map, _RET_IP_);
	else
		trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
	return ret;
}
void rt_mutex_base_init(struct rt_mutex_base *rtb)
{
	__rt_mutex_base_init(rtb);
}
EXPORT_SYMBOL(rt_mutex_base_init);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rt_mutex_lock_nested - lock a rt_mutex
 *
 * @lock:	the rt_mutex to be locked
 * @subclass:	the lockdep subclass
 */
void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
{
	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, subclass);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);

void __sched _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock)
{
	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, nest_lock, 0);
}
EXPORT_SYMBOL_GPL(_rt_mutex_lock_nest_lock);

#else /* !CONFIG_DEBUG_LOCK_ALLOC */

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock:	the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
#endif
/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock:	the rt_mutex to be locked
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
{
	return __rt_mutex_lock_common(lock, TASK_INTERRUPTIBLE, NULL, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
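
/*
 * Usage sketch (illustrative only, not part of the upstream file):
 * interruptible acquisition with signal handling. 'my_lock' is a
 * hypothetical rt_mutex initialized elsewhere with rt_mutex_init().
 *
 *	ret = rt_mutex_lock_interruptible(&my_lock);
 *	if (ret)
 *		return ret;	// -EINTR: a signal arrived before we got the lock
 *	// ... critical section ...
 *	rt_mutex_unlock(&my_lock);
 */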
/**
 * rt_mutex_lock_killable - lock a rt_mutex killable
 *
 * @lock:	the rt_mutex to be locked
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a fatal signal
 */
int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
{
	return __rt_mutex_lock_common(lock, TASK_KILLABLE, NULL, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock:	the rt_mutex to be locked
 *
 * This function can only be called in thread context. It's safe to call it
 * from atomic regions, but not from hard or soft interrupt context.
 *
 * Returns:
 *  1 on success
 *  0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	int ret;

	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task()))
		return 0;

	ret = __rt_mutex_trylock(&lock->rtmutex);
	if (ret) {
		trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);
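
/*
 * Usage sketch (illustrative only): opportunistic acquisition from a
 * context that runs in task context but must not sleep. 'my_lock' is a
 * hypothetical rt_mutex.
 *
 *	if (rt_mutex_trylock(&my_lock)) {
 *		// ... short, non-sleeping critical section ...
 *		rt_mutex_unlock(&my_lock);
 *	} else {
 *		// lock contended: back off and retry later
 *	}
 */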
/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock:	the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	trace_android_vh_record_rtmutex_lock_starttime(current, 0);
	mutex_release(&lock->dep_map, _RET_IP_);
	__rt_mutex_unlock(&lock->rtmutex);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
/*
 * Futex variants, must not use fastpath.
 */
int __sched rt_mutex_futex_trylock(struct rt_mutex_base *lock)
{
	return rt_mutex_slowtrylock(lock);
}

int __sched __rt_mutex_futex_trylock(struct rt_mutex_base *lock)
{
	return __rt_mutex_slowtrylock(lock);
}
/**
 * __rt_mutex_futex_unlock - Futex variant of the unlock path. Since the
 * futex variants do not use the fast-path, it can be simple and will not
 * need to retry.
 *
 * @lock:	The rt_mutex to be unlocked
 * @wqh:	The wake queue head from which to get the next lock waiter
 */
bool __sched __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
				     struct rt_wake_q_head *wqh)
{
	lockdep_assert_held(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		return false; /* done */
	}

	/*
	 * We've already deboosted, mark_wakeup_next_waiter() will
	 * retain preempt_disabled when we drop the wait_lock, to
	 * avoid inversion prior to the wakeup. preempt_disable()
	 * therein pairs with rt_mutex_postunlock().
	 */
	mark_wakeup_next_waiter(wqh, lock);

	return true; /* call postunlock() */
}

void __sched rt_mutex_futex_unlock(struct rt_mutex_base *lock)
{
	DEFINE_RT_WAKE_Q(wqh);
	unsigned long flags;
	bool postunlock;

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	postunlock = __rt_mutex_futex_unlock(lock, &wqh);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	if (postunlock)
		rt_mutex_postunlock(&wqh);
}
/**
 * __rt_mutex_init - initialize the rt_mutex
 *
 * @lock:	The rt_mutex to be initialized
 * @name:	The lock name used for debugging
 * @key:	The lock class key used for debugging
 *
 * Initialize the rt_mutex to the unlocked state.
 *
 * Initializing a locked rt_mutex is not allowed.
 */
void __sched __rt_mutex_init(struct rt_mutex *lock, const char *name,
			     struct lock_class_key *key)
{
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	__rt_mutex_base_init(&lock->rtmutex);
	lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
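
/*
 * Callers normally reach __rt_mutex_init() through the rt_mutex_init()
 * wrapper in <linux/rtmutex.h>, which supplies a static lock class key.
 * Sketch (illustrative only; 'my_obj' is a hypothetical structure):
 *
 *	struct my_obj {
 *		struct rt_mutex lock;
 *		// ...
 *	};
 *
 *	static void my_obj_setup(struct my_obj *obj)
 *	{
 *		rt_mutex_init(&obj->lock);	// must start out unlocked
 *	}
 */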
/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock:	the rt_mutex to be locked
 * @proxy_owner:the task to set as owner
 *
 * No locking. Caller has to do serializing itself
 *
 * Special API call for PI-futex support. This initializes the rtmutex and
 * assigns it to @proxy_owner. Concurrent operations on the rtmutex are not
 * possible at this point because the pi_state which contains the rtmutex
 * is not yet visible to other tasks.
 */
void __sched rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
					struct task_struct *proxy_owner)
{
	static struct lock_class_key pi_futex_key;

	__rt_mutex_base_init(lock);
	/*
	 * On PREEMPT_RT the futex hashbucket spinlock becomes 'sleeping'
	 * and rtmutex based. That causes a lockdep false positive, because
	 * some of the futex functions invoke spin_unlock(&hb->lock) with
	 * the wait_lock of the rtmutex associated to the pi_futex held.
	 * spin_unlock() in turn takes wait_lock of the rtmutex on which
	 * the spinlock is based, which makes lockdep notice a lock
	 * recursion. Give the futex/rtmutex wait_lock a separate key.
	 */
	lockdep_set_class(&lock->wait_lock, &pi_futex_key);
	rt_mutex_set_owner(lock, proxy_owner);
}
/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:	the rt_mutex to be unlocked
 *
 * No locking. Caller has to do serializing itself
 *
 * Special API call for PI-futex support. This just cleans up the rtmutex
 * (debugging) state. Concurrent operations on this rt_mutex are not
 * possible because it belongs to the pi_state which is about to be freed
 * and it is no longer visible to other tasks.
 */
void __sched rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_clear_owner(lock);
}
/**
 * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:	the rt_mutex to take
 * @waiter:	the pre-initialized rt_mutex_waiter
 * @task:	the task to prepare
 *
 * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
 * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
 *
 * NOTE: does _NOT_ remove the @waiter on failure; must either call
 * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this.
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for PI-futex support.
 */
int __sched __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
					struct rt_mutex_waiter *waiter,
					struct task_struct *task)
{
	int ret;

	lockdep_assert_held(&lock->wait_lock);

	if (try_to_take_rt_mutex(lock, task, NULL))
		return 1;

	/* We enforce deadlock detection for futexes */
	ret = task_blocks_on_rt_mutex(lock, waiter, task, NULL,
				      RT_MUTEX_FULL_CHAINWALK);

	if (ret && !rt_mutex_owner(lock)) {
		/*
		 * Reset the return value. We might have
		 * returned with -EDEADLK and the owner
		 * released the lock while we were walking the
		 * pi chain. Let the waiter sort it out.
		 */
		ret = 0;
	}

	return ret;
}
/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:	the rt_mutex to take
 * @waiter:	the pre-initialized rt_mutex_waiter
 * @task:	the task to prepare
 *
 * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
 * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
 *
 * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter
 * on failure.
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for PI-futex support.
 */
int __sched rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
				      struct rt_mutex_waiter *waiter,
				      struct task_struct *task)
{
	int ret;

	raw_spin_lock_irq(&lock->wait_lock);
	ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
	if (unlikely(ret))
		remove_waiter(lock, waiter);
	raw_spin_unlock_irq(&lock->wait_lock);

	return ret;
}
/**
 * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
 * @lock:	the rt_mutex we were woken on
 * @to:		the timeout, null if none. hrtimer should already have
 *		been started.
 * @waiter:	the pre-initialized rt_mutex_waiter
 *
 * Wait for the lock acquisition started on our behalf by
 * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
 * rt_mutex_cleanup_proxy_lock().
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT
 *
 * Special API call for PI-futex support
 */
int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
				     struct hrtimer_sleeper *to,
				     struct rt_mutex_waiter *waiter)
{
	int ret;

	raw_spin_lock_irq(&lock->wait_lock);
	/* sleep on the mutex */
	set_current_state(TASK_INTERRUPTIBLE);
	ret = rt_mutex_slowlock_block(lock, NULL, TASK_INTERRUPTIBLE, to, waiter);
	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock, true);
	raw_spin_unlock_irq(&lock->wait_lock);

	return ret;
}
/**
 * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
 * @lock:	the rt_mutex we were woken on
 * @waiter:	the pre-initialized rt_mutex_waiter
 *
 * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or
 * rt_mutex_wait_proxy_lock().
 *
 * Unless we acquired the lock, we're still enqueued on the wait-list and can
 * in fact still be granted ownership until we're removed. Therefore we can
 * find we are in fact the owner and must disregard the
 * rt_mutex_wait_proxy_lock() failure.
 *
 * Returns:
 *  true  - did the cleanup, we are done.
 *  false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
 *          caller should disregard its return value.
 *
 * Special API call for PI-futex support
 */
bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
					 struct rt_mutex_waiter *waiter)
{
	bool cleanup = false;

	raw_spin_lock_irq(&lock->wait_lock);
	/*
	 * Do an unconditional try-lock, this deals with the lock stealing
	 * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter()
	 * sets a NULL owner.
	 *
	 * We're not interested in the return value, because the subsequent
	 * test on rt_mutex_owner() will infer that. If the trylock succeeded,
	 * we will own the lock and it will have removed the waiter. If we
	 * failed the trylock, we're still not owner and we need to remove
	 * ourselves.
	 */
	try_to_take_rt_mutex(lock, current, waiter);
	/*
	 * Unless we're the owner, we're still enqueued on the wait_list.
	 * So check if we became owner, if not, take us off the wait_list.
	 */
	if (rt_mutex_owner(lock) != current) {
		remove_waiter(lock, waiter);
		cleanup = true;
	}
	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock, false);

	raw_spin_unlock_irq(&lock->wait_lock);

	return cleanup;
}
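
/*
 * Calling convention of the proxy-lock API above, as used by the PI-futex
 * code (schematic only, locking and error handling elided; 'lock', 'waiter'
 * and 'task' as documented for the individual functions):
 *
 *	rt_mutex_init_waiter(&waiter);
 *	ret = rt_mutex_start_proxy_lock(lock, &waiter, task);
 *	if (ret == 0) {
 *		// blocked: later, in @task's context
 *		ret = rt_mutex_wait_proxy_lock(lock, to, &waiter);
 *		if (ret && !rt_mutex_cleanup_proxy_lock(lock, &waiter))
 *			ret = 0;	// acquired after the wait failed
 *	} else if (ret == 1) {
 *		// lock was acquired on behalf of @task, wake it up
 *	}
 */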
/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void __sched rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	struct rt_mutex_base *next_lock;
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);
	waiter = task->pi_blocked_on;
	if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}
	next_lock = waiter->lock;
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(task);

	rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
				   next_lock, NULL, task);
}
/*
 * Performs the wakeup of the top-waiter and re-enables preemption.
 */
void __sched rt_mutex_postunlock(struct rt_wake_q_head *wqh)
{
	rt_mutex_wake_up_q(wqh);
}

#ifdef CONFIG_DEBUG_RT_MUTEXES
void rt_mutex_debug_task_free(struct task_struct *task)
{
	DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters.rb_root));
	DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
}
#endif
#ifdef CONFIG_PREEMPT_RT
/* Mutexes */
void __mutex_rt_init(struct mutex *mutex, const char *name,
		     struct lock_class_key *key)
{
	debug_check_no_locks_freed((void *)mutex, sizeof(*mutex));
	lockdep_init_map_wait(&mutex->dep_map, name, key, 0, LD_WAIT_SLEEP);
}
EXPORT_SYMBOL(__mutex_rt_init);

static __always_inline int __mutex_lock_common(struct mutex *lock,
					       unsigned int state,
					       unsigned int subclass,
					       struct lockdep_map *nest_lock,
					       unsigned long ip)
{
	int ret;

	might_sleep();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
	ret = __rt_mutex_lock(&lock->rtmutex, state);
	if (ret)
		mutex_release(&lock->dep_map, ip);
	else
		lock_acquired(&lock->dep_map, ip);
	return ret;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched _mutex_lock_nest_lock(struct mutex *lock,
				   struct lockdep_map *nest_lock)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest_lock, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched mutex_lock_interruptible_nested(struct mutex *lock,
					    unsigned int subclass)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

int __sched mutex_lock_killable_nested(struct mutex *lock,
				       unsigned int subclass)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

void __sched mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
#else /* CONFIG_DEBUG_LOCK_ALLOC */

void __sched mutex_lock(struct mutex *lock)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock);

int __sched mutex_lock_interruptible(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock_interruptible);

int __sched mutex_lock_killable(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock_killable);

void __sched mutex_lock_io(struct mutex *lock)
{
	int token = io_schedule_prepare();

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
	io_schedule_finish(token);
}
EXPORT_SYMBOL(mutex_lock_io);
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task()))
		return 0;

	ret = __rt_mutex_trylock(&lock->rtmutex);
	if (ret)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
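
/*
 * Usage sketch (illustrative only): on PREEMPT_RT the regular mutex API is
 * backed by the rtmutex, so, like rt_mutex_trylock() above, mutex_trylock()
 * may only be used from task context. 'my_mutex' is a hypothetical mutex.
 *
 *	if (mutex_trylock(&my_mutex)) {
 *		// ... critical section ...
 *		mutex_unlock(&my_mutex);
 *	}
 */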
void __sched mutex_unlock(struct mutex *lock)
{
	mutex_release(&lock->dep_map, _RET_IP_);
	__rt_mutex_unlock(&lock->rtmutex);
}
EXPORT_SYMBOL(mutex_unlock);

#endif /* CONFIG_PREEMPT_RT */