  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * rtmutex API
  4. */
  5. #include <linux/spinlock.h>
  6. #include <linux/export.h>
  7. #define RT_MUTEX_BUILD_MUTEX
  8. #define WW_RT
  9. #include "rtmutex.c"
/**
 * ww_mutex_trylock - attempt to acquire a wound/wait mutex without blocking
 * @lock:   the ww_mutex to acquire
 * @ww_ctx: acquire context, or NULL for a context-less trylock
 *
 * Returns 1 if the lock was acquired, 0 otherwise. Never sleeps.
 */
int ww_mutex_trylock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	struct rt_mutex *rtm = &lock->base;

	/* No context: behaves like a plain rtmutex trylock. */
	if (!ww_ctx)
		return rt_mutex_trylock(rtm);

	/*
	 * Reset the wounded flag after a kill. No other process can
	 * race and wound us here, since they can't have a valid owner
	 * pointer if we don't have any locks held.
	 */
	if (ww_ctx->acquired == 0)
		ww_ctx->wounded = 0;

	if (__rt_mutex_trylock(&rtm->rtmutex)) {
		ww_mutex_set_context_fastpath(lock, ww_ctx);
		/*
		 * Lockdep annotation only after the lock is actually held;
		 * trylock=1, nested under the acquire context's dep_map.
		 */
		mutex_acquire_nest(&rtm->dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(ww_mutex_trylock);
/*
 * Common slow/fast-path acquire for the RT ww_mutex variants.
 *
 * @lock:   the ww_mutex to acquire
 * @ww_ctx: acquire context, or NULL for a context-less lock
 * @state:  task state used while blocking (TASK_UNINTERRUPTIBLE or
 *          TASK_INTERRUPTIBLE, supplied by the public wrappers)
 * @ip:     caller IP for the lockdep annotations
 *
 * Returns 0 on success, -EALREADY if this context already holds the lock,
 * or the error from rt_mutex_slowlock() (e.g. on wound/kill or signal).
 */
static int __sched
__ww_rt_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx,
		   unsigned int state, unsigned long ip)
{
	struct lockdep_map __maybe_unused *nest_lock = NULL;
	struct rt_mutex *rtm = &lock->base;
	int ret;

	might_sleep();

	if (ww_ctx) {
		/* Re-acquiring the lock with the same context is an error. */
		if (unlikely(ww_ctx == READ_ONCE(lock->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here, since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
		nest_lock = &ww_ctx->dep_map;
#endif
	}
	/* Annotate before the acquire attempt; released below on failure. */
	mutex_acquire_nest(&rtm->dep_map, 0, 0, nest_lock, ip);

	/* Fast path: uncontended cmpxchg of the owner from NULL to current. */
	if (likely(rt_mutex_cmpxchg_acquire(&rtm->rtmutex, NULL, current))) {
		if (ww_ctx)
			ww_mutex_set_context_fastpath(lock, ww_ctx);
		return 0;
	}

	ret = rt_mutex_slowlock(&rtm->rtmutex, ww_ctx, state);

	/* Undo the lockdep acquire annotation if the slow path failed. */
	if (ret)
		mutex_release(&rtm->dep_map, ip);
	return ret;
}
/**
 * ww_mutex_lock - acquire a wound/wait mutex, uninterruptible
 * @lock: the ww_mutex to acquire
 * @ctx:  acquire context, or NULL
 *
 * Returns 0 on success, -EALREADY or a deadlock-avoidance error from
 * __ww_rt_mutex_lock().
 */
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_rt_mutex_lock(lock, ctx, TASK_UNINTERRUPTIBLE, _RET_IP_);
}
EXPORT_SYMBOL(ww_mutex_lock);
/**
 * ww_mutex_lock_interruptible - acquire a wound/wait mutex, interruptible
 * @lock: the ww_mutex to acquire
 * @ctx:  acquire context, or NULL
 *
 * Same as ww_mutex_lock() but the wait may be interrupted by a signal,
 * in which case the error from __ww_rt_mutex_lock() is returned.
 */
int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_rt_mutex_lock(lock, ctx, TASK_INTERRUPTIBLE, _RET_IP_);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);
/**
 * ww_mutex_unlock - release a wound/wait mutex
 * @lock: the ww_mutex to release
 *
 * Order matters: the ww context is torn down and the lockdep annotation
 * dropped before the underlying rtmutex is actually released.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	struct rt_mutex *rtm = &lock->base;

	__ww_mutex_unlock(lock);
	mutex_release(&rtm->dep_map, _RET_IP_);
	__rt_mutex_unlock(&rtm->rtmutex);
}
EXPORT_SYMBOL(ww_mutex_unlock);