spinlock_rt.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * PREEMPT_RT substitution for spin/rw_locks
 *
 * spinlocks and rwlocks on RT are based on rtmutexes, with a few twists to
 * resemble the non RT semantics:
 *
 *  - Contrary to plain rtmutexes, spinlocks and rwlocks are state
 *    preserving. The task state is saved before blocking on the underlying
 *    rtmutex, and restored when the lock has been acquired. Regular wakeups
 *    during that time are redirected to the saved state so no wake up is
 *    missed.
 *
 *  - Non RT spin/rwlocks disable preemption and eventually interrupts.
 *    Disabling preemption has the side effect of disabling migration and
 *    preventing RCU grace periods.
 *
 *    The RT substitutions explicitly disable migration and take
 *    rcu_read_lock() across the lock held section.
 */
#include <linux/spinlock.h>
#include <linux/export.h>

#define RT_MUTEX_BUILD_SPINLOCKS
#include "rtmutex.c"
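
/*
 * On PREEMPT_RT enabled kernels the regular spin_lock()/spin_unlock() API
 * is redirected to the rt_spin_*() functions below via
 * include/linux/spinlock_rt.h, so callers keep using the usual interface.
 * Illustrative sketch with a hypothetical caller:
 *
 *	static DEFINE_SPINLOCK(demo_lock);
 *
 *	spin_lock(&demo_lock);		may sleep on contention on RT
 *	... critical section: migration disabled, RCU read side held ...
 *	spin_unlock(&demo_lock);
 */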

/*
 * __might_resched() skips the state check as rtlocks are state
 * preserving. Take RCU nesting into account as spin/read/write_lock() can
 * legitimately nest into an RCU read side critical section.
 */
#define RTLOCK_RESCHED_OFFSETS						\
	(rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT)

#define rtlock_might_resched()						\
	__might_resched(__FILE__, __LINE__, RTLOCK_RESCHED_OFFSETS)
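
/*
 * Lock fast path: a single cmpxchg of the rtmutex owner from NULL to
 * current acquires the lock. On contention rtlock_slowlock() blocks in
 * TASK_RTLOCK_WAIT state while preserving the caller's task state.
 */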
static __always_inline void rtlock_lock(struct rt_mutex_base *rtm)
{
	if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
		rtlock_slowlock(rtm);
}

static __always_inline void __rt_spin_lock(spinlock_t *lock)
{
	rtlock_might_resched();
	rtlock_lock(&lock->lock);
	rcu_read_lock();
	migrate_disable();
}

void __sched rt_spin_lock(spinlock_t *lock)
{
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	__rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched rt_spin_lock_nested(spinlock_t *lock, int subclass)
{
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	__rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_nested);

void __sched rt_spin_lock_nest_lock(spinlock_t *lock,
				    struct lockdep_map *nest_lock)
{
	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
	__rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_nest_lock);
#endif
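
/*
 * Unlock in the reverse order of __rt_spin_lock(): drop the lockdep
 * annotation, re-enable migration, leave the RCU read side section and
 * finally release the rtmutex - fast path via cmpxchg of the owner back
 * to NULL, slow path to wake up waiters.
 */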
void __sched rt_spin_unlock(spinlock_t *lock)
{
	spin_release(&lock->dep_map, _RET_IP_);
	migrate_enable();
	rcu_read_unlock();

	if (unlikely(!rt_mutex_cmpxchg_release(&lock->lock, current, NULL)))
		rt_mutex_slowunlock(&lock->lock);
}
EXPORT_SYMBOL(rt_spin_unlock);

/*
 * Wait for the lock to get unlocked: instead of polling for an unlock
 * (like raw spinlocks do), lock and unlock, to force the kernel to
 * schedule if there's contention:
 */
void __sched rt_spin_lock_unlock(spinlock_t *lock)
{
	spin_lock(lock);
	spin_unlock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_unlock);
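
/*
 * Trylock: attempt the cmpxchg fast path first, then fall back to the
 * rtmutex slow trylock. Only on success is the acquisition recorded in
 * lockdep and the RCU/migrate_disable() protection of the critical
 * section established.
 */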
static __always_inline int __rt_spin_trylock(spinlock_t *lock)
{
	int ret = 1;

	if (unlikely(!rt_mutex_cmpxchg_acquire(&lock->lock, NULL, current)))
		ret = rt_mutex_slowtrylock(&lock->lock);

	if (ret) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		rcu_read_lock();
		migrate_disable();
	}
	return ret;
}

int __sched rt_spin_trylock(spinlock_t *lock)
{
	return __rt_spin_trylock(lock);
}
EXPORT_SYMBOL(rt_spin_trylock);
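
/*
 * spin_trylock_bh() substitution: bottom halves are disabled up front and
 * stay disabled only when the trylock succeeds; on failure the BH disable
 * is undone again.
 */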
int __sched rt_spin_trylock_bh(spinlock_t *lock)
{
	int ret;

	local_bh_disable();
	ret = __rt_spin_trylock(lock);
	if (!ret)
		local_bh_enable();
	return ret;
}
EXPORT_SYMBOL(rt_spin_trylock_bh);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __rt_spin_lock_init(spinlock_t *lock, const char *name,
			 struct lock_class_key *key, bool percpu)
{
	u8 type = percpu ? LD_LOCK_PERCPU : LD_LOCK_NORMAL;

	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map_type(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG,
			      LD_WAIT_INV, type);
}
EXPORT_SYMBOL(__rt_spin_lock_init);
#endif

/*
 * RT-specific reader/writer locks
 */
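/*
 * The rwbase_*() macros and inlines below provide the rtmutex based
 * backend expected by the generic reader/writer lock core in
 * "rwbase_rt.c", which is included further down.
 */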
#define rwbase_set_and_save_current_state(state)	\
	current_save_and_set_rtlock_wait_state()

#define rwbase_restore_current_state()			\
	current_restore_rtlock_saved_state()

static __always_inline int
rwbase_rtmutex_lock_state(struct rt_mutex_base *rtm, unsigned int state)
{
	if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
		rtlock_slowlock(rtm);
	return 0;
}

static __always_inline int
rwbase_rtmutex_slowlock_locked(struct rt_mutex_base *rtm, unsigned int state)
{
	rtlock_slowlock_locked(rtm);
	return 0;
}

static __always_inline void rwbase_rtmutex_unlock(struct rt_mutex_base *rtm)
{
	if (likely(rt_mutex_cmpxchg_acquire(rtm, current, NULL)))
		return;

	rt_mutex_slowunlock(rtm);
}

static __always_inline int rwbase_rtmutex_trylock(struct rt_mutex_base *rtm)
{
	if (likely(rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
		return 1;

	return rt_mutex_slowtrylock(rtm);
}
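
/*
 * rtlock based rwlock waiters cannot be interrupted by signals, so the
 * signal pending check is hardcoded to 0, and blocking happens via
 * schedule_rtlock(). Both hooks are required by the rwbase implementation.
 */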
#define rwbase_signal_pending_state(state, current)	(0)

#define rwbase_schedule()				\
	schedule_rtlock()

#include "rwbase_rt.c"

/*
 * The common functions which get wrapped into the rwlock API.
 */
int __sched rt_read_trylock(rwlock_t *rwlock)
{
	int ret;

	ret = rwbase_read_trylock(&rwlock->rwbase);
	if (ret) {
		rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
		rcu_read_lock();
		migrate_disable();
	}
	return ret;
}
EXPORT_SYMBOL(rt_read_trylock);

int __sched rt_write_trylock(rwlock_t *rwlock)
{
	int ret;

	ret = rwbase_write_trylock(&rwlock->rwbase);
	if (ret) {
		rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
		rcu_read_lock();
		migrate_disable();
	}
	return ret;
}
EXPORT_SYMBOL(rt_write_trylock);
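
/*
 * The read/write lock slow paths block in TASK_RTLOCK_WAIT state. As with
 * the spinlock substitution, migration is disabled and an RCU read side
 * section is entered for the duration of the lock held region.
 */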
void __sched rt_read_lock(rwlock_t *rwlock)
{
	rtlock_might_resched();
	rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
	rwbase_read_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
	rcu_read_lock();
	migrate_disable();
}
EXPORT_SYMBOL(rt_read_lock);

void __sched rt_write_lock(rwlock_t *rwlock)
{
	rtlock_might_resched();
	rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
	rwbase_write_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
	rcu_read_lock();
	migrate_disable();
}
EXPORT_SYMBOL(rt_write_lock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched rt_write_lock_nested(rwlock_t *rwlock, int subclass)
{
	rtlock_might_resched();
	rwlock_acquire(&rwlock->dep_map, subclass, 0, _RET_IP_);
	rwbase_write_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
	rcu_read_lock();
	migrate_disable();
}
EXPORT_SYMBOL(rt_write_lock_nested);
#endif
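
/*
 * Unlock: undo the lockdep annotation and the migration/RCU protection
 * before handing the lock back to the rwbase code.
 */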
void __sched rt_read_unlock(rwlock_t *rwlock)
{
	rwlock_release(&rwlock->dep_map, _RET_IP_);
	migrate_enable();
	rcu_read_unlock();
	rwbase_read_unlock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
}
EXPORT_SYMBOL(rt_read_unlock);

void __sched rt_write_unlock(rwlock_t *rwlock)
{
	rwlock_release(&rwlock->dep_map, _RET_IP_);
	rcu_read_unlock();
	migrate_enable();
	rwbase_write_unlock(&rwlock->rwbase);
}
EXPORT_SYMBOL(rt_write_unlock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __rt_rwlock_init(rwlock_t *rwlock, const char *name,
		      struct lock_class_key *key)
{
	debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
	lockdep_init_map_wait(&rwlock->dep_map, name, key, 0, LD_WAIT_CONFIG);
}
EXPORT_SYMBOL(__rt_rwlock_init);
#endif