spinlock_rt.h

// SPDX-License-Identifier: GPL-2.0-only
#ifndef __LINUX_SPINLOCK_RT_H
#define __LINUX_SPINLOCK_RT_H

#ifndef __LINUX_INSIDE_SPINLOCK_H
#error Do not include directly. Use spinlock.h
#endif

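/*
 * On PREEMPT_RT, spinlock_t is substituted by an rtmutex based,
 * priority-inheriting sleeping lock. Acquisition may therefore block,
 * which is why the _irq/_irqsave variants below do not actually
 * disable hardware interrupts: they only preserve the generic spinlock
 * API shape so that common code compiles unchanged.
 */
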
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void __rt_spin_lock_init(spinlock_t *lock, const char *name,
				struct lock_class_key *key, bool percpu);
#else
static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
				       struct lock_class_key *key, bool percpu)
{
}
#endif

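/*
 * The init macros place a static lock_class_key at each call site, so
 * lockdep can assign every initialization site its own lock class.
 */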
#define spin_lock_init(slock)					\
do {								\
	static struct lock_class_key __key;			\
								\
	rt_mutex_base_init(&(slock)->lock);			\
	__rt_spin_lock_init(slock, #slock, &__key, false);	\
} while (0)

#define local_spin_lock_init(slock)				\
do {								\
	static struct lock_class_key __key;			\
								\
	rt_mutex_base_init(&(slock)->lock);			\
	__rt_spin_lock_init(slock, #slock, &__key, true);	\
} while (0)

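/*
 * Minimal usage sketch (illustrative caller code, not part of this
 * header; 'my_lock' is a made-up name):
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	spin_lock(&my_lock);
 *	// critical section; may sleep internally on PREEMPT_RT
 *	spin_unlock(&my_lock);
 */
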
extern void rt_spin_lock(spinlock_t *lock);
extern void rt_spin_lock_nested(spinlock_t *lock, int subclass);
extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock);
extern void rt_spin_unlock(spinlock_t *lock);
extern void rt_spin_lock_unlock(spinlock_t *lock);
extern int rt_spin_trylock_bh(spinlock_t *lock);
extern int rt_spin_trylock(spinlock_t *lock);

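/*
 * The rt_spin_*() primitives declared above are implemented in
 * kernel/locking/spinlock_rt.c.
 */
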
static __always_inline void spin_lock(spinlock_t *lock)
{
	rt_spin_lock(lock);
}

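/*
 * The _nested()/_nest_lock() variants only carry lockdep annotations.
 * Without CONFIG_LOCKDEP they collapse into plain spin_lock(), while
 * still evaluating their extra arguments (see the comment below).
 */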
#ifdef CONFIG_LOCKDEP
# define __spin_lock_nested(lock, subclass)				\
	rt_spin_lock_nested(lock, subclass)

# define __spin_lock_nest_lock(lock, nest_lock)			\
do {									\
	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
	rt_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);		\
} while (0)

# define __spin_lock_irqsave_nested(lock, flags, subclass)		\
do {									\
	typecheck(unsigned long, flags);				\
	flags = 0;							\
	__spin_lock_nested(lock, subclass);				\
} while (0)

#else
/*
 * Always evaluate the 'subclass' argument so that the compiler does
 * not warn about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define __spin_lock_nested(lock, subclass)	spin_lock(((void)(subclass), (lock)))
# define __spin_lock_nest_lock(lock, subclass)	spin_lock(((void)(subclass), (lock)))
# define __spin_lock_irqsave_nested(lock, flags, subclass)	\
	spin_lock_irqsave(((void)(subclass), (lock)), flags)
#endif

#define spin_lock_nested(lock, subclass)			\
	__spin_lock_nested(lock, subclass)

#define spin_lock_nest_lock(lock, nest_lock)			\
	__spin_lock_nest_lock(lock, nest_lock)

#define spin_lock_irqsave_nested(lock, flags, subclass)		\
	__spin_lock_irqsave_nested(lock, flags, subclass)

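/*
 * The _bh variants still disable bottom halves, so the usual softirq
 * serialization guarantees continue to hold on RT.
 */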
static __always_inline void spin_lock_bh(spinlock_t *lock)
{
	/* Investigate: Drop bh when blocking ? */
	local_bh_disable();
	rt_spin_lock(lock);
}

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
	rt_spin_lock(lock);
}

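/*
 * Neither spin_lock_irq() nor spin_lock_irqsave() disables interrupts
 * on RT; 'flags' is zeroed only to keep the caller's variable
 * initialized and the generic API shape intact.
 */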
#define spin_lock_irqsave(lock, flags)			\
do {							\
	typecheck(unsigned long, flags);		\
	flags = 0;					\
	spin_lock(lock);				\
} while (0)

static __always_inline void spin_unlock(spinlock_t *lock)
{
	rt_spin_unlock(lock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
	rt_spin_unlock(lock);
	local_bh_enable();
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
	rt_spin_unlock(lock);
}

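/* 'flags' is ignored: the lock side never disabled interrupts. */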
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock,
						   unsigned long flags)
{
	rt_spin_unlock(lock);
}

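/*
 * __cond_lock() is a sparse annotation telling static analysis that
 * the lock is held only when the trylock returns non-zero.
 */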
#define spin_trylock(lock)				\
	__cond_lock(lock, rt_spin_trylock(lock))

#define spin_trylock_bh(lock)				\
	__cond_lock(lock, rt_spin_trylock_bh(lock))

#define spin_trylock_irq(lock)				\
	__cond_lock(lock, rt_spin_trylock(lock))

#define __spin_trylock_irqsave(lock, flags)		\
({							\
	int __locked;					\
							\
	typecheck(unsigned long, flags);		\
	flags = 0;					\
	__locked = spin_trylock(lock);			\
	__locked;					\
})

#define spin_trylock_irqsave(lock, flags)		\
	__cond_lock(lock, __spin_trylock_irqsave(lock, flags))

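/*
 * Trylock usage sketch (illustrative caller code, not part of this
 * header; 'my_lock' is a made-up name):
 *
 *	unsigned long flags;
 *
 *	if (spin_trylock_irqsave(&my_lock, flags)) {
 *		// 'flags' is 0 on RT; interrupts stay enabled
 *		spin_unlock_irqrestore(&my_lock, flags);
 *	}
 */

/*
 * There is no contention state worth reporting for the rtmutex based
 * substitution, hence spin_is_contended() is constant 0.
 */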
#define spin_is_contended(lock)		(((void)(lock), 0))

static inline int spin_is_locked(spinlock_t *lock)
{
	return rt_mutex_base_is_locked(&lock->lock);
}

#define assert_spin_locked(lock) BUG_ON(!spin_is_locked(lock))

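/* The rwlock_t substitution follows the same scheme: */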
#include <linux/rwlock_rt.h>

#endif