qspinlock.h

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queued spinlock
 *
 * A 'generic' spinlock implementation that is based on MCS locks. For an
 * architecture that's looking for a 'generic' spinlock, please first consider
 * ticket-lock.h and only come looking here when you've considered all the
 * constraints below and can show your hardware does actually perform better
 * with qspinlock.
 *
 * qspinlock relies on atomic_*_release()/atomic_*_acquire() to be RCsc (or no
 * weaker than RCtso if you're Power), where regular code only expects atomic_t
 * to be RCpc.
 *
 * qspinlock relies on a far greater (compared to asm-generic/spinlock.h) set
 * of atomic operations to behave well together; please audit them carefully to
 * ensure they all have forward progress. Many atomic operations may default to
 * cmpxchg() loops, which will not have good forward progress properties on
 * LL/SC architectures.
 *
 * One notable example is atomic_fetch_or_acquire(), which x86 cannot (cheaply)
 * do. Carefully read the patches that introduced
 * queued_fetch_set_pending_acquire().
 *
 * qspinlock also relies heavily on mixed-size atomic operations; specifically,
 * it requires architectures to have xchg16, something which many LL/SC
 * architectures need to implement as a 32bit and+or in order to satisfy the
 * forward progress guarantees mentioned above. (An illustrative sketch of such
 * an emulation follows the includes below.)
 *
 * Further reading on mixed-size atomics that might be relevant:
 *
 * http://www.cl.cam.ac.uk/~pes20/popl17/mixed-size.pdf
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <[email protected]>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>
#include <linux/atomic.h>
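
/*
 * Illustrative sketch only (never compiled): one way an LL/SC architecture
 * without a native 16-bit xchg might emulate it on top of a 32-bit cmpxchg
 * loop, as alluded to in the header comment above. The helper name, the
 * little-endian byte layout and the use of try_cmpxchg_relaxed() are
 * assumptions for illustration, not part of this API. The point is that the
 * retry loop can fail indefinitely under contention on the containing word,
 * which is exactly the forward-progress problem described above.
 */
#if 0
static inline u16 emulated_xchg16(u16 *ptr, u16 new)
{
	u32 *aligned = (u32 *)((unsigned long)ptr & ~0x3UL);
	int shift = ((unsigned long)ptr & 0x2) * 8;	/* assumes little-endian */
	u32 mask = 0xffffU << shift;
	u32 old, tmp;

	old = READ_ONCE(*aligned);
	do {
		/* Splice the new 16-bit value into the containing 32-bit word. */
		tmp = (old & ~mask) | ((u32)new << shift);
		/*
		 * A concurrent store to *any* part of the word makes the
		 * cmpxchg fail and forces a retry: no forward-progress
		 * guarantee.
		 */
	} while (!try_cmpxchg_relaxed(aligned, &old, tmp));

	return (old & mask) >> shift;
}
#endif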

#ifndef queued_spin_is_locked
/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
	 * isn't immediately observable.
	 */
	return atomic_read(&lock->val);
}
#endif

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 * locked wrt the lockref code, to prevent the lockref code from stealing
 * the lock and changing things underneath the lock. This also allows some
 * optimizations to be applied without conflict with lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !atomic_read(&lock.val);
}
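
/*
 * Minimal sketch (never compiled) of why the by-value check above matters to
 * lockref-style code: the fast path speculatively bumps a reference count
 * with one cmpxchg over a combined { lock, count } word, and must bail out
 * whenever the embedded lock is anything other than fully unlocked, i.e.
 * also when only waiters are queued. The struct and function names below are
 * made up for illustration; see lib/lockref.c for the real thing.
 */
#if 0
struct lockref_like {
	union {
		u64 lock_count;
		struct {
			struct qspinlock	lock;
			int			count;
		};
	};
};

/* Try to bump ->count without taking ->lock; report failure otherwise. */
static inline bool lockref_like_get_fast(struct lockref_like *lr)
{
	struct lockref_like old, new;

	old.lock_count = READ_ONCE(lr->lock_count);
	while (queued_spin_value_unlocked(old.lock)) {
		new.lock_count = old.lock_count;
		new.count++;
		/* One cmpxchg covers both the embedded lock and the count. */
		if (try_cmpxchg64_relaxed(&lr->lock_count, &old.lock_count,
					  new.lock_count))
			return true;
		/* Raced with someone; old.lock_count was refreshed, re-check. */
	}
	return false;	/* lock is held or has waiters: take the slow path */
}
#endif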

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}

/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	int val = atomic_read(&lock->val);

	/*
	 * Reading the lock word first lets a trylock on a held lock fail
	 * without issuing an atomic RMW on the contended cacheline.
	 */
	if (unlikely(val))
		return 0;

	return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
}

extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

#ifndef queued_spin_lock
/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	int val = 0;

	/* Fast path: uncontended 0 -> _Q_LOCKED_VAL transition. */
	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	/* Contended: val now holds the value observed in the lock word. */
	queued_spin_lock_slowpath(lock, val);
}
#endif

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * unlock() needs release semantics; a store-release of 0 to the
	 * locked byte is sufficient, leaving the pending/tail state in the
	 * remaining bytes untouched for the waiters.
	 */
	smp_store_release(&lock->locked, 0);
}
#endif

#ifndef virt_spin_lock
/*
 * Stub for native execution: architectures that run under a hypervisor may
 * override this to bypass the fair, queued slowpath (see the sketch below).
 */
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	return false;
}
#endif
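
/*
 * Sketch (never compiled) of what a hypervisor-aware override might look
 * like, loosely modeled on the x86 version: when paravirt spinlock support
 * is unavailable, fall back to an unfair test-and-set lock, because a fair
 * queued lock suffers badly from lock-holder (and lock-waiter) preemption.
 * The 'virt_spin_lock_key' static key is an assumed piece of platform setup,
 * not something this header provides.
 */
#if 0
#define virt_spin_lock virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	if (!static_branch_likely(&virt_spin_lock_key))
		return false;

	/*
	 * Unfair test-and-set fallback: spin until the whole lock word is
	 * clear, then try to grab it; the MCS queue is bypassed entirely.
	 */
	do {
		while (atomic_read(&lock->val))
			cpu_relax();
	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

	return true;
}
#endif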

/*
 * Remap the architecture-specific spinlock functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)
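
/*
 * Minimal usage sketch (never compiled) of the remapped API above. Real
 * kernel code goes through spinlock_t/raw_spinlock_t rather than calling
 * arch_spin_*() directly; this only illustrates what the macros map to.
 * arch_spinlock_t and __ARCH_SPIN_LOCK_UNLOCKED are expected to come from
 * asm-generic/qspinlock_types.h.
 */
#if 0
static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int demo_counter;

static void demo(void)
{
	if (arch_spin_trylock(&demo_lock)) {	/* opportunistic attempt */
		demo_counter++;
		arch_spin_unlock(&demo_lock);
		return;
	}

	arch_spin_lock(&demo_lock);		/* fast path or MCS queue */
	demo_counter++;
	arch_spin_unlock(&demo_lock);		/* store-release of ->locked */
}
#endif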

#endif /* __ASM_GENERIC_QSPINLOCK_H */