simple_spinlock.h

/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_SIMPLE_SPINLOCK_H
#define _ASM_POWERPC_SIMPLE_SPINLOCK_H

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <[email protected]>, IBM
 * Copyright (C) 2001 Anton Blanchard <[email protected]>, IBM
 * Copyright (C) 2002 Dave Engebretsen <[email protected]>, IBM
 *	Rework to support virtual processors
 *
 * The lock word is an int, as a full 64-bit word is not necessary.
 *
 * (the type definitions are in asm/simple_spinlock_types.h)
 */
#include <linux/irqflags.h>
#include <asm/paravirt.h>
#include <asm/paca.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif
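
/*
 * Added note: on 64-bit, the paca keeps a constant 0x8000 lock_token next to
 * the 16-bit paca_index, ordered so that a single 32-bit load (from
 * lock_token on big-endian, from paca_index on little-endian) gives 0x8000
 * in the upper halfword and the CPU index in the lower halfword, i.e. the
 * 0x800000yy token described above.  (Field layout detail lives in
 * asm/paca.h; this is explanatory commentary only.)
 */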

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
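/*
 * Implementation note (added commentary): the lwarx/stwcx. pair below forms
 * the usual load-reserve/store-conditional retry loop; the stwcx. only
 * succeeds if nothing else has written the lock word since the lwarx.
 * PPC_ACQUIRE_BARRIER keeps the critical section from being reordered ahead
 * of the acquisition, and the trailing ,%[eh] operand is the lwarx
 * "exclusive access" hint, which is only enabled on 64-bit builds here.
 */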
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;
	unsigned int eh = IS_ENABLED(CONFIG_PPC64);

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	lwarx		%0,0,%2,%[eh]\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock), [eh] "n" (eh)
	: "cr0", "memory");

	return tmp;
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __arch_spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor. Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held. Conveniently, we have a word in the paca that holds this
 * value.
 */

#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
void splpar_spin_yield(arch_spinlock_t *lock);
void splpar_rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
static inline void splpar_spin_yield(arch_spinlock_t *lock) {}
static inline void splpar_rw_yield(arch_rwlock_t *lock) {}
#endif
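
/*
 * Added note: spin_yield()/rw_yield() only call into the hypervisor when
 * the guest is running on shared (multiplexed) processors; otherwise they
 * are just a compiler barrier, so spinning stays a plain busy-wait.
 */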
static inline void spin_yield(arch_spinlock_t *lock)
{
	if (is_shared_processor())
		splpar_spin_yield(lock);
	else
		barrier();
}

static inline void rw_yield(arch_rwlock_t *lock)
{
	if (is_shared_processor())
		splpar_rw_yield(lock);
	else
		barrier();
}
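
/*
 * Added note: on failure to acquire, the loop below drops the hardware SMT
 * thread priority (HMT_low) while it polls the lock word, optionally yields
 * the timeslice to the lock holder on shared-processor LPARs, and restores
 * medium priority (HMT_medium) before retrying the atomic trylock.
 */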
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}
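
/*
 * Added note: PPC_RELEASE_BARRIER (defined in asm/synch.h, typically lwsync
 * on 64-bit server CPUs) orders everything done inside the critical section
 * before the plain store that clears the lock word, so no store-conditional
 * is needed to release a simple spinlock.
 */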
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}
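
/*
 * A minimal usage sketch (added for illustration, never compiled): callers
 * normally reach these helpers through the generic spin_lock()/spin_unlock()
 * wrappers rather than using the arch_* API directly.  The initializer
 * __ARCH_SPIN_LOCK_UNLOCKED comes from asm/simple_spinlock_types.h.
 */
#if 0
static arch_spinlock_t example_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void example_critical_section(void)
{
	if (arch_spin_trylock(&example_lock)) {
		/* fast path: got the lock without spinning */
		arch_spin_unlock(&example_lock);
		return;
	}
	arch_spin_lock(&example_lock);	/* spin (and maybe yield) until held */
	arch_spin_unlock(&example_lock);
}
#endif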

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif
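
/*
 * Added note on the rwlock word (derived from the code below): 0 means
 * unlocked, a positive value is the number of active readers, and a
 * negative value (WRLOCK_TOKEN) means write-held.  Readers bump the count
 * with an atomic increment; a writer swaps in its negative token only when
 * the word is exactly 0.
 */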

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
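/*
 * Implementation note (added commentary): on 64-bit, __DO_SIGN_EXTEND
 * sign-extends the 32-bit lock word into the 64-bit register so that a
 * write-held (negative) value is still seen as negative; addic. then adds 1
 * and the ble- bails out without storing whenever the result is <= 0,
 * i.e. whenever a writer holds the lock.
 */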
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;
	unsigned int eh = IS_ENABLED(CONFIG_PPC64);

	__asm__ __volatile__(
"1:	lwarx		%0,0,%1,%[eh]\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock), [eh] "n" (eh)
	: "cr0", "xer", "memory");

	return tmp;
}

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;
	unsigned int eh = IS_ENABLED(CONFIG_PPC64);

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	lwarx		%0,0,%2,%[eh]\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock), [eh] "n" (eh)
	: "cr0", "memory");

	return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}
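
/*
 * Added note: releasing a read lock cannot be a plain store, since other
 * readers may still hold the lock; the count is decremented with its own
 * lwarx/stwcx. retry loop, behind a release barrier.
 */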
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}
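
/*
 * Added note: the relax hooks below are what generic locking code calls
 * while it polls a contended lock; on shared-processor LPARs they give the
 * rest of the timeslice to the lock holder instead of burning cycles.
 */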
#define arch_spin_relax(lock)	spin_yield(lock)
#define arch_read_relax(lock)	rw_yield(lock)
#define arch_write_relax(lock)	rw_yield(lock)
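
/*
 * A minimal rwlock usage sketch (added for illustration, never compiled):
 * many concurrent readers may hold the lock, but a writer is exclusive.
 * __ARCH_RW_LOCK_UNLOCKED comes from asm/simple_spinlock_types.h.
 */
#if 0
static arch_rwlock_t example_rwlock = __ARCH_RW_LOCK_UNLOCKED;

static void example_reader(void)
{
	arch_read_lock(&example_rwlock);	/* lock word becomes > 0 */
	/* ... read shared data ... */
	arch_read_unlock(&example_rwlock);
}

static void example_writer(void)
{
	arch_write_lock(&example_rwlock);	/* lock word becomes WRLOCK_TOKEN */
	/* ... modify shared data ... */
	arch_write_unlock(&example_rwlock);
}
#endif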

#endif /* _ASM_POWERPC_SIMPLE_SPINLOCK_H */