spinlock.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>

#define arch_spin_is_locked(x)  ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)

#ifdef CONFIG_ARC_HAS_LLSC

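/*
 * LLOCK/SCOND based primitives: LLOCK loads the lock word and starts an
 * exclusive-access sequence, SCOND stores back only if no other observer
 * wrote the location in between; the BNZ after each SCOND retries the
 * whole sequence when the conditional store failed.
 *
 * Rough, illustrative C-level sketch of the acquire loop below
 * (store_conditional() is just a stand-in for the LLOCK/SCOND pair,
 * not an actual helper):
 *
 *	retry:
 *		if (lock->slock == __ARCH_SPIN_LOCK_LOCKED__)
 *			goto retry;		// spin while LOCKED
 *		if (!store_conditional(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__))
 *			goto retry;		// lost the race, redo
 */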
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned int val;

        __asm__ __volatile__(
        "1:     llock   %[val], [%[slock]]      \n"
        "       breq    %[val], %[LOCKED], 1b   \n"     /* spin while LOCKED */
        "       scond   %[LOCKED], [%[slock]]   \n"     /* acquire */
        "       bnz     1b                      \n"
        "                                       \n"
        : [val]         "=&r"   (val)
        : [slock]       "r"     (&(lock->slock)),
          [LOCKED]      "r"     (__ARCH_SPIN_LOCK_LOCKED__)
        : "memory", "cc");

        /*
         * ACQUIRE barrier to ensure load/store after taking the lock
         * don't "bleed-up" out of the critical section (leak-in is allowed)
         * http://www.spinics.net/lists/kernel/msg2010409.html
         *
         * ARCv2 only has load-load, store-store and all-all barrier
         * thus need the full all-all barrier
         */
        smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned int val, got_it = 0;

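        /*
         * Illustrative C-level sketch of the loop below (store_conditional()
         * is just a stand-in for the LLOCK/SCOND pair, not an actual helper):
         *
         *	retry:
         *		if (lock->slock == __ARCH_SPIN_LOCK_LOCKED__)
         *			goto done;	// already LOCKED: got_it stays 0
         *		if (!store_conditional(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__))
         *			goto retry;	// collided with another locker, redo
         *		got_it = 1;
         *	done:
         */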
        __asm__ __volatile__(
        "1:     llock   %[val], [%[slock]]      \n"
        "       breq    %[val], %[LOCKED], 4f   \n"     /* already LOCKED, just bail */
        "       scond   %[LOCKED], [%[slock]]   \n"     /* acquire */
        "       bnz     1b                      \n"
        "       mov     %[got_it], 1            \n"
        "4:                                     \n"
        "                                       \n"
        : [val]         "=&r"   (val),
          [got_it]      "+&r"   (got_it)
        : [slock]       "r"     (&(lock->slock)),
          [LOCKED]      "r"     (__ARCH_SPIN_LOCK_LOCKED__)
        : "memory", "cc");

        smp_mb();

        return got_it;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        smp_mb();

        WRITE_ONCE(lock->slock, __ARCH_SPIN_LOCK_UNLOCKED__);
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 */

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned int val;

        /*
         * zero means writer holds the lock exclusively, deny Reader.
         * Otherwise grant lock to first/subseq reader
         *
         *	if (rw->counter > 0) {
         *		rw->counter--;
         *		ret = 1;
         *	}
         */
        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brls    %[val], %[WR_LOCKED], 1b\n"     /* <= 0: spin while write locked */
        "       sub     %[val], %[val], 1       \n"     /* reader lock */
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"
        "                                       \n"
        : [val]         "=&r"   (val)
        : [rwlock]      "r"     (&(rw->counter)),
          [WR_LOCKED]   "ir"    (0)
        : "memory", "cc");

        smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        unsigned int val, got_it = 0;

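        /*
         * Same protocol as arch_read_lock() above, except that a write locked
         * rwlock makes us bail out with got_it == 0 instead of spinning; a
         * failed SCOND (collision with another update of the counter) is
         * still retried.
         */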
        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brls    %[val], %[WR_LOCKED], 4f\n"     /* <= 0: already write locked, bail */
        "       sub     %[val], %[val], 1       \n"     /* counter-- */
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"     /* retry if collided with someone */
        "       mov     %[got_it], 1            \n"
        "                                       \n"
        "4: ; --- done ---                      \n"
        : [val]         "=&r"   (val),
          [got_it]      "+&r"   (got_it)
        : [rwlock]      "r"     (&(rw->counter)),
          [WR_LOCKED]   "ir"    (0)
        : "memory", "cc");

        smp_mb();

        return got_it;
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        unsigned int val;

        /*
         * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
         * deny writer. Otherwise if unlocked grant to writer
         * Hence the claim that Linux rwlocks are unfair to writers.
         * (can be starved for an indefinite time by readers).
         *
         *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
         *		rw->counter = 0;
         *		ret = 1;
         *	}
         */
        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brne    %[val], %[UNLOCKED], 1b \n"     /* while !UNLOCKED spin */
        "       mov     %[val], %[WR_LOCKED]    \n"
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"
        "                                       \n"
        : [val]         "=&r"   (val)
        : [rwlock]      "r"     (&(rw->counter)),
          [UNLOCKED]    "ir"    (__ARCH_RW_LOCK_UNLOCKED__),
          [WR_LOCKED]   "ir"    (0)
        : "memory", "cc");

        smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        unsigned int val, got_it = 0;

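        /*
         * Same protocol as arch_write_lock() above, except that a lock which
         * is not in the UNLOCKED state (held by readers or a writer) makes us
         * bail out with got_it == 0 instead of spinning.
         */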
        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brne    %[val], %[UNLOCKED], 4f \n"     /* !UNLOCKED, bail */
        "       mov     %[val], %[WR_LOCKED]    \n"
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"     /* retry if collided with someone */
        "       mov     %[got_it], 1            \n"
        "                                       \n"
        "4: ; --- done ---                      \n"
        : [val]         "=&r"   (val),
          [got_it]      "+&r"   (got_it)
        : [rwlock]      "r"     (&(rw->counter)),
          [UNLOCKED]    "ir"    (__ARCH_RW_LOCK_UNLOCKED__),
          [WR_LOCKED]   "ir"    (0)
        : "memory", "cc");

        smp_mb();

        return got_it;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned int val;

        smp_mb();

        /*
         * rw->counter++;
         */
        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       add     %[val], %[val], 1       \n"
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"
        "                                       \n"
        : [val]         "=&r"   (val)
        : [rwlock]      "r"     (&(rw->counter))
        : "memory", "cc");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        smp_mb();

        WRITE_ONCE(rw->counter, __ARCH_RW_LOCK_UNLOCKED__);
}

#else   /* !CONFIG_ARC_HAS_LLSC */

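/*
 * Cores without LLOCK/SCOND fall back on the atomic EX (exchange)
 * instruction: EX unconditionally swaps a register with a memory location,
 * so LOCKED is always written and the old value fetched back tells us
 * whether the lock was actually free.
 *
 * Rough, illustrative C-level sketch of the acquire loop below
 * (atomic_exchange() is just a stand-in for the EX instruction, not an
 * actual helper):
 *
 *	do {
 *		val = atomic_exchange(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__);
 *	} while (val == __ARCH_SPIN_LOCK_LOCKED__);
 */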
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

        /*
         * Per lkmm, smp_mb() is only required after _lock (and before_unlock)
         * for ACQ and REL semantics respectively. However EX based spinlocks
         * need the extra smp_mb to workaround a hardware quirk.
         */
        smp_mb();

        __asm__ __volatile__(
        "1:     ex  %0, [%1]            \n"
        "       breq  %0, %2, 1b        \n"
        : "+&r" (val)
        : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
        : "memory");

        smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

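        /*
         * A single EX suffices for trylock: LOCKED is unconditionally swapped
         * into the lock word, and the old value fetched back tells us whether
         * the lock was free (UNLOCKED) or already held, so no retry loop is
         * needed.
         */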
        smp_mb();

        __asm__ __volatile__(
        "1:     ex  %0, [%1]            \n"
        : "+r" (val)
        : "r"(&(lock->slock))
        : "memory");

        smp_mb();

        return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;

        /*
         * RELEASE barrier: given the instructions avail on ARCv2, full barrier
         * is the only option
         */
        smp_mb();

        /*
         * EX is not really required here, a simple STore of 0 suffices.
         * However this causes tasklist livelocks in SystemC based SMP virtual
         * platforms where the systemc core scheduler uses EX as a cue for
         * moving to next core. Do a git log of this file for details
         */
        __asm__ __volatile__(
        "       ex  %0, [%1]            \n"
        : "+r" (val)
        : "r"(&(lock->slock))
        : "memory");

        /*
         * see pairing version/comment in arch_spin_lock above
         */
        smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 */

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        int ret = 0;
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));

        /*
         * zero means writer holds the lock exclusively, deny Reader.
         * Otherwise grant lock to first/subseq reader
         */
        if (rw->counter > 0) {
                rw->counter--;
                ret = 1;
        }

        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);

        return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        int ret = 0;
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));

        /*
         * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
         * deny writer. Otherwise if unlocked grant to writer
         * Hence the claim that Linux rwlocks are unfair to writers.
         * (can be starved for an indefinite time by readers).
         */
        if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
                rw->counter = 0;
                ret = 1;
        }

        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);

        return ret;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        while (!arch_read_trylock(rw))
                cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        while (!arch_write_trylock(rw))
                cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));
        rw->counter++;
        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));
        rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);
}

#endif

#endif /* __ASM_SPINLOCK_H */