/* SPDX-License-Identifier: GPL-2.0
 *
 * include/asm-sh/spinlock-llsc.h
 *
 * Copyright (C) 2002, 2003 Paul Mundt
 * Copyright (C) 2006, 2007 Akio Idehara
 */
#ifndef __ASM_SH_SPINLOCK_LLSC_H
#define __ASM_SH_SPINLOCK_LLSC_H

#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */
#define arch_spin_is_locked(x)	((x)->lock <= 0)
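
/*
 * Lock word encoding used below: arch_spin_unlock() stores 1 (free) and
 * arch_spin_lock() stores 0 (held), which is why "locked" is tested as
 * a value <= 0.
 */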

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  They have a cost.
 */
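
/*
 * All of the locks below rely on the SH-4A LL/SC pair: movli.l loads the
 * lock word and sets the link (LDST) flag, and movco.l stores back only if
 * the link is still intact, setting T on success.  "bf 1b" therefore
 * retries the whole sequence whenever another CPU touched the word in
 * between.  The "z" constraint pins the temporary to r0, the only register
 * movli.l/movco.l operate on.
 */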
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	unsigned long oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l @%2, %0 ! arch_spin_lock		\n\t" /* load-linked: read lock word, set link */
		"mov     %0, %1					\n\t" /* remember the old value */
		"mov     #0, %0					\n\t"
		"movco.l %0, @%2				\n\t" /* store-conditional: write 0 (locked) */
		"bf      1b					\n\t" /* SC failed, retry */
		"cmp/pl  %1					\n\t" /* was the lock free (old value > 0)? */
		"bf      1b					\n\t" /* no, it was held: spin */
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&lock->lock)
		: "t", "memory"
	);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	/* This could be optimised with ARCH_HAS_MMIOWB */
	mmiowb();

	__asm__ __volatile__ (
		"mov     #1, %0 ! arch_spin_unlock		\n\t"
		"mov.l   %0, @%1				\n\t"
		: "=&z" (tmp)
		: "r" (&lock->lock)
		: "t", "memory"
	);
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l @%2, %0 ! arch_spin_trylock		\n\t" /* load-linked: read lock word */
		"mov     %0, %1					\n\t" /* remember the old value */
		"mov     #0, %0					\n\t"
		"movco.l %0, @%2				\n\t" /* unconditionally store 0 (locked) */
		"bf      1b					\n\t" /* retry only if the SC failed */
		"synco						\n\t" /* memory barrier */
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&lock->lock)
		: "t", "memory"
	);

	/* Non-zero (the lock was free) means we now own it. */
	return oldval;
}
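
/*
 * Illustrative sketch: callers normally reach arch_spin_trylock() through
 * the generic spin_trylock() wrapper; "example_lock" below is hypothetical.
 *
 *	static DEFINE_SPINLOCK(example_lock);
 *
 *	if (spin_trylock(&example_lock)) {
 *		... critical section ...
 *		spin_unlock(&example_lock);
 *	}
 */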

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
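
/*
 * The rwlock word is a biased counter: it starts at RW_LOCK_BIAS, each
 * reader subtracts 1 and a writer subtracts the full bias, so the lock is
 * free iff the value equals RW_LOCK_BIAS.  Readers may enter while the
 * value is still positive; a writer waits until the full bias is present.
 *
 * Illustrative sketch of the irq-safe mix described above, using the
 * generic rwlock API; "example_rwlock" and both callers are hypothetical:
 *
 *	static DEFINE_RWLOCK(example_rwlock);
 *
 *	void reader_may_run_in_irq(void)
 *	{
 *		read_lock(&example_rwlock);
 *		... read-side section ...
 *		read_unlock(&example_rwlock);
 *	}
 *
 *	void writer_in_process_context(void)
 *	{
 *		unsigned long flags;
 *
 *		write_lock_irqsave(&example_rwlock, flags);
 *		... write-side section ...
 *		write_unlock_irqrestore(&example_rwlock, flags);
 *	}
 */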

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l @%1, %0 ! arch_read_lock		\n\t"
		"cmp/pl  %0					\n\t" /* count > 0, i.e. no writer? */
		"bf      1b					\n\t" /* writer active: spin */
		"add     #-1, %0				\n\t" /* take one reader slot */
		"movco.l %0, @%1				\n\t"
		"bf      1b					\n\t" /* SC failed, retry */
		: "=&z" (tmp)
		: "r" (&rw->lock)
		: "t", "memory"
	);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l @%1, %0 ! arch_read_unlock		\n\t"
		"add     #1, %0					\n\t"
		"movco.l %0, @%1				\n\t"
		"bf      1b					\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock)
		: "t", "memory"
	);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l @%1, %0 ! arch_write_lock		\n\t"
		"cmp/hs  %2, %0					\n\t" /* free, i.e. count >= RW_LOCK_BIAS? */
		"bf      1b					\n\t" /* readers or a writer active: spin */
		"sub     %2, %0					\n\t" /* claim the whole bias */
		"movco.l %0, @%1				\n\t"
		"bf      1b					\n\t" /* SC failed, retry */
		: "=&z" (tmp)
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__ (
		"mov.l   %1, @%0 ! arch_write_unlock		\n\t"
		:
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l @%2, %0 ! arch_read_trylock		\n\t"
		"mov     %0, %1					\n\t" /* remember the old count */
		"cmp/pl  %0					\n\t" /* count > 0, i.e. no writer? */
		"bf      2f					\n\t" /* writer active: give up, skip the store */
		"add     #-1, %0				\n\t" /* take one reader slot */
		"movco.l %0, @%2				\n\t"
		"bf      1b					\n\t" /* SC failed, retry */
		"2:						\n\t"
		"synco						\n\t" /* memory barrier */
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&rw->lock)
		: "t", "memory"
	);

	/* Non-zero iff the read lock was acquired. */
	return (oldval > 0);
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l @%2, %0 ! arch_write_trylock		\n\t"
		"mov     %0, %1					\n\t" /* remember the old count */
		"cmp/hs  %3, %0					\n\t" /* free, i.e. count >= RW_LOCK_BIAS? */
		"bf      2f					\n\t" /* busy: store the count back unchanged */
		"sub     %3, %0					\n\t" /* claim the whole bias */
		"2:						\n\t"
		"movco.l %0, @%2				\n\t"
		"bf      1b					\n\t" /* SC failed, retry */
		"synco						\n\t" /* memory barrier */
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);

	/* Non-zero iff the lock was free and the write lock was taken. */
	return (oldval > (RW_LOCK_BIAS - 1));
}

#endif /* __ASM_SH_SPINLOCK_LLSC_H */