atomic.h

/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_ATOMIC_H
#define __ASM_CSKY_ATOMIC_H

#ifdef CONFIG_SMP
#include <asm-generic/atomic64.h>

#include <asm/cmpxchg.h>
#include <asm/barrier.h>

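/*
 * Hook the generic acquire/release fallbacks up to the C-SKY barrier
 * helpers; presumably bar.brarw / bar.brwaw (see asm/barrier.h) provide
 * the load-acquire and store-release style orderings the generic atomic
 * code expects.
 */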
#define __atomic_acquire_fence()	__bar_brarw()
#define __atomic_release_fence()	__bar_brwaw()

static __always_inline int arch_atomic_read(const atomic_t *v)
{
	return READ_ONCE(v->counter);
}
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}

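/*
 * Non-value-returning atomic ops.  Each one is a ldex.w/stex.w
 * (load-exclusive/store-exclusive) retry loop: stex.w writes a success
 * flag back into its source register, and the bez retries the whole
 * sequence until the store succeeds.
 */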
#define ATOMIC_OP(op)							\
static __always_inline							\
void arch_atomic_##op(int i, atomic_t *v)				\
{									\
	unsigned long tmp;						\
	__asm__ __volatile__ (						\
	"1:	ldex.w		%0, (%2)	\n"			\
	"	" #op "		%0, %1		\n"			\
	"	stex.w		%0, (%2)	\n"			\
	"	bez		%0, 1b		\n"			\
	: "=&r" (tmp)							\
	: "r" (i), "r" (&v->counter)					\
	: "memory");							\
}

ATOMIC_OP(add)
ATOMIC_OP(sub)
ATOMIC_OP(and)
ATOMIC_OP( or)
ATOMIC_OP(xor)

#undef ATOMIC_OP

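/*
 * Relaxed fetch-and-op: the same retry loop, but the old value is kept
 * in a second register and returned.  Acquire/release/fully-ordered
 * variants are built from these by the generic fallbacks using the
 * fence macros above.
 */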
#define ATOMIC_FETCH_OP(op)						\
static __always_inline							\
int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)		\
{									\
	register int ret, tmp;						\
	__asm__ __volatile__ (						\
	"1:	ldex.w		%0, (%3)	\n"			\
	"	mov		%1, %0		\n"			\
	"	" #op "		%0, %2		\n"			\
	"	stex.w		%0, (%3)	\n"			\
	"	bez		%0, 1b		\n"			\
	: "=&r" (tmp), "=&r" (ret)					\
	: "r" (i), "r" (&v->counter)					\
	: "memory");							\
	return ret;							\
}

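/* The *_return variants are derived from the fetched old value. */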
#define ATOMIC_OP_RETURN(op, c_op)					\
static __always_inline							\
int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)		\
{									\
	return arch_atomic_fetch_##op##_relaxed(i, v) c_op i;		\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_FETCH_OP(op)						\
	ATOMIC_OP_RETURN(op, c_op)

ATOMIC_OPS(add, +)
ATOMIC_OPS(sub, -)

#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed

#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN

#define ATOMIC_OPS(op)							\
	ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS( or)
ATOMIC_OPS(xor)

#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP

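/*
 * Conditional add: add @a to @v unless it already equals @u, returning
 * the old value.  The RELEASE_FENCE/FULL_FENCE strings bracketing the
 * LL/SC loop are assumed to give the successful case the full ordering
 * that the generic atomic_fetch_add_unless() requires.
 */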
static __always_inline int
arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int prev, tmp;

	__asm__ __volatile__ (
		RELEASE_FENCE
		"1:	ldex.w		%0, (%3)	\n"
		"	cmpne		%0, %4		\n"
		"	bf		2f		\n"
		"	mov		%1, %0		\n"
		"	add		%1, %2		\n"
		"	stex.w		%1, (%3)	\n"
		"	bez		%1, 1b		\n"
		FULL_FENCE
		"2:\n"
		: "=&r" (prev), "=&r" (tmp)
		: "r" (a), "r" (&v->counter), "r" (u)
		: "memory");

	return prev;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless

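/*
 * Increment @v unless it is negative.  rc is the "did we increment"
 * flag: it is cleared before the sign check and only set once the
 * store is attempted, and it is what the caller gets back.
 */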
static __always_inline bool
arch_atomic_inc_unless_negative(atomic_t *v)
{
	int rc, tmp;

	__asm__ __volatile__ (
		RELEASE_FENCE
		"1:	ldex.w		%0, (%2)	\n"
		"	movi		%1, 0		\n"
		"	blz		%0, 2f		\n"
		"	movi		%1, 1		\n"
		"	addi		%0, 1		\n"
		"	stex.w		%0, (%2)	\n"
		"	bez		%0, 1b		\n"
		FULL_FENCE
		"2:\n"
		: "=&r" (tmp), "=&r" (rc)
		: "r" (&v->counter)
		: "memory");

	return rc ? true : false;
}
#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative

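/* Mirror image of the above: decrement @v unless it is positive. */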
static __always_inline bool
arch_atomic_dec_unless_positive(atomic_t *v)
{
	int rc, tmp;

	__asm__ __volatile__ (
		RELEASE_FENCE
		"1:	ldex.w		%0, (%2)	\n"
		"	movi		%1, 0		\n"
		"	bhz		%0, 2f		\n"
		"	movi		%1, 1		\n"
		"	subi		%0, 1		\n"
		"	stex.w		%0, (%2)	\n"
		"	bez		%0, 1b		\n"
		FULL_FENCE
		"2:\n"
		: "=&r" (tmp), "=&r" (rc)
		: "r" (&v->counter)
		: "memory");

	return rc ? true : false;
}
#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive

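/*
 * Decrement @v only if the result stays non-negative.  Whether or not
 * the store happens, the caller gets the old value minus one, matching
 * the generic atomic_dec_if_positive() contract.
 */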
static __always_inline int
arch_atomic_dec_if_positive(atomic_t *v)
{
	int dec, tmp;

	__asm__ __volatile__ (
		RELEASE_FENCE
		"1:	ldex.w		%0, (%2)	\n"
		"	subi		%1, %0, 1	\n"
		"	blz		%1, 2f		\n"
		"	stex.w		%1, (%2)	\n"
		"	bez		%1, 1b		\n"
		FULL_FENCE
		"2:\n"
		: "=&r" (dec), "=&r" (tmp)
		: "r" (&v->counter)
		: "memory");

	return dec - 1;
}
#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive

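/*
 * xchg/cmpxchg on the counter word simply reuse the 4-byte helpers
 * from asm/cmpxchg.h.
 */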
#define ATOMIC_OP()							\
static __always_inline							\
int arch_atomic_xchg_relaxed(atomic_t *v, int n)			\
{									\
	return __xchg_relaxed(n, &(v->counter), 4);			\
}									\
static __always_inline							\
int arch_atomic_cmpxchg_relaxed(atomic_t *v, int o, int n)		\
{									\
	return __cmpxchg_relaxed(&(v->counter), o, n, 4);		\
}									\
static __always_inline							\
int arch_atomic_cmpxchg_acquire(atomic_t *v, int o, int n)		\
{									\
	return __cmpxchg_acquire(&(v->counter), o, n, 4);		\
}									\
static __always_inline							\
int arch_atomic_cmpxchg(atomic_t *v, int o, int n)			\
{									\
	return __cmpxchg(&(v->counter), o, n, 4);			\
}

#define ATOMIC_OPS()							\
	ATOMIC_OP()

ATOMIC_OPS()

#define arch_atomic_xchg_relaxed	arch_atomic_xchg_relaxed
#define arch_atomic_cmpxchg_relaxed	arch_atomic_cmpxchg_relaxed
#define arch_atomic_cmpxchg_acquire	arch_atomic_cmpxchg_acquire
#define arch_atomic_cmpxchg		arch_atomic_cmpxchg

#undef ATOMIC_OPS
#undef ATOMIC_OP

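/* UP kernels (!CONFIG_SMP) fall back to the generic atomics below. */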
#else
#include <asm-generic/atomic.h>
#endif

#endif /* __ASM_CSKY_ATOMIC_H */