/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */
/*
 * To ensure dependency ordering is preserved for the _relaxed and
 * _release atomics, an smp_mb() is unconditionally inserted into the
 * _relaxed variants, which are used to build the barriered versions.
 * Avoid redundant back-to-back fences in the _acquire and _fence
 * versions.
 */
#define __atomic_acquire_fence()
#define __atomic_post_full_fence()
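
/*
 * Illustration only (not part of this header): the generic atomic fallback
 * layer builds the fully ordered operations from the _relaxed variants
 * defined below, roughly along these lines (a simplified sketch of the
 * generated fallbacks, not the exact generated code):
 *
 *	static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
 *	{
 *		int ret;
 *		__atomic_pre_full_fence();
 *		ret = arch_atomic_add_return_relaxed(i, v);
 *		__atomic_post_full_fence();
 *		return ret;
 *	}
 *
 * Because the _relaxed variants here already end in smp_mb(), the
 * __atomic_acquire_fence()/__atomic_post_full_fence() hooks above can be
 * left empty without losing ordering.
 */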

#define ATOMIC64_INIT(i) { (i) }

#define arch_atomic_read(v) READ_ONCE((v)->counter)
#define arch_atomic64_read(v) READ_ONCE((v)->counter)

#define arch_atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
#define arch_atomic64_set(v,i) WRITE_ONCE((v)->counter, (i))

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */
#define ATOMIC_OP(op, asm_op) \
static __inline__ void arch_atomic_##op(int i, atomic_t * v) \
{ \
	unsigned long temp; \
	__asm__ __volatile__( \
	"1: ldl_l %0,%1\n" \
	" " #asm_op " %0,%2,%0\n" \
	" stl_c %0,%1\n" \
	" beq %0,2f\n" \
	".subsection 2\n" \
	"2: br 1b\n" \
	".previous" \
	:"=&r" (temp), "=m" (v->counter) \
	:"Ir" (i), "m" (v->counter)); \
}

#define ATOMIC_OP_RETURN(op, asm_op) \
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
	long temp, result; \
	__asm__ __volatile__( \
	"1: ldl_l %0,%1\n" \
	" " #asm_op " %0,%3,%2\n" \
	" " #asm_op " %0,%3,%0\n" \
	" stl_c %0,%1\n" \
	" beq %0,2f\n" \
	".subsection 2\n" \
	"2: br 1b\n" \
	".previous" \
	:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
	:"Ir" (i), "m" (v->counter) : "memory"); \
	smp_mb(); \
	return result; \
}

#define ATOMIC_FETCH_OP(op, asm_op) \
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
	long temp, result; \
	__asm__ __volatile__( \
	"1: ldl_l %2,%1\n" \
	" " #asm_op " %2,%3,%0\n" \
	" stl_c %0,%1\n" \
	" beq %0,2f\n" \
	".subsection 2\n" \
	"2: br 1b\n" \
	".previous" \
	:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
	:"Ir" (i), "m" (v->counter) : "memory"); \
	smp_mb(); \
	return result; \
}

#define ATOMIC64_OP(op, asm_op) \
static __inline__ void arch_atomic64_##op(s64 i, atomic64_t * v) \
{ \
	s64 temp; \
	__asm__ __volatile__( \
	"1: ldq_l %0,%1\n" \
	" " #asm_op " %0,%2,%0\n" \
	" stq_c %0,%1\n" \
	" beq %0,2f\n" \
	".subsection 2\n" \
	"2: br 1b\n" \
	".previous" \
	:"=&r" (temp), "=m" (v->counter) \
	:"Ir" (i), "m" (v->counter)); \
}

#define ATOMIC64_OP_RETURN(op, asm_op) \
static __inline__ s64 \
arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
{ \
	s64 temp, result; \
	__asm__ __volatile__( \
	"1: ldq_l %0,%1\n" \
	" " #asm_op " %0,%3,%2\n" \
	" " #asm_op " %0,%3,%0\n" \
	" stq_c %0,%1\n" \
	" beq %0,2f\n" \
	".subsection 2\n" \
	"2: br 1b\n" \
	".previous" \
	:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
	:"Ir" (i), "m" (v->counter) : "memory"); \
	smp_mb(); \
	return result; \
}

#define ATOMIC64_FETCH_OP(op, asm_op) \
static __inline__ s64 \
arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
{ \
	s64 temp, result; \
	__asm__ __volatile__( \
	"1: ldq_l %2,%1\n" \
	" " #asm_op " %2,%3,%0\n" \
	" stq_c %0,%1\n" \
	" beq %0,2f\n" \
	".subsection 2\n" \
	"2: br 1b\n" \
	".previous" \
	:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
	:"Ir" (i), "m" (v->counter) : "memory"); \
	smp_mb(); \
	return result; \
}

#define ATOMIC_OPS(op) \
	ATOMIC_OP(op, op##l) \
	ATOMIC_OP_RETURN(op, op##l) \
	ATOMIC_FETCH_OP(op, op##l) \
	ATOMIC64_OP(op, op##q) \
	ATOMIC64_OP_RETURN(op, op##q) \
	ATOMIC64_FETCH_OP(op, op##q)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)
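
/*
 * For reference, ATOMIC_OPS(add) above expands to the 32-bit helpers
 * arch_atomic_add(), arch_atomic_add_return_relaxed() and
 * arch_atomic_fetch_add_relaxed() built on the addl instruction, plus the
 * matching arch_atomic64_*() helpers built on addq; ATOMIC_OPS(sub) does
 * the same with subl/subq.
 */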

#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed

#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed

#define arch_atomic_andnot arch_atomic_andnot
#define arch_atomic64_andnot arch_atomic64_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm) \
	ATOMIC_OP(op, asm) \
	ATOMIC_FETCH_OP(op, asm) \
	ATOMIC64_OP(op, asm) \
	ATOMIC64_FETCH_OP(op, asm)

ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, bis)
ATOMIC_OPS(xor, xor)

#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed

#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define arch_atomic64_cmpxchg(v, old, new) \
	(arch_cmpxchg(&((v)->counter), old, new))
#define arch_atomic64_xchg(v, new) \
	(arch_xchg(&((v)->counter), new))

#define arch_atomic_cmpxchg(v, old, new) \
	(arch_cmpxchg(&((v)->counter), old, new))
#define arch_atomic_xchg(v, new) \
	(arch_xchg(&((v)->counter), new))

/**
 * arch_atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int c, new, old;
	smp_mb();
	__asm__ __volatile__(
	"1: ldl_l %[old],%[mem]\n"
	" cmpeq %[old],%[u],%[c]\n"
	" addl %[old],%[a],%[new]\n"
	" bne %[c],2f\n"
	" stl_c %[new],%[mem]\n"
	" beq %[new],3f\n"
	"2:\n"
	".subsection 2\n"
	"3: br 1b\n"
	".previous"
	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
	: "memory");
	smp_mb();
	return old;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
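
/*
 * Illustrative use (the field name is hypothetical): callers normally reach
 * this helper through the generic atomic_fetch_add_unless() or
 * atomic_inc_not_zero() wrappers, e.g. to take a reference only while the
 * count is still nonzero:
 *
 *	if (atomic_fetch_add_unless(&obj->refcnt, 1, 0) == 0)
 *		return NULL;	// object is already on its way to being freed
 */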

/**
 * arch_atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 c, new, old;
	smp_mb();
	__asm__ __volatile__(
	"1: ldq_l %[old],%[mem]\n"
	" cmpeq %[old],%[u],%[c]\n"
	" addq %[old],%[a],%[new]\n"
	" bne %[c],2f\n"
	" stq_c %[new],%[mem]\n"
	" beq %[new],3f\n"
	"2:\n"
	".subsection 2\n"
	"3: br 1b\n"
	".previous"
	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
	: "memory");
	smp_mb();
	return old;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless

/*
 * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 old, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1: ldq_l %[old],%[mem]\n"
	" subq %[old],1,%[tmp]\n"
	" ble %[old],2f\n"
	" stq_c %[tmp],%[mem]\n"
	" beq %[tmp],3f\n"
	"2:\n"
	".subsection 2\n"
	"3: br 1b\n"
	".previous"
	: [old] "=&r"(old), [tmp] "=&r"(tmp)
	: [mem] "m"(*v)
	: "memory");
	smp_mb();
	return old - 1;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
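
/*
 * Illustrative use (the field name is hypothetical): through the generic
 * atomic64_dec_if_positive() wrapper, callers can consume a counted
 * resource only while the count is still positive:
 *
 *	if (atomic64_dec_if_positive(&pool->available) < 0)
 *		return -EBUSY;	// nothing left to take
 */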

#endif /* _ALPHA_ATOMIC_H */