atomic_lse.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 */

#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H
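
/*
 * Value-discarding, unordered atomics built directly on the LSE ST*
 * instructions. As an illustration (not part of the original file),
 * ATOMIC_OP(add, stadd) expands roughly to:
 *
 *	static __always_inline void __lse_atomic_add(int i, atomic_t *v)
 *	{
 *		asm volatile(__LSE_PREAMBLE
 *		"	stadd	%w[i], %[v]\n"
 *		: [v] "+Q" (v->counter)
 *		: [i] "r" (i));
 *	}
 */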
#define ATOMIC_OP(op, asm_op)						\
static __always_inline void						\
__lse_atomic_##op(int i, atomic_t *v)					\
{									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	" #asm_op "	%w[i], %[v]\n"				\
	: [v] "+Q" (v->counter)						\
	: [i] "r" (i));							\
}

ATOMIC_OP(andnot, stclr)
ATOMIC_OP(or, stset)
ATOMIC_OP(xor, steor)
ATOMIC_OP(add, stadd)

static __always_inline void __lse_atomic_sub(int i, atomic_t *v)
{
	__lse_atomic_add(-i, v);
}

#undef ATOMIC_OP
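
/*
 * Fetch ops: LSE LD* instructions that return the old value. The "mb"
 * argument selects the ordering suffix (none = relaxed, "a" = acquire,
 * "l" = release, "al" = fully ordered) and "cl" supplies the clobber
 * list ("memory" for anything stronger than relaxed).
 */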
#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...)			\
static __always_inline int						\
__lse_atomic_fetch_##op##name(int i, atomic_t *v)			\
{									\
	int old;							\
									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	" #asm_op #mb "	%w[i], %w[old], %[v]"			\
	: [v] "+Q" (v->counter),					\
	  [old] "=r" (old)						\
	: [i] "r" (i)							\
	: cl);								\
									\
	return old;							\
}

#define ATOMIC_FETCH_OPS(op, asm_op)					\
	ATOMIC_FETCH_OP(_relaxed,   , op, asm_op)			\
	ATOMIC_FETCH_OP(_acquire,  a, op, asm_op, "memory")		\
	ATOMIC_FETCH_OP(_release,  l, op, asm_op, "memory")		\
	ATOMIC_FETCH_OP(        , al, op, asm_op, "memory")

ATOMIC_FETCH_OPS(andnot, ldclr)
ATOMIC_FETCH_OPS(or, ldset)
ATOMIC_FETCH_OPS(xor, ldeor)
ATOMIC_FETCH_OPS(add, ldadd)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_FETCH_OPS
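
/* There is no LSE subtract instruction: fetch_sub negates and reuses fetch_add. */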
#define ATOMIC_FETCH_OP_SUB(name)					\
static __always_inline int						\
__lse_atomic_fetch_sub##name(int i, atomic_t *v)			\
{									\
	return __lse_atomic_fetch_add##name(-i, v);			\
}

ATOMIC_FETCH_OP_SUB(_relaxed)
ATOMIC_FETCH_OP_SUB(_acquire)
ATOMIC_FETCH_OP_SUB(_release)
ATOMIC_FETCH_OP_SUB(        )

#undef ATOMIC_FETCH_OP_SUB
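
/*
 * add_return/sub_return are derived from the fetch ops by re-applying
 * the operand to the returned old value.
 */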
#define ATOMIC_OP_ADD_SUB_RETURN(name)					\
static __always_inline int						\
__lse_atomic_add_return##name(int i, atomic_t *v)			\
{									\
	return __lse_atomic_fetch_add##name(i, v) + i;			\
}									\
									\
static __always_inline int						\
__lse_atomic_sub_return##name(int i, atomic_t *v)			\
{									\
	return __lse_atomic_fetch_sub##name(i, v) - i;			\
}

ATOMIC_OP_ADD_SUB_RETURN(_relaxed)
ATOMIC_OP_ADD_SUB_RETURN(_acquire)
ATOMIC_OP_ADD_SUB_RETURN(_release)
ATOMIC_OP_ADD_SUB_RETURN(        )

#undef ATOMIC_OP_ADD_SUB_RETURN
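
/* and/fetch_and are implemented as andnot (LDCLR/STCLR) of the complemented operand. */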
static __always_inline void __lse_atomic_and(int i, atomic_t *v)
{
	return __lse_atomic_andnot(~i, v);
}

#define ATOMIC_FETCH_OP_AND(name, mb, cl...)				\
static __always_inline int						\
__lse_atomic_fetch_and##name(int i, atomic_t *v)			\
{									\
	return __lse_atomic_fetch_andnot##name(~i, v);			\
}

ATOMIC_FETCH_OP_AND(_relaxed,   )
ATOMIC_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC_FETCH_OP_AND(_release,  l, "memory")
ATOMIC_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC_FETCH_OP_AND
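
/*
 * The atomic64_t operations below mirror the 32-bit ones, operating on
 * s64 values in full X registers (no %w register-width modifier).
 */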
#define ATOMIC64_OP(op, asm_op)						\
static __always_inline void						\
__lse_atomic64_##op(s64 i, atomic64_t *v)				\
{									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	" #asm_op "	%[i], %[v]\n"				\
	: [v] "+Q" (v->counter)						\
	: [i] "r" (i));							\
}

ATOMIC64_OP(andnot, stclr)
ATOMIC64_OP(or, stset)
ATOMIC64_OP(xor, steor)
ATOMIC64_OP(add, stadd)

static __always_inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
{
	__lse_atomic64_add(-i, v);
}

#undef ATOMIC64_OP

#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...)			\
static __always_inline long						\
__lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)			\
{									\
	s64 old;							\
									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	" #asm_op #mb "	%[i], %[old], %[v]"			\
	: [v] "+Q" (v->counter),					\
	  [old] "=r" (old)						\
	: [i] "r" (i)							\
	: cl);								\
									\
	return old;							\
}

#define ATOMIC64_FETCH_OPS(op, asm_op)					\
	ATOMIC64_FETCH_OP(_relaxed,   , op, asm_op)			\
	ATOMIC64_FETCH_OP(_acquire,  a, op, asm_op, "memory")		\
	ATOMIC64_FETCH_OP(_release,  l, op, asm_op, "memory")		\
	ATOMIC64_FETCH_OP(        , al, op, asm_op, "memory")

ATOMIC64_FETCH_OPS(andnot, ldclr)
ATOMIC64_FETCH_OPS(or, ldset)
ATOMIC64_FETCH_OPS(xor, ldeor)
ATOMIC64_FETCH_OPS(add, ldadd)

#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_FETCH_OPS

#define ATOMIC64_FETCH_OP_SUB(name)					\
static __always_inline long						\
__lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v)			\
{									\
	return __lse_atomic64_fetch_add##name(-i, v);			\
}

ATOMIC64_FETCH_OP_SUB(_relaxed)
ATOMIC64_FETCH_OP_SUB(_acquire)
ATOMIC64_FETCH_OP_SUB(_release)
ATOMIC64_FETCH_OP_SUB(        )

#undef ATOMIC64_FETCH_OP_SUB

#define ATOMIC64_OP_ADD_SUB_RETURN(name)				\
static __always_inline long						\
__lse_atomic64_add_return##name(s64 i, atomic64_t *v)			\
{									\
	return __lse_atomic64_fetch_add##name(i, v) + i;		\
}									\
									\
static __always_inline long						\
__lse_atomic64_sub_return##name(s64 i, atomic64_t *v)			\
{									\
	return __lse_atomic64_fetch_sub##name(i, v) - i;		\
}

ATOMIC64_OP_ADD_SUB_RETURN(_relaxed)
ATOMIC64_OP_ADD_SUB_RETURN(_acquire)
ATOMIC64_OP_ADD_SUB_RETURN(_release)
ATOMIC64_OP_ADD_SUB_RETURN(        )

#undef ATOMIC64_OP_ADD_SUB_RETURN

static __always_inline void __lse_atomic64_and(s64 i, atomic64_t *v)
{
	return __lse_atomic64_andnot(~i, v);
}

#define ATOMIC64_FETCH_OP_AND(name, mb, cl...)				\
static __always_inline long						\
__lse_atomic64_fetch_and##name(s64 i, atomic64_t *v)			\
{									\
	return __lse_atomic64_fetch_andnot##name(~i, v);		\
}

ATOMIC64_FETCH_OP_AND(_relaxed,   )
ATOMIC64_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC64_FETCH_OP_AND(_release,  l, "memory")
ATOMIC64_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC64_FETCH_OP_AND
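
/*
 * dec_if_positive: decrement *v only if the result stays non-negative.
 * There is no single LSE instruction for this, so it is a CASAL loop:
 * load the counter, compute the decrement, bail out if it would go
 * negative, CAS the new value in, and retry if the counter changed
 * underneath us. The pointer register is reused to carry the result.
 */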
static __always_inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long tmp;

	asm volatile(
	__LSE_PREAMBLE
	"1:	ldr	%x[tmp], %[v]\n"
	"	subs	%[ret], %x[tmp], #1\n"
	"	b.lt	2f\n"
	"	casal	%x[tmp], %[ret], %[v]\n"
	"	sub	%x[tmp], %x[tmp], #1\n"
	"	sub	%x[tmp], %x[tmp], %[ret]\n"
	"	cbnz	%x[tmp], 1b\n"
	"2:"
	: [ret] "+&r" (v), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)
	:
	: "cc", "memory");

	return (long)v;
}
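
/*
 * cmpxchg for 8/16/32/64-bit quantities built on the CAS instruction.
 * CAS takes the expected value in a register, compares it against memory
 * and writes back the value it observed there, which is then returned to
 * the caller. "sfx" picks the operand size (b/h/none/none), "mb" the
 * ordering suffix and "name" the function-name prefix.
 */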
#define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...)			\
static __always_inline u##sz						\
__lse__cmpxchg_case_##name##sz(volatile void *ptr,			\
					      u##sz old,		\
					      u##sz new)		\
{									\
	register unsigned long x0 asm ("x0") = (unsigned long)ptr;	\
	register u##sz x1 asm ("x1") = old;				\
	register u##sz x2 asm ("x2") = new;				\
	unsigned long tmp;						\
									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	mov	%" #w "[tmp], %" #w "[old]\n"			\
	"	cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n"	\
	"	mov	%" #w "[ret], %" #w "[tmp]"			\
	: [ret] "+r" (x0), [v] "+Q" (*(u##sz *)ptr),			\
	  [tmp] "=&r" (tmp)						\
	: [old] "r" (x1), [new] "r" (x2)				\
	: cl);								\
									\
	return x0;							\
}

__CMPXCHG_CASE(w, b,     ,  8,   )
__CMPXCHG_CASE(w, h,     , 16,   )
__CMPXCHG_CASE(w,  ,     , 32,   )
__CMPXCHG_CASE(x,  ,     , 64,   )
__CMPXCHG_CASE(w, b, acq_,  8,  a, "memory")
__CMPXCHG_CASE(w, h, acq_, 16,  a, "memory")
__CMPXCHG_CASE(w,  , acq_, 32,  a, "memory")
__CMPXCHG_CASE(x,  , acq_, 64,  a, "memory")
__CMPXCHG_CASE(w, b, rel_,  8,  l, "memory")
__CMPXCHG_CASE(w, h, rel_, 16,  l, "memory")
__CMPXCHG_CASE(w,  , rel_, 32,  l, "memory")
__CMPXCHG_CASE(x,  , rel_, 64,  l, "memory")
__CMPXCHG_CASE(w, b, mb_,   8, al, "memory")
__CMPXCHG_CASE(w, h, mb_,  16, al, "memory")
__CMPXCHG_CASE(w,  , mb_,  32, al, "memory")
__CMPXCHG_CASE(x,  , mb_,  64, al, "memory")

#undef __CMPXCHG_CASE
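
/*
 * 128-bit cmpxchg_double using CASP on an even/odd register pair.
 * The two EORs and the ORR reduce the result to zero on success
 * (both halves matched the expected values) and non-zero on failure.
 */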
#define __CMPXCHG_DBL(name, mb, cl...)					\
static __always_inline long						\
__lse__cmpxchg_double##name(unsigned long old1,				\
					 unsigned long old2,		\
					 unsigned long new1,		\
					 unsigned long new2,		\
					 volatile void *ptr)		\
{									\
	unsigned long oldval1 = old1;					\
	unsigned long oldval2 = old2;					\
	register unsigned long x0 asm ("x0") = old1;			\
	register unsigned long x1 asm ("x1") = old2;			\
	register unsigned long x2 asm ("x2") = new1;			\
	register unsigned long x3 asm ("x3") = new2;			\
	register unsigned long x4 asm ("x4") = (unsigned long)ptr;	\
									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
	"	eor	%[old1], %[old1], %[oldval1]\n"			\
	"	eor	%[old2], %[old2], %[oldval2]\n"			\
	"	orr	%[old1], %[old1], %[old2]"			\
	: [old1] "+&r" (x0), [old2] "+&r" (x1),				\
	  [v] "+Q" (*(__uint128_t *)ptr)				\
	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),		\
	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)		\
	: cl);								\
									\
	return x0;							\
}

__CMPXCHG_DBL(   ,   )
__CMPXCHG_DBL(_mb, al, "memory")

#undef __CMPXCHG_DBL

#endif	/* __ASM_ATOMIC_LSE_H */