/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#ifndef _ASM_RISCV_ATOMIC_H
#define _ASM_RISCV_ATOMIC_H

#ifdef CONFIG_GENERIC_ATOMIC64
# include <asm-generic/atomic64.h>
#else
# if (__riscv_xlen < 64)
#  error "64-bit atomics require XLEN to be at least 64"
# endif
#endif

#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define __atomic_acquire_fence() \
	__asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")

#define __atomic_release_fence() \
	__asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory");
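
/*
 * Note: this file mostly provides only the _relaxed and fully ordered
 * forms of each operation.  The generic atomic fallback layer uses the
 * two fences above to derive the _acquire/_release variants from the
 * _relaxed ones, roughly along the lines of (illustrative sketch, not
 * the exact fallback code):
 *
 *	static __always_inline int
 *	arch_atomic_fetch_add_acquire(int i, atomic_t *v)
 *	{
 *		int ret = arch_atomic_fetch_add_relaxed(i, v);
 *		__atomic_acquire_fence();
 *		return ret;
 *	}
 */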

static __always_inline int arch_atomic_read(const atomic_t *v)
{
	return READ_ONCE(v->counter);
}
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}

#ifndef CONFIG_GENERIC_ATOMIC64
#define ATOMIC64_INIT(i) { (i) }
static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
	return READ_ONCE(v->counter);
}
static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	WRITE_ONCE(v->counter, i);
}
#endif

/*
 * First, the atomic ops that have no ordering constraints and therefore don't
 * have the AQ or RL bits set.  These don't return anything, so there's only
 * one version to worry about.
 */
#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix) \
static __always_inline \
void arch_atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
{ \
	__asm__ __volatile__ ( \
		" amo" #asm_op "." #asm_type " zero, %1, %0" \
		: "+A" (v->counter) \
		: "r" (I) \
		: "memory"); \
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I) \
	ATOMIC_OP (op, asm_op, I, w, int, )
#else
#define ATOMIC_OPS(op, asm_op, I) \
	ATOMIC_OP (op, asm_op, I, w, int, ) \
	ATOMIC_OP (op, asm_op, I, d, s64, 64)
#endif

ATOMIC_OPS(add, add,  i)
ATOMIC_OPS(sub, add, -i)
ATOMIC_OPS(and, and,  i)
ATOMIC_OPS( or,  or,  i)
ATOMIC_OPS(xor, xor,  i)

#undef ATOMIC_OP
#undef ATOMIC_OPS
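
/*
 * For reference, a sketch of what ATOMIC_OPS(add, add, i) above expands
 * to for the 32-bit case (illustrative, derived from the macros):
 *
 *	static __always_inline void arch_atomic_add(int i, atomic_t *v)
 *	{
 *		__asm__ __volatile__ (
 *			" amoadd.w zero, %1, %0"
 *			: "+A" (v->counter)
 *			: "r" (i)
 *			: "memory");
 *	}
 *
 * The result is discarded by writing it to the zero register, and no
 * AQ/RL bits are set, so the AMO imposes no ordering.
 */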

/*
 * Atomic ops that have ordered, relaxed, acquire, and release variants.
 * There are two flavors of these: the arithmetic ops have both fetch and
 * return versions, while the logical ops only have fetch versions.
 */
#define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix) \
static __always_inline \
c_type arch_atomic##prefix##_fetch_##op##_relaxed(c_type i, \
					     atomic##prefix##_t *v) \
{ \
	register c_type ret; \
	__asm__ __volatile__ ( \
		" amo" #asm_op "." #asm_type " %1, %2, %0" \
		: "+A" (v->counter), "=r" (ret) \
		: "r" (I) \
		: "memory"); \
	return ret; \
} \
static __always_inline \
c_type arch_atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v) \
{ \
	register c_type ret; \
	__asm__ __volatile__ ( \
		" amo" #asm_op "." #asm_type ".aqrl %1, %2, %0" \
		: "+A" (v->counter), "=r" (ret) \
		: "r" (I) \
		: "memory"); \
	return ret; \
}

#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix) \
static __always_inline \
c_type arch_atomic##prefix##_##op##_return_relaxed(c_type i, \
					      atomic##prefix##_t *v) \
{ \
	return arch_atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I; \
} \
static __always_inline \
c_type arch_atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v) \
{ \
	return arch_atomic##prefix##_fetch_##op(i, v) c_op I; \
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I) \
	ATOMIC_FETCH_OP( op, asm_op,       I, w, int, ) \
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int, )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I) \
	ATOMIC_FETCH_OP( op, asm_op,       I, w, int, ) \
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int, ) \
	ATOMIC_FETCH_OP( op, asm_op,       I, d, s64, 64) \
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, s64, 64)
#endif

ATOMIC_OPS(add, add, +,  i)
ATOMIC_OPS(sub, add, +, -i)

#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed
#define arch_atomic_add_return		arch_atomic_add_return
#define arch_atomic_sub_return		arch_atomic_sub_return

#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed
#define arch_atomic_fetch_add		arch_atomic_fetch_add
#define arch_atomic_fetch_sub		arch_atomic_fetch_sub

#ifndef CONFIG_GENERIC_ATOMIC64
#define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
#define arch_atomic64_add_return		arch_atomic64_add_return
#define arch_atomic64_sub_return		arch_atomic64_sub_return

#define arch_atomic64_fetch_add_relaxed	arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed	arch_atomic64_fetch_sub_relaxed
#define arch_atomic64_fetch_add		arch_atomic64_fetch_add
#define arch_atomic64_fetch_sub		arch_atomic64_fetch_sub
#endif

#undef ATOMIC_OPS
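
/*
 * For reference, the 32-bit expansion of ATOMIC_OPS(sub, add, +, -i)
 * above (illustrative): RISC-V has no amosub, so subtraction is an
 * amoadd of the negated operand, and the "return" form is derived from
 * the "fetch" form in C:
 *
 *	static __always_inline int arch_atomic_sub_return(int i, atomic_t *v)
 *	{
 *		return arch_atomic_fetch_sub(i, v) + -i;
 *	}
 *
 * where arch_atomic_fetch_sub() executes "amoadd.w.aqrl %1, %2, %0"
 * with -i as the register operand and returns the old value.
 */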

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I) \
	ATOMIC_FETCH_OP(op, asm_op, I, w, int, )
#else
#define ATOMIC_OPS(op, asm_op, I) \
	ATOMIC_FETCH_OP(op, asm_op, I, w, int, ) \
	ATOMIC_FETCH_OP(op, asm_op, I, d, s64, 64)
#endif

ATOMIC_OPS(and, and, i)
ATOMIC_OPS( or,  or, i)
ATOMIC_OPS(xor, xor, i)

#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed
#define arch_atomic_fetch_and		arch_atomic_fetch_and
#define arch_atomic_fetch_or		arch_atomic_fetch_or
#define arch_atomic_fetch_xor		arch_atomic_fetch_xor

#ifndef CONFIG_GENERIC_ATOMIC64
#define arch_atomic64_fetch_and_relaxed	arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_or_relaxed	arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed	arch_atomic64_fetch_xor_relaxed
#define arch_atomic64_fetch_and		arch_atomic64_fetch_and
#define arch_atomic64_fetch_or		arch_atomic64_fetch_or
#define arch_atomic64_fetch_xor		arch_atomic64_fetch_xor
#endif

#undef ATOMIC_OPS

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN

/* This is required to provide a full barrier on success. */
static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:	lr.w     %[p],  %[c]\n"
		"	beq      %[p],  %[u], 1f\n"
		"	add      %[rc], %[p], %[a]\n"
		"	sc.w.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");
	return prev;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
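
/*
 * An equivalent formulation in terms of this file's own cmpxchg helper
 * (illustrative sketch of what the LR/SC loop above computes; the name
 * fetch_add_unless_sketch() is hypothetical, not part of this file):
 *
 *	static __always_inline int fetch_add_unless_sketch(atomic_t *v, int a, int u)
 *	{
 *		int prev = arch_atomic_read(v);
 *
 *		while (prev != u) {
 *			int old = arch_atomic_cmpxchg(v, prev, prev + a);
 *			if (old == prev)
 *				break;		// stored; fully ordered on success
 *			prev = old;		// raced; retry with the fresh value
 *		}
 *		return prev;
 *	}
 *
 * In the asm version, sc.w.rl plus the trailing "fence rw, rw" provide
 * that full barrier on success; the early-exit path performs no store
 * and takes no barrier.
 */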

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 prev;
	long rc;

	__asm__ __volatile__ (
		"0:	lr.d     %[p],  %[c]\n"
		"	beq      %[p],  %[u], 1f\n"
		"	add      %[rc], %[p], %[a]\n"
		"	sc.d.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");
	return prev;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
#endif

/*
 * atomic_{cmp,}xchg is required to have exactly the same ordering semantics as
 * {cmp,}xchg and the operations that return, so they need a full barrier.
 */
#define ATOMIC_OP(c_t, prefix, size) \
static __always_inline \
c_t arch_atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n) \
{ \
	return __xchg_relaxed(&(v->counter), n, size); \
} \
static __always_inline \
c_t arch_atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n) \
{ \
	return __xchg_acquire(&(v->counter), n, size); \
} \
static __always_inline \
c_t arch_atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n) \
{ \
	return __xchg_release(&(v->counter), n, size); \
} \
static __always_inline \
c_t arch_atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n) \
{ \
	return __xchg(&(v->counter), n, size); \
} \
static __always_inline \
c_t arch_atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v, \
				     c_t o, c_t n) \
{ \
	return __cmpxchg_relaxed(&(v->counter), o, n, size); \
} \
static __always_inline \
c_t arch_atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v, \
				     c_t o, c_t n) \
{ \
	return __cmpxchg_acquire(&(v->counter), o, n, size); \
} \
static __always_inline \
c_t arch_atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v, \
				     c_t o, c_t n) \
{ \
	return __cmpxchg_release(&(v->counter), o, n, size); \
} \
static __always_inline \
c_t arch_atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \
{ \
	return __cmpxchg(&(v->counter), o, n, size); \
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS() \
	ATOMIC_OP(int,   , 4)
#else
#define ATOMIC_OPS() \
	ATOMIC_OP(int,   , 4) \
	ATOMIC_OP(s64, 64, 8)
#endif

ATOMIC_OPS()

#define arch_atomic_xchg_relaxed	arch_atomic_xchg_relaxed
#define arch_atomic_xchg_acquire	arch_atomic_xchg_acquire
#define arch_atomic_xchg_release	arch_atomic_xchg_release
#define arch_atomic_xchg		arch_atomic_xchg
#define arch_atomic_cmpxchg_relaxed	arch_atomic_cmpxchg_relaxed
#define arch_atomic_cmpxchg_acquire	arch_atomic_cmpxchg_acquire
#define arch_atomic_cmpxchg_release	arch_atomic_cmpxchg_release
#define arch_atomic_cmpxchg		arch_atomic_cmpxchg

#undef ATOMIC_OPS
#undef ATOMIC_OP
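
/*
 * Usage sketch (illustrative; inc_below() is a hypothetical helper, not
 * part of this file): a typical cmpxchg retry loop built on the fully
 * ordered arch_atomic_cmpxchg() defined above.
 *
 *	static __always_inline bool inc_below(atomic_t *v, int limit)
 *	{
 *		int old = arch_atomic_read(v);
 *
 *		while (old < limit) {
 *			int prev = arch_atomic_cmpxchg(v, old, old + 1);
 *			if (prev == old)
 *				return true;	// our increment landed
 *			old = prev;		// counter changed under us; retry
 *		}
 *		return false;			// already at or above limit
 *	}
 */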

static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:	lr.w      %[p],  %[c]\n"
		"	bltz      %[p],  1f\n"
		"	addi      %[rc], %[p], 1\n"
		"	sc.w.rl   %[rc], %[rc], %[c]\n"
		"	bnez      %[rc], 0b\n"
		"	fence     rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		:
		: "memory");
	return !(prev < 0);
}

#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative

static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:	lr.w      %[p],  %[c]\n"
		"	bgtz      %[p],  1f\n"
		"	addi      %[rc], %[p], -1\n"
		"	sc.w.rl   %[rc], %[rc], %[c]\n"
		"	bnez      %[rc], 0b\n"
		"	fence     rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		:
		: "memory");
	return !(prev > 0);
}

#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive

static __always_inline int arch_atomic_dec_if_positive(atomic_t *v)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:	lr.w     %[p],  %[c]\n"
		"	addi     %[rc], %[p], -1\n"
		"	bltz     %[rc], 1f\n"
		"	sc.w.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		:
		: "memory");
	return prev - 1;
}

#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
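
/*
 * Note on the return value: unlike the "unless" helpers above, which
 * return a boolean, arch_atomic_dec_if_positive() returns prev - 1.
 * When the old value was greater than zero the counter is decremented
 * and the new (non-negative) value is returned; otherwise the store is
 * skipped and a negative value is returned.  The atomic64 variant below
 * behaves the same way.
 */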

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v)
{
	s64 prev;
	long rc;

	__asm__ __volatile__ (
		"0:	lr.d      %[p],  %[c]\n"
		"	bltz      %[p],  1f\n"
		"	addi      %[rc], %[p], 1\n"
		"	sc.d.rl   %[rc], %[rc], %[c]\n"
		"	bnez      %[rc], 0b\n"
		"	fence     rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		:
		: "memory");
	return !(prev < 0);
}

#define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative

static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v)
{
	s64 prev;
	long rc;

	__asm__ __volatile__ (
		"0:	lr.d      %[p],  %[c]\n"
		"	bgtz      %[p],  1f\n"
		"	addi      %[rc], %[p], -1\n"
		"	sc.d.rl   %[rc], %[rc], %[c]\n"
		"	bnez      %[rc], 0b\n"
		"	fence     rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		:
		: "memory");
	return !(prev > 0);
}

#define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive

static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 prev;
	long rc;

	__asm__ __volatile__ (
		"0:	lr.d     %[p],  %[c]\n"
		"	addi     %[rc], %[p], -1\n"
		"	bltz     %[rc], 1f\n"
		"	sc.d.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		:
		: "memory");
	return prev - 1;
}

#define arch_atomic64_dec_if_positive	arch_atomic64_dec_if_positive

#endif

#endif /* _ASM_RISCV_ATOMIC_H */