/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define arch_atomic_read(v) READ_ONCE((v)->counter)
#define arch_atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops. We use load exclusive and
 * store exclusive to ensure that these are atomic. We may loop
 * to ensure that the update happens.
 */

#define ATOMIC_OP(op, c_op, asm_op) \
static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
        unsigned long tmp; \
        int result; \
        \
        prefetchw(&v->counter); \
        __asm__ __volatile__("@ atomic_" #op "\n" \
"1: ldrex %0, [%3]\n" \
" " #asm_op " %0, %0, %4\n" \
" strex %1, %0, [%3]\n" \
" teq %1, #0\n" \
" bne 1b" \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
        : "r" (&v->counter), "Ir" (i) \
        : "cc"); \
}
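
/*
 * Note on the template above: %0 (result) holds the value loaded by ldrex
 * and then the updated value, %1 (tmp) holds the strex status (0 on
 * success), %3 is the address of v->counter and %4 is the operand i.  If
 * the exclusive monitor is lost between ldrex and strex (another CPU wrote
 * the word, or an exception was taken), strex fails and the loop retries,
 * so the read-modify-write is atomic.
 */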

#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
        unsigned long tmp; \
        int result; \
        \
        prefetchw(&v->counter); \
        \
        __asm__ __volatile__("@ atomic_" #op "_return\n" \
"1: ldrex %0, [%3]\n" \
" " #asm_op " %0, %0, %4\n" \
" strex %1, %0, [%3]\n" \
" teq %1, #0\n" \
" bne 1b" \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
        : "r" (&v->counter), "Ir" (i) \
        : "cc"); \
        \
        return result; \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
        unsigned long tmp; \
        int result, val; \
        \
        prefetchw(&v->counter); \
        \
        __asm__ __volatile__("@ atomic_fetch_" #op "\n" \
"1: ldrex %0, [%4]\n" \
" " #asm_op " %1, %0, %5\n" \
" strex %2, %1, [%4]\n" \
" teq %2, #0\n" \
" bne 1b" \
        : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter) \
        : "r" (&v->counter), "Ir" (i) \
        : "cc"); \
        \
        return result; \
}
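
/*
 * The three templates differ only in what they hand back: ATOMIC_OP returns
 * nothing, ATOMIC_OP_RETURN returns the updated value and ATOMIC_FETCH_OP
 * returns the value that was in v->counter before the update.  Only the
 * _relaxed forms are provided here; the generic atomic headers are expected
 * to construct the acquire/release and fully ordered variants from them.
 */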

#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed

#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed

static inline int arch_atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
        int oldval;
        unsigned long res;

        prefetchw(&ptr->counter);

        do {
                __asm__ __volatile__("@ atomic_cmpxchg\n"
                "ldrex %1, [%3]\n"
                "mov %0, #0\n"
                "teq %1, %4\n"
                "strexeq %0, %5, [%3]\n"
                : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
                : "r" (&ptr->counter), "Ir" (old), "r" (new)
                : "cc");
        } while (res);

        return oldval;
}
#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed
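
/*
 * In arch_atomic_cmpxchg_relaxed(), strexeq only attempts the store when
 * the loaded value matched 'old'; a mismatch falls straight through with
 * res == 0 and the old value is returned.  The outer loop therefore only
 * repeats when the store itself failed because the exclusive reservation
 * was lost, not when the comparison failed.
 */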

static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
        int oldval, newval;
        unsigned long tmp;

        smp_mb();
        prefetchw(&v->counter);

        __asm__ __volatile__ ("@ atomic_add_unless\n"
"1: ldrex %0, [%4]\n"
" teq %0, %5\n"
" beq 2f\n"
" add %1, %0, %6\n"
" strex %2, %1, [%4]\n"
" teq %2, #0\n"
" bne 1b\n"
"2:"
        : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (u), "r" (a)
        : "cc");

        if (oldval != u)
                smp_mb();

        return oldval;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
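
/*
 * arch_atomic_fetch_add_unless() is fully ordered when it does modify the
 * counter: there is an smp_mb() before the loop and another one afterwards,
 * but the trailing barrier is skipped when the old value was equal to 'u'
 * and nothing was written.
 */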

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif
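
/*
 * Pre-ARMv6 CPUs have no ldrex/strex, but they are uniprocessor only (see
 * the #error above), so disabling interrupts around a plain
 * load/modify/store is enough to make these operations atomic with respect
 * to everything else that can run on the CPU.
 */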

#define ATOMIC_OP(op, c_op, asm_op) \
static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
        unsigned long flags; \
        \
        raw_local_irq_save(flags); \
        v->counter c_op i; \
        raw_local_irq_restore(flags); \
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
        unsigned long flags; \
        int val; \
        \
        raw_local_irq_save(flags); \
        v->counter c_op i; \
        val = v->counter; \
        raw_local_irq_restore(flags); \
        \
        return val; \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
        unsigned long flags; \
        int val; \
        \
        raw_local_irq_save(flags); \
        val = v->counter; \
        v->counter c_op i; \
        raw_local_irq_restore(flags); \
        \
        return val; \
}

static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
        int ret;
        unsigned long flags;

        raw_local_irq_save(flags);
        ret = v->counter;
        if (likely(ret == old))
                v->counter = new;
        raw_local_irq_restore(flags);

        return ret;
}

#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot

#endif /* __LINUX_ARM_ARCH__ */

#define ATOMIC_OPS(op, c_op, asm_op) \
        ATOMIC_OP(op, c_op, asm_op) \
        ATOMIC_OP_RETURN(op, c_op, asm_op) \
        ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define arch_atomic_andnot arch_atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
        ATOMIC_OP(op, c_op, asm_op) \
        ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, orr)
ATOMIC_OPS(xor, ^=, eor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
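
/*
 * The ATOMIC_OPS() instantiations above generate arch_atomic_add() and
 * arch_atomic_sub() together with their _return and fetch_ variants, and
 * arch_atomic_and(), arch_atomic_andnot(), arch_atomic_or() and
 * arch_atomic_xor() together with their fetch_ variants, on both the
 * ARMv6+ and the pre-ARMv6 paths.  The helper macros are then undefined so
 * they do not leak out of this header.
 */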

#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
        s64 counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

#ifdef CONFIG_ARM_LPAE
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
        s64 result;

        __asm__ __volatile__("@ atomic64_read\n"
" ldrd %0, %H0, [%1]"
        : "=&r" (result)
        : "r" (&v->counter), "Qo" (v->counter)
        );

        return result;
}

static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
        __asm__ __volatile__("@ atomic64_set\n"
" strd %2, %H2, [%1]"
        : "=Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        );
}
#else
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
        s64 result;

        __asm__ __volatile__("@ atomic64_read\n"
" ldrexd %0, %H0, [%1]"
        : "=&r" (result)
        : "r" (&v->counter), "Qo" (v->counter)
        );

        return result;
}

static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
        s64 tmp;

        prefetchw(&v->counter);
        __asm__ __volatile__("@ atomic64_set\n"
"1: ldrexd %0, %H0, [%2]\n"
" strexd %0, %3, %H3, [%2]\n"
" teq %0, #0\n"
" bne 1b"
        : "=&r" (tmp), "=Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");
}
#endif
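
/*
 * With LPAE, ldrd/strd on a naturally aligned 64-bit location is
 * single-copy atomic, so plain loads and stores are enough for
 * atomic64_read() and atomic64_set().  Without LPAE the 64-bit store is not
 * guaranteed to be atomic, so atomic64_set() has to go through a
 * ldrexd/strexd loop as well.
 */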

#define ATOMIC64_OP(op, op1, op2) \
static inline void arch_atomic64_##op(s64 i, atomic64_t *v) \
{ \
        s64 result; \
        unsigned long tmp; \
        \
        prefetchw(&v->counter); \
        __asm__ __volatile__("@ atomic64_" #op "\n" \
"1: ldrexd %0, %H0, [%3]\n" \
" " #op1 " %Q0, %Q0, %Q4\n" \
" " #op2 " %R0, %R0, %R4\n" \
" strexd %1, %0, %H0, [%3]\n" \
" teq %1, #0\n" \
" bne 1b" \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
        : "r" (&v->counter), "r" (i) \
        : "cc"); \
}

#define ATOMIC64_OP_RETURN(op, op1, op2) \
static inline s64 \
arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t *v) \
{ \
        s64 result; \
        unsigned long tmp; \
        \
        prefetchw(&v->counter); \
        \
        __asm__ __volatile__("@ atomic64_" #op "_return\n" \
"1: ldrexd %0, %H0, [%3]\n" \
" " #op1 " %Q0, %Q0, %Q4\n" \
" " #op2 " %R0, %R0, %R4\n" \
" strexd %1, %0, %H0, [%3]\n" \
" teq %1, #0\n" \
" bne 1b" \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
        : "r" (&v->counter), "r" (i) \
        : "cc"); \
        \
        return result; \
}

#define ATOMIC64_FETCH_OP(op, op1, op2) \
static inline s64 \
arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v) \
{ \
        s64 result, val; \
        unsigned long tmp; \
        \
        prefetchw(&v->counter); \
        \
        __asm__ __volatile__("@ atomic64_fetch_" #op "\n" \
"1: ldrexd %0, %H0, [%4]\n" \
" " #op1 " %Q1, %Q0, %Q5\n" \
" " #op2 " %R1, %R0, %R5\n" \
" strexd %2, %1, %H1, [%4]\n" \
" teq %2, #0\n" \
" bne 1b" \
        : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter) \
        : "r" (&v->counter), "r" (i) \
        : "cc"); \
        \
        return result; \
}
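
/*
 * The 64-bit templates work on register pairs: %Q<n> is the low word of
 * operand <n>, %R<n> the high word, and %H<n> names the second register of
 * the pair for ldrexd/strexd.  op1 acts on the low words and op2 on the
 * high words (adds/adc for add, subs/sbc for sub, and the same logical
 * instruction for both halves of and, andnot, or and xor), so the pair is
 * updated as one 64-bit value.
 */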

#define ATOMIC64_OPS(op, op1, op2) \
        ATOMIC64_OP(op, op1, op2) \
        ATOMIC64_OP_RETURN(op, op1, op2) \
        ATOMIC64_FETCH_OP(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)

#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2) \
        ATOMIC64_OP(op, op1, op2) \
        ATOMIC64_FETCH_OP(op, op1, op2)

#define arch_atomic64_andnot arch_atomic64_andnot

ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, orr, orr)
ATOMIC64_OPS(xor, eor, eor)

#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline s64 arch_atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
{
        s64 oldval;
        unsigned long res;

        prefetchw(&ptr->counter);

        do {
                __asm__ __volatile__("@ atomic64_cmpxchg\n"
                "ldrexd %1, %H1, [%3]\n"
                "mov %0, #0\n"
                "teq %1, %4\n"
                "teqeq %H1, %H4\n"
                "strexdeq %0, %5, %H5, [%3]"
                : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
                : "r" (&ptr->counter), "r" (old), "r" (new)
                : "cc");
        } while (res);

        return oldval;
}
#define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg_relaxed
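
/*
 * arch_atomic64_cmpxchg_relaxed() compares both halves of the old value
 * (teq on the low words, teqeq on the high words) before issuing the
 * conditional strexdeq, mirroring the 32-bit cmpxchg logic above.
 */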

static inline s64 arch_atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
{
        s64 result;
        unsigned long tmp;

        prefetchw(&ptr->counter);

        __asm__ __volatile__("@ atomic64_xchg\n"
"1: ldrexd %0, %H0, [%3]\n"
" strexd %1, %4, %H4, [%3]\n"
" teq %1, #0\n"
" bne 1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
        : "r" (&ptr->counter), "r" (new)
        : "cc");

        return result;
}
#define arch_atomic64_xchg_relaxed arch_atomic64_xchg_relaxed

static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
        s64 result;
        unsigned long tmp;

        smp_mb();
        prefetchw(&v->counter);

        __asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1: ldrexd %0, %H0, [%3]\n"
" subs %Q0, %Q0, #1\n"
" sbc %R0, %R0, #0\n"
" teq %R0, #0\n"
" bmi 2f\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b\n"
"2:"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter)
        : "cc");

        smp_mb();

        return result;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
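
/*
 * arch_atomic64_dec_if_positive() always returns the decremented value, but
 * only stores it back when it is not negative: teq/bmi test the sign bit of
 * the high word after the 64-bit subtract and branch past the strexd when
 * it is set.  The smp_mb() calls on either side make the operation fully
 * ordered.
 */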

static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
        s64 oldval, newval;
        unsigned long tmp;

        smp_mb();
        prefetchw(&v->counter);

        __asm__ __volatile__("@ atomic64_add_unless\n"
"1: ldrexd %0, %H0, [%4]\n"
" teq %0, %5\n"
" teqeq %H0, %H5\n"
" beq 2f\n"
" adds %Q1, %Q0, %Q6\n"
" adc %R1, %R0, %R6\n"
" strexd %2, %1, %H1, [%4]\n"
" teq %2, #0\n"
" bne 1b\n"
"2:"
        : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (u), "r" (a)
        : "cc");

        if (oldval != u)
                smp_mb();

        return oldval;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
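
/*
 * As with the 32-bit version, arch_atomic64_fetch_add_unless() returns the
 * previous value and only adds 'a' when that value differs from 'u'; the
 * trailing smp_mb() is issued only when the counter was actually modified.
 */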

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
#endif