
/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef _ASM_ARC_ATOMIC_LLSC_H
#define _ASM_ARC_ATOMIC_LLSC_H

#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
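/*
 * ATOMIC_OP() generates the plain (no return value) atomic ops as an
 * LL/SC retry loop: llock loads the counter and starts link monitoring,
 * scond stores the updated value only if nothing else wrote the location
 * in between, and bnz 1b retries until the conditional store succeeds.
 */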
#define ATOMIC_OP(op, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)	\
{								\
	unsigned int val;					\
								\
	__asm__ __volatile__(					\
	"1:	llock   %[val], [%[ctr]]		\n"	\
	"	" #asm_op " %[val], %[val], %[i]	\n"	\
	"	scond   %[val], [%[ctr]]		\n"	\
	"	bnz     1b				\n"	\
	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
	  [i]	"ir"	(i)					\
	: "cc", "memory");					\
}
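
/*
 * ATOMIC_OP_RETURN() is the same retry loop, but hands the updated value
 * back to the caller. Only the _relaxed form is defined here; the generic
 * atomic code layers the acquire/release/fully-ordered variants on top.
 */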
#define ATOMIC_OP_RETURN(op, asm_op)				\
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{								\
	unsigned int val;					\
								\
	__asm__ __volatile__(					\
	"1:	llock   %[val], [%[ctr]]		\n"	\
	"	" #asm_op " %[val], %[val], %[i]	\n"	\
	"	scond   %[val], [%[ctr]]		\n"	\
	"	bnz     1b				\n"	\
	: [val]	"=&r"	(val)					\
	: [ctr]	"r"	(&v->counter),				\
	  [i]	"ir"	(i)					\
	: "cc", "memory");					\
								\
	return val;						\
}

#define arch_atomic_add_return_relaxed		arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed		arch_atomic_sub_return_relaxed
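
/*
 * ATOMIC_FETCH_OP() also updates the counter in an LL/SC loop, but
 * returns the value observed *before* the operation (orig), matching
 * the fetch_* atomic API.
 */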
#define ATOMIC_FETCH_OP(op, asm_op)				\
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{								\
	unsigned int val, orig;					\
								\
	__asm__ __volatile__(					\
	"1:	llock   %[orig], [%[ctr]]		\n"	\
	"	" #asm_op " %[val], %[orig], %[i]	\n"	\
	"	scond   %[val], [%[ctr]]		\n"	\
	"	bnz     1b				\n"	\
	: [val]	"=&r"	(val),					\
	  [orig] "=&r"	(orig)					\
	: [ctr]	"r"	(&v->counter),				\
	  [i]	"ir"	(i)					\
	: "cc", "memory");					\
								\
	return orig;						\
}

#define arch_atomic_fetch_add_relaxed		arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed		arch_atomic_fetch_sub_relaxed

#define arch_atomic_fetch_and_relaxed		arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_andnot_relaxed	arch_atomic_fetch_andnot_relaxed
#define arch_atomic_fetch_or_relaxed		arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed		arch_atomic_fetch_xor_relaxed
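
/*
 * ATOMIC_OPS(op, asm_op) stamps out all three flavours for an operation,
 * e.g. ATOMIC_OPS(add, add) expands to arch_atomic_add(),
 * arch_atomic_add_return_relaxed() and arch_atomic_fetch_add_relaxed().
 */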
#define ATOMIC_OPS(op, asm_op)					\
	ATOMIC_OP(op, asm_op)					\
	ATOMIC_OP_RETURN(op, asm_op)				\
	ATOMIC_FETCH_OP(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, sub)

#undef ATOMIC_OPS
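
/*
 * The bitwise ops only need the void and fetch forms, so ATOMIC_OPS is
 * redefined without ATOMIC_OP_RETURN for them.
 */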
#define ATOMIC_OPS(op, asm_op)					\
	ATOMIC_OP(op, asm_op)					\
	ATOMIC_FETCH_OP(op, asm_op)

ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)

#define arch_atomic_andnot		arch_atomic_andnot

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#endif