atomic.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Atomic operations for the Hexagon architecture
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/* Normal writes in our arch don't clear lock reservations */

static inline void arch_atomic_set(atomic_t *v, int new)
{
        asm volatile(
                "1: r6 = memw_locked(%0);\n"
                " memw_locked(%0,p0) = %1;\n"
                " if (!P0) jump 1b;\n"
                :
                : "r" (&v->counter), "r" (new)
                : "memory", "p0", "r6"
        );
}

#define arch_atomic_set_release(v, i)	arch_atomic_set((v), (i))
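/*
 * Illustrative note, not part of the original header: per the comment above,
 * a plain store here does not clear another CPU's lock reservation, so a
 * simple WRITE_ONCE()-style set could be overwritten by a concurrent
 * load-locked/store-conditional sequence that had already read the stale
 * value.  arch_atomic_set() therefore performs the store through memw_locked
 * and retries until the conditional store succeeds; in pseudocode (with
 * hypothetical __ll()/__sc() helpers that do not exist in this header):
 *
 *	do {
 *		(void)__ll(&v->counter);		// hypothetical load-locked
 *	} while (!__sc(&v->counter, new));		// hypothetical store-conditional
 */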
/**
 * arch_atomic_read - reads a word, atomically
 * @v: pointer to atomic value
 *
 * Assumes all word reads on our architecture are atomic.
 */
#define arch_atomic_read(v)		READ_ONCE((v)->counter)

/**
 * arch_atomic_xchg - atomically exchange a value
 * @v: pointer to memory to change
 * @new: new value (technically passed in a register -- see xchg)
 */
#define arch_atomic_xchg(v, new)	(arch_xchg(&((v)->counter), (new)))
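/*
 * Usage sketch, illustrative only (callers normally go through the generic
 * atomic_xchg() wrapper rather than the arch_ level): the macro returns the
 * value that was in the counter at the moment the new one was installed.
 *
 *	atomic_t pending = ATOMIC_INIT(0);
 *
 *	if (arch_atomic_xchg(&pending, 1) == 0) {
 *		// this caller observed 0 and was the one to set the flag
 *	}
 */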
/**
 * arch_atomic_cmpxchg - atomic compare-and-exchange values
 * @v: pointer to value to change
 * @old: desired old value to match
 * @new: new value to put in
 *
 * Parameters are then pointer, value-in-register, value-in-register,
 * and the output is the old value.
 *
 * Apparently this is complicated for archs that don't support
 * memw_locked the way we do (or it's broken or whatever).
 *
 * Kind of the lynchpin of the rest of the generically defined routines.
 * Remember V2 had that bug with dotnew predicate set by memw_locked.
 *
 * "old" is the "expected" old value, __oldval is the actual old value
 */
static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
        int __oldval;

        asm volatile(
                "1: %0 = memw_locked(%1);\n"
                " { P0 = cmp.eq(%0,%2);\n"
                " if (!P0.new) jump:nt 2f; }\n"
                " memw_locked(%1,P0) = %3;\n"
                " if (!P0) jump 1b;\n"
                "2:\n"
                : "=&r" (__oldval)
                : "r" (&v->counter), "r" (old), "r" (new)
                : "memory", "p0"
        );

        return __oldval;
}
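/*
 * Usage sketch, illustrative only: the generically defined routines that sit
 * on top of cmpxchg follow the usual compare-and-swap retry loop.  The helper
 * name below is hypothetical and not part of this header.
 *
 *	static inline int hypothetical_inc_if_positive(atomic_t *v)
 *	{
 *		int c = arch_atomic_read(v);
 *
 *		while (c > 0) {
 *			int old = arch_atomic_cmpxchg(v, c, c + 1);
 *
 *			if (old == c)
 *				break;		// our swap landed
 *			c = old;		// lost the race, retry with the new value
 *		}
 *		return c;			// value observed before any increment
 *	}
 */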
#define ATOMIC_OP(op) \
static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
        int output; \
 \
        __asm__ __volatile__ ( \
        "1: %0 = memw_locked(%1);\n" \
        " %0 = "#op "(%0,%2);\n" \
        " memw_locked(%1,P3)=%0;\n" \
        " if (!P3) jump 1b;\n" \
        : "=&r" (output) \
        : "r" (&v->counter), "r" (i) \
        : "memory", "p3" \
        ); \
}

#define ATOMIC_OP_RETURN(op) \
static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
        int output; \
 \
        __asm__ __volatile__ ( \
        "1: %0 = memw_locked(%1);\n" \
        " %0 = "#op "(%0,%2);\n" \
        " memw_locked(%1,P3)=%0;\n" \
        " if (!P3) jump 1b;\n" \
        : "=&r" (output) \
        : "r" (&v->counter), "r" (i) \
        : "memory", "p3" \
        ); \
        return output; \
}

#define ATOMIC_FETCH_OP(op) \
static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
        int output, val; \
 \
        __asm__ __volatile__ ( \
        "1: %0 = memw_locked(%2);\n" \
        " %1 = "#op "(%0,%3);\n" \
        " memw_locked(%2,P3)=%1;\n" \
        " if (!P3) jump 1b;\n" \
        : "=&r" (output), "=&r" (val) \
        : "r" (&v->counter), "r" (i) \
        : "memory", "p3" \
        ); \
        return output; \
}

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
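/*
 * Expansion sketch, for reference only: ATOMIC_OPS(add) above generates
 * arch_atomic_add(), arch_atomic_add_return() and arch_atomic_fetch_add().
 * After preprocessing, arch_atomic_add() comes out roughly as:
 *
 *	static inline void arch_atomic_add(int i, atomic_t *v)
 *	{
 *		int output;
 *
 *		__asm__ __volatile__ (
 *		"1: %0 = memw_locked(%1);\n"
 *		" %0 = add(%0,%2);\n"
 *		" memw_locked(%1,P3)=%0;\n"
 *		" if (!P3) jump 1b;\n"
 *		: "=&r" (output)
 *		: "r" (&v->counter), "r" (i)
 *		: "memory", "p3"
 *		);
 *	}
 */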
/**
 * arch_atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer to value
 * @a: amount to add
 * @u: don't add if the value is already equal to this
 *
 * Returns the old value.
 */
static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
        int __oldval;
        register int tmp;

        asm volatile(
                "1: %0 = memw_locked(%2);"
                " {"
                " p3 = cmp.eq(%0, %4);"
                " if (p3.new) jump:nt 2f;"
                " %1 = add(%0, %3);"
                " }"
                " memw_locked(%2, p3) = %1;"
                " {"
                " if (!p3) jump 1b;"
                " }"
                "2:"
                : "=&r" (__oldval), "=&r" (tmp)
                : "r" (v), "r" (a), "r" (u)
                : "memory", "p3"
        );

        return __oldval;
}

#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
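/*
 * Usage sketch, illustrative only: the classic consumer of this primitive is
 * "take a reference unless it has already dropped to zero".  The helper name
 * below is hypothetical and not part of this header.
 *
 *	static inline bool hypothetical_get_unless_zero(atomic_t *refs)
 *	{
 *		return arch_atomic_fetch_add_unless(refs, 1, 0) != 0;
 *	}
 */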
#endif