atomic.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */
#define arch_atomic_read(v)	READ_ONCE((v)->counter)
#define arch_atomic_set(v, i)	WRITE_ONCE(((v)->counter), (i))

/*
 * The ColdFire parts cannot do some immediate to memory operations,
 * so for them we do not specify the "i" asm constraint.
 */
#ifdef CONFIG_COLDFIRE
#define	ASM_DI	"d"
#else
#define	ASM_DI	"di"
#endif
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	__asm__ __volatile__(#asm_op "l %1,%0" : "+m" (*v) : ASM_DI (i));\
}
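/*
 * As a rough sketch (illustrative only), ATOMIC_OP(add, +=, add) expands
 * to:
 *
 *	static inline void arch_atomic_add(int i, atomic_t *v)
 *	{
 *		__asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i));
 *	}
 *
 * A single add-to-memory instruction cannot be interrupted halfway, and
 * (as noted above) there is no SMP to worry about, so the void variants
 * need no loop or lock.
 */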
#ifdef CONFIG_RMW_INSNS

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	int t, tmp;							\
									\
	__asm__ __volatile__(						\
			"1:	movel %2,%1\n"				\
			"	" #asm_op "l %3,%1\n"			\
			"	casl %2,%1,%0\n"			\
			"	jne 1b"					\
			: "+m" (*v), "=&d" (t), "=&d" (tmp)		\
			: "di" (i), "2" (arch_atomic_read(v)));		\
	return t;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int t, tmp;							\
									\
	__asm__ __volatile__(						\
			"1:	movel %2,%1\n"				\
			"	" #asm_op "l %3,%1\n"			\
			"	casl %2,%1,%0\n"			\
			"	jne 1b"					\
			: "+m" (*v), "=&d" (t), "=&d" (tmp)		\
			: "di" (i), "2" (arch_atomic_read(v)));		\
	return tmp;							\
}
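/*
 * Both loops above work the same way: %2 (tmp) starts out holding the
 * value read from *v, "movel %2,%1" copies it into a scratch register,
 * the operation is applied to that copy, and "casl" (compare-and-swap,
 * 68020 and later) stores the result back only if *v still equals tmp;
 * otherwise casl reloads tmp from *v and "jne 1b" retries.  The
 * *_return variant hands back the new value (t), fetch_* the old one
 * (tmp).
 */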
#else
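/*
 * Without cas (not available on the 68000/68010 or ColdFire), fall back
 * to briefly disabling interrupts around the read-modify-write.  That is
 * sufficient here because, as noted at the top of this file, there are
 * no SMP m68k systems.
 */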
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	unsigned long flags;						\
	int t;								\
									\
	local_irq_save(flags);						\
	t = (v->counter c_op i);					\
	local_irq_restore(flags);					\
									\
	return t;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int t;								\
									\
	local_irq_save(flags);						\
	t = v->counter;							\
	v->counter c_op i;						\
	local_irq_restore(flags);					\
									\
	return t;							\
}

#endif /* CONFIG_RMW_INSNS */
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, eor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
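/*
 * At this point the generated API comprises arch_atomic_{add,sub}(),
 * arch_atomic_{add,sub}_return(), arch_atomic_fetch_{add,sub,and,or,xor}()
 * and arch_atomic_{and,or,xor}().  Note that and/or/xor get no *_return
 * variant, only fetch_*; the m68k "eor" mnemonic backs the xor ops.
 */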
static inline void arch_atomic_inc(atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}
#define arch_atomic_inc arch_atomic_inc

static inline void arch_atomic_dec(atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}
#define arch_atomic_dec arch_atomic_dec
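/*
 * The *_and_test()/_negative() helpers below exploit the fact that the
 * add/sub instruction leaves its result in the condition codes: an Scc
 * instruction (seq = set if equal, slt = set if less than, smi = set if
 * minus) then turns the relevant flag into a 0x00/0xff byte without a
 * branch.
 */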
static inline int arch_atomic_dec_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}
#define arch_atomic_dec_and_test arch_atomic_dec_and_test

static inline int arch_atomic_dec_and_test_lt(atomic_t *v)
{
	char c;
	__asm__ __volatile__(
		"subql #1,%1; slt %0"
		: "=d" (c), "=m" (*v)
		: "m" (*v));
	return c != 0;
}

static inline int arch_atomic_inc_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}
#define arch_atomic_inc_and_test arch_atomic_inc_and_test
#ifdef CONFIG_RMW_INSNS

#define arch_atomic_cmpxchg(v, o, n) ((int)arch_cmpxchg(&((v)->counter), (o), (n)))
#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
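/*
 * With cas available, arch_cmpxchg()/arch_xchg() from <asm/cmpxchg.h>
 * are already atomic, so the atomic_t variants can simply wrap them.
 */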
#else /* !CONFIG_RMW_INSNS */

static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = arch_atomic_read(v);
	if (prev == old)
		arch_atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}

static inline int arch_atomic_xchg(atomic_t *v, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = arch_atomic_read(v);
	arch_atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}

#endif /* !CONFIG_RMW_INSNS */
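/*
 * Illustrative use only (callers live in generic code, not here): a
 * lock-free increment built on the cmpxchg primitive above would look
 * roughly like
 *
 *	int old = arch_atomic_read(v), tmp;
 *
 *	while ((tmp = arch_atomic_cmpxchg(v, old, old + 1)) != old)
 *		old = tmp;
 */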
static inline int arch_atomic_sub_and_test(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("subl %2,%1; seq %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}
#define arch_atomic_sub_and_test arch_atomic_sub_and_test

static inline int arch_atomic_add_negative(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("addl %2,%1; smi %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}
#define arch_atomic_add_negative arch_atomic_add_negative
#endif /* __ARCH_M68K_ATOMIC__ */