/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * NOTE: don't mess with the types below!  The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <[email protected]>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>
#include <asm/barrier.h>

#define ATOMIC64_INIT(i)	{ (i) }

#define arch_atomic_read(v)	READ_ONCE((v)->counter)
#define arch_atomic64_read(v)	READ_ONCE((v)->counter)

#define arch_atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))
#define arch_atomic64_set(v,i)	WRITE_ONCE(((v)->counter), (i))
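
/*
 * ATOMIC_OP() and ATOMIC_FETCH_OP() expand into helpers of the form
 * ia64_atomic_<op>() / ia64_atomic_fetch_<op>() that implement the
 * operation as a compare-and-exchange retry loop: read the counter,
 * compute "old c_op i", and retry the ia64_cmpxchg() until it returns
 * the value that was read, i.e. no concurrent update slipped in between
 * the read and the exchange.  ATOMIC_OP() returns the new value,
 * ATOMIC_FETCH_OP() the old one.  For example, ATOMIC_OPS(add, +) below
 * generates ia64_atomic_add() and ia64_atomic_fetch_add().
 */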
#define ATOMIC_OP(op, c_op)						\
static __inline__ int							\
ia64_atomic_##op (int i, atomic_t *v)					\
{									\
	__s32 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = arch_atomic_read(v);				\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return new;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static __inline__ int							\
ia64_atomic_fetch_##op (int i, atomic_t *v)				\
{									\
	__s32 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = arch_atomic_read(v);				\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return old;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +)
ATOMIC_OPS(sub, -)
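
/*
 * The ia64 fetchadd instruction only accepts the immediate increments
 * +/-1, +/-4, +/-8 and +/-16.  __ia64_atomic_const(i) is true only when
 * "i" is a compile-time constant from that set, so the helpers below can
 * take the single-instruction ia64_fetchadd()/ia64_fetch_and_add() fast
 * path and otherwise fall back to the cmpxchg-based ia64_atomic_*()
 * loops.  When not optimizing, the macro is simply 0 and the cmpxchg
 * path is always taken.
 */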
#ifdef __OPTIMIZE__
#define __ia64_atomic_const(i)						\
	static const int __ia64_atomic_p = __builtin_constant_p(i) ?	\
		((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 ||	\
		 (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0;\
	__ia64_atomic_p
#else
#define __ia64_atomic_const(i)	0
#endif

#define arch_atomic_add_return(i,v)					\
({									\
	int __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic_add(__ia64_aar_i, v);			\
})

#define arch_atomic_sub_return(i,v)					\
({									\
	int __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic_sub(__ia64_asr_i, v);			\
})

#define arch_atomic_fetch_add(i,v)					\
({									\
	int __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)	\
		: ia64_atomic_fetch_add(__ia64_aar_i, v);		\
})

#define arch_atomic_fetch_sub(i,v)					\
({									\
	int __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)	\
		: ia64_atomic_fetch_sub(__ia64_asr_i, v);		\
})
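
/*
 * The bitwise operations have no fetchadd fast path; they always go
 * through the cmpxchg retry loop.  The non-fetch arch_atomic_and/or/xor()
 * wrappers simply discard the returned old value.
 */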
ATOMIC_FETCH_OP(and, &)
ATOMIC_FETCH_OP(or, |)
ATOMIC_FETCH_OP(xor, ^)

#define arch_atomic_and(i,v)	(void)ia64_atomic_fetch_and(i,v)
#define arch_atomic_or(i,v)	(void)ia64_atomic_fetch_or(i,v)
#define arch_atomic_xor(i,v)	(void)ia64_atomic_fetch_xor(i,v)

#define arch_atomic_fetch_and(i,v)	ia64_atomic_fetch_and(i,v)
#define arch_atomic_fetch_or(i,v)	ia64_atomic_fetch_or(i,v)
#define arch_atomic_fetch_xor(i,v)	ia64_atomic_fetch_xor(i,v)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP
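
/*
 * The 64-bit variants below mirror the 32-bit ones above, using s64 and
 * atomic64_t in place of int and atomic_t.
 */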
#define ATOMIC64_OP(op, c_op)						\
static __inline__ s64							\
ia64_atomic64_##op (s64 i, atomic64_t *v)				\
{									\
	s64 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = arch_atomic64_read(v);				\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return new;							\
}

#define ATOMIC64_FETCH_OP(op, c_op)					\
static __inline__ s64							\
ia64_atomic64_fetch_##op (s64 i, atomic64_t *v)				\
{									\
	s64 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = arch_atomic64_read(v);				\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return old;							\
}

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +)
ATOMIC64_OPS(sub, -)

#define arch_atomic64_add_return(i,v)					\
({									\
	s64 __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic64_add(__ia64_aar_i, v);			\
})

#define arch_atomic64_sub_return(i,v)					\
({									\
	s64 __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic64_sub(__ia64_asr_i, v);			\
})

#define arch_atomic64_fetch_add(i,v)					\
({									\
	s64 __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)	\
		: ia64_atomic64_fetch_add(__ia64_aar_i, v);		\
})

#define arch_atomic64_fetch_sub(i,v)					\
({									\
	s64 __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)	\
		: ia64_atomic64_fetch_sub(__ia64_asr_i, v);		\
})

ATOMIC64_FETCH_OP(and, &)
ATOMIC64_FETCH_OP(or, |)
ATOMIC64_FETCH_OP(xor, ^)

#define arch_atomic64_and(i,v)	(void)ia64_atomic64_fetch_and(i,v)
#define arch_atomic64_or(i,v)	(void)ia64_atomic64_fetch_or(i,v)
#define arch_atomic64_xor(i,v)	(void)ia64_atomic64_fetch_xor(i,v)

#define arch_atomic64_fetch_and(i,v)	ia64_atomic64_fetch_and(i,v)
#define arch_atomic64_fetch_or(i,v)	ia64_atomic64_fetch_or(i,v)
#define arch_atomic64_fetch_xor(i,v)	ia64_atomic64_fetch_xor(i,v)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP
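
/*
 * xchg and cmpxchg are provided by applying the generic arch_xchg()/
 * arch_cmpxchg() directly to the counter field; the plain add/sub
 * wrappers just discard the value returned by the *_return() forms.
 */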
#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), old, new))
#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))

#define arch_atomic64_cmpxchg(v, old, new) \
	(arch_cmpxchg(&((v)->counter), old, new))
#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))

#define arch_atomic_add(i,v)		(void)arch_atomic_add_return((i), (v))
#define arch_atomic_sub(i,v)		(void)arch_atomic_sub_return((i), (v))

#define arch_atomic64_add(i,v)		(void)arch_atomic64_add_return((i), (v))
#define arch_atomic64_sub(i,v)		(void)arch_atomic64_sub_return((i), (v))

#endif /* _ASM_IA64_ATOMIC_H */