/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_X86_CMPXCHG_H
#define ASM_X86_CMPXCHG_H

#include <linux/compiler.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h> /* Provides LOCK_PREFIX */
/*
 * Non-existent functions to indicate usage errors at link time
 * (or compile-time if the compiler implements __compiletime_error()).
 * These are never defined anywhere; a call to one of them can only be
 * emitted from the dead "default" switch arms below, so reaching one
 * means an unsupported operand size was used.
 */
extern void __xchg_wrong_size(void)
	__compiletime_error("Bad argument size for xchg");
extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");
extern void __xadd_wrong_size(void)
	__compiletime_error("Bad argument size for xadd");
extern void __add_wrong_size(void)
	__compiletime_error("Bad argument size for add");
/*
 * Constants for operation sizes. On 32-bit, the 64-bit size is set to
 * -1 because sizeof will never return -1, thereby making those switch
 * case statements guaranteed dead code which the compiler will
 * eliminate, and allowing the "missing symbol in the default case" to
 * indicate a usage error.
 */
#define __X86_CASE_B	1
#define __X86_CASE_W	2
#define __X86_CASE_L	4
#ifdef CONFIG_64BIT
#define __X86_CASE_Q	8
#else
#define	__X86_CASE_Q	-1		/* sizeof will never return -1 */
#endif
/*
 * An exchange-type operation, which takes a value and a pointer, and
 * returns the old value.
 *
 * The operand-size suffix (b/w/l/q) is selected from sizeof(*(ptr));
 * an unsupported size falls into the default case, which references
 * the non-existent __<op>_wrong_size() and fails the build.
 */
#define __xchg_op(ptr, arg, op, lock)					\
	({								\
		__typeof__ (*(ptr)) __ret = (arg);			\
		switch (sizeof(*(ptr))) {				\
		case __X86_CASE_B:					\
			/* "q": byte ops need a reg with an 8-bit part */ \
			asm volatile (lock #op "b %b0, %1\n"		\
				      : "+q" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_W:					\
			asm volatile (lock #op "w %w0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_L:					\
			asm volatile (lock #op "l %0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_Q:					\
			asm volatile (lock #op "q %q0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		default:						\
			/* Unsupported size: link-time/compile-time error */ \
			__ ## op ## _wrong_size();			\
		}							\
		__ret;							\
	})
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Since this is generally used to protect other memory information, we
 * use "asm volatile" and "memory" clobbers to prevent gcc from moving
 * information around.
 */
#define arch_xchg(ptr, v)	__xchg_op((ptr), (v), xchg, "")
/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 *
 * CMPXCHG implicitly uses %eax/%rax ("a") for the comparand; "0" ties
 * __old to the same register, and __ret receives the value that was in
 * memory regardless of success. "lock" is the caller-supplied prefix
 * string (LOCK_PREFIX, "lock; ", or empty for the _local variant).
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)			\
({									\
	__typeof__(*(ptr)) __ret;					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	switch (size) {							\
	case __X86_CASE_B:						\
	{								\
		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
		asm volatile(lock "cmpxchgb %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "q" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_W:						\
	{								\
		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
		asm volatile(lock "cmpxchgw %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_L:						\
	{								\
		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
		asm volatile(lock "cmpxchgl %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_Q:						\
	{								\
		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
		asm volatile(lock "cmpxchgq %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__ret;								\
})
/* SMP-safe variant: LOCK_PREFIX comes from <asm/alternative.h>. */
#define __cmpxchg(ptr, old, new, size)					\
	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

/* Always uses a literal "lock" prefix, regardless of kernel config. */
#define __sync_cmpxchg(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

/* No lock prefix: atomic only with respect to the local CPU. */
#define __cmpxchg_local(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "")

/* Word-size-specific helpers (e.g. cmpxchg64) live in these headers. */
#ifdef CONFIG_X86_32
# include <asm/cmpxchg_32.h>
#else
# include <asm/cmpxchg_64.h>
#endif
/*
 * Public entry points: derive the operand size from the pointee type
 * and dispatch to the matching __*cmpxchg flavour above.
 */
#define arch_cmpxchg(ptr, old, new)					\
	__cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define arch_sync_cmpxchg(ptr, old, new)				\
	__sync_cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define arch_cmpxchg_local(ptr, old, new)				\
	__cmpxchg_local(ptr, old, new, sizeof(*(ptr)))
/*
 * try_cmpxchg: like cmpxchg, but takes a POINTER to the expected old
 * value (_pold) and yields a boolean. On failure the actual memory
 * value is written back through _pold, so retry loops need not reload
 * it. Success is read straight from ZF via CC_SET(z)/CC_OUT(z), which
 * CMPXCHG sets when the exchange happened.
 */
#define __raw_try_cmpxchg(_ptr, _pold, _new, size, lock)		\
({									\
	bool success;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	switch (size) {							\
	case __X86_CASE_B:						\
	{								\
		volatile u8 *__ptr = (volatile u8 *)(_ptr);		\
		asm volatile(lock "cmpxchgb %[new], %[ptr]"		\
			     CC_SET(z)					\
			     : CC_OUT(z) (success),			\
			       [ptr] "+m" (*__ptr),			\
			       [old] "+a" (__old)			\
			     : [new] "q" (__new)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_W:						\
	{								\
		volatile u16 *__ptr = (volatile u16 *)(_ptr);		\
		asm volatile(lock "cmpxchgw %[new], %[ptr]"		\
			     CC_SET(z)					\
			     : CC_OUT(z) (success),			\
			       [ptr] "+m" (*__ptr),			\
			       [old] "+a" (__old)			\
			     : [new] "r" (__new)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_L:						\
	{								\
		volatile u32 *__ptr = (volatile u32 *)(_ptr);		\
		asm volatile(lock "cmpxchgl %[new], %[ptr]"		\
			     CC_SET(z)					\
			     : CC_OUT(z) (success),			\
			       [ptr] "+m" (*__ptr),			\
			       [old] "+a" (__old)			\
			     : [new] "r" (__new)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_Q:						\
	{								\
		volatile u64 *__ptr = (volatile u64 *)(_ptr);		\
		asm volatile(lock "cmpxchgq %[new], %[ptr]"		\
			     CC_SET(z)					\
			     : CC_OUT(z) (success),			\
			       [ptr] "+m" (*__ptr),			\
			       [old] "+a" (__old)			\
			     : [new] "r" (__new)			\
			     : "memory");				\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	/* On failure, report the value actually found in memory. */	\
	if (unlikely(!success))						\
		*_old = __old;						\
	likely(success);						\
})
/* Locked try_cmpxchg; size is inferred from the pointee type. */
#define __try_cmpxchg(ptr, pold, new, size)				\
	__raw_try_cmpxchg((ptr), (pold), (new), (size), LOCK_PREFIX)

#define arch_try_cmpxchg(ptr, pold, new)				\
	__try_cmpxchg((ptr), (pold), (new), sizeof(*(ptr)))
/*
 * xadd() adds "inc" to "*ptr" and atomically returns the previous
 * value of "*ptr".
 *
 * xadd() is locked when multiple CPUs are online
 * (LOCK_PREFIX is the config/SMP-dependent lock prefix).
 */
#define __xadd(ptr, inc, lock)	__xchg_op((ptr), (inc), xadd, lock)
#define xadd(ptr, inc)		__xadd((ptr), (inc), LOCK_PREFIX)
/*
 * Double-word compare-and-exchange on two adjacent, naturally-aligned
 * longs, via CMPXCHG8B (32-bit) / CMPXCHG16B (64-bit): operand 5 is
 * the immediate 2*sizeof(long), so "cmpxchg%c5b" expands to the right
 * mnemonic. Old value goes in %a:%d, new value in %b:%c; ZF reports
 * success through CC_SET(e)/CC_OUT(e).
 *
 * The BUILD_BUG_ON/VM_BUG_ON checks enforce long-sized operands, the
 * required 2*sizeof(long) alignment of p1, and that p2 immediately
 * follows p1 in memory.
 */
#define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2)			\
({									\
	bool __ret;							\
	__typeof__(*(p1)) __old1 = (o1), __new1 = (n1);			\
	__typeof__(*(p2)) __old2 = (o2), __new2 = (n2);			\
	BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long));			\
	BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long));			\
	VM_BUG_ON((unsigned long)(p1) % (2 * sizeof(long)));		\
	VM_BUG_ON((unsigned long)((p1) + 1) != (unsigned long)(p2));	\
	asm volatile(pfx "cmpxchg%c5b %1"				\
		     CC_SET(e)						\
		     : CC_OUT(e) (__ret),				\
		       "+m" (*(p1)), "+m" (*(p2)),			\
		       "+a" (__old1), "+d" (__old2)			\
		     : "i" (2 * sizeof(long)),				\
		       "b" (__new1), "c" (__new2));			\
	__ret;								\
})

#define arch_cmpxchg_double(p1, p2, o1, o2, n1, n2)			\
	__cmpxchg_double(LOCK_PREFIX, p1, p2, o1, o2, n1, n2)

/* No lock prefix: atomic only with respect to the local CPU. */
#define arch_cmpxchg_double_local(p1, p2, o1, o2, n1, n2)		\
	__cmpxchg_double(, p1, p2, o1, o2, n1, n2)

#endif	/* ASM_X86_CMPXCHG_H */