cmpxchg_32.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

/*
 * Note: if you use set_64bit(), __cmpxchg64(), or their variants,
 *       you need to test for the feature in boot_cpu_data.
 */

/*
 * CMPXCHG8B only writes to the target if we had the previous
 * value in registers, otherwise it acts as a read and gives us the
 * "new previous" value.  That is why there is a loop.  Preloading
 * EDX:EAX is a performance optimization: in the common case it means
 * we need only one locked operation.
 *
 * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
 * least an FPU save and/or %cr0.ts manipulation.
 *
 * cmpxchg8b must be used with the lock prefix here so that the
 * instruction executes atomically and the reader side sees a
 * coherent 64-bit value.
 */
static inline void set_64bit(volatile u64 *ptr, u64 value)
{
        u32 low  = value;
        u32 high = value >> 32;
        u64 prev = *ptr;

        asm volatile("\n1:\t"
                     LOCK_PREFIX "cmpxchg8b %0\n\t"
                     "jnz 1b"
                     : "=m" (*ptr), "+A" (prev)
                     : "b" (low), "c" (high)
                     : "memory");
}
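
/*
 * Usage sketch (illustrative only, not part of this header): set_64bit()
 * gives a 64-bit store that a concurrent reader can never observe half
 * written, even on 32-bit CPUs.  The "example_" names are hypothetical.
 */
#if 0
static volatile u64 example_stamp;              /* hypothetical shared slot */

static void example_publish_stamp(u64 now)
{
        /* Readers polling example_stamp never see a torn half-write. */
        set_64bit(&example_stamp, now);
}
#endif
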
#ifdef CONFIG_X86_CMPXCHG64
#define arch_cmpxchg64(ptr, o, n)                                       \
        ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
                                         (unsigned long long)(n)))
#define arch_cmpxchg64_local(ptr, o, n)                                 \
        ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
                                               (unsigned long long)(n)))
#define arch_try_cmpxchg64(ptr, po, n)                                  \
        __try_cmpxchg64((ptr), (unsigned long long *)(po),              \
                        (unsigned long long)(n))
#endif
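
/*
 * A minimal sketch of the classic compare-and-swap retry loop built on
 * arch_cmpxchg64(), which returns the previous memory value; assumes a
 * hypothetical shared counter, illustrative only.
 */
#if 0
static u64 example_counter;                     /* hypothetical 64-bit counter */

static u64 example_add64(u64 delta)
{
        u64 old, new;

        do {
                old = example_counter;          /* racy snapshot */
                new = old + delta;
                /* Retry if another CPU changed the value in between. */
        } while (arch_cmpxchg64(&example_counter, old, new) != old);

        return new;
}
#endif
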
static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
        u64 prev;

        asm volatile(LOCK_PREFIX "cmpxchg8b %1"
                     : "=A" (prev),
                       "+m" (*ptr)
                     : "b" ((u32)new),
                       "c" ((u32)(new >> 32)),
                       "0" (old)
                     : "memory");
        return prev;
}

static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
        u64 prev;

        asm volatile("cmpxchg8b %1"
                     : "=A" (prev),
                       "+m" (*ptr)
                     : "b" ((u32)new),
                       "c" ((u32)(new >> 32)),
                       "0" (old)
                     : "memory");
        return prev;
}

static inline bool __try_cmpxchg64(volatile u64 *ptr, u64 *pold, u64 new)
{
        bool success;
        u64 old = *pold;

        asm volatile(LOCK_PREFIX "cmpxchg8b %[ptr]"
                     CC_SET(z)
                     : CC_OUT(z) (success),
                       [ptr] "+m" (*ptr),
                       "+A" (old)
                     : "b" ((u32)new),
                       "c" ((u32)(new >> 32))
                     : "memory");

        if (unlikely(!success))
                *pold = old;

        return success;
}
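
/*
 * A sketch of the retry loop __try_cmpxchg64() is designed for, assuming
 * a hypothetical flag word; because a failed attempt writes the current
 * memory value back through 'old' (via the "+A" operand), the loop needs
 * no explicit re-read.  Illustrative only.
 */
#if 0
static u64 example_flags;                       /* hypothetical 64-bit flag word */

static void example_set_flag_bits(u64 mask)
{
        u64 old = example_flags;

        while (!__try_cmpxchg64(&example_flags, &old, old | mask))
                ;       /* 'old' was refreshed by the failed cmpxchg8b */
}
#endif
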
#ifndef CONFIG_X86_CMPXCHG64
/*
 * When building a kernel capable of running on the 80386 and 80486,
 * cmpxchg8b may need to be emulated, since those CPUs lack the
 * instruction.
 */
#define arch_cmpxchg64(ptr, o, n)                               \
({                                                              \
        __typeof__(*(ptr)) __ret;                               \
        __typeof__(*(ptr)) __old = (o);                         \
        __typeof__(*(ptr)) __new = (n);                         \
        alternative_io(LOCK_PREFIX_HERE                         \
                        "call cmpxchg8b_emu",                   \
                        "lock; cmpxchg8b (%%esi)",              \
                        X86_FEATURE_CX8,                        \
                        "=A" (__ret),                           \
                        "S" ((ptr)), "0" (__old),               \
                        "b" ((unsigned int)__new),              \
                        "c" ((unsigned int)(__new>>32))         \
                        : "memory");                            \
        __ret; })

#define arch_cmpxchg64_local(ptr, o, n)                         \
({                                                              \
        __typeof__(*(ptr)) __ret;                               \
        __typeof__(*(ptr)) __old = (o);                         \
        __typeof__(*(ptr)) __new = (n);                         \
        alternative_io("call cmpxchg8b_emu",                    \
                        "cmpxchg8b (%%esi)",                    \
                        X86_FEATURE_CX8,                        \
                        "=A" (__ret),                           \
                        "S" ((ptr)), "0" (__old),               \
                        "b" ((unsigned int)__new),              \
                        "c" ((unsigned int)(__new>>32))         \
                        : "memory");                            \
        __ret; })
#endif

#define system_has_cmpxchg_double()     boot_cpu_has(X86_FEATURE_CX8)
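
/*
 * Per the note at the top of this file: on kernels built without
 * CONFIG_X86_CMPXCHG64, code that wants the native instruction rather
 * than the cmpxchg8b_emu fallback can test the feature bits recorded in
 * boot_cpu_data.  A minimal sketch; the helper name is hypothetical.
 */
#if 0
static bool example_have_cmpxchg8b(void)
{
        /* X86_FEATURE_CX8 is the CPUID flag for cmpxchg8b. */
        return boot_cpu_has(X86_FEATURE_CX8);
}
#endif
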
#endif  /* _ASM_X86_CMPXCHG_32_H */