/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/bits.h>
#include <linux/build_bug.h>
#include <asm/barrier.h>
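
/*
 * __xchg_asm() emits a single atomic memory operation (amswap_db.w or
 * amswap_db.d) that swaps *m with val and returns the previous value;
 * the "_db" form of the instruction also provides the memory barrier.
 */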
#define __xchg_asm(amswap_db, m, val)		\
({						\
		__typeof(val) __ret;		\
						\
		__asm__ __volatile__ (		\
		" "amswap_db" %1, %z2, %0 \n"	\
		: "+ZB" (*m), "=&r" (__ret)	\
		: "Jr" (val)			\
		: "memory");			\
						\
		__ret;				\
})
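
/*
 * 8-bit and 16-bit exchange: emulated with an ll.w/sc.w loop that
 * read-modify-writes only the relevant bytes of the naturally aligned
 * 32-bit word containing the operand.
 */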
static inline unsigned int __xchg_small(volatile void *ptr, unsigned int val,
					unsigned int size)
{
	unsigned int shift;
	u32 old32, mask, temp;
	volatile u32 *ptr32;

	/* Mask value to the correct size. */
	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
	val &= mask;

	/*
	 * Calculate a shift & mask that correspond to the value we wish to
	 * exchange within the naturally aligned 4 byte integer that includes
	 * it.
	 */
	shift = (unsigned long)ptr & 0x3;
	shift *= BITS_PER_BYTE;
	mask <<= shift;

	/*
	 * Calculate a pointer to the naturally aligned 4 byte integer that
	 * includes our byte of interest, and load its value.
	 */
	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);

	asm volatile (
	"1:	ll.w	%0, %3		\n"
	"	andn	%1, %0, %z4	\n"
	"	or	%1, %1, %z5	\n"
	"	sc.w	%1, %2		\n"
	"	beqz	%1, 1b		\n"
	: "=&r" (old32), "=&r" (temp), "=ZC" (*ptr32)
	: "ZC" (*ptr32), "Jr" (mask), "Jr" (val << shift)
	: "memory");

	return (old32 & mask) >> shift;
}
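
/*
 * __xchg() dispatches on operand size: 1- and 2-byte values go through the
 * LL/SC emulation above, 4- and 8-byte values use a single amswap_db
 * instruction, and any other size is rejected at compile time by BUILD_BUG().
 */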
static __always_inline unsigned long
__xchg(volatile void *ptr, unsigned long x, int size)
{
	switch (size) {
	case 1:
	case 2:
		return __xchg_small(ptr, x, size);

	case 4:
		return __xchg_asm("amswap_db.w", (volatile u32 *)ptr, (u32)x);

	case 8:
		return __xchg_asm("amswap_db.d", (volatile u64 *)ptr, (u64)x);

	default:
		BUILD_BUG();
	}

	return 0;
}
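
/*
 * arch_xchg() is the arch hook behind xchg(); it forwards to __xchg() and
 * casts the result back to the pointed-to type of ptr.
 */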
#define arch_xchg(ptr, x)						\
({									\
	__typeof__(*(ptr)) __res;					\
									\
	__res = (__typeof__(*(ptr)))					\
		__xchg((ptr), (unsigned long)(x), sizeof(*(ptr)));	\
									\
	__res;								\
})
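
/*
 * __cmpxchg_asm() is an LL/SC compare-and-exchange loop: the loaded value is
 * compared against old, and new is stored only if they match; a failed sc
 * retries from the ll. __WEAK_LLSC_MB (from asm/barrier.h) supplies the
 * ordering needed on the early-exit path when the comparison fails.
 */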
#define __cmpxchg_asm(ld, st, m, old, new)				\
({									\
	__typeof(old) __ret;						\
									\
	__asm__ __volatile__(						\
	"1:	" ld "	%0, %2		# __cmpxchg_asm \n"		\
	"	bne	%0, %z3, 2f	\n"				\
	"	move	$t0, %z4	\n"				\
	"	" st "	$t0, %1		\n"				\
	"	beqz	$t0, 1b		\n"				\
	"2:				\n"				\
	__WEAK_LLSC_MB							\
	: "=&r" (__ret), "=ZB"(*m)					\
	: "ZB"(*m), "Jr" (old), "Jr" (new)				\
	: "t0", "memory");						\
									\
	__ret;								\
})
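
/*
 * As with __xchg_small(), 1- and 2-byte cmpxchg is emulated by comparing and
 * replacing only the masked bytes within the containing aligned 32-bit word.
 */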
static inline unsigned int __cmpxchg_small(volatile void *ptr, unsigned int old,
					   unsigned int new, unsigned int size)
{
	unsigned int shift;
	u32 old32, mask, temp;
	volatile u32 *ptr32;

	/* Mask inputs to the correct size. */
	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
	old &= mask;
	new &= mask;

	/*
	 * Calculate a shift & mask that correspond to the value we wish to
	 * compare & exchange within the naturally aligned 4 byte integer
	 * that includes it.
	 */
	shift = (unsigned long)ptr & 0x3;
	shift *= BITS_PER_BYTE;
	old <<= shift;
	new <<= shift;
	mask <<= shift;

	/*
	 * Calculate a pointer to the naturally aligned 4 byte integer that
	 * includes our byte of interest, and load its value.
	 */
	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);

	asm volatile (
	"1:	ll.w	%0, %3		\n"
	"	and	%1, %0, %z4	\n"
	"	bne	%1, %z5, 2f	\n"
	"	andn	%1, %0, %z4	\n"
	"	or	%1, %1, %z6	\n"
	"	sc.w	%1, %2		\n"
	"	beqz	%1, 1b		\n"
	"	b	3f		\n"
	"2:				\n"
	__WEAK_LLSC_MB
	"3:				\n"
	: "=&r" (old32), "=&r" (temp), "=ZC" (*ptr32)
	: "ZC" (*ptr32), "Jr" (mask), "Jr" (old), "Jr" (new)
	: "memory");

	return (old32 & mask) >> shift;
}
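
/*
 * __cmpxchg() mirrors __xchg(): 1/2-byte operands take the emulation path,
 * 4/8-byte operands use the ll.w/sc.w or ll.d/sc.d loop, and any other size
 * fails to build.
 */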
static __always_inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, unsigned int size)
{
	switch (size) {
	case 1:
	case 2:
		return __cmpxchg_small(ptr, old, new, size);

	case 4:
		return __cmpxchg_asm("ll.w", "sc.w", (volatile u32 *)ptr,
				     (u32)old, new);

	case 8:
		return __cmpxchg_asm("ll.d", "sc.d", (volatile u64 *)ptr,
				     (u64)old, new);

	default:
		BUILD_BUG();
	}

	return 0;
}
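
/*
 * arch_cmpxchg_local() and arch_cmpxchg() share the same implementation here:
 * both cast old and new to the pointed-to type before widening them, and cast
 * the returned (previous) value back to that type.
 */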
#define arch_cmpxchg_local(ptr, old, new)				\
	((__typeof__(*(ptr)))						\
		__cmpxchg((ptr),					\
			  (unsigned long)(__typeof__(*(ptr)))(old),	\
			  (unsigned long)(__typeof__(*(ptr)))(new),	\
			  sizeof(*(ptr))))

#define arch_cmpxchg(ptr, old, new)					\
({									\
	__typeof__(*(ptr)) __res;					\
									\
	__res = arch_cmpxchg_local((ptr), (old), (new));		\
									\
	__res;								\
})
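
/*
 * Illustrative use (not part of this header): claim a hypothetical
 * "unsigned int lock" word that is 0 when free and 1 when held:
 *
 *	if (arch_cmpxchg(&lock, 0, 1) == 0)
 *		return true;	// lock was free and is now ours
 *
 * The 64-bit variants below only add a compile-time size check; on 32-bit
 * kernels they fall back to __generic_cmpxchg64_local() from
 * <asm-generic/cmpxchg-local.h>.
 */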
#ifdef CONFIG_64BIT
#define arch_cmpxchg64_local(ptr, o, n)					\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	arch_cmpxchg_local((ptr), (o), (n));				\
  })

#define arch_cmpxchg64(ptr, o, n)					\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	arch_cmpxchg((ptr), (o), (n));					\
  })
#else
#include <asm-generic/cmpxchg-local.h>
#define arch_cmpxchg64_local(ptr, o, n)	__generic_cmpxchg64_local((ptr), (o), (n))
#define arch_cmpxchg64(ptr, o, n)	arch_cmpxchg64_local((ptr), (o), (n))
#endif

#endif /* __ASM_CMPXCHG_H */