atomic.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC_H
#define _ASM_X86_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

/**
 * arch_atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
static __always_inline int arch_atomic_read(const atomic_t *v)
{
	/*
	 * Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here;
	 * it's a non-inlined function that increases binary size and stack usage.
	 */
	return __READ_ONCE((v)->counter);
}

/**
 * arch_atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
	__WRITE_ONCE(v->counter, i);
}
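
/*
 * Illustrative sketch (not part of the original header): how the plain
 * read/set helpers above might be combined.  The function name and the
 * reset-to-zero policy are assumptions made purely for illustration.
 */
static __always_inline void example_atomic_clear(atomic_t *v)
{
	/* A plain load; an aligned 32-bit read needs no LOCK prefix. */
	if (arch_atomic_read(v) != 0)
		arch_atomic_set(v, 0);	/* A plain store, not a RMW operation. */
}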
/**
 * arch_atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __always_inline void arch_atomic_add(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "addl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i) : "memory");
}

/**
 * arch_atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static __always_inline void arch_atomic_sub(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "subl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i) : "memory");
}

/**
 * arch_atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, e, "er", i);
}
#define arch_atomic_sub_and_test arch_atomic_sub_and_test

/**
 * arch_atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __always_inline void arch_atomic_inc(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "incl %0"
		     : "+m" (v->counter) :: "memory");
}
#define arch_atomic_inc arch_atomic_inc

/**
 * arch_atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static __always_inline void arch_atomic_dec(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "decl %0"
		     : "+m" (v->counter) :: "memory");
}
#define arch_atomic_dec arch_atomic_dec

/**
 * arch_atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, e);
}
#define arch_atomic_dec_and_test arch_atomic_dec_and_test
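
/*
 * Usage sketch (not part of the original header; names are hypothetical):
 * the classic reference-count "put" pattern built on arch_atomic_dec_and_test().
 * Only the thread that observes the 1 -> 0 transition releases the object.
 */
struct example_obj {
	atomic_t refs;
	/* ... payload ... */
};

static inline void example_obj_put(struct example_obj *obj,
				   void (*release)(struct example_obj *))
{
	/* Returns true exactly once, for the final reference holder. */
	if (arch_atomic_dec_and_test(&obj->refs))
		release(obj);
}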
/**
 * arch_atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, e);
}
#define arch_atomic_inc_and_test arch_atomic_inc_and_test

/**
 * arch_atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * the result is greater than or equal to zero.
 */
static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, s, "er", i);
}
#define arch_atomic_add_negative arch_atomic_add_negative
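
/*
 * Usage sketch (not part of the original header; names are hypothetical):
 * arch_atomic_add_negative() folds the add and the sign test into a single
 * locked instruction, e.g. when charging a cost against a budget counter
 * that starts out positive.
 */
static inline bool example_charge(atomic_t *budget, int cost)
{
	/* True if the budget went negative after subtracting the cost. */
	return arch_atomic_add_negative(-cost, budget);
}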
/**
 * arch_atomic_add_return - add integer and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
	return i + xadd(&v->counter, i);
}
#define arch_atomic_add_return arch_atomic_add_return
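
/*
 * Usage sketch (not part of the original header; names are hypothetical):
 * xadd() returns the *old* value, so arch_atomic_add_return() adds @i back
 * to produce the new value.  A simple monotonic ID allocator shows the
 * "return new value" flavour.
 */
static inline int example_alloc_id(atomic_t *next_id)
{
	/* New value after the increment: 1, 2, 3, ... for an ATOMIC_INIT(0) counter. */
	return arch_atomic_add_return(1, next_id);
}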
/**
 * arch_atomic_sub_return - subtract integer and return
 * @v: pointer of type atomic_t
 * @i: integer value to subtract
 *
 * Atomically subtracts @i from @v and returns @v - @i
 */
static __always_inline int arch_atomic_sub_return(int i, atomic_t *v)
{
	return arch_atomic_add_return(-i, v);
}
#define arch_atomic_sub_return arch_atomic_sub_return

static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
	return xadd(&v->counter, i);
}
#define arch_atomic_fetch_add arch_atomic_fetch_add

static __always_inline int arch_atomic_fetch_sub(int i, atomic_t *v)
{
	return xadd(&v->counter, -i);
}
#define arch_atomic_fetch_sub arch_atomic_fetch_sub

static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg

static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
	return arch_try_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
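
/*
 * Illustrative sketch (not part of the original header): the typical
 * arch_atomic_try_cmpxchg() retry loop, written here as a hypothetical
 * "increment unless the counter already holds @forbidden" helper.  On a
 * failed compare-and-exchange, try_cmpxchg() updates 'val' with the
 * current counter value, so the loop simply retries with fresh data.
 */
static __always_inline bool example_inc_unless(atomic_t *v, int forbidden)
{
	int val = arch_atomic_read(v);

	do {
		if (val == forbidden)
			return false;
	} while (!arch_atomic_try_cmpxchg(v, &val, val + 1));

	return true;
}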
static __always_inline int arch_atomic_xchg(atomic_t *v, int new)
{
	return arch_xchg(&v->counter, new);
}
#define arch_atomic_xchg arch_atomic_xchg

static __always_inline void arch_atomic_and(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "andl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i)
		     : "memory");
}

static __always_inline int arch_atomic_fetch_and(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val & i));

	return val;
}
#define arch_atomic_fetch_and arch_atomic_fetch_and

static __always_inline void arch_atomic_or(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "orl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i)
		     : "memory");
}

static __always_inline int arch_atomic_fetch_or(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val | i));

	return val;
}
#define arch_atomic_fetch_or arch_atomic_fetch_or

static __always_inline void arch_atomic_xor(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "xorl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i)
		     : "memory");
}

static __always_inline int arch_atomic_fetch_xor(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val ^ i));

	return val;
}
#define arch_atomic_fetch_xor arch_atomic_fetch_xor
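
/*
 * Usage sketch (not part of the original header; names are hypothetical):
 * the fetch variants return the value *before* the operation, which lets a
 * caller both set a bit and learn whether it was already set.
 */
#define EXAMPLE_FLAG_SHUTDOWN	0x1

static inline bool example_request_shutdown(atomic_t *state)
{
	/* True only for the first caller to set the flag. */
	return !(arch_atomic_fetch_or(EXAMPLE_FLAG_SHUTDOWN, state) &
		 EXAMPLE_FLAG_SHUTDOWN);
}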
#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
# include <asm/atomic64_64.h>
#endif

#endif /* _ASM_X86_ATOMIC_H */