cmpxchg.h

/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_ASM_IA64_CMPXCHG_H
#define _UAPI_ASM_IA64_CMPXCHG_H

/*
 * Compare/Exchange, forked from asm/intrinsics.h
 * which was:
 *
 *        Copyright (C) 2002-2003 Hewlett-Packard Co
 *        David Mosberger-Tang <[email protected]>
 */

#ifndef __ASSEMBLY__

#include <linux/types.h>
/* include compiler specific intrinsics */
#include <asm/ia64regs.h>
#ifdef __INTEL_COMPILER
# include <asm/intel_intrin.h>
#else
# include <asm/gcc_intrin.h>
#endif

/*
 * This function doesn't exist, so you'll get a linker error if
 * something tries to do an invalid xchg().
 */
extern void ia64_xchg_called_with_bad_pointer(void);

#define __xchg(x, ptr, size) \
({ \
        unsigned long __xchg_result; \
 \
        switch (size) { \
        case 1: \
                __xchg_result = ia64_xchg1((__u8 __force *)ptr, x); \
                break; \
 \
        case 2: \
                __xchg_result = ia64_xchg2((__u16 __force *)ptr, x); \
                break; \
 \
        case 4: \
                __xchg_result = ia64_xchg4((__u32 __force *)ptr, x); \
                break; \
 \
        case 8: \
                __xchg_result = ia64_xchg8((__u64 __force *)ptr, x); \
                break; \
        default: \
                ia64_xchg_called_with_bad_pointer(); \
        } \
        (__typeof__ (*(ptr)) __force) __xchg_result; \
})

#ifndef __KERNEL__
#define xchg(ptr, x) \
({(__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
#endif
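
/*
 * Illustrative usage sketch (not part of the original header): in a
 * userspace build, where xchg() above is defined, an atomic swap
 * unconditionally stores the new value and hands back the previous
 * contents of the word. The variable names below are hypothetical.
 *
 *        __u32 flag = 0;
 *        __u32 was;
 *
 *        was = xchg(&flag, 1);
 *
 * Afterwards 'flag' holds 1 and 'was' holds whatever 'flag' contained
 * immediately before the exchange.
 */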

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg().
 */
extern long ia64_cmpxchg_called_with_bad_pointer(void);

#define ia64_cmpxchg(sem, ptr, old, new, size) \
({ \
        __u64 _o_, _r_; \
 \
        switch (size) { \
        case 1: \
                _o_ = (__u8) (long __force) (old); \
                break; \
        case 2: \
                _o_ = (__u16) (long __force) (old); \
                break; \
        case 4: \
                _o_ = (__u32) (long __force) (old); \
                break; \
        case 8: \
                _o_ = (__u64) (long __force) (old); \
                break; \
        default: \
                break; \
        } \
        switch (size) { \
        case 1: \
                _r_ = ia64_cmpxchg1_##sem((__u8 __force *) ptr, new, _o_); \
                break; \
 \
        case 2: \
                _r_ = ia64_cmpxchg2_##sem((__u16 __force *) ptr, new, _o_); \
                break; \
 \
        case 4: \
                _r_ = ia64_cmpxchg4_##sem((__u32 __force *) ptr, new, _o_); \
                break; \
 \
        case 8: \
                _r_ = ia64_cmpxchg8_##sem((__u64 __force *) ptr, new, _o_); \
                break; \
 \
        default: \
                _r_ = ia64_cmpxchg_called_with_bad_pointer(); \
                break; \
        } \
        (__typeof__(old) __force) _r_; \
})

#define cmpxchg_acq(ptr, o, n) \
        ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr)))
#define cmpxchg_rel(ptr, o, n) \
        ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))

/*
 * Worse still - early processor implementations actually just ignored
 * the acquire/release and did a full fence all the time.  Unfortunately
 * this meant a lot of badly written code that used .acq when they really
 * wanted .rel became legacy out in the wild - so when we made a cpu
 * that strictly did the .acq or .rel ... all that code started breaking - so
 * we had to back-pedal and keep the "legacy" behavior of a full fence :-(
 */
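
/*
 * Illustrative usage sketch (not part of the original header): the
 * usual pattern is a read/modify/compare-exchange loop that retries
 * until cmpxchg_acq() returns the value the iteration started from,
 * i.e. until no other CPU updated the word in between. The names
 * 'counter', 'old' and 'new' below are hypothetical.
 *
 *        __u32 *counter, old, new;
 *
 *        do {
 *                old = *counter;
 *                new = old + 1;
 *        } while (cmpxchg_acq(counter, old, new) != old);
 */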

#ifndef __KERNEL__
/* for compatibility with other platforms: */
#define cmpxchg(ptr, o, n)      cmpxchg_acq((ptr), (o), (n))
#define cmpxchg64(ptr, o, n)    cmpxchg_acq((ptr), (o), (n))

#define cmpxchg_local           cmpxchg
#define cmpxchg64_local         cmpxchg64
#endif

#ifdef CONFIG_IA64_DEBUG_CMPXCHG
# define CMPXCHG_BUGCHECK_DECL  int _cmpxchg_bugcheck_count = 128;
# define CMPXCHG_BUGCHECK(v) \
do { \
        if (_cmpxchg_bugcheck_count-- <= 0) { \
                void *ip; \
                extern int _printk(const char *fmt, ...); \
                ip = (void *) ia64_getreg(_IA64_REG_IP); \
                _printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v)); \
                break; \
        } \
} while (0)
#else /* !CONFIG_IA64_DEBUG_CMPXCHG */
# define CMPXCHG_BUGCHECK_DECL
# define CMPXCHG_BUGCHECK(v)
#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
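
/*
 * Illustrative sketch (not part of the original header) of how the
 * debug helpers above are intended to wrap such a retry loop:
 * CMPXCHG_BUGCHECK_DECL declares a per-loop countdown (128 tries) and
 * CMPXCHG_BUGCHECK() prints the "stuck" diagnostic once the countdown
 * runs out, flagging a loop whose compare-exchange never succeeds.
 * The names 'v' and 'mask' below are hypothetical.
 *
 *        __u64 *v, old, new;
 *        CMPXCHG_BUGCHECK_DECL
 *
 *        do {
 *                CMPXCHG_BUGCHECK(v);
 *                old = *v;
 *                new = old | mask;
 *        } while (cmpxchg_acq(v, old, new) != old);
 */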

#endif /* !__ASSEMBLY__ */
#endif /* _UAPI_ASM_IA64_CMPXCHG_H */