/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Compiler-dependent intrinsics.
 *
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <[email protected]>
 */
#ifndef _UAPI_ASM_IA64_INTRINSICS_H
#define _UAPI_ASM_IA64_INTRINSICS_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
/* include compiler specific intrinsics */
#include <asm/ia64regs.h>
#ifdef __INTEL_COMPILER
# include <asm/intel_intrin.h>
#else
# include <asm/gcc_intrin.h>
#endif
#include <asm/cmpxchg.h>

#define ia64_set_rr0_to_rr4(val0, val1, val2, val3, val4)	\
do {								\
	ia64_set_rr(0x0000000000000000UL, (val0));		\
	ia64_set_rr(0x2000000000000000UL, (val1));		\
	ia64_set_rr(0x4000000000000000UL, (val2));		\
	ia64_set_rr(0x6000000000000000UL, (val3));		\
	ia64_set_rr(0x8000000000000000UL, (val4));		\
} while (0)
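
/*
 * Usage sketch (illustrative only, not part of this header): the five
 * values cover region registers rr0-rr4, which are selected by address
 * bits 63:61 and reloaded together, typically followed by a serialize
 * before the new translations are relied on, e.g.
 *
 *	ia64_set_rr0_to_rr4(rr0, rr1, rr2, rr3, rr4);
 *	ia64_srlz_i();
 *
 * ia64_set_rr() and ia64_srlz_i() come from the compiler-specific
 * intrinsics headers included above; the rr0..rr4 values here are
 * hypothetical region register contents.
 */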

/*
 * Force an unresolved reference if someone tries to use
 * ia64_fetch_and_add() with a bad value.
 */
extern unsigned long __bad_size_for_ia64_fetch_and_add (void);
extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);

#define IA64_FETCHADD(tmp,v,n,sz,sem)					\
({									\
	switch (sz) {							\
	case 4:								\
		tmp = ia64_fetchadd4_##sem((unsigned int *) v, n);	\
		break;							\
									\
	case 8:								\
		tmp = ia64_fetchadd8_##sem((unsigned long *) v, n);	\
		break;							\
									\
	default:							\
		__bad_size_for_ia64_fetch_and_add();			\
	}								\
})
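
/*
 * Illustrative expansion (not an additional API): with a 4-byte object
 * and acquire semantics, IA64_FETCHADD(tmp, v, 1, 4, acq) reduces to
 *
 *	tmp = ia64_fetchadd4_acq((unsigned int *) v, 1);
 *
 * i.e. the operand size selects fetchadd4/fetchadd8 and the "sem"
 * token is pasted on to pick acquire ("acq") or release ("rel")
 * ordering.
 */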

#define ia64_fetchadd(i,v,sem)						\
({									\
	__u64 _tmp;							\
	volatile __typeof__(*(v)) *_v = (v);				\
	/* Can't use a switch () here: gcc isn't always smart enough for that... */ \
	if ((i) == -16)							\
		IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v)), sem);	\
	else if ((i) == -8)						\
		IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v)), sem);		\
	else if ((i) == -4)						\
		IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v)), sem);		\
	else if ((i) == -1)						\
		IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v)), sem);		\
	else if ((i) == 1)						\
		IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v)), sem);		\
	else if ((i) == 4)						\
		IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v)), sem);		\
	else if ((i) == 8)						\
		IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v)), sem);		\
	else if ((i) == 16)						\
		IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v)), sem);		\
	else								\
		_tmp = __bad_increment_for_ia64_fetch_and_add();	\
	(__typeof__(*(v))) (_tmp);	/* return old value */		\
})
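
/*
 * Only the constants tested above are usable because the underlying
 * ia64 fetchadd instruction encodes its increment as an immediate
 * restricted to -16, -8, -4, -1, 1, 4, 8 and 16; anything else falls
 * through to the deliberately unresolvable
 * __bad_increment_for_ia64_fetch_and_add().
 */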

#define ia64_fetch_and_add(i,v)	(ia64_fetchadd(i, v, rel) + (i)) /* return new value */
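
/*
 * Usage sketch (illustrative only; "counter", "oldval" and "newval"
 * are hypothetical):
 *
 *	static __u64 counter;
 *	__u64 oldval, newval;
 *
 *	oldval = ia64_fetchadd(1, &counter, acq);	returns the old value
 *	newval = ia64_fetch_and_add(1, &counter);	returns the new value
 */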

#endif /* !__ASSEMBLY__ */
#endif /* _UAPI_ASM_IA64_INTRINSICS_H */