barrier.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this might be required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
#define mb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "mfence", \
				      X86_FEATURE_XMM2) ::: "memory", "cc")
#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "lfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "sfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#else
#define __mb()	asm volatile("mfence":::"memory")
#define __rmb()	asm volatile("lfence":::"memory")
#define __wmb()	asm volatile("sfence" ::: "memory")
#endif
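
/*
 * Illustrative sketch, not part of this header: a typical use of wmb()
 * when talking to a device, as the comment above hints.  A driver fills
 * in a descriptor in memory and only then rings a doorbell register
 * over MMIO; wmb() keeps the descriptor store ahead of the doorbell
 * write.  The names below (desc_slot, doorbell) are made up for the
 * example, and writel_relaxed() would come from <linux/io.h>.
 */
static inline void example_post_descriptor(u64 *desc_slot, u64 dma_addr,
					   void __iomem *doorbell)
{
	*desc_slot = dma_addr;		/* publish the descriptor	*/
	wmb();				/* order it before the MMIO	*/
	writel_relaxed(1, doorbell);	/* notify the device		*/
}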

/**
 * array_index_mask_nospec() - generate a mask that is ~0UL when the
 *	bounds check succeeds and 0 otherwise
 * @index: array element index
 * @size: number of elements in array
 *
 * Returns:
 *     0 - (index < size)
 */
static inline unsigned long array_index_mask_nospec(unsigned long index,
		unsigned long size)
{
	unsigned long mask;

	asm volatile ("cmp %1,%2; sbb %0,%0;"
			:"=r" (mask)
			:"g"(size),"r" (index)
			:"cc");
	return mask;
}

/* Override the default implementation from linux/nospec.h. */
#define array_index_mask_nospec array_index_mask_nospec
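
/*
 * Illustrative sketch, not part of this header: this is roughly how the
 * generic array_index_nospec() in <linux/nospec.h> applies the mask
 * above to clamp a possibly attacker-controlled index.  The helper name
 * is made up for the example.
 */
static inline unsigned long example_clamp_index(unsigned long index,
						unsigned long size)
{
	unsigned long mask = array_index_mask_nospec(index, size);

	/*
	 * mask is ~0UL when index < size and 0 otherwise, so even a
	 * mispredicted bounds check can at worst access element 0
	 * rather than an out-of-bounds address.
	 */
	return index & mask;
}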

/* Prevent speculative execution past this barrier. */
#define barrier_nospec() alternative("", "lfence", X86_FEATURE_LFENCE_RDTSC)

#define __dma_rmb()	barrier()
#define __dma_wmb()	barrier()

#define __smp_mb()	asm volatile("lock; addl $0,-4(%%" _ASM_SP ")" ::: "memory", "cc")

#define __smp_rmb()	dma_rmb()
#define __smp_wmb()	barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

/* Atomic operations are already serializing on x86 */
#define __smp_mb__before_atomic()	do { } while (0)
#define __smp_mb__after_atomic()	do { } while (0)

#include <asm-generic/barrier.h>
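
/*
 * Illustrative sketch, not part of this header: a one-producer,
 * one-consumer hand-off built on the release/acquire helpers defined
 * above, via the smp_store_release()/smp_load_acquire() wrappers that
 * <asm-generic/barrier.h> derives from them.  The struct and field
 * names are made up for the example.
 */
struct example_msg {
	int payload;
	int ready;
};

static inline void example_publish(struct example_msg *m, int val)
{
	m->payload = val;			/* plain store		  */
	smp_store_release(&m->ready, 1);	/* payload ordered before */
						/* the ready flag	  */
}

static inline int example_consume(struct example_msg *m, int *val)
{
	if (!smp_load_acquire(&m->ready))	/* pairs with the release */
		return 0;

	*val = m->payload;			/* sees the payload	  */
	return 1;
}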

/*
 * Make previous memory operations globally visible before
 * a WRMSR.
 *
 * MFENCE makes writes visible, but only affects load/store
 * instructions. WRMSR is unfortunately not a load/store
 * instruction and is unaffected by MFENCE. The LFENCE ensures
 * that the WRMSR is not reordered.
 *
 * Most WRMSRs are full serializing instructions themselves and
 * do not require this barrier. This is only required for the
 * IA32_TSC_DEADLINE and X2APIC MSRs.
 */
static inline void weak_wrmsr_fence(void)
{
	asm volatile("mfence; lfence" : : : "memory");
}
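
/*
 * Illustrative sketch, not part of this header: arming the
 * non-serializing IA32_TSC_DEADLINE MSR, one of the cases the comment
 * above describes.  Assumes <asm/msr.h> for rdtsc()/wrmsrl() and
 * <asm/msr-index.h> for MSR_IA32_TSC_DEADLINE; the deadline arithmetic
 * is simplified for the example.
 */
static inline void example_arm_tsc_deadline(u64 delta_cycles)
{
	/* Make earlier memory operations globally visible first. */
	weak_wrmsr_fence();

	wrmsrl(MSR_IA32_TSC_DEADLINE, rdtsc() + delta_cycles);
}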

#endif /* _ASM_X86_BARRIER_H */