barrier.h

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2006 by Ralf Baechle ([email protected])
 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

#include <asm/addrspace.h>
#include <asm/sync.h>
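
/*
 * Note: __SYNC(type, reason) is provided by <asm/sync.h>. Roughly speaking,
 * it expands to the MIPS SYNC instruction variant matching "type" (full,
 * rmb, wmb, ginv, ...) when the configuration requires one; the "always"
 * reason marks barriers that are wanted unconditionally rather than only as
 * an erratum workaround.
 */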
static inline void __sync(void)
{
	asm volatile(__SYNC(full, always) ::: "memory");
}

static inline void rmb(void)
{
	asm volatile(__SYNC(rmb, always) ::: "memory");
}
#define rmb rmb

static inline void wmb(void)
{
	asm volatile(__SYNC(wmb, always) ::: "memory");
}
#define wmb wmb

#define fast_mb() __sync()
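
/*
 * __fast_iob() reads a word from uncached CKSEG1 space; the idea is that
 * such a load cannot complete until previously posted writes have drained
 * from the write buffers, which is what makes it usable as an I/O barrier.
 */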
#define __fast_iob() \
	__asm__ __volatile__( \
		".set push\n\t" \
		".set noreorder\n\t" \
		"lw $0,%0\n\t" \
		"nop\n\t" \
		".set pop" \
		: /* no output */ \
		: "m" (*(int *)CKSEG1) \
		: "memory")

#ifdef CONFIG_CPU_CAVIUM_OCTEON
# define fast_iob() do { } while (0)
#else /* ! CONFIG_CPU_CAVIUM_OCTEON */
# ifdef CONFIG_SGI_IP28
#  define fast_iob() \
	__asm__ __volatile__( \
		".set push\n\t" \
		".set noreorder\n\t" \
		"lw $0,%0\n\t" \
		"sync\n\t" \
		"lw $0,%0\n\t" \
		".set pop" \
		: /* no output */ \
		: "m" (*(int *)CKSEG1ADDR(0x1fa00004)) \
		: "memory")
# else
#  define fast_iob() \
	do { \
		__sync(); \
		__fast_iob(); \
	} while (0)
# endif
#endif /* CONFIG_CPU_CAVIUM_OCTEON */
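
/*
 * Systems whose CPU or bus interface has a write buffer that must be drained
 * explicitly (CONFIG_CPU_HAS_WB) provide wbflush() for that job; mb() and
 * iob() map onto it there, and onto the SYNC-based helpers above everywhere
 * else.
 */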
#ifdef CONFIG_CPU_HAS_WB
#include <asm/wbflush.h>
#define mb() wbflush()
#define iob() wbflush()
#else /* !CONFIG_CPU_HAS_WB */
#define mb() fast_mb()
#define iob() fast_iob()
#endif /* !CONFIG_CPU_HAS_WB */
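
/*
 * The SMP barriers only need to emit a real SYNC when the CPU may reorder
 * memory accesses in ways other CPUs can observe (CONFIG_WEAK_ORDERING);
 * otherwise a compiler barrier is enough.
 */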
#if defined(CONFIG_WEAK_ORDERING)
# define __smp_mb()	__sync()
# define __smp_rmb()	rmb()
# define __smp_wmb()	wmb()
#else
# define __smp_mb()	barrier()
# define __smp_rmb()	barrier()
# define __smp_wmb()	barrier()
#endif

/*
 * When LL/SC does imply order, it must also be a compiler barrier to prevent
 * the compiler from reordering where the CPU will not. When it does not imply
 * order, the compiler is also free to reorder across the LL/SC loop and
 * ordering will be done by smp_llsc_mb() and friends.
 */
#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
# define __WEAK_LLSC_MB sync
# define smp_llsc_mb() \
	__asm__ __volatile__(__stringify(__WEAK_LLSC_MB) : : :"memory")
# define __LLSC_CLOBBER
#else
# define __WEAK_LLSC_MB
# define smp_llsc_mb() do { } while (0)
# define __LLSC_CLOBBER "memory"
#endif
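
/*
 * Usage sketch (the real loops live in e.g. <asm/cmpxchg.h>, not here): an
 * LL/SC sequence typically places __stringify(__WEAK_LLSC_MB) after the loop
 * and lists __LLSC_CLOBBER among its clobbers, roughly
 *
 *	"2:	" __stringify(__WEAK_LLSC_MB) "\n"
 *	...
 *	: __LLSC_CLOBBER);
 *
 * so CPUs that reorder beyond LL/SC get a trailing sync, while everyone else
 * keeps only the "memory" clobber as a compiler barrier.
 */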

#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define smp_mb__before_llsc() smp_wmb()
#define __smp_mb__before_llsc() __smp_wmb()
/* Cause previous writes to become visible on all CPUs as soon as possible */
#define nudge_writes() __asm__ __volatile__(".set push\n\t" \
					    ".set arch=octeon\n\t" \
					    "syncw\n\t" \
					    ".set pop" : : : "memory")
#else
#define smp_mb__before_llsc() smp_llsc_mb()
#define __smp_mb__before_llsc() smp_llsc_mb()
#define nudge_writes() mb()
#endif
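
/*
 * The smp_mb__before_llsc() variants above order earlier memory accesses
 * against a following LL/SC sequence.  On Octeon the lighter write barrier
 * (syncw) is considered sufficient; everywhere else this falls back to the
 * generic LL/SC barrier.
 */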

/*
 * In the Loongson3 LL/SC workaround case, all of our LL/SC loops already have
 * a completion barrier immediately preceding the LL instruction. Therefore we
 * can skip emitting a barrier from __smp_mb__before_atomic().
 */
#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS
# define __smp_mb__before_atomic()
#else
# define __smp_mb__before_atomic() __smp_mb__before_llsc()
#endif

#define __smp_mb__after_atomic() smp_llsc_mb()
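
/*
 * Emit a SYNC of the "ginv" type: used to order globalized invalidate
 * (GINVI/GINVT) instructions on CPUs that implement them.
 */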
static inline void sync_ginv(void)
{
	asm volatile(__SYNC(ginv, always));
}

#include <asm-generic/barrier.h>

#endif /* __ASM_BARRIER_H */