sync_bitops.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SYNC_BITOPS_H__
#define __ASM_SYNC_BITOPS_H__

#include <asm/bitops.h>

/* sync_bitops functions are equivalent to the SMP implementation of the
 * original functions, independently from CONFIG_SMP being defined.
 *
 * We need them because _set_bit etc are not SMP safe if !CONFIG_SMP. But
 * under Xen you might be communicating with a completely external entity
 * who might be on another CPU (e.g. two uniprocessor guests communicating
 * via event channels and grant tables). So we need a variant of the bit
 * ops which are SMP safe even on a UP kernel.
 */

/*
 * Unordered
 */
#define sync_set_bit(nr, p)		_set_bit(nr, p)
#define sync_clear_bit(nr, p)		_clear_bit(nr, p)
#define sync_change_bit(nr, p)		_change_bit(nr, p)
#define sync_test_bit(nr, addr)		test_bit(nr, addr)

/*
 * Fully ordered
 */

int _sync_test_and_set_bit(int nr, volatile unsigned long * p);
#define sync_test_and_set_bit(nr, p)	_sync_test_and_set_bit(nr, p)

int _sync_test_and_clear_bit(int nr, volatile unsigned long * p);
#define sync_test_and_clear_bit(nr, p)	_sync_test_and_clear_bit(nr, p)

int _sync_test_and_change_bit(int nr, volatile unsigned long * p);
#define sync_test_and_change_bit(nr, p)	_sync_test_and_change_bit(nr, p)
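
/*
 * Fully ordered cmpxchg: brackets a relaxed cmpxchg with the __smp_*
 * barrier variants, so full ordering is preserved even on a UP
 * (!CONFIG_SMP) kernel, matching the rationale at the top of this file.
 */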
#define arch_sync_cmpxchg(ptr, old, new)				\
({									\
	__typeof__(*(ptr)) __ret;					\
	__smp_mb__before_atomic();					\
	__ret = arch_cmpxchg_relaxed((ptr), (old), (new));		\
	__smp_mb__after_atomic();					\
	__ret;								\
})

#endif
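
As a usage illustration, here is a minimal sketch of how driver code talking to another domain might use these operations; the function names, the RING_DIRTY_BIT index, and the shared_flags word are hypothetical and not part of this header.

#include <linux/types.h>
#include <asm/sync_bitops.h>

/* Hypothetical bit index in a word living in a page shared with a peer domain. */
#define RING_DIRTY_BIT	0

/*
 * Producer side: mark the shared ring dirty. sync_set_bit() expands to the
 * SMP-safe _set_bit() even on a UP kernel, so the peer domain always
 * observes an atomic read-modify-write.
 */
static void ring_mark_dirty(volatile unsigned long *shared_flags)
{
	sync_set_bit(RING_DIRTY_BIT, shared_flags);
}

/*
 * Consumer side: test and clear the flag. The fully ordered variant keeps
 * the flag update ordered against surrounding accesses to the shared page.
 */
static bool ring_consume_dirty(volatile unsigned long *shared_flags)
{
	return sync_test_and_clear_bit(RING_DIRTY_BIT, shared_flags);
}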