qspinlock.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_QSPINLOCK_H
#define _ASM_POWERPC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>

#define _Q_PENDING_LOOPS	(1 << 9) /* not tuned */

#ifdef CONFIG_PARAVIRT_SPINLOCKS
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_queued_spin_unlock(struct qspinlock *lock);
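
/*
 * Dispatch the slow path at run time: on a shared-processor LPAR the
 * paravirt variant is used so that spinning vCPUs can yield to the
 * hypervisor, otherwise the native slow path spins as usual.
 */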
static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	if (!is_shared_processor())
		native_queued_spin_lock_slowpath(lock, val);
	else
		__pv_queued_spin_lock_slowpath(lock, val);
}
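
/*
 * Unlock mirrors the same split: the native case is a plain release store
 * of the locked byte, while the paravirt case may also need to prod a
 * vCPU that yielded in pv_wait().
 */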
#define queued_spin_unlock queued_spin_unlock
static inline void queued_spin_unlock(struct qspinlock *lock)
{
	if (!is_shared_processor())
		smp_store_release(&lock->locked, 0);
	else
		__pv_queued_spin_unlock(lock);
}

#else
extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
#endif
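
/*
 * Fast path: try to take the lock by atomically changing the lock word
 * from 0 to _Q_LOCKED_VAL; fall back to the slow path if it was not 0.
 */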
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val = 0;

	if (likely(arch_atomic_try_cmpxchg_lock(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	queued_spin_lock_slowpath(lock, val);
}
#define queued_spin_lock queued_spin_lock

#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define SPIN_THRESHOLD (1<<15) /* not tuned */
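
/*
 * Called by the generic paravirt slow path once a waiter has spun for
 * SPIN_THRESHOLD iterations: re-check the wait byte and, if it still
 * matches, give this vCPU's time slice back to the hypervisor.
 */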
static __always_inline void pv_wait(u8 *ptr, u8 val)
{
	if (*ptr != val)
		return;
	yield_to_any();
	/*
	 * We could pass in a CPU here if waiting in the queue and yield to
	 * the previous CPU in the queue.
	 */
}
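
/* Prod the target vCPU so a waiter that yielded in pv_wait() runs again. */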
static __always_inline void pv_kick(int cpu)
{
	prod_cpu(cpu);
}
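
/*
 * The generic paravirt qspinlock code keeps a hash table mapping locks to
 * their waiters; it must be initialised before paravirt spinlocks are used.
 */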
extern void __pv_init_lock_hash(void);

static inline void pv_spinlocks_init(void)
{
	__pv_init_lock_hash();
}

#endif

/*
 * Queued spinlocks rely heavily on smp_cond_load_relaxed() to busy-wait,
 * which was found to have performance problems if implemented with
 * the preferred spin_begin()/spin_end() SMT priority pattern. Use the
 * generic version instead.
 */
#include <asm-generic/qspinlock.h>

#endif /* _ASM_POWERPC_QSPINLOCK_H */