paravirt.h

/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_PARAVIRT_H
#define _ASM_POWERPC_PARAVIRT_H

#include <linux/jump_label.h>
#include <asm/smp.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/lppaca.h>
#include <asm/hvcall.h>
#endif

#ifdef CONFIG_PPC_SPLPAR
#include <linux/smp.h>
#include <asm/kvm_guest.h>
#include <asm/cputhreads.h>

DECLARE_STATIC_KEY_FALSE(shared_processor);
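
/*
 * shared_processor is enabled by the pseries platform setup code when the
 * partition runs in shared processor mode, i.e. when the hypervisor
 * time-slices its virtual processors across physical CPUs. The helper
 * below compiles to a patched branch via the jump label mechanism, so the
 * dedicated-processor case pays no runtime cost for the check.
 */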
static inline bool is_shared_processor(void)
{
	return static_branch_unlikely(&shared_processor);
}

#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

u64 pseries_paravirt_steal_clock(int cpu);
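
/*
 * Hook consumed by the generic steal time accounting when
 * CONFIG_PARAVIRT_TIME_ACCOUNTING is enabled. The pseries implementation
 * reports accumulated stolen time for @cpu, i.e. time its virtual
 * processor spent waiting to be dispatched by the hypervisor.
 */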
static inline u64 paravirt_steal_clock(int cpu)
{
	return pseries_paravirt_steal_clock(cpu);
}
#endif

/* If bit 0 is set, the cpu has been ceded, conferred, or preempted */
static inline u32 yield_count_of(int cpu)
{
	__be32 yield_count = READ_ONCE(lppaca_of(cpu).yield_count);

	return be32_to_cpu(yield_count);
}
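
/*
 * Callers generally sample yield_count_of() and, if bit 0 shows the target
 * is not currently dispatched, hand the sampled value back to
 * yield_to_preempted(). H_CONFER takes the yield count so the hypervisor
 * can ignore the confer if the target has since been dispatched again.
 */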

/*
 * Spinlock code confers and prods, so don't trace the hcalls because the
 * tracing code takes spinlocks which can cause recursion deadlocks.
 *
 * These calls are made while the lock is not held: the lock slowpath yields if
 * it cannot acquire the lock, and the unlock slowpath might prod if a waiter
 * has yielded. So this may not be a problem for simple spin locks because the
 * tracing does not technically recurse on the lock, but we avoid it anyway.
 *
 * However the queued spin lock contended path is more strictly ordered: the
 * H_CONFER hcall is made after the task has queued itself on the lock, so then
 * recursing on that lock will cause the task to queue up again behind the
 * first instance (or worse: queued spinlocks use tricks that assume a context
 * never waits on more than one spinlock, so such recursion may cause random
 * corruption in the lock code).
 */
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	plpar_hcall_norets_notrace(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
}

static inline void prod_cpu(int cpu)
{
	plpar_hcall_norets_notrace(H_PROD, get_hard_smp_processor_id(cpu));
}

static inline void yield_to_any(void)
{
	plpar_hcall_norets_notrace(H_CONFER, -1, 0);
}
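
/*
 * Illustrative sketch only (not part of this header): a lock slowpath
 * built on these helpers could look roughly like this, where "owner" is a
 * hypothetical variable naming the CPU that currently holds the lock:
 *
 *	u32 yield_count = yield_count_of(owner);
 *
 *	if (yield_count & 1)
 *		yield_to_preempted(owner, yield_count);
 *
 * and the matching unlock slowpath would prod_cpu() a waiter that has
 * yielded while spinning.
 */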
#else
static inline bool is_shared_processor(void)
{
	return false;
}

static inline u32 yield_count_of(int cpu)
{
	return 0;
}
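
/*
 * The ___bad_* functions are declared but intentionally never defined: if
 * a !CONFIG_PPC_SPLPAR build ever emits a call to one of these stubs, the
 * kernel fails to link rather than silently doing nothing.
 */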
extern void ___bad_yield_to_preempted(void);
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	___bad_yield_to_preempted(); /* This would be a bug */
}

extern void ___bad_yield_to_any(void);
static inline void yield_to_any(void)
{
	___bad_yield_to_any(); /* This would be a bug */
}

extern void ___bad_prod_cpu(void);
static inline void prod_cpu(int cpu)
{
	___bad_prod_cpu(); /* This would be a bug */
}

#endif
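
/*
 * Defining vcpu_is_preempted as a macro of the same name tells generic
 * code that an architecture-specific implementation is provided, so the
 * fallback that unconditionally returns false is not used.
 */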
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	/*
	 * The dispatch/yield bit alone is an imperfect indicator of
	 * whether the hypervisor has dispatched @cpu to run on a physical
	 * processor. When it is clear, @cpu is definitely not preempted.
	 * But when it is set, it means only that it *might* be, subject to
	 * other conditions. So we check other properties of the VM and
	 * @cpu first, resorting to the yield count last.
	 */

	/*
	 * Hypervisor preemption isn't possible in dedicated processor
	 * mode by definition.
	 */
	if (!is_shared_processor())
		return false;

#ifdef CONFIG_PPC_SPLPAR
	if (!is_kvm_guest()) {
		int first_cpu;

		/*
		 * The result of vcpu_is_preempted() is used in a
		 * speculative way, and is always subject to invalidation
		 * by events internal and external to Linux. While we can
		 * be called in preemptable context (in the Linux sense),
		 * we're not accessing per-cpu resources in a way that can
		 * race destructively with Linux scheduler preemption and
		 * migration, and callers can tolerate the potential for
		 * error introduced by sampling the CPU index without
		 * pinning the task to it. So it is permissible to use
		 * raw_smp_processor_id() here to defeat the preempt debug
		 * warnings that can arise from using smp_processor_id()
		 * in arbitrary contexts.
		 */
		first_cpu = cpu_first_thread_sibling(raw_smp_processor_id());

		/*
		 * The PowerVM hypervisor dispatches VMs on a whole core
		 * basis. So we know that a thread sibling of the local CPU
		 * cannot have been preempted by the hypervisor, even if it
		 * has called H_CONFER, which will set the yield bit.
		 */
		if (cpu_first_thread_sibling(cpu) == first_cpu)
			return false;
	}
#endif

	if (yield_count_of(cpu) & 1)
		return true;
	return false;
}
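
/*
 * A shared processor partition may need a paravirt-aware unlock path that
 * can prod a yielded waiter; when the partition is not in shared processor
 * mode the native unlock is sufficient, which is what this helper reports.
 */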
static inline bool pv_is_native_spin_unlock(void)
{
	return !is_shared_processor();
}

#endif /* _ASM_POWERPC_PARAVIRT_H */