kvm_onhyperv.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM L1 hypervisor optimizations on Hyper-V.
 */

#include <linux/kvm_host.h>
#include <asm/mshyperv.h>

#include "hyperv.h"
#include "kvm_onhyperv.h"
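
/*
 * Callback used by hyperv_flush_guest_mapping_range() to fill the Hyper-V
 * flush list with the GFNs covered by a struct kvm_tlb_range.
 */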
static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
		void *data)
{
	struct kvm_tlb_range *range = data;

	return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn,
			range->pages);
}
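
/*
 * Flush the given TDP root via Hyper-V: use the ranged hypercall when a
 * range is supplied, and the full guest-mapping flush otherwise.
 */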
static inline int hv_remote_flush_root_tdp(hpa_t root_tdp,
					   struct kvm_tlb_range *range)
{
	if (range)
		return hyperv_flush_guest_mapping_range(root_tdp,
				kvm_fill_hv_flush_list_func, (void *)range);
	else
		return hyperv_flush_guest_mapping(root_tdp);
}
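
/*
 * Flush remote TLBs for @kvm, optionally restricted to @range.  When all
 * vCPUs have converged on a single valid root, only that root is flushed;
 * otherwise the roots of all vCPUs are walked and flushed under
 * hv_root_tdp_lock.
 */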
int hv_remote_flush_tlb_with_range(struct kvm *kvm,
		struct kvm_tlb_range *range)
{
	struct kvm_arch *kvm_arch = &kvm->arch;
	struct kvm_vcpu *vcpu;
	int ret = 0, nr_unique_valid_roots;
	unsigned long i;
	hpa_t root;

	spin_lock(&kvm_arch->hv_root_tdp_lock);

	if (!VALID_PAGE(kvm_arch->hv_root_tdp)) {
		nr_unique_valid_roots = 0;

		/*
		 * Flush all valid roots, and see if all vCPUs have converged
		 * on a common root, in which case future flushes can skip the
		 * loop and flush the common root.
		 */
		kvm_for_each_vcpu(i, vcpu, kvm) {
			root = vcpu->arch.hv_root_tdp;
			if (!VALID_PAGE(root) || root == kvm_arch->hv_root_tdp)
				continue;

			/*
			 * Set the tracked root to the first valid root.  Keep
			 * this root for the entirety of the loop even if more
			 * roots are encountered as a low effort optimization
			 * to avoid flushing the same (first) root again.
			 */
			if (++nr_unique_valid_roots == 1)
				kvm_arch->hv_root_tdp = root;

			if (!ret)
				ret = hv_remote_flush_root_tdp(root, range);

			/*
			 * Stop processing roots if a failure occurred and
			 * multiple valid roots have already been detected.
			 */
			if (ret && nr_unique_valid_roots > 1)
				break;
		}

		/*
		 * The optimized flush of a single root can't be used if there
		 * are multiple valid roots (obviously).
		 */
		if (nr_unique_valid_roots > 1)
			kvm_arch->hv_root_tdp = INVALID_PAGE;
	} else {
		ret = hv_remote_flush_root_tdp(kvm_arch->hv_root_tdp, range);
	}

	spin_unlock(&kvm_arch->hv_root_tdp_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(hv_remote_flush_tlb_with_range);
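
/* Full remote TLB flush, i.e. a ranged flush with no range restriction. */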
int hv_remote_flush_tlb(struct kvm *kvm)
{
	return hv_remote_flush_tlb_with_range(kvm, NULL);
}
EXPORT_SYMBOL_GPL(hv_remote_flush_tlb);
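
/*
 * Track the vCPU's TDP root for Hyper-V remote flushes.  If the new root
 * doesn't match the common root tracked in kvm_arch, invalidate the common
 * root so that the next remote flush walks all vCPUs again.
 */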
void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
{
	struct kvm_arch *kvm_arch = &vcpu->kvm->arch;

	if (kvm_x86_ops.tlb_remote_flush == hv_remote_flush_tlb) {
		spin_lock(&kvm_arch->hv_root_tdp_lock);
		vcpu->arch.hv_root_tdp = root_tdp;
		if (root_tdp != kvm_arch->hv_root_tdp)
			kvm_arch->hv_root_tdp = INVALID_PAGE;
		spin_unlock(&kvm_arch->hv_root_tdp_lock);
	}
}
EXPORT_SYMBOL_GPL(hv_track_root_tdp);