tdp_mmu.h

// SPDX-License-Identifier: GPL-2.0
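
/*
 * Interface to KVM's TDP MMU: the page-table management code used when the
 * hardware supports two-dimensional paging (EPT on Intel, NPT on AMD) and
 * KVM builds the guest's second-level page tables directly.
 */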
#ifndef __KVM_X86_MMU_TDP_MMU_H
#define __KVM_X86_MMU_TDP_MMU_H

#include <linux/kvm_host.h>

hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
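
/*
 * Take a reference on a TDP MMU root.  Returns false if the root's refcount
 * has already dropped to zero, i.e. the root is being torn down.
 */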
__must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
{
	return refcount_inc_not_zero(&root->tdp_mmu_root_count);
}
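
/*
 * Drop a reference taken by kvm_tdp_mmu_get_root().  @shared indicates whether
 * the caller holds mmu_lock for read (shared) rather than write.
 */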
void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
			  bool shared);
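
/*
 * Zapping helpers: remove leaf SPTEs in a GFN range, zap a single shadow
 * page or the entire VM, and invalidate/reap whole roots.
 */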
bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush);
bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
void kvm_tdp_mmu_zap_all(struct kvm *kvm);
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm);
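
/* Handle a TDP page fault by installing the missing mappings for @fault. */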
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
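
/* TDP MMU backends for the MMU-notifier-driven gfn-range hooks. */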
bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				 bool flush);
bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
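
/*
 * Memslot maintenance: write protection, dirty-logging support, zapping
 * mappings that could be collapsed back into huge pages, and eagerly
 * splitting huge pages down to @target_level.
 */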
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
			     const struct kvm_memory_slot *slot, int min_level);
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
				  const struct kvm_memory_slot *slot);
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot);
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       const struct kvm_memory_slot *slot);
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn,
				   int min_level);
void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
				      const struct kvm_memory_slot *slot,
				      gfn_t start, gfn_t end,
				      int target_level, bool shared);
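
/*
 * Lockless walks of the TDP MMU page tables run without mmu_lock; RCU keeps
 * the page-table pages alive for the duration of the walk.
 */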
static inline void kvm_tdp_mmu_walk_lockless_begin(void)
{
	rcu_read_lock();
}

static inline void kvm_tdp_mmu_walk_lockless_end(void)
{
	rcu_read_unlock();
}

int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level);
u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
					u64 *spte);
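
/*
 * Illustrative pairing of the lockless-walk helpers with
 * kvm_tdp_mmu_get_walk() (a sketch, not a caller in this file; the local
 * variables are made up, and PT64_ROOT_MAX_LEVEL comes from mmu.h):
 *
 *	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
 *	int root_level, leaf;
 *
 *	kvm_tdp_mmu_walk_lockless_begin();
 *	leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root_level);
 *	kvm_tdp_mmu_walk_lockless_end();
 */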

#ifdef CONFIG_X86_64
void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
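
/* True iff @sp was allocated by the TDP MMU (sp->tdp_mmu_page is set). */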
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }

static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
{
	struct kvm_mmu_page *sp;
	hpa_t hpa = mmu->root.hpa;

	if (WARN_ON(!VALID_PAGE(hpa)))
		return false;

	/*
	 * A NULL shadow page is legal when shadowing a non-paging guest with
	 * PAE paging, as the MMU will be direct with root_hpa pointing at the
	 * pae_root page, not a shadow page.
	 */
	sp = to_shadow_page(hpa);
	return sp && is_tdp_mmu_page(sp) && sp->root_count;
}
#else
static inline void kvm_mmu_init_tdp_mmu(struct kvm *kvm) {}
static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
static inline bool is_tdp_mmu(struct kvm_mmu *mmu) { return false; }
#endif

#endif /* __KVM_X86_MMU_TDP_MMU_H */