  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. #ifndef __KVM_MM_H__
  3. #define __KVM_MM_H__ 1
  4. /*
  5. * Architectures can choose whether to use an rwlock or spinlock
  6. * for the mmu_lock. These macros, for use in common code
  7. * only, avoids using #ifdefs in places that must deal with
  8. * multiple architectures.
  9. */
  10. #ifdef KVM_HAVE_MMU_RWLOCK
  11. #define KVM_MMU_LOCK_INIT(kvm) rwlock_init(&(kvm)->mmu_lock)
  12. #define KVM_MMU_LOCK(kvm) write_lock(&(kvm)->mmu_lock)
  13. #define KVM_MMU_UNLOCK(kvm) write_unlock(&(kvm)->mmu_lock)
  14. #define KVM_MMU_READ_LOCK(kvm) read_lock(&(kvm)->mmu_lock)
  15. #define KVM_MMU_READ_UNLOCK(kvm) read_unlock(&(kvm)->mmu_lock)
  16. #else
  17. #define KVM_MMU_LOCK_INIT(kvm) spin_lock_init(&(kvm)->mmu_lock)
  18. #define KVM_MMU_LOCK(kvm) spin_lock(&(kvm)->mmu_lock)
  19. #define KVM_MMU_UNLOCK(kvm) spin_unlock(&(kvm)->mmu_lock)
  20. #define KVM_MMU_READ_LOCK(kvm) spin_lock(&(kvm)->mmu_lock)
  21. #define KVM_MMU_READ_UNLOCK(kvm) spin_unlock(&(kvm)->mmu_lock)
  22. #endif /* KVM_HAVE_MMU_RWLOCK */
  23. kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
  24. bool write_fault, bool *writable);
  25. #ifdef CONFIG_HAVE_KVM_PFNCACHE
  26. void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
  27. unsigned long start,
  28. unsigned long end,
  29. bool may_block);
  30. #else
  31. static inline void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
  32. unsigned long start,
  33. unsigned long end,
  34. bool may_block)
  35. {
  36. }
  37. #endif /* HAVE_KVM_PFNCACHE */
  38. #endif /* __KVM_MM_H__ */