kvm-arm: Cleanup stage2 pgd handling
Now that we don't have any fake page table levels for arm64, clean up
the common code to get rid of the dead code.

Cc: Marc Zyngier <marc.zyngier@arm.com>
Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
commit 9163ee23e7 (parent da04fa04dc)
committed by Christoffer Dall
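Background, taken from the comment that this patch deletes: when the kernel used more levels of page tables than the guest, the common code allocated a fake PGD and pre-populated it to point to the next-level table, which was the real table referenced by the VTTBR. With that scheme gone on arm64, the per-arch helpers removed below are pure indirection. A sketch of the old call path (helper names as in the removed code; not verbatim kernel source):

/*
 * Old allocation path (sketch):
 *
 *   hwpgd = kvm_alloc_hwpgd();         // the table the hardware actually walks
 *   pgd   = kvm_setup_fake_pgd(hwpgd); // now an identity function on both arm and arm64
 *
 * Since the second step no longer does anything, the patch calls
 * alloc_pages_exact() directly from the common code and deletes the helpers.
 */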
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -150,25 +150,6 @@ static inline bool kvm_page_empty(void *ptr)
 #define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
 #define hyp_pud_table_empty(pudp) false
 
-static inline void *kvm_get_hwpgd(struct kvm *kvm)
-{
-    return kvm->arch.pgd;
-}
-
-static inline unsigned int kvm_get_hwpgd_size(void)
-{
-    return PTRS_PER_S2_PGD * sizeof(pgd_t);
-}
-
-static inline pgd_t *kvm_setup_fake_pgd(pgd_t *hwpgd)
-{
-    return hwpgd;
-}
-
-static inline void kvm_free_fake_pgd(pgd_t *pgd)
-{
-}
-
 struct kvm;
 
 #define kvm_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l))
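The same four inline stubs are removed from the arm64 header in the final hunk of this commit; on both architectures they had degenerated into identity functions or empty bodies, which is what makes this cleanup safe.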
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -448,7 +448,7 @@ static void update_vttbr(struct kvm *kvm)
 	kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
 
 	/* update vttbr to be used with the new vmid */
-	pgd_phys = virt_to_phys(kvm_get_hwpgd(kvm));
+	pgd_phys = virt_to_phys(kvm->arch.pgd);
 	BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
 	vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
 	kvm->arch.vttbr = pgd_phys | vmid;
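Note: this substitution is purely mechanical. As the header hunk above shows, kvm_get_hwpgd() simply returned kvm->arch.pgd, so the physical address programmed into the VTTBR is unchanged.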
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -43,6 +43,7 @@ static unsigned long hyp_idmap_start;
 static unsigned long hyp_idmap_end;
 static phys_addr_t hyp_idmap_vector;
 
+#define S2_PGD_SIZE (PTRS_PER_S2_PGD * sizeof(pgd_t))
 #define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
 
 #define KVM_S2PTE_FLAG_IS_IOMAP (1UL << 0)
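A worked example of what S2_PGD_SIZE evaluates to, under illustrative assumptions not taken from the patch (arm64, 4K pages, 40-bit IPA space, so the stage-2 walk starts at level 1 with two concatenated level-1 tables):

/*
 * PTRS_PER_S2_PGD = 2 * 512                   = 1024 entries
 * S2_PGD_SIZE     = 1024 * sizeof(pgd_t) [8B] = 8192 bytes = two 4K pages
 *
 * Because the stage-2 PGD can span several pages, it is allocated with
 * alloc_pages_exact(), which splits the allocation so that each
 * constituent page carries its own refcount (see the hunks below).
 */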
@@ -736,20 +737,6 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
 				     __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
 }
 
-/* Free the HW pgd, one page at a time */
-static void kvm_free_hwpgd(void *hwpgd)
-{
-	free_pages_exact(hwpgd, kvm_get_hwpgd_size());
-}
-
-/* Allocate the HW PGD, making sure that each page gets its own refcount */
-static void *kvm_alloc_hwpgd(void)
-{
-	unsigned int size = kvm_get_hwpgd_size();
-
-	return alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
-}
-
 /**
  * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
  * @kvm:	The KVM struct pointer for the VM.
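The bodies of these two helpers do not vanish: the next two hunks inline them into their only callers, with the new S2_PGD_SIZE macro standing in for kvm_get_hwpgd_size().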
@@ -764,29 +751,17 @@ static void *kvm_alloc_hwpgd(void)
 int kvm_alloc_stage2_pgd(struct kvm *kvm)
 {
 	pgd_t *pgd;
-	void *hwpgd;
 
 	if (kvm->arch.pgd != NULL) {
 		kvm_err("kvm_arch already initialized?\n");
 		return -EINVAL;
 	}
 
-	hwpgd = kvm_alloc_hwpgd();
-	if (!hwpgd)
+	/* Allocate the HW PGD, making sure that each page gets its own refcount */
+	pgd = alloc_pages_exact(S2_PGD_SIZE, GFP_KERNEL | __GFP_ZERO);
+	if (!pgd)
 		return -ENOMEM;
 
-	/*
-	 * When the kernel uses more levels of page tables than the
-	 * guest, we allocate a fake PGD and pre-populate it to point
-	 * to the next-level page table, which will be the real
-	 * initial page table pointed to by the VTTBR.
-	 */
-	pgd = kvm_setup_fake_pgd(hwpgd);
-	if (IS_ERR(pgd)) {
-		kvm_free_hwpgd(hwpgd);
-		return PTR_ERR(pgd);
-	}
-
 	kvm_clean_pgd(pgd);
 	kvm->arch.pgd = pgd;
 	return 0;
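For readability, the resulting function assembled from the hunk above (a reconstruction of the post-patch state, not copied from the tree):

int kvm_alloc_stage2_pgd(struct kvm *kvm)
{
	pgd_t *pgd;

	if (kvm->arch.pgd != NULL) {
		kvm_err("kvm_arch already initialized?\n");
		return -EINVAL;
	}

	/* Allocate the HW PGD, making sure that each page gets its own refcount */
	pgd = alloc_pages_exact(S2_PGD_SIZE, GFP_KERNEL | __GFP_ZERO);
	if (!pgd)
		return -ENOMEM;

	kvm_clean_pgd(pgd);
	kvm->arch.pgd = pgd;
	return 0;
}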
@@ -874,8 +849,8 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
 		return;
 
 	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
-	kvm_free_hwpgd(kvm_get_hwpgd(kvm));
-	kvm_free_fake_pgd(kvm->arch.pgd);
+	/* Free the HW pgd, one page at a time */
+	free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
 	kvm->arch.pgd = NULL;
 }
 
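Note: free_pages_exact() must be passed the same size that was given to alloc_pages_exact(); it releases the region page by page, which is why the allocation side made sure each page had its own refcount.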
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -141,24 +141,6 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
 	return (pmd_val(*pmd) & PMD_S2_RDWR) == PMD_S2_RDONLY;
 }
 
-static inline void *kvm_get_hwpgd(struct kvm *kvm)
-{
-	return kvm->arch.pgd;
-}
-
-static inline unsigned int kvm_get_hwpgd_size(void)
-{
-	return PTRS_PER_S2_PGD * sizeof(pgd_t);
-}
-
-static inline pgd_t *kvm_setup_fake_pgd(pgd_t *hwpgd)
-{
-	return hwpgd;
-}
-
-static inline void kvm_free_fake_pgd(pgd_t *pgd)
-{
-}
 static inline bool kvm_page_empty(void *ptr)
 {
 	struct page *ptr_page = virt_to_page(ptr);
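With the identical stubs gone from both headers, the only stage-2 PGD detail an architecture still provides is PTRS_PER_S2_PGD; the common code derives the allocation size from it through S2_PGD_SIZE.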