FROMLIST: KVM: arm64: Always zero invalid PTEs

kvm_set_invalid_pte() currently only clears bit 0 from a PTE because
stage2_map_walk_table_post() needs to be able to follow the anchor. In
preparation for re-using bits 63:1 of invalid PTEs, zero the PTE
entirely, and cache the anchor's child pointer upfront so the post-order
walker no longer needs to follow the invalidated entry.

Acked-by: Will Deacon <will@kernel.org>
Suggested-by: Will Deacon <will@kernel.org>
Signed-off-by: Quentin Perret <qperret@google.com>
Link: https://lore.kernel.org/r/20210315143536.214621-29-qperret@google.com
Bug: 178098380
Change-Id: Ia370eb397c69af690d82ab408d4076c9a5b95382
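For illustration, a minimal user-space sketch (not kernel code; the type,
constant, and sample value below are re-created purely for this example)
of why clearing only the valid bit leaves stale bits behind that would be
misread once invalid PTEs carry metadata in bits 63:1:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t kvm_pte_t;

#define KVM_PTE_VALID	((kvm_pte_t)1)

/* Old behaviour: attribute/address bits survive the invalidation. */
static void set_invalid_pte(kvm_pte_t *ptep)
{
	*ptep &= ~KVM_PTE_VALID;
}

/* New behaviour: the invalid PTE is guaranteed to be all zeroes. */
static void clear_pte(kvm_pte_t *ptep)
{
	*ptep = 0;
}

int main(void)
{
	kvm_pte_t a = 0x40000703, b = 0x40000703;

	set_invalid_pte(&a);
	clear_pte(&b);

	/* a is now 0x40000702: bits 63:1 would alias any new metadata. */
	printf("set_invalid_pte: %#llx\nclear_pte: %#llx\n",
	       (unsigned long long)a, (unsigned long long)b);
	return 0;
}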
@@ -156,10 +156,9 @@ static kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_op
 	return mm_ops->phys_to_virt(kvm_pte_to_phys(pte));
 }
 
-static void kvm_set_invalid_pte(kvm_pte_t *ptep)
+static void kvm_clear_pte(kvm_pte_t *ptep)
 {
-	kvm_pte_t pte = *ptep;
-	WRITE_ONCE(*ptep, pte & ~KVM_PTE_VALID);
+	WRITE_ONCE(*ptep, 0);
 }
 
 static void kvm_set_table_pte(kvm_pte_t *ptep, kvm_pte_t *childp,
@@ -444,6 +443,7 @@ struct stage2_map_data {
 	kvm_pte_t			attr;
 
 	kvm_pte_t			*anchor;
+	kvm_pte_t			*childp;
 
 	struct kvm_s2_mmu		*mmu;
 	void				*memcache;
@@ -533,7 +533,7 @@ static int stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level,
 	 * There's an existing different valid leaf entry, so perform
 	 * break-before-make.
 	 */
-	kvm_set_invalid_pte(ptep);
+	kvm_clear_pte(ptep);
 	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, data->mmu, addr, level);
 	mm_ops->put_page(ptep);
 }
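The call sites above and below follow the architectural break-before-make
sequence. A schematic, compilable sketch, with the flush reduced to a stub
standing in for kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, ...) rather than
the kernel API:

#include <stdint.h>

typedef uint64_t kvm_pte_t;

/* Stub modelling kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, ...). */
static void tlb_flush_vmid_ipa_stub(void) { }

static void bbm_replace(kvm_pte_t *ptep, kvm_pte_t new_pte)
{
	*ptep = 0;			/* break: kvm_clear_pte() zaps the old entry */
	tlb_flush_vmid_ipa_stub();	/* no stale translation may stay cached */
	*ptep = new_pte;		/* make: only now install the replacement */
}

int main(void)
{
	kvm_pte_t pte = 0x40000703;

	bbm_replace(&pte, 0x80000703);
	return pte == 0x80000703 ? 0 : 1;
}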
@@ -554,7 +554,8 @@ static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level,
 	if (!kvm_block_mapping_supported(addr, end, data->phys, level))
 		return 0;
 
-	kvm_set_invalid_pte(ptep);
+	data->childp = kvm_pte_follow(*ptep, data->mm_ops);
+	kvm_clear_pte(ptep);
 
 	/*
 	 * Invalidate the whole stage-2, as we may have numerous leaf
@@ -600,7 +601,7 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
 	 * will be mapped lazily.
 	 */
 	if (kvm_pte_valid(pte)) {
-		kvm_set_invalid_pte(ptep);
+		kvm_clear_pte(ptep);
 		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, data->mmu, addr, level);
 		mm_ops->put_page(ptep);
 	}
@@ -616,19 +617,24 @@ static int stage2_map_walk_table_post(u64 addr, u64 end, u32 level,
 				      struct stage2_map_data *data)
 {
 	struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
+	kvm_pte_t *childp;
 	int ret = 0;
 
 	if (!data->anchor)
 		return 0;
 
-	mm_ops->put_page(kvm_pte_follow(*ptep, mm_ops));
-	mm_ops->put_page(ptep);
-
 	if (data->anchor == ptep) {
+		childp = data->childp;
 		data->anchor = NULL;
+		data->childp = NULL;
 		ret = stage2_map_walk_leaf(addr, end, level, ptep, data);
+	} else {
+		childp = kvm_pte_follow(*ptep, mm_ops);
 	}
 
+	mm_ops->put_page(childp);
+	mm_ops->put_page(ptep);
+
 	return ret;
 }
 
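A toy user-space model of the anchor/childp handshake this hunk completes
(names loosely mirror the kernel's, but everything is re-declared for the
sketch): because the pre-order walker now zeroes the anchor PTE entirely,
the post-order walker can only reach the child table through the cached
pointer.

#include <assert.h>
#include <stdint.h>

typedef uint64_t kvm_pte_t;

struct map_data {			/* the two fields this patch touches */
	kvm_pte_t *anchor;
	kvm_pte_t *childp;
};

static kvm_pte_t table[512];		/* stand-in child table page */

static kvm_pte_t *pte_follow(kvm_pte_t pte)
{
	/* models mm_ops->phys_to_virt(kvm_pte_to_phys(pte)) */
	return (kvm_pte_t *)(uintptr_t)(pte & ~(kvm_pte_t)1);
}

/* Pre-order: cache the child *before* zeroing the whole entry. */
static void walk_table_pre(kvm_pte_t *ptep, struct map_data *data)
{
	data->childp = pte_follow(*ptep);
	*ptep = 0;			/* kvm_clear_pte(): bits 63:1 gone too */
	data->anchor = ptep;
}

/* Post-order: at the anchor, the child comes from the cache. */
static kvm_pte_t *walk_table_post(kvm_pte_t *ptep, struct map_data *data)
{
	kvm_pte_t *childp;

	if (data->anchor == ptep) {
		childp = data->childp;
		data->anchor = NULL;
		data->childp = NULL;
	} else {
		childp = pte_follow(*ptep);	/* non-anchor PTEs are intact */
	}
	return childp;			/* caller drops its reference on this */
}

int main(void)
{
	struct map_data data = { 0 };
	kvm_pte_t pte = (kvm_pte_t)(uintptr_t)table | 1;	/* valid table PTE */

	walk_table_pre(&pte, &data);
	assert(pte == 0);				/* fully zeroed */
	assert(walk_table_post(&pte, &data) == table);	/* child still reachable */
	return 0;
}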
@@ -737,7 +743,7 @@ static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
 	 * block entry and rely on the remaining portions being faulted
 	 * back lazily.
 	 */
-	kvm_set_invalid_pte(ptep);
+	kvm_clear_pte(ptep);
 	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, addr, level);
 	mm_ops->put_page(ptep);
 