KVM: x86: improve reexecute_instruction

The current reexecute_instruction() cannot properly detect failed
instruction emulation: it lets the guest retry all instructions except
those that access an error pfn.

One case it misses is nested write protection, where the page we want
to write is itself in use as a page table (e.g. a PDE) mapping that same
write, so the mapping chains to itself. In that case we should stop
emulation and report the failure to userspace.

Reviewed-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Commit:    93c05d3ef2
Parent:    95b3cf69bd
Author:    Xiao Guangrong
Date:      2013-01-13 23:49:07 +08:00
Committer: Marcelo Tosatti

3 changed files with 45 additions and 11 deletions
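
The heart of the detection in the diff below is one XOR per page-table
level: walker->gfn ^ walker->table_gfn[level - 1] is zero exactly when
the written gfn is one of its own page-table pages (the new
write_fault_to_shadow_pgtable case), and is zero under the large-page
mask when the two merely share a large-page region (the existing
self-change case that forces 4K mappings). Below is a minimal
standalone sketch of that arithmetic; PAGES_PER_HPAGE, check_level and
the sample gfns are hypothetical, standing in for the kernel's
KVM_PAGES_PER_HPAGE() and walker state:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

/* Hypothetical stand-in for KVM_PAGES_PER_HPAGE(): a 2M large page
 * covers 512 4K frames. */
#define PAGES_PER_HPAGE 512ULL

/* One level of the patch's walk: the XOR of two gfns is zero iff they
 * are the same frame; the XOR under the large-page mask is zero iff
 * they fall inside the same large-page region. */
static void check_level(gfn_t fault_gfn, gfn_t table_gfn,
			bool *self_changed, bool *write_fault_to_pgtable)
{
	gfn_t mask = ~(PAGES_PER_HPAGE - 1);
	gfn_t gfn = fault_gfn ^ table_gfn;

	*self_changed |= !(gfn & mask);  /* same large-page region */
	*write_fault_to_pgtable |= !gfn; /* exactly the same frame  */
}

int main(void)
{
	bool self_changed = false, wf_to_pgtable = false;

	/* Fault gfn and table gfn share a 2M region but differ. */
	check_level(0x1000, 0x10ff, &self_changed, &wf_to_pgtable);
	printf("same region: self_changed=%d pgtable=%d\n",
	       self_changed, wf_to_pgtable);	/* prints 1 0 */

	/* Fault gfn is the table gfn itself: the self-mapping case. */
	check_level(0x1000, 0x1000, &self_changed, &wf_to_pgtable);
	printf("self-map:    self_changed=%d pgtable=%d\n",
	       self_changed, wf_to_pgtable);	/* prints 1 1 */
	return 0;
}

In the first call the XOR is nonzero but vanishes under the 2M mask, so
only self_changed is set; the second call hits the exact-frame case and
sets both flags.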

arch/x86/kvm/paging_tmpl.h

@@ -497,26 +497,34 @@ out_gpte_changed:
  * created when kvm establishes shadow page table that stop kvm using large
  * page size. Do it early can avoid unnecessary #PF and emulation.
  *
+ * @write_fault_to_shadow_pgtable will return true if the fault gfn is
+ * currently used as its page table.
+ *
  * Note: the PDPT page table is not checked for PAE-32 bit guest. It is ok
  * since the PDPT is always shadowed, that means, we can not use large page
  * size to map the gfn which is used as PDPT.
  */
 static bool
 FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
-			      struct guest_walker *walker, int user_fault)
+			      struct guest_walker *walker, int user_fault,
+			      bool *write_fault_to_shadow_pgtable)
 {
 	int level;
 	gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1);
+	bool self_changed = false;
 
 	if (!(walker->pte_access & ACC_WRITE_MASK ||
 	      (!is_write_protection(vcpu) && !user_fault)))
 		return false;
 
-	for (level = walker->level; level <= walker->max_level; level++)
-		if (!((walker->gfn ^ walker->table_gfn[level - 1]) & mask))
-			return true;
+	for (level = walker->level; level <= walker->max_level; level++) {
+		gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1];
+
+		self_changed |= !(gfn & mask);
+		*write_fault_to_shadow_pgtable |= !gfn;
+	}
 
-	return false;
+	return self_changed;
 }
 
 /*
@@ -544,7 +552,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	int level = PT_PAGE_TABLE_LEVEL;
 	int force_pt_level;
 	unsigned long mmu_seq;
-	bool map_writable;
+	bool map_writable, is_self_change_mapping;
 
 	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
 
@@ -572,9 +580,14 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 		return 0;
 	}
 
+	vcpu->arch.write_fault_to_shadow_pgtable = false;
+
+	is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
+	      &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);
+
 	if (walker.level >= PT_DIRECTORY_LEVEL)
 		force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn)
-		   || FNAME(is_self_change_mapping)(vcpu, &walker, user_fault);
+		   || is_self_change_mapping;
 	else
 		force_pt_level = 1;
 	if (!force_pt_level) {
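
The x86.c side of this commit (two of the three changed files are not
shown here) is where reexecute_instruction() consumes the flag that
FNAME(page_fault) now records in
vcpu->arch.write_fault_to_shadow_pgtable. A much-simplified,
hypothetical sketch of the decision it enables, not the kernel code
itself:

#include <stdbool.h>

/* Hypothetical sketch: retrying in the guest (after unprotecting the
 * written gfn) can only make progress when the write did not fault on
 * its own page-table page; unprotecting a self-mapped page table just
 * recreates the same write-protect fault. */
static bool can_retry_emulation(bool write_fault_to_shadow_pgtable)
{
	return !write_fault_to_shadow_pgtable;
}

When this returns false, the emulation failure is reported to userspace
rather than retried, since no amount of unprotecting can make the
guest's write succeed.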