KVM: MMU: Modify kvm_shadow_walk.entry to accept u64 addr
EPT uses 4 levels by default even on 32-bit PAE hosts (48-bit addresses), but the addr parameter of kvm_shadow_walk->entry() only accepts an unsigned long as the virtual address, which is 32 bits wide on 32-bit PAE. This causes SHADOW_PT_INDEX() to overflow when fetching the level 4 index. Fix it by extending kvm_shadow_walk->entry() to accept a 64-bit addr parameter.

Signed-off-by: Sheng Yang <sheng.yang@intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
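To illustrate the overflow, here is a minimal, self-contained sketch (not the kernel's SHADOW_PT_INDEX macro itself). The constants mirror x86 4-level paging, with 9 index bits per level above a 12-bit page offset, and pt64_index() is a hypothetical stand-in for the index calculation. With a 64-bit address the level 4 index comes out correctly; a value truncated to 32 bits, as gva_t is on a 32-bit PAE build, has already lost bits 39..47, so the level 4 index is wrong:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define PT64_LEVEL_BITS 9

/* Index of 'addr' at the given paging level (1 = lowest table, 4 = top). */
static unsigned int pt64_index(uint64_t addr, int level)
{
	return (addr >> (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS))
	       & ((1u << PT64_LEVEL_BITS) - 1);
}

int main(void)
{
	uint64_t addr = 0x123456789000ULL;   /* needs address bits above bit 31 */
	uint32_t truncated = (uint32_t)addr; /* what a 32-bit unsigned long keeps */

	/* The level 4 index lives in bits 39..47; truncation drops them,
	 * so the walk would start from the wrong top-level entry. */
	printf("level 4 index (u64 addr):  %u\n", pt64_index(addr, 4));      /* 36 */
	printf("level 4 index (truncated): %u\n", pt64_index(truncated, 4)); /* 0  */
	return 0;
}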
@@ -286,7 +286,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
  * Fetch a shadow pte for a specific level in the paging hierarchy.
  */
 static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw,
-				    struct kvm_vcpu *vcpu, gva_t addr,
+				    struct kvm_vcpu *vcpu, u64 addr,
 				    u64 *sptep, int level)
 {
 	struct shadow_walker *sw =
@@ -326,7 +326,7 @@ static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw,
 		metaphysical = 0;
 		table_gfn = gw->table_gfn[level - 2];
 	}
-	shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
+	shadow_page = kvm_mmu_get_page(vcpu, table_gfn, (gva_t)addr, level-1,
 				       metaphysical, access, sptep);
 	if (!metaphysical) {
 		r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 2],