drm/i915/gvt: Factor intel_vgpu_page_track
As the data structure of "intel_vgpu_guest_page" will become much heavier in the future, it's better to factor out the guest memory page track mechanism as early as possible.

Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
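For orientation, below is a minimal sketch of the shape such a factored-out tracking structure could take. It is inferred only from the accessors visible in this diff (t->gfn and t->handler(t, pa, p_data, bytes)); the field set, field types, and the comment wording are assumptions, not the actual definition from the GVT-g headers.

    /*
     * Sketch only: an assumed layout for the factored-out page-track
     * structure, reconstructed from how it is used in this patch
     * (t->gfn, t->handler(t, pa, p_data, bytes)). The real definition
     * in the i915 GVT-g code may differ.
     */
    #include <linux/types.h>

    struct intel_vgpu_page_track {
            unsigned long gfn;      /* guest frame number under write protection */
            /* invoked when a write to the tracked page is trapped */
            int (*handler)(void *t, u64 pa, void *p_data, unsigned int bytes);
    };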
@@ -117,18 +117,18 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
 		else
 			memcpy(pt, p_data, bytes);
 
-	} else if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
-		struct intel_vgpu_guest_page *gp;
+	} else if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
+		struct intel_vgpu_page_track *t;
 
 		/* Since we enter the failsafe mode early during guest boot,
 		 * guest may not have chance to set up its ppgtt table, so
 		 * there should not be any wp pages for guest. Keep the wp
 		 * related code here in case we need to handle it in furture.
 		 */
-		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
-		if (gp) {
+		t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
+		if (t) {
 			/* remove write protection to prevent furture traps */
-			intel_vgpu_clean_guest_page(vgpu, gp);
+			intel_vgpu_clean_page_track(vgpu, t);
 			if (read)
 				intel_gvt_hypervisor_read_gpa(vgpu, pa,
 						p_data, bytes);
@@ -170,17 +170,17 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 		return ret;
 	}
 
-	if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
-		struct intel_vgpu_guest_page *gp;
+	if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
+		struct intel_vgpu_page_track *t;
 
-		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
-		if (gp) {
+		t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
+		if (t) {
 			ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
 					p_data, bytes);
 			if (ret) {
 				gvt_vgpu_err("guest page read error %d, "
 					"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
-					ret, gp->gfn, pa, *(u32 *)p_data,
+					ret, t->gfn, pa, *(u32 *)p_data,
 					bytes);
 			}
 			mutex_unlock(&gvt->lock);
@@ -267,17 +267,17 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 		return ret;
 	}
 
-	if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
-		struct intel_vgpu_guest_page *gp;
+	if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
+		struct intel_vgpu_page_track *t;
 
-		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
-		if (gp) {
-			ret = gp->handler(gp, pa, p_data, bytes);
+		t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
+		if (t) {
+			ret = t->handler(t, pa, p_data, bytes);
 			if (ret) {
 				gvt_err("guest page write error %d, "
 					"gfn 0x%lx, pa 0x%llx, "
 					"var 0x%x, len %d\n",
-					ret, gp->gfn, pa,
+					ret, t->gfn, pa,
 					*(u32 *)p_data, bytes);
 			}
 			mutex_unlock(&gvt->lock);
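As a usage illustration, the write path above dispatches trapped writes through the handler stored on the track. A hypothetical handler matching the call shape ret = t->handler(t, pa, p_data, bytes) might look like the following; the function name and body are placeholders, not taken from this patch.

    /*
     * Hypothetical example of a write-protection handler with the
     * signature implied by t->handler(t, pa, p_data, bytes). A real
     * handler (e.g. for shadow PPGTT maintenance) would update the
     * shadow page table instead of just logging the access.
     */
    static int example_track_write_handler(void *data, u64 pa,
    		void *p_data, unsigned int bytes)
    {
    	struct intel_vgpu_page_track *t = data;

    	pr_debug("trapped write: gfn 0x%lx pa 0x%llx len %u\n",
    		 t->gfn, pa, bytes);
    	return 0;
    }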