drm/radeon: fence virtual address and free it once idle v4

A virtual address needs to be fenced so we know when we can safely
remove it. This patch also properly clears the page table, which was
previously seriously broken.

Kernels 3.5 and 3.4 need a similar patch, adapted for differences in mutex locking.

v2: Force a page table update when unbinding a bo (don't bail out if
    bo_va->valid is true).
v3: Add kernel 3.5/3.4 comment.
v4: Fix compilation warnings.
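
As a rough illustration of the idea described above (not the patch
itself), the sketch below frees a virtual-address mapping only once the
fence protecting it has signaled. All names (gpu_fence, va_mapping,
va_mapping_try_free) are hypothetical, not the radeon API:

/* Minimal userspace sketch: defer freeing a GPU virtual-address mapping
 * until the fence covering the last use of that range has signaled. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct gpu_fence {
        bool signaled;            /* set once the GPU has passed this point */
};

struct va_mapping {
        unsigned long soffset;    /* start of the GPU virtual range */
        unsigned long eoffset;    /* end of the GPU virtual range   */
        struct gpu_fence *fence;  /* last submission using this range */
};

/* Free the mapping only when no in-flight work can still touch the
 * address range; otherwise report "busy" so the caller keeps it on a
 * deferred-free list and retries later. */
static bool va_mapping_try_free(struct va_mapping *m)
{
        if (m->fence && !m->fence->signaled)
                return false;     /* GPU may still use the address: defer */
        free(m);
        return true;              /* idle: safe to tear the mapping down */
}

int main(void)
{
        struct gpu_fence fence = { .signaled = false };
        struct va_mapping *m = calloc(1, sizeof(*m));

        m->soffset = 0x100000;
        m->eoffset = 0x200000;
        m->fence = &fence;

        printf("freed while busy? %d\n", va_mapping_try_free(m)); /* 0 */
        fence.signaled = true;
        printf("freed once idle?  %d\n", va_mapping_try_free(m)); /* 1 */
        return 0;
}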

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Author:    Jerome Glisse
Date:      2012-08-06 12:32:21 -04:00
Committer: Alex Deucher
Parent:    69b62ad8a4
Commit:    e43b5ec05a

5 changed files with 55 additions and 21 deletions

@@ -134,25 +134,16 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
 	struct radeon_device *rdev = rbo->rdev;
 	struct radeon_fpriv *fpriv = file_priv->driver_priv;
 	struct radeon_vm *vm = &fpriv->vm;
-	struct radeon_bo_va *bo_va, *tmp;
 
 	if (rdev->family < CHIP_CAYMAN) {
 		return;
 	}
 
 	if (radeon_bo_reserve(rbo, false)) {
+		dev_err(rdev->dev, "leaking bo va because we fail to reserve bo\n");
 		return;
 	}
-	list_for_each_entry_safe(bo_va, tmp, &rbo->va, bo_list) {
-		if (bo_va->vm == vm) {
-			/* remove from this vm address space */
-			mutex_lock(&vm->mutex);
-			list_del(&bo_va->vm_list);
-			mutex_unlock(&vm->mutex);
-			list_del(&bo_va->bo_list);
-			kfree(bo_va);
-		}
-	}
+	radeon_vm_bo_rmv(rdev, vm, rbo);
 	radeon_bo_unreserve(rbo);
 }
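
The hunk above drops the open-coded bo_va list walk from
radeon_gem_object_close() and leaves removal to radeon_vm_bo_rmv().
Per the description and the v2 note, unbinding must always clear the
page-table entries rather than bailing out when the mapping is still
marked valid. A standalone sketch of that behaviour, assuming a simple
one-level table; page_table, mapping and vm_unbind are hypothetical
names, not the radeon implementation:

/* Minimal userspace sketch: always scrub the PTEs on unbind so no stale
 * translation survives, even if the mapping was marked valid. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VA_PAGES 1024

struct page_table {
        uint64_t entries[VA_PAGES];   /* one PTE per page of the VA range */
};

struct mapping {
        unsigned long first_pfn;      /* first page covered by the mapping */
        unsigned long npages;         /* number of pages mapped            */
        bool valid;                   /* previously used as an early-out   */
};

/* Skipping the clear when 'valid' is set is exactly the bail-out the v2
 * note removes: it would leave live translations behind after unbind. */
static void vm_unbind(struct page_table *pt, struct mapping *m)
{
        for (unsigned long i = 0; i < m->npages; i++)
                pt->entries[m->first_pfn + i] = 0;
        m->valid = false;
}

int main(void)
{
        struct page_table pt = { .entries = { [4] = 0xdeadbeefULL } };
        struct mapping m = { .first_pfn = 4, .npages = 1, .valid = true };

        vm_unbind(&pt, &m);
        printf("pte after unbind: 0x%llx\n",
               (unsigned long long)pt.entries[4]);   /* prints 0x0 */
        return 0;
}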