drm/radeon: switch to drm_*_{get,put} helpers
drm_*_reference() and drm_*_unreference() functions are just compatibility
aliases for drm_*_get() and drm_*_put() and should not be used by new code.
So convert all users of the compatibility functions to the new APIs.

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Cihangir Akturk <cakturk@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 07f65bb22f
parent f62facc2eb
committed by Alex Deucher
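For context, a minimal sketch of why this conversion is purely mechanical: at the time of this series the deprecated names were thin static inline wrappers around the new helpers in include/drm/drm_gem.h. The snippet below is a paraphrase of that relationship, not a verbatim copy of the header:

/*
 * Sketch (not verbatim) of the compatibility aliases in include/drm/drm_gem.h
 * around the time of this commit: the old *_reference()/*_unreference*() names
 * simply forward to the new *_get()/*_put*() helpers.
 */
static inline void drm_gem_object_reference(struct drm_gem_object *obj)
{
	drm_gem_object_get(obj);		/* kref_get(&obj->refcount) underneath */
}

static inline void drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
{
	drm_gem_object_put_unlocked(obj);	/* drop the ref without holding struct_mutex */
}

Because the old names only forward to the new functions, the hunks below are a rename only; the reference-counting behaviour of each ioctl is unchanged.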
@@ -271,7 +271,7 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
 	}
 	r = drm_gem_handle_create(filp, gobj, &handle);
 	/* drop reference from allocate - handle holds it now */
-	drm_gem_object_unreference_unlocked(gobj);
+	drm_gem_object_put_unlocked(gobj);
 	if (r) {
 		up_read(&rdev->exclusive_lock);
 		r = radeon_gem_handle_lockup(rdev, r);
@@ -352,7 +352,7 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
 
 	r = drm_gem_handle_create(filp, gobj, &handle);
 	/* drop reference from allocate - handle holds it now */
-	drm_gem_object_unreference_unlocked(gobj);
+	drm_gem_object_put_unlocked(gobj);
 	if (r)
 		goto handle_lockup;
 
@@ -361,7 +361,7 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
 	return 0;
 
 release_object:
-	drm_gem_object_unreference_unlocked(gobj);
+	drm_gem_object_put_unlocked(gobj);
 
 handle_lockup:
 	up_read(&rdev->exclusive_lock);
@@ -395,7 +395,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 
 	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
 
-	drm_gem_object_unreference_unlocked(gobj);
+	drm_gem_object_put_unlocked(gobj);
 	up_read(&rdev->exclusive_lock);
 	r = radeon_gem_handle_lockup(robj->rdev, r);
 	return r;
@@ -414,11 +414,11 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
 	}
 	robj = gem_to_radeon_bo(gobj);
 	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
-		drm_gem_object_unreference_unlocked(gobj);
+		drm_gem_object_put_unlocked(gobj);
 		return -EPERM;
 	}
 	*offset_p = radeon_bo_mmap_offset(robj);
-	drm_gem_object_unreference_unlocked(gobj);
+	drm_gem_object_put_unlocked(gobj);
 	return 0;
 }
 
@@ -453,7 +453,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
 
 	cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
 	args->domain = radeon_mem_type_to_domain(cur_placement);
-	drm_gem_object_unreference_unlocked(gobj);
+	drm_gem_object_put_unlocked(gobj);
 	return r;
 }
 
@@ -485,7 +485,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 	if (rdev->asic->mmio_hdp_flush &&
 	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
 		robj->rdev->asic->mmio_hdp_flush(rdev);
-	drm_gem_object_unreference_unlocked(gobj);
+	drm_gem_object_put_unlocked(gobj);
 	r = radeon_gem_handle_lockup(rdev, r);
 	return r;
 }
@@ -504,7 +504,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 	robj = gem_to_radeon_bo(gobj);
 	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
-	drm_gem_object_unreference_unlocked(gobj);
+	drm_gem_object_put_unlocked(gobj);
 	return r;
 }
 
@@ -527,7 +527,7 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
 	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
 	radeon_bo_unreserve(rbo);
 out:
-	drm_gem_object_unreference_unlocked(gobj);
+	drm_gem_object_put_unlocked(gobj);
 	return r;
 }
 
@@ -661,14 +661,14 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
 	r = radeon_bo_reserve(rbo, false);
 	if (r) {
 		args->operation = RADEON_VA_RESULT_ERROR;
-		drm_gem_object_unreference_unlocked(gobj);
+		drm_gem_object_put_unlocked(gobj);
 		return r;
 	}
 	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
 	if (!bo_va) {
 		args->operation = RADEON_VA_RESULT_ERROR;
 		radeon_bo_unreserve(rbo);
-		drm_gem_object_unreference_unlocked(gobj);
+		drm_gem_object_put_unlocked(gobj);
 		return -ENOENT;
 	}
 
@@ -695,7 +695,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
 		args->operation = RADEON_VA_RESULT_ERROR;
 	}
 out:
-	drm_gem_object_unreference_unlocked(gobj);
+	drm_gem_object_put_unlocked(gobj);
 	return r;
 }
 
@@ -736,7 +736,7 @@ int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
 
 	radeon_bo_unreserve(robj);
 out:
-	drm_gem_object_unreference_unlocked(gobj);
+	drm_gem_object_put_unlocked(gobj);
 	return r;
 }
 
@@ -762,7 +762,7 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
 
 	r = drm_gem_handle_create(file_priv, gobj, &handle);
 	/* drop reference from allocate - handle holds it now */
-	drm_gem_object_unreference_unlocked(gobj);
+	drm_gem_object_put_unlocked(gobj);
 	if (r) {
 		return r;
 	}
 