drm/ttm: remove use_ticket parameter from ttm_bo_reserve
Not used any more.

Reviewed-by: Sinclair Yeh <syeh@vmware.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
committed by: Alex Deucher
parent: 5ee7b41a8b
commit: dfd5e50ea4
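For context, every hunk below makes the same mechanical change: the fourth boolean argument to ttm_bo_reserve() (the now-unused use_ticket flag) is dropped, and the remaining arguments are left as they were. A minimal before/after sketch of a typical caller (variable names are illustrative, not taken from any one hunk):

	/* Before: ttm_bo_reserve() still took the unused use_ticket flag. */
	ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);

	/* After: the flag is gone; interruptible, no_wait and ticket are unchanged. */
	ret = ttm_bo_reserve(bo, interruptible, false, NULL);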
@@ -421,7 +421,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 	}
 
 	bo = &buf->base;
-	WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, false, NULL));
+	WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, NULL));
 
 	ret = ttm_bo_wait(old_bo, false, false, false);
 	if (unlikely(ret != 0)) {
@@ -56,7 +56,7 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
 
 	vmw_execbuf_release_pinned_bo(dev_priv);
 
-	ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
+	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
 	if (unlikely(ret != 0))
 		goto err;
 
@@ -98,7 +98,7 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
 
 	vmw_execbuf_release_pinned_bo(dev_priv);
 
-	ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
+	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
 	if (unlikely(ret != 0))
 		goto err;
 
@@ -174,7 +174,7 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
 		return ret;
 
 	vmw_execbuf_release_pinned_bo(dev_priv);
-	ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
+	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
 	if (unlikely(ret != 0))
 		goto err_unlock;
 
@@ -225,7 +225,7 @@ int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
+	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
 	if (unlikely(ret != 0))
 		goto err;
 
@@ -326,7 +326,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = ttm_bo_reserve(&vbo->base, false, true, false, NULL);
+	ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
 	BUG_ON(ret != 0);
 	vmw_bo_pin_reserved(vbo, true);
 
@@ -98,7 +98,7 @@ int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
 	kmap_offset = 0;
 	kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
-	ret = ttm_bo_reserve(&dmabuf->base, true, false, false, NULL);
+	ret = ttm_bo_reserve(&dmabuf->base, true, false, NULL);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("reserve failed\n");
 		return -EINVAL;
@@ -318,7 +318,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
 	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
 	kmap_num = (64*64*4) >> PAGE_SHIFT;
 
-	ret = ttm_bo_reserve(bo, true, false, false, NULL);
+	ret = ttm_bo_reserve(bo, true, false, NULL);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("reserve failed\n");
 		return;
@@ -1859,7 +1859,7 @@ int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
 	struct ttm_buffer_object *bo = &buf->base;
 	int ret;
 
-	ttm_bo_reserve(bo, false, false, interruptible, NULL);
+	ttm_bo_reserve(bo, false, false, NULL);
 	ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
 					 validate_as_mob);
 	if (ret)
@@ -222,7 +222,7 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
 	if (bo) {
 		int ret;
 
-		ret = ttm_bo_reserve(bo, false, true, false, NULL);
+		ret = ttm_bo_reserve(bo, false, true, NULL);
 		BUG_ON(ret != 0);
 
 		vmw_fence_single_bo(bo, NULL);
@@ -262,7 +262,7 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		goto out_no_bo;
 
-	ret = ttm_bo_reserve(batch->otable_bo, false, true, false, NULL);
+	ret = ttm_bo_reserve(batch->otable_bo, false, true, NULL);
 	BUG_ON(ret != 0);
 	ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm);
 	if (unlikely(ret != 0))
@@ -357,7 +357,7 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
 		vmw_takedown_otable_base(dev_priv, i,
 					 &batch->otables[i]);
 
-	ret = ttm_bo_reserve(bo, false, true, false, NULL);
+	ret = ttm_bo_reserve(bo, false, true, NULL);
 	BUG_ON(ret != 0);
 
 	vmw_fence_single_bo(bo, NULL);
@@ -440,7 +440,7 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = ttm_bo_reserve(mob->pt_bo, false, true, false, NULL);
+	ret = ttm_bo_reserve(mob->pt_bo, false, true, NULL);
 
 	BUG_ON(ret != 0);
 	ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm);
@@ -545,7 +545,7 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
 	const struct vmw_sg_table *vsgt;
 	int ret;
 
-	ret = ttm_bo_reserve(bo, false, true, false, NULL);
+	ret = ttm_bo_reserve(bo, false, true, NULL);
 	BUG_ON(ret != 0);
 
 	vsgt = vmw_bo_sg_table(bo);
@@ -595,7 +595,7 @@ void vmw_mob_unbind(struct vmw_private *dev_priv,
 	struct ttm_buffer_object *bo = mob->pt_bo;
 
 	if (bo) {
-		ret = ttm_bo_reserve(bo, false, true, false, NULL);
+		ret = ttm_bo_reserve(bo, false, true, NULL);
 		/*
 		 * Noone else should be using this buffer.
 		 */
@@ -129,7 +129,7 @@ static void vmw_resource_release(struct kref *kref)
 	if (res->backup) {
 		struct ttm_buffer_object *bo = &res->backup->base;
 
-		ttm_bo_reserve(bo, false, false, false, NULL);
+		ttm_bo_reserve(bo, false, false, NULL);
 		if (!list_empty(&res->mob_head) &&
 		    res->func->unbind != NULL) {
 			struct ttm_validate_buffer val_buf;
@@ -1717,8 +1717,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
 		if (res->backup) {
 			vbo = res->backup;
 
-			ttm_bo_reserve(&vbo->base, interruptible, false, false,
-				       NULL);
+			ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
 			if (!vbo->pin_count) {
 				ret = ttm_bo_validate
 					(&vbo->base,
@@ -1773,7 +1772,7 @@ void vmw_resource_unpin(struct vmw_resource *res)
 	if (--res->pin_count == 0 && res->backup) {
 		struct vmw_dma_buffer *vbo = res->backup;
 
-		ttm_bo_reserve(&vbo->base, false, false, false, NULL);
+		ttm_bo_reserve(&vbo->base, false, false, NULL);
 		vmw_bo_pin_reserved(vbo, false);
 		ttm_bo_unreserve(&vbo->base);
 	}
@@ -988,7 +988,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		goto out;
 
-	ret = ttm_bo_reserve(&buf->base, false, true, false, NULL);
+	ret = ttm_bo_reserve(&buf->base, false, true, NULL);
 	if (unlikely(ret != 0))
 		goto no_reserve;
 