drm/i915: Start returning an error from i915_vma_move_to_active()
Handling such a late error in request construction is tricky, but to
accommodate future patches which may allocate here, we potentially could
err. To handle the error after already adjusting global state to track
the new request, we must finish and submit the request. But we don't
want to use the request as not everything is being tracked by it, so we
opt to cancel the commands inside the request.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180706103947.15919-3-chris@chris-wilson.co.uk
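The resulting calling convention is easiest to see in one place. The
following is a condensed sketch of the pattern, modelled on the
gpu_fill() hunk further down; the move_batch() wrapper and its signature
are invented for illustration, while the i915_* calls are the ones the
patch uses:

/*
 * Sketch only: move_batch() is a hypothetical wrapper condensed from
 * the gpu_fill() selftest hunk below; the i915_* calls are real.
 */
static int move_batch(struct i915_vma *batch, struct i915_vma *vma,
		      struct i915_request *rq)
{
	int err;

	err = i915_vma_move_to_active(batch, rq, 0);
	if (err)
		goto skip_request;

	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	if (err)
		goto skip_request;

	/* Success: finish and submit the request as before. */
	i915_request_add(rq);
	return 0;

skip_request:
	/*
	 * The request already appears in global tracking, so it cannot
	 * simply be unwound: cancel its commands, then finish and submit
	 * it anyway.
	 */
	i915_request_skip(rq, err);
	i915_request_add(rq);
	return err;
}

In the selftests where a failure here is impossible by construction
(live_all_engines(), live_sequential_engines()), the new return value is
instead asserted with GEM_BUG_ON(err) rather than unwound.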
@@ -985,7 +985,10 @@ static int gpu_write(struct i915_vma *vma,
 			goto err_request;
 	}
 
-	i915_vma_move_to_active(batch, rq, 0);
+	err = i915_vma_move_to_active(batch, rq, 0);
+	if (err)
+		goto err_request;
+
 	i915_gem_object_set_active_reference(batch->obj);
 	i915_vma_unpin(batch);
 	i915_vma_close(batch);
@@ -996,7 +999,9 @@ static int gpu_write(struct i915_vma *vma,
 	if (err)
 		goto err_request;
 
-	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	if (err)
+		i915_request_skip(rq, err);
 
 err_request:
 	i915_request_add(rq);
@@ -222,12 +222,12 @@ static int gpu_set(struct drm_i915_gem_object *obj,
 	}
 	intel_ring_advance(rq, cs);
 
-	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 	i915_vma_unpin(vma);
 
 	i915_request_add(rq);
 
-	return 0;
+	return err;
 }
 
 static bool always_valid(struct drm_i915_private *i915)
@@ -170,18 +170,26 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
 	if (err)
 		goto err_request;
 
-	i915_vma_move_to_active(batch, rq, 0);
+	err = i915_vma_move_to_active(batch, rq, 0);
+	if (err)
+		goto skip_request;
+
+	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	if (err)
+		goto skip_request;
+
 	i915_gem_object_set_active_reference(batch->obj);
 	i915_vma_unpin(batch);
 	i915_vma_close(batch);
 
-	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 	i915_vma_unpin(vma);
 
 	i915_request_add(rq);
 
 	return 0;
 
+skip_request:
+	i915_request_skip(rq, err);
 err_request:
 	i915_request_add(rq);
 err_batch:
@@ -464,13 +464,14 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
 		return PTR_ERR(rq);
 	}
 
-	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 
 	i915_request_add(rq);
 
-	i915_gem_object_set_active_reference(obj);
+	__i915_gem_object_release_unless_active(obj);
 	i915_vma_unpin(vma);
-	return 0;
+
+	return err;
 }
 
 static bool assert_mmap_offset(struct drm_i915_private *i915,
@@ -675,7 +675,9 @@ static int live_all_engines(void *arg)
 			i915_gem_object_set_active_reference(batch->obj);
 		}
 
-		i915_vma_move_to_active(batch, request[id], 0);
+		err = i915_vma_move_to_active(batch, request[id], 0);
+		GEM_BUG_ON(err);
+
 		i915_request_get(request[id]);
 		i915_request_add(request[id]);
 	}
@@ -785,7 +787,9 @@ static int live_sequential_engines(void *arg)
 		GEM_BUG_ON(err);
 		request[id]->batch = batch;
 
-		i915_vma_move_to_active(batch, request[id], 0);
+		err = i915_vma_move_to_active(batch, request[id], 0);
+		GEM_BUG_ON(err);
+
 		i915_gem_object_set_active_reference(batch->obj);
 		i915_vma_get(batch);
 
@@ -130,13 +130,19 @@ static int emit_recurse_batch(struct hang *h,
 	if (err)
 		goto unpin_vma;
 
-	i915_vma_move_to_active(vma, rq, 0);
+	err = i915_vma_move_to_active(vma, rq, 0);
+	if (err)
+		goto unpin_hws;
+
 	if (!i915_gem_object_has_active_reference(vma->obj)) {
 		i915_gem_object_get(vma->obj);
 		i915_gem_object_set_active_reference(vma->obj);
 	}
 
-	i915_vma_move_to_active(hws, rq, 0);
+	err = i915_vma_move_to_active(hws, rq, 0);
+	if (err)
+		goto unpin_hws;
+
 	if (!i915_gem_object_has_active_reference(hws->obj)) {
 		i915_gem_object_get(hws->obj);
 		i915_gem_object_set_active_reference(hws->obj);
@@ -205,6 +211,7 @@ static int emit_recurse_batch(struct hang *h,
 
 	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
 
+unpin_hws:
 	i915_vma_unpin(hws);
 unpin_vma:
 	i915_vma_unpin(vma);
@@ -104,13 +104,19 @@ static int emit_recurse_batch(struct spinner *spin,
 	if (err)
 		goto unpin_vma;
 
-	i915_vma_move_to_active(vma, rq, 0);
+	err = i915_vma_move_to_active(vma, rq, 0);
+	if (err)
+		goto unpin_hws;
+
 	if (!i915_gem_object_has_active_reference(vma->obj)) {
 		i915_gem_object_get(vma->obj);
 		i915_gem_object_set_active_reference(vma->obj);
 	}
 
-	i915_vma_move_to_active(hws, rq, 0);
+	err = i915_vma_move_to_active(hws, rq, 0);
+	if (err)
+		goto unpin_hws;
+
 	if (!i915_gem_object_has_active_reference(hws->obj)) {
 		i915_gem_object_get(hws->obj);
 		i915_gem_object_set_active_reference(hws->obj);
@@ -134,6 +140,7 @@ static int emit_recurse_batch(struct spinner *spin,
 
 	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
 
+unpin_hws:
 	i915_vma_unpin(hws);
 unpin_vma:
 	i915_vma_unpin(vma);
@@ -49,6 +49,10 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
 		goto err_pin;
 	}
 
+	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	if (err)
+		goto err_req;
+
 	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
 	if (INTEL_GEN(ctx->i915) >= 8)
 		srm++;
@@ -67,8 +71,6 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
 	}
 	intel_ring_advance(rq, cs);
 
-	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-
 	i915_gem_object_get(result);
 	i915_gem_object_set_active_reference(result);
 