drm/i915: Move GEM object domain management from struct_mutex to local
Use the per-object local lock to control the cache domain of the individual GEM objects, not struct_mutex. This is a huge leap forward for us in terms of object-level synchronisation; execbuffers are coordinated using the ww_mutex and pread/pwrite is finally fully serialised again.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190528092956.14910-10-chris@chris-wilson.co.uk
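The change follows one pattern throughout the selftests below: any call that switches an object's cache domain, or that publishes a vma to a request, is now bracketed by that object's own lock instead of by dev->struct_mutex. A minimal sketch of the two variants of that pattern, using only helpers that appear in the hunks below and assuming the usual i915 GEM headers; the wrapper function names are illustrative, not part of the patch:

/*
 * Sketch, not from the patch: flush an object into the GTT domain for
 * writing while holding the per-object lock rather than struct_mutex.
 */
static int example_set_to_gtt_write(struct drm_i915_gem_object *obj)
{
	int err;

	i915_gem_object_lock(obj);	/* per-object lock, no struct_mutex */
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);

	return err;
}

/*
 * Sketch, not from the patch: track a vma in a request under the vma's
 * (i.e. its backing object's) lock.
 */
static int example_move_to_active(struct i915_vma *vma,
				  struct i915_request *rq,
				  unsigned int flags)
{
	int err;

	i915_vma_lock(vma);
	err = i915_vma_move_to_active(vma, rq, flags);
	i915_vma_unlock(vma);

	return err;
}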
@@ -960,10 +960,6 @@ static int gpu_write(struct i915_vma *vma,
 
 	GEM_BUG_ON(!intel_engine_can_store_dword(engine));
 
-	err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
-	if (err)
-		return err;
-
 	batch = gpu_write_dw(vma, dword * sizeof(u32), value);
 	if (IS_ERR(batch))
 		return PTR_ERR(batch);
@@ -974,13 +970,19 @@ static int gpu_write(struct i915_vma *vma,
 		goto err_batch;
 	}
 
+	i915_vma_lock(batch);
 	err = i915_vma_move_to_active(batch, rq, 0);
+	i915_vma_unlock(batch);
 	if (err)
 		goto err_request;
 
 	i915_gem_object_set_active_reference(batch->obj);
 
-	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	i915_vma_lock(vma);
+	err = i915_gem_object_set_to_gtt_domain(vma->obj, false);
+	if (err == 0)
+		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	i915_vma_unlock(vma);
 	if (err)
 		goto err_request;
 
@@ -78,7 +78,9 @@ static int gtt_set(struct drm_i915_gem_object *obj,
 	u32 __iomem *map;
 	int err;
 
+	i915_gem_object_lock(obj);
 	err = i915_gem_object_set_to_gtt_domain(obj, true);
+	i915_gem_object_unlock(obj);
 	if (err)
 		return err;
 
@@ -105,7 +107,9 @@ static int gtt_get(struct drm_i915_gem_object *obj,
 	u32 __iomem *map;
 	int err;
 
+	i915_gem_object_lock(obj);
 	err = i915_gem_object_set_to_gtt_domain(obj, false);
+	i915_gem_object_unlock(obj);
 	if (err)
 		return err;
 
@@ -131,7 +135,9 @@ static int wc_set(struct drm_i915_gem_object *obj,
 	u32 *map;
 	int err;
 
+	i915_gem_object_lock(obj);
 	err = i915_gem_object_set_to_wc_domain(obj, true);
+	i915_gem_object_unlock(obj);
 	if (err)
 		return err;
 
@@ -152,7 +158,9 @@ static int wc_get(struct drm_i915_gem_object *obj,
 	u32 *map;
 	int err;
 
+	i915_gem_object_lock(obj);
 	err = i915_gem_object_set_to_wc_domain(obj, false);
+	i915_gem_object_unlock(obj);
 	if (err)
 		return err;
 
@@ -176,7 +184,9 @@ static int gpu_set(struct drm_i915_gem_object *obj,
 	u32 *cs;
 	int err;
 
+	i915_gem_object_lock(obj);
 	err = i915_gem_object_set_to_gtt_domain(obj, true);
+	i915_gem_object_unlock(obj);
 	if (err)
 		return err;
 
@@ -215,7 +225,9 @@ static int gpu_set(struct drm_i915_gem_object *obj,
 	}
 	intel_ring_advance(rq, cs);
 
+	i915_vma_lock(vma);
 	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	i915_vma_unlock(vma);
 	i915_vma_unpin(vma);
 
 	i915_request_add(rq);
@@ -209,7 +209,9 @@ gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
 	i915_gem_object_flush_map(obj);
 	i915_gem_object_unpin_map(obj);
 
+	i915_gem_object_lock(obj);
 	err = i915_gem_object_set_to_gtt_domain(obj, false);
+	i915_gem_object_unlock(obj);
 	if (err)
 		goto err;
 
@@ -261,7 +263,9 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);
 
+	i915_gem_object_lock(obj);
 	err = i915_gem_object_set_to_gtt_domain(obj, false);
+	i915_gem_object_unlock(obj);
 	if (err)
 		return err;
 
@@ -302,11 +306,15 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
 	if (err)
 		goto err_request;
 
+	i915_vma_lock(batch);
 	err = i915_vma_move_to_active(batch, rq, 0);
+	i915_vma_unlock(batch);
 	if (err)
 		goto skip_request;
 
+	i915_vma_lock(vma);
 	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	i915_vma_unlock(vma);
 	if (err)
 		goto skip_request;
 
@@ -754,7 +762,9 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);
 
+	i915_gem_object_lock(obj);
 	err = i915_gem_object_set_to_gtt_domain(obj, false);
+	i915_gem_object_unlock(obj);
 	if (err)
 		return err;
 
@@ -780,11 +790,15 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
 	if (err)
 		goto err_request;
 
+	i915_vma_lock(batch);
 	err = i915_vma_move_to_active(batch, rq, 0);
+	i915_vma_unlock(batch);
 	if (err)
 		goto skip_request;
 
+	i915_vma_lock(vma);
 	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	i915_vma_unlock(vma);
 	if (err)
 		goto skip_request;
 
@@ -1345,7 +1359,9 @@ static int write_to_scratch(struct i915_gem_context *ctx,
 	if (err)
 		goto err_request;
 
+	i915_vma_lock(vma);
 	err = i915_vma_move_to_active(vma, rq, 0);
+	i915_vma_unlock(vma);
 	if (err)
 		goto skip_request;
 
@@ -1440,7 +1456,9 @@ static int read_from_scratch(struct i915_gem_context *ctx,
 	if (err)
 		goto err_request;
 
+	i915_vma_lock(vma);
 	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	i915_vma_unlock(vma);
 	if (err)
 		goto skip_request;
 
@@ -1449,7 +1467,9 @@ static int read_from_scratch(struct i915_gem_context *ctx,
 
 	i915_request_add(rq);
 
+	i915_gem_object_lock(obj);
 	err = i915_gem_object_set_to_cpu_domain(obj, false);
+	i915_gem_object_unlock(obj);
 	if (err)
 		goto err;
 
@@ -110,7 +110,9 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
 	GEM_BUG_ON(view.partial.size > nreal);
 	cond_resched();
 
+	i915_gem_object_lock(obj);
 	err = i915_gem_object_set_to_gtt_domain(obj, true);
+	i915_gem_object_unlock(obj);
 	if (err) {
 		pr_err("Failed to flush to GTT write domain; err=%d\n",
 		       err);
@@ -142,7 +144,9 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
 		if (offset >= obj->base.size)
 			continue;
 
+		i915_gem_object_lock(obj);
 		i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+		i915_gem_object_unlock(obj);
 
 		p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
 		cpu = kmap(p) + offset_in_page(offset);
@@ -344,7 +348,9 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
 		return PTR_ERR(rq);
 	}
 
+	i915_vma_lock(vma);
 	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	i915_vma_unlock(vma);
 
 	i915_request_add(rq);
 
@@ -46,9 +46,9 @@ static int mock_phys_object(void *arg)
 	}
 
 	/* Make the object dirty so that put_pages must do copy back the data */
-	mutex_lock(&i915->drm.struct_mutex);
+	i915_gem_object_lock(obj);
 	err = i915_gem_object_set_to_gtt_domain(obj, true);
-	mutex_unlock(&i915->drm.struct_mutex);
+	i915_gem_object_unlock(obj);
 	if (err) {
 		pr_err("i915_gem_object_set_to_gtt_domain failed with err=%d\n",
 		       err);