Merge tag 'drm-intel-gt-next-2020-09-07' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
(Same content as drm-intel-gt-next-2020-09-04-3, S-o-b's added)

UAPI Changes:
(- Potential implicit changes from WW locking refactoring)

Cross-subsystem Changes:
(- WW locking changes should align the i915 locking more with others)

Driver Changes:

- MAJOR: Apply WW locking across the driver (Maarten)
- Reverts for 5 commits to make applying WW locking faster (Maarten)
- Disable preparser around invalidations on Tigerlake for non-RCS engines (Chris)
- Add missing dma_fence_put() for error case of syncobj timeline (Chris)
- Parse command buffer earlier in eb_relocate(slow) to facilitate backoff (Maarten)
- Pin engine before pinning all objects (Maarten)
- Rework intel_context pinning to do everything outside of pin_mutex (Maarten)
- Avoid tracking GEM context until registered (Cc: stable, Chris)
- Provide a fastpath for waiting on vma bindings (Chris)
- Fixes to preempt-to-busy mechanism (Chris)
- Distinguish the virtual breadcrumbs from the irq breadcrumbs (Chris)
- Switch to object allocations for page directories (Chris)
- Hold context/request reference while breadcrumbs are active (Chris)
- Make sure execbuffer always passes ww state to i915_vma_pin (Maarten)
- Code refactoring to facilitate use of WW locking (Maarten)
- Locking refactoring to use more granular locking (Maarten, Chris)
- Support for multiple pinned timelines per engine (Chris)
- Move complication of I915_GEM_THROTTLE to the ioctl from general code (Chris)
- Make active tracking/vma page-directory stash work preallocated (Chris)
- Avoid flushing submission tasklet too often (Chris)
- Reduce context termination list iteration guard to RCU (Chris)
- Reductions to locking contention (Chris)
- Fixes for issues found by CI (Chris)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Joonas Lahtinen <jlahtine@jlahtine-mobl.ger.corp.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200907130039.GA27766@jlahtine-mobl.ger.corp.intel.com
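Nearly every hunk below is converted to the same wound/wait ("ww") transaction shape: initialise an i915_gem_ww_ctx, take all object locks and pins inside a retry: section, and on -EDEADLK back off and replay. A minimal sketch of that pattern, assuming only the i915 helpers visible in this diff (the two objects and do_work() are hypothetical stand-ins, not part of this series):

	static int do_work(struct drm_i915_gem_object *a,
			   struct drm_i915_gem_object *b); /* hypothetical */

	static int ww_transaction_sketch(struct drm_i915_gem_object *a,
					 struct drm_i915_gem_object *b)
	{
		struct i915_gem_ww_ctx ww;
		int err;

		/* true = use interruptible waits while acquiring */
		i915_gem_ww_ctx_init(&ww, true);
	retry:
		err = i915_gem_object_lock(a, &ww);
		if (!err)
			err = i915_gem_object_lock(b, &ww);
		if (!err)
			err = do_work(a, b); /* pin vmas, build a request, ... */

		if (err == -EDEADLK) {
			/*
			 * Lock-order conflict with another transaction: drop
			 * all held locks, sleep on the contended one, replay.
			 */
			err = i915_gem_ww_ctx_backoff(&ww);
			if (!err)
				goto retry;
		}
		i915_gem_ww_ctx_fini(&ww); /* unlocks everything on ww.obj_list */
		return err;
	}

Because backoff releases every lock the context has collected, helpers deep in the call chain can acquire in any order; the wound/wait machinery guarantees forward progress instead of a driver-wide lock-ordering audit.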
@@ -32,12 +32,13 @@ static void vma_clear_pages(struct i915_vma *vma)
 	vma->pages = NULL;
 }
 
-static int vma_bind(struct i915_address_space *vm,
-		    struct i915_vma *vma,
-		    enum i915_cache_level cache_level,
-		    u32 flags)
+static void vma_bind(struct i915_address_space *vm,
+		     struct i915_vm_pt_stash *stash,
+		     struct i915_vma *vma,
+		     enum i915_cache_level cache_level,
+		     u32 flags)
 {
-	return vm->vma_ops.bind_vma(vm, vma, cache_level, flags);
+	vm->vma_ops.bind_vma(vm, stash, vma, cache_level, flags);
 }
 
 static void vma_unbind(struct i915_address_space *vm, struct i915_vma *vma)
@@ -157,6 +158,7 @@ static void clear_pages_worker(struct work_struct *work)
 	struct clear_pages_work *w = container_of(work, typeof(*w), work);
 	struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
 	struct i915_vma *vma = w->sleeve->vma;
+	struct i915_gem_ww_ctx ww;
 	struct i915_request *rq;
 	struct i915_vma *batch;
 	int err = w->dma.error;
@@ -172,17 +174,20 @@ static void clear_pages_worker(struct work_struct *work)
 	obj->read_domains = I915_GEM_GPU_DOMAINS;
 	obj->write_domain = 0;
 
-	err = i915_vma_pin(vma, 0, 0, PIN_USER);
-	if (unlikely(err))
+	i915_gem_ww_ctx_init(&ww, false);
+	intel_engine_pm_get(w->ce->engine);
+retry:
+	err = intel_context_pin_ww(w->ce, &ww);
+	if (err)
 		goto out_signal;
 
-	batch = intel_emit_vma_fill_blt(w->ce, vma, w->value);
+	batch = intel_emit_vma_fill_blt(w->ce, vma, &ww, w->value);
 	if (IS_ERR(batch)) {
 		err = PTR_ERR(batch);
-		goto out_unpin;
+		goto out_ctx;
 	}
 
-	rq = intel_context_create_request(w->ce);
+	rq = i915_request_create(w->ce);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		goto out_batch;
@@ -224,9 +229,19 @@ out_request:
 	i915_request_add(rq);
 out_batch:
 	intel_emit_vma_release(w->ce, batch);
-out_unpin:
-	i915_vma_unpin(vma);
+out_ctx:
+	intel_context_unpin(w->ce);
 out_signal:
+	if (err == -EDEADLK) {
+		err = i915_gem_ww_ctx_backoff(&ww);
+		if (!err)
+			goto retry;
+	}
+	i915_gem_ww_ctx_fini(&ww);
+
+	i915_vma_unpin(w->sleeve->vma);
+	intel_engine_pm_put(w->ce->engine);
+
 	if (unlikely(err)) {
 		dma_fence_set_error(&w->dma, err);
 		dma_fence_signal(&w->dma);
@@ -234,6 +249,44 @@ out_signal:
 	}
 }
 
+static int pin_wait_clear_pages_work(struct clear_pages_work *w,
+				     struct intel_context *ce)
+{
+	struct i915_vma *vma = w->sleeve->vma;
+	struct i915_gem_ww_ctx ww;
+	int err;
+
+	i915_gem_ww_ctx_init(&ww, false);
+retry:
+	err = i915_gem_object_lock(vma->obj, &ww);
+	if (err)
+		goto out;
+
+	err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
+	if (unlikely(err))
+		goto out;
+
+	err = i915_sw_fence_await_reservation(&w->wait,
+					      vma->obj->base.resv, NULL,
+					      true, 0, I915_FENCE_GFP);
+	if (err)
+		goto err_unpin_vma;
+
+	dma_resv_add_excl_fence(vma->obj->base.resv, &w->dma);
+
+err_unpin_vma:
+	if (err)
+		i915_vma_unpin(vma);
+out:
+	if (err == -EDEADLK) {
+		err = i915_gem_ww_ctx_backoff(&ww);
+		if (!err)
+			goto retry;
+	}
+	i915_gem_ww_ctx_fini(&ww);
+	return err;
+}
+
 static int __i915_sw_fence_call
 clear_pages_work_notify(struct i915_sw_fence *fence,
 			enum i915_sw_fence_notify state)
@@ -287,17 +340,9 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
 	dma_fence_init(&work->dma, &clear_pages_work_ops, &fence_lock, 0, 0);
 	i915_sw_fence_init(&work->wait, clear_pages_work_notify);
 
-	i915_gem_object_lock(obj);
-	err = i915_sw_fence_await_reservation(&work->wait,
-					      obj->base.resv, NULL, true, 0,
-					      I915_FENCE_GFP);
-	if (err < 0) {
+	err = pin_wait_clear_pages_work(work, ce);
+	if (err < 0)
 		dma_fence_set_error(&work->dma, err);
-	} else {
-		dma_resv_add_excl_fence(obj->base.resv, &work->dma);
-		err = 0;
-	}
-	i915_gem_object_unlock(obj);
 
 	dma_fence_get(&work->dma);
 	i915_sw_fence_commit(&work->wait);
@@ -439,29 +439,36 @@ static bool __cancel_engine(struct intel_engine_cs *engine)
 	return __reset_engine(engine);
 }
 
-static struct intel_engine_cs *__active_engine(struct i915_request *rq)
+static bool
+__active_engine(struct i915_request *rq, struct intel_engine_cs **active)
 {
 	struct intel_engine_cs *engine, *locked;
+	bool ret = false;
 
 	/*
 	 * Serialise with __i915_request_submit() so that it sees
 	 * is-banned?, or we know the request is already inflight.
+	 *
+	 * Note that rq->engine is unstable, and so we double
+	 * check that we have acquired the lock on the final engine.
	 */
 	locked = READ_ONCE(rq->engine);
 	spin_lock_irq(&locked->active.lock);
 	while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
 		spin_unlock(&locked->active.lock);
-		spin_lock(&engine->active.lock);
 		locked = engine;
+		spin_lock(&locked->active.lock);
 	}
 
-	engine = NULL;
-	if (i915_request_is_active(rq) && rq->fence.error != -EIO)
-		engine = rq->engine;
+	if (!i915_request_completed(rq)) {
+		if (i915_request_is_active(rq) && rq->fence.error != -EIO)
+			*active = locked;
+		ret = true;
+	}
 
 	spin_unlock_irq(&locked->active.lock);
 
-	return engine;
+	return ret;
 }
 
 static struct intel_engine_cs *active_engine(struct intel_context *ce)
@@ -472,17 +479,16 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce)
 	if (!ce->timeline)
 		return NULL;
 
-	mutex_lock(&ce->timeline->mutex);
-	list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
-		if (i915_request_completed(rq))
-			break;
+	rcu_read_lock();
+	list_for_each_entry_rcu(rq, &ce->timeline->requests, link) {
+		if (i915_request_is_active(rq) && i915_request_completed(rq))
+			continue;
 
 		/* Check with the backend if the request is inflight */
-		engine = __active_engine(rq);
-		if (engine)
+		if (__active_engine(rq, &engine))
 			break;
 	}
-	mutex_unlock(&ce->timeline->mutex);
+	rcu_read_unlock();
 
 	return engine;
 }
@@ -713,6 +719,7 @@ __create_context(struct drm_i915_private *i915)
 	ctx->i915 = i915;
 	ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
 	mutex_init(&ctx->mutex);
+	INIT_LIST_HEAD(&ctx->link);
 
 	spin_lock_init(&ctx->stale.lock);
 	INIT_LIST_HEAD(&ctx->stale.engines);
@@ -740,10 +747,6 @@ __create_context(struct drm_i915_private *i915)
 	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
 		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
 
-	spin_lock(&i915->gem.contexts.lock);
-	list_add_tail(&ctx->link, &i915->gem.contexts.list);
-	spin_unlock(&i915->gem.contexts.lock);
-
 	return ctx;
 
 err_free:
@@ -889,7 +892,7 @@ i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
 	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
 		struct intel_timeline *timeline;
 
-		timeline = intel_timeline_create(&i915->gt, NULL);
+		timeline = intel_timeline_create(&i915->gt);
 		if (IS_ERR(timeline)) {
 			context_close(ctx);
 			return ERR_CAST(timeline);
@@ -931,6 +934,7 @@ static int gem_context_register(struct i915_gem_context *ctx,
 				struct drm_i915_file_private *fpriv,
 				u32 *id)
 {
+	struct drm_i915_private *i915 = ctx->i915;
 	struct i915_address_space *vm;
 	int ret;
 
@@ -949,8 +953,16 @@ static int gem_context_register(struct i915_gem_context *ctx,
 	/* And finally expose ourselves to userspace via the idr */
 	ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL);
 	if (ret)
-		put_pid(fetch_and_zero(&ctx->pid));
+		goto err_pid;
+
+	spin_lock(&i915->gem.contexts.lock);
+	list_add_tail(&ctx->link, &i915->gem.contexts.list);
+	spin_unlock(&i915->gem.contexts.lock);
+
+	return 0;
 
+err_pid:
+	put_pid(fetch_and_zero(&ctx->pid));
 	return ret;
 }
 
@@ -1094,6 +1106,7 @@ I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
 static int context_barrier_task(struct i915_gem_context *ctx,
 				intel_engine_mask_t engines,
 				bool (*skip)(struct intel_context *ce, void *data),
+				int (*pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void *data),
 				int (*emit)(struct i915_request *rq, void *data),
 				void (*task)(void *data),
 				void *data)
@@ -1101,6 +1114,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
 	struct context_barrier_task *cb;
 	struct i915_gem_engines_iter it;
 	struct i915_gem_engines *e;
+	struct i915_gem_ww_ctx ww;
 	struct intel_context *ce;
 	int err = 0;
 
@@ -1138,10 +1152,21 @@ static int context_barrier_task(struct i915_gem_context *ctx,
 		if (skip && skip(ce, data))
 			continue;
 
-		rq = intel_context_create_request(ce);
+		i915_gem_ww_ctx_init(&ww, true);
+retry:
+		err = intel_context_pin_ww(ce, &ww);
+		if (err)
+			goto err;
+
+		if (pin)
+			err = pin(ce, &ww, data);
+		if (err)
+			goto err_unpin;
+
+		rq = i915_request_create(ce);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
-			break;
+			goto err_unpin;
 		}
 
 		err = 0;
@@ -1151,6 +1176,16 @@ static int context_barrier_task(struct i915_gem_context *ctx,
 			err = i915_active_add_request(&cb->base, rq);
 
 		i915_request_add(rq);
+err_unpin:
+		intel_context_unpin(ce);
+err:
+		if (err == -EDEADLK) {
+			err = i915_gem_ww_ctx_backoff(&ww);
+			if (!err)
+				goto retry;
+		}
+		i915_gem_ww_ctx_fini(&ww);
+
 		if (err)
 			break;
 	}
@@ -1206,6 +1241,17 @@ static void set_ppgtt_barrier(void *data)
 	i915_vm_close(old);
 }
 
+static int pin_ppgtt_update(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void *data)
+{
+	struct i915_address_space *vm = ce->vm;
+
+	if (!HAS_LOGICAL_RING_CONTEXTS(vm->i915))
+		/* ppGTT is not part of the legacy context image */
+		return gen6_ppgtt_pin(i915_vm_to_ppgtt(vm), ww);
+
+	return 0;
+}
+
 static int emit_ppgtt_update(struct i915_request *rq, void *data)
 {
 	struct i915_address_space *vm = rq->context->vm;
@@ -1262,20 +1308,10 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
 
 static bool skip_ppgtt_update(struct intel_context *ce, void *data)
 {
-	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
-		return true;
-
 	if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915))
-		return false;
-
-	if (!atomic_read(&ce->pin_count))
-		return true;
-
-	/* ppGTT is not part of the legacy context image */
-	if (gen6_ppgtt_pin(i915_vm_to_ppgtt(ce->vm)))
-		return true;
-
-	return false;
+		return !ce->state;
+	else
+		return !atomic_read(&ce->pin_count);
 }
 
 static int set_ppgtt(struct drm_i915_file_private *file_priv,
@@ -1326,6 +1362,7 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
	 */
 	err = context_barrier_task(ctx, ALL_ENGINES,
 				   skip_ppgtt_update,
+				   pin_ppgtt_update,
 				   emit_ppgtt_update,
 				   set_ppgtt_barrier,
 				   old);
@@ -128,7 +128,7 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
 	if (err)
 		return err;
 
-	err = i915_gem_object_lock_interruptible(obj);
+	err = i915_gem_object_lock_interruptible(obj, NULL);
 	if (err)
 		goto out;
 
@@ -149,7 +149,7 @@ static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
 	if (err)
 		return err;
 
-	err = i915_gem_object_lock_interruptible(obj);
+	err = i915_gem_object_lock_interruptible(obj, NULL);
 	if (err)
 		goto out;
 
@@ -32,11 +32,17 @@ void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
 	if (!i915_gem_object_is_framebuffer(obj))
 		return;
 
-	i915_gem_object_lock(obj);
+	i915_gem_object_lock(obj, NULL);
 	__i915_gem_object_flush_for_display(obj);
 	i915_gem_object_unlock(obj);
 }
 
+void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj)
+{
+	if (i915_gem_object_is_framebuffer(obj))
+		__i915_gem_object_flush_for_display(obj);
+}
+
 /**
  * Moves a single object to the WC read, and possibly write domain.
  * @obj: object to act on
@@ -197,18 +203,12 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
-	ret = i915_gem_object_lock_interruptible(obj);
-	if (ret)
-		return ret;
-
 	/* Always invalidate stale cachelines */
 	if (obj->cache_level != cache_level) {
 		i915_gem_object_set_cache_coherency(obj, cache_level);
 		obj->cache_dirty = true;
 	}
 
-	i915_gem_object_unlock(obj);
-
 	/* The cache-level will be applied when each vma is rebound. */
 	return i915_gem_object_unbind(obj,
 				      I915_GEM_OBJECT_UNBIND_ACTIVE |
@@ -293,7 +293,12 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
 		goto out;
 	}
 
+	ret = i915_gem_object_lock_interruptible(obj, NULL);
+	if (ret)
+		goto out;
+
 	ret = i915_gem_object_set_cache_level(obj, level);
+	i915_gem_object_unlock(obj);
 
 out:
 	i915_gem_object_put(obj);
@@ -313,6 +318,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     unsigned int flags)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct i915_gem_ww_ctx ww;
 	struct i915_vma *vma;
 	int ret;
 
@@ -320,6 +326,11 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	if (HAS_LMEM(i915) && !i915_gem_object_is_lmem(obj))
 		return ERR_PTR(-EINVAL);
 
+	i915_gem_ww_ctx_init(&ww, true);
+retry:
+	ret = i915_gem_object_lock(obj, &ww);
+	if (ret)
+		goto err;
 	/*
	 * The display engine is not coherent with the LLC cache on gen6. As
	 * a result, we make sure that the pinning that is about to occur is
@@ -334,7 +345,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 					      HAS_WT(i915) ?
 					      I915_CACHE_WT : I915_CACHE_NONE);
 	if (ret)
-		return ERR_PTR(ret);
+		goto err;
 
 	/*
	 * As the user may map the buffer once pinned in the display plane
@@ -347,18 +358,31 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	vma = ERR_PTR(-ENOSPC);
 	if ((flags & PIN_MAPPABLE) == 0 &&
 	    (!view || view->type == I915_GGTT_VIEW_NORMAL))
-		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
-					       flags |
-					       PIN_MAPPABLE |
-					       PIN_NONBLOCK);
-	if (IS_ERR(vma))
-		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
-	if (IS_ERR(vma))
-		return vma;
+		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, view, 0, alignment,
+						  flags | PIN_MAPPABLE |
+						  PIN_NONBLOCK);
+	if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK))
+		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, view, 0,
+						  alignment, flags);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto err;
+	}
 
 	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
 
-	i915_gem_object_flush_if_display(obj);
+	i915_gem_object_flush_if_display_locked(obj);
+
+err:
+	if (ret == -EDEADLK) {
+		ret = i915_gem_ww_ctx_backoff(&ww);
+		if (!ret)
+			goto retry;
+	}
+	i915_gem_ww_ctx_fini(&ww);
+
+	if (ret)
+		return ERR_PTR(ret);
 
 	return vma;
 }
@@ -536,7 +560,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	if (err)
 		goto out;
 
-	err = i915_gem_object_lock_interruptible(obj);
+	err = i915_gem_object_lock_interruptible(obj, NULL);
 	if (err)
 		goto out_unpin;
 
@@ -576,19 +600,17 @@ int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
 	if (!i915_gem_object_has_struct_page(obj))
 		return -ENODEV;
 
-	ret = i915_gem_object_lock_interruptible(obj);
-	if (ret)
-		return ret;
+	assert_object_held(obj);
 
 	ret = i915_gem_object_wait(obj,
 				   I915_WAIT_INTERRUPTIBLE,
 				   MAX_SCHEDULE_TIMEOUT);
 	if (ret)
-		goto err_unlock;
+		return ret;
 
 	ret = i915_gem_object_pin_pages(obj);
 	if (ret)
-		goto err_unlock;
+		return ret;
 
 	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
 	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
@@ -616,8 +638,6 @@ out:
 
 err_unpin:
 	i915_gem_object_unpin_pages(obj);
-err_unlock:
-	i915_gem_object_unlock(obj);
 	return ret;
 }
 
@@ -630,20 +650,18 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
 	if (!i915_gem_object_has_struct_page(obj))
 		return -ENODEV;
 
-	ret = i915_gem_object_lock_interruptible(obj);
-	if (ret)
-		return ret;
+	assert_object_held(obj);
 
 	ret = i915_gem_object_wait(obj,
 				   I915_WAIT_INTERRUPTIBLE |
 				   I915_WAIT_ALL,
 				   MAX_SCHEDULE_TIMEOUT);
 	if (ret)
-		goto err_unlock;
+		return ret;
 
 	ret = i915_gem_object_pin_pages(obj);
 	if (ret)
-		goto err_unlock;
+		return ret;
 
 	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
 	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
@@ -680,7 +698,5 @@ out:
 
 err_unpin:
 	i915_gem_object_unpin_pages(obj);
-err_unlock:
-	i915_gem_object_unlock(obj);
 	return ret;
 }
(File diff suppressed because it is too large.)
@@ -283,37 +283,46 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
 	struct intel_runtime_pm *rpm = &i915->runtime_pm;
 	struct i915_ggtt *ggtt = &i915->ggtt;
 	bool write = area->vm_flags & VM_WRITE;
+	struct i915_gem_ww_ctx ww;
 	intel_wakeref_t wakeref;
 	struct i915_vma *vma;
 	pgoff_t page_offset;
 	int srcu;
 	int ret;
 
-	/* Sanity check that we allow writing into this object */
-	if (i915_gem_object_is_readonly(obj) && write)
-		return VM_FAULT_SIGBUS;
-
 	/* We don't use vmf->pgoff since that has the fake offset */
 	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
 
 	trace_i915_gem_object_fault(obj, page_offset, true, write);
 
-	ret = i915_gem_object_pin_pages(obj);
-	if (ret)
-		goto err;
-
 	wakeref = intel_runtime_pm_get(rpm);
 
-	ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu);
+	i915_gem_ww_ctx_init(&ww, true);
+retry:
+	ret = i915_gem_object_lock(obj, &ww);
+	if (ret)
+		goto err_rpm;
+
+	/* Sanity check that we allow writing into this object */
+	if (i915_gem_object_is_readonly(obj) && write) {
+		ret = -EFAULT;
+		goto err_rpm;
+	}
+
+	ret = i915_gem_object_pin_pages(obj);
+	if (ret)
+		goto err_rpm;
+
+	ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu);
 	if (ret)
 		goto err_pages;
 
 	/* Now pin it into the GTT as needed */
-	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
-				       PIN_MAPPABLE |
-				       PIN_NONBLOCK /* NOWARN */ |
-				       PIN_NOEVICT);
-	if (IS_ERR(vma)) {
+	vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
+					  PIN_MAPPABLE |
+					  PIN_NONBLOCK /* NOWARN */ |
+					  PIN_NOEVICT);
+	if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
 		/* Use a partial view if it is bigger than available space */
 		struct i915_ggtt_view view =
 			compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
@@ -328,11 +337,11 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
		 * all hope that the hardware is able to track future writes.
		 */
 
-		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
-		if (IS_ERR(vma)) {
+		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
+		if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
 			flags = PIN_MAPPABLE;
 			view.type = I915_GGTT_VIEW_PARTIAL;
-			vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
+			vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
 		}
 
 		/* The entire mappable GGTT is pinned? Unexpected! */
@@ -389,10 +398,16 @@ err_unpin:
 	__i915_vma_unpin(vma);
 err_reset:
 	intel_gt_reset_unlock(ggtt->vm.gt, srcu);
-err_rpm:
-	intel_runtime_pm_put(rpm, wakeref);
 err_pages:
 	i915_gem_object_unpin_pages(obj);
-err:
+err_rpm:
+	if (ret == -EDEADLK) {
+		ret = i915_gem_ww_ctx_backoff(&ww);
+		if (!ret)
+			goto retry;
+	}
+	i915_gem_ww_ctx_fini(&ww);
+	intel_runtime_pm_put(rpm, wakeref);
 	return i915_error_to_vmf_fault(ret);
 }
@@ -110,9 +110,39 @@ i915_gem_object_put(struct drm_i915_gem_object *obj)
 
 #define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)
 
-static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
+static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
+					 struct i915_gem_ww_ctx *ww,
+					 bool intr)
 {
-	dma_resv_lock(obj->base.resv, NULL);
+	int ret;
+
+	if (intr)
+		ret = dma_resv_lock_interruptible(obj->base.resv, ww ? &ww->ctx : NULL);
+	else
+		ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL);
+
+	if (!ret && ww)
+		list_add_tail(&obj->obj_link, &ww->obj_list);
+	if (ret == -EALREADY)
+		ret = 0;
+
+	if (ret == -EDEADLK)
+		ww->contended = obj;
+
+	return ret;
+}
+
+static inline int i915_gem_object_lock(struct drm_i915_gem_object *obj,
+				       struct i915_gem_ww_ctx *ww)
+{
+	return __i915_gem_object_lock(obj, ww, ww && ww->intr);
+}
+
+static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj,
+						     struct i915_gem_ww_ctx *ww)
+{
+	WARN_ON(ww && !ww->intr);
+	return __i915_gem_object_lock(obj, ww, true);
 }
 
 static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
@@ -120,12 +150,6 @@ static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
 	return dma_resv_trylock(obj->base.resv);
 }
 
-static inline int
-i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
-{
-	return dma_resv_lock_interruptible(obj->base.resv, NULL);
-}
-
 static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
 {
 	dma_resv_unlock(obj->base.resv);
@@ -412,7 +436,6 @@ static inline void
 i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
 {
 	i915_gem_object_unpin_pages(obj);
-	i915_gem_object_unlock(obj);
 }
 
 static inline struct intel_engine_cs *
@@ -435,6 +458,7 @@ i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
 void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
 					 unsigned int cache_level);
 void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
+void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj);
 
 int __must_check
 i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
@@ -14,6 +14,7 @@
 
 struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
 					 struct i915_vma *vma,
+					 struct i915_gem_ww_ctx *ww,
 					 u32 value)
 {
 	struct drm_i915_private *i915 = ce->vm->i915;
@@ -39,10 +40,24 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
 		goto out_pm;
 	}
 
+	err = i915_gem_object_lock(pool->obj, ww);
+	if (err)
+		goto out_put;
+
+	batch = i915_vma_instance(pool->obj, ce->vm, NULL);
+	if (IS_ERR(batch)) {
+		err = PTR_ERR(batch);
+		goto out_put;
+	}
+
+	err = i915_vma_pin_ww(batch, ww, 0, 0, PIN_USER);
+	if (unlikely(err))
+		goto out_put;
+
 	cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC);
 	if (IS_ERR(cmd)) {
 		err = PTR_ERR(cmd);
-		goto out_put;
+		goto out_unpin;
 	}
 
 	rem = vma->size;
@@ -84,19 +99,11 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
 
 	intel_gt_chipset_flush(ce->vm->gt);
 
-	batch = i915_vma_instance(pool->obj, ce->vm, NULL);
-	if (IS_ERR(batch)) {
-		err = PTR_ERR(batch);
-		goto out_put;
-	}
-
-	err = i915_vma_pin(batch, 0, 0, PIN_USER);
-	if (unlikely(err))
-		goto out_put;
-
 	batch->private = pool;
 	return batch;
 
+out_unpin:
+	i915_vma_unpin(batch);
 out_put:
 	intel_gt_buffer_pool_put(pool);
 out_pm:
@@ -108,11 +115,9 @@ int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq)
 {
 	int err;
 
-	i915_vma_lock(vma);
 	err = i915_request_await_object(rq, vma->obj, false);
 	if (err == 0)
 		err = i915_vma_move_to_active(vma, rq, 0);
-	i915_vma_unlock(vma);
 	if (unlikely(err))
 		return err;
 
@@ -141,6 +146,7 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
 			     struct intel_context *ce,
 			     u32 value)
 {
+	struct i915_gem_ww_ctx ww;
 	struct i915_request *rq;
 	struct i915_vma *batch;
 	struct i915_vma *vma;
@@ -150,17 +156,28 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);
 
-	err = i915_vma_pin(vma, 0, 0, PIN_USER);
-	if (unlikely(err))
-		return err;
+	i915_gem_ww_ctx_init(&ww, true);
+	intel_engine_pm_get(ce->engine);
+retry:
+	err = i915_gem_object_lock(obj, &ww);
+	if (err)
+		goto out;
 
-	batch = intel_emit_vma_fill_blt(ce, vma, value);
+	err = intel_context_pin_ww(ce, &ww);
+	if (err)
+		goto out;
+
+	err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
+	if (err)
+		goto out_ctx;
+
+	batch = intel_emit_vma_fill_blt(ce, vma, &ww, value);
 	if (IS_ERR(batch)) {
 		err = PTR_ERR(batch);
-		goto out_unpin;
+		goto out_vma;
 	}
 
-	rq = intel_context_create_request(ce);
+	rq = i915_request_create(ce);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		goto out_batch;
@@ -170,11 +187,9 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
 	if (unlikely(err))
 		goto out_request;
 
-	i915_vma_lock(vma);
 	err = move_obj_to_gpu(vma->obj, rq, true);
 	if (err == 0)
 		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-	i915_vma_unlock(vma);
 	if (unlikely(err))
 		goto out_request;
 
@@ -193,8 +208,18 @@ out_request:
 	i915_request_add(rq);
 out_batch:
 	intel_emit_vma_release(ce, batch);
-out_unpin:
+out_vma:
 	i915_vma_unpin(vma);
+out_ctx:
+	intel_context_unpin(ce);
+out:
+	if (err == -EDEADLK) {
+		err = i915_gem_ww_ctx_backoff(&ww);
+		if (!err)
+			goto retry;
+	}
+	i915_gem_ww_ctx_fini(&ww);
+	intel_engine_pm_put(ce->engine);
 	return err;
 }
 
@@ -210,6 +235,7 @@ static bool wa_1209644611_applies(struct drm_i915_private *i915, u32 size)
 }
 
 struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
+					 struct i915_gem_ww_ctx *ww,
 					 struct i915_vma *src,
 					 struct i915_vma *dst)
 {
@@ -236,10 +262,24 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
 		goto out_pm;
 	}
 
+	err = i915_gem_object_lock(pool->obj, ww);
+	if (err)
+		goto out_put;
+
+	batch = i915_vma_instance(pool->obj, ce->vm, NULL);
+	if (IS_ERR(batch)) {
+		err = PTR_ERR(batch);
+		goto out_put;
+	}
+
+	err = i915_vma_pin_ww(batch, ww, 0, 0, PIN_USER);
+	if (unlikely(err))
+		goto out_put;
+
 	cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC);
 	if (IS_ERR(cmd)) {
 		err = PTR_ERR(cmd);
-		goto out_put;
+		goto out_unpin;
 	}
 
 	rem = src->size;
@@ -296,20 +336,11 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
 	i915_gem_object_unpin_map(pool->obj);
 
 	intel_gt_chipset_flush(ce->vm->gt);
 
-	batch = i915_vma_instance(pool->obj, ce->vm, NULL);
-	if (IS_ERR(batch)) {
-		err = PTR_ERR(batch);
-		goto out_put;
-	}
-
-	err = i915_vma_pin(batch, 0, 0, PIN_USER);
-	if (unlikely(err))
-		goto out_put;
-
 	batch->private = pool;
 	return batch;
 
+out_unpin:
+	i915_vma_unpin(batch);
 out_put:
 	intel_gt_buffer_pool_put(pool);
 out_pm:
@@ -321,10 +352,9 @@ int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
 			     struct drm_i915_gem_object *dst,
 			     struct intel_context *ce)
 {
-	struct drm_gem_object *objs[] = { &src->base, &dst->base };
 	struct i915_address_space *vm = ce->vm;
 	struct i915_vma *vma[2], *batch;
-	struct ww_acquire_ctx acquire;
+	struct i915_gem_ww_ctx ww;
 	struct i915_request *rq;
 	int err, i;
 
@@ -332,25 +362,36 @@ int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
 	if (IS_ERR(vma[0]))
 		return PTR_ERR(vma[0]);
 
-	err = i915_vma_pin(vma[0], 0, 0, PIN_USER);
-	if (unlikely(err))
-		return err;
-
 	vma[1] = i915_vma_instance(dst, vm, NULL);
 	if (IS_ERR(vma[1]))
-		goto out_unpin_src;
+		return PTR_ERR(vma);
 
-	err = i915_vma_pin(vma[1], 0, 0, PIN_USER);
+	i915_gem_ww_ctx_init(&ww, true);
+	intel_engine_pm_get(ce->engine);
+retry:
+	err = i915_gem_object_lock(src, &ww);
+	if (!err)
+		err = i915_gem_object_lock(dst, &ww);
+	if (!err)
+		err = intel_context_pin_ww(ce, &ww);
+	if (err)
+		goto out;
+
+	err = i915_vma_pin_ww(vma[0], &ww, 0, 0, PIN_USER);
+	if (err)
+		goto out_ctx;
+
+	err = i915_vma_pin_ww(vma[1], &ww, 0, 0, PIN_USER);
 	if (unlikely(err))
 		goto out_unpin_src;
 
-	batch = intel_emit_vma_copy_blt(ce, vma[0], vma[1]);
+	batch = intel_emit_vma_copy_blt(ce, &ww, vma[0], vma[1]);
 	if (IS_ERR(batch)) {
 		err = PTR_ERR(batch);
 		goto out_unpin_dst;
 	}
 
-	rq = intel_context_create_request(ce);
+	rq = i915_request_create(ce);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		goto out_batch;
@@ -360,14 +401,10 @@ int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
 	if (unlikely(err))
 		goto out_request;
 
-	err = drm_gem_lock_reservations(objs, ARRAY_SIZE(objs), &acquire);
-	if (unlikely(err))
-		goto out_request;
-
 	for (i = 0; i < ARRAY_SIZE(vma); i++) {
 		err = move_obj_to_gpu(vma[i]->obj, rq, i);
 		if (unlikely(err))
-			goto out_unlock;
+			goto out_request;
 	}
 
 	for (i = 0; i < ARRAY_SIZE(vma); i++) {
@@ -375,20 +412,19 @@ int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
 
 		err = i915_vma_move_to_active(vma[i], rq, flags);
 		if (unlikely(err))
-			goto out_unlock;
+			goto out_request;
 	}
 
 	if (rq->engine->emit_init_breadcrumb) {
 		err = rq->engine->emit_init_breadcrumb(rq);
 		if (unlikely(err))
-			goto out_unlock;
+			goto out_request;
 	}
 
 	err = rq->engine->emit_bb_start(rq,
 					batch->node.start, batch->node.size,
 					0);
-out_unlock:
-	drm_gem_unlock_reservations(objs, ARRAY_SIZE(objs), &acquire);
-
 out_request:
 	if (unlikely(err))
 		i915_request_set_error_once(rq, err);
@@ -400,6 +436,16 @@ out_unpin_dst:
 	i915_vma_unpin(vma[1]);
 out_unpin_src:
 	i915_vma_unpin(vma[0]);
+out_ctx:
+	intel_context_unpin(ce);
+out:
+	if (err == -EDEADLK) {
+		err = i915_gem_ww_ctx_backoff(&ww);
+		if (!err)
+			goto retry;
+	}
+	i915_gem_ww_ctx_fini(&ww);
+	intel_engine_pm_put(ce->engine);
 	return err;
 }
@@ -13,12 +13,15 @@
 #include "i915_vma.h"
 
 struct drm_i915_gem_object;
+struct i915_gem_ww_ctx;
 
 struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
 					 struct i915_vma *vma,
+					 struct i915_gem_ww_ctx *ww,
 					 u32 value);
 
 struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
+					 struct i915_gem_ww_ctx *ww,
 					 struct i915_vma *src,
 					 struct i915_vma *dst);
 
@@ -123,6 +123,15 @@ struct drm_i915_gem_object {
 	struct list_head lut_list;
 	spinlock_t lut_lock; /* guards lut_list */
 
+	/**
+	 * @obj_link: Link into @i915_gem_ww_ctx.obj_list
+	 *
+	 * When we lock this object through i915_gem_object_lock() with a
+	 * context, we add it to the list to ensure we can unlock everything
+	 * when i915_gem_ww_ctx_backoff() or i915_gem_ww_ctx_fini() are called.
+	 */
+	struct list_head obj_link;
+
 	/** Stolen memory for this object, instead of being backed by shmem. */
 	struct drm_mm_node *stolen;
 	union {
@@ -282,6 +291,7 @@ struct drm_i915_gem_object {
 		} userptr;
 
 		unsigned long scratch;
+		u64 encode;
 
 		void *gvt_info;
 	};
@@ -84,7 +84,7 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
 
 		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 
-		i915_gem_object_lock(obj);
+		i915_gem_object_lock(obj, NULL);
 		drm_WARN_ON(&i915->drm,
 			    i915_gem_object_set_to_gtt_domain(obj, false));
 		i915_gem_object_unlock(obj);
@@ -9,6 +9,7 @@
 #include <drm/drm_file.h>
 
 #include "i915_drv.h"
+#include "i915_gem_context.h"
 #include "i915_gem_ioctls.h"
 #include "i915_gem_object.h"
 
@@ -35,9 +36,10 @@ int
 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file)
 {
-	const unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
 	struct drm_i915_file_private *file_priv = file->driver_priv;
-	struct i915_request *request, *target = NULL;
+	unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
+	struct i915_gem_context *ctx;
+	unsigned long idx;
 	long ret;
 
 	/* ABI: return -EIO if already wedged */
@@ -45,27 +47,54 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		return ret;
 
-	spin_lock(&file_priv->mm.lock);
-	list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
-		if (time_after_eq(request->emitted_jiffies, recent_enough))
-			break;
+	rcu_read_lock();
+	xa_for_each(&file_priv->context_xa, idx, ctx) {
+		struct i915_gem_engines_iter it;
+		struct intel_context *ce;
 
-		if (target && xchg(&target->file_priv, NULL))
-			list_del(&target->client_link);
+		if (!kref_get_unless_zero(&ctx->ref))
+			continue;
+		rcu_read_unlock();
 
-		target = request;
+		for_each_gem_engine(ce,
+				    i915_gem_context_lock_engines(ctx),
+				    it) {
+			struct i915_request *rq, *target = NULL;
+
+			if (!ce->timeline)
+				continue;
+
+			mutex_lock(&ce->timeline->mutex);
+			list_for_each_entry_reverse(rq,
+						    &ce->timeline->requests,
+						    link) {
+				if (i915_request_completed(rq))
+					break;
+
+				if (time_after(rq->emitted_jiffies,
+					       recent_enough))
+					continue;
+
+				target = i915_request_get(rq);
+				break;
+			}
+			mutex_unlock(&ce->timeline->mutex);
+			if (!target)
+				continue;
+
+			ret = i915_request_wait(target,
+						I915_WAIT_INTERRUPTIBLE,
+						MAX_SCHEDULE_TIMEOUT);
+			i915_request_put(target);
+			if (ret < 0)
+				break;
+		}
+		i915_gem_context_unlock_engines(ctx);
+		i915_gem_context_put(ctx);
+
+		rcu_read_lock();
 	}
-	if (target)
-		i915_request_get(target);
-	spin_unlock(&file_priv->mm.lock);
+	rcu_read_unlock();
 
-	if (!target)
-		return 0;
-
-	ret = i915_request_wait(target,
-				I915_WAIT_INTERRUPTIBLE,
-				MAX_SCHEDULE_TIMEOUT);
-	i915_request_put(target);
-
 	return ret < 0 ? ret : 0;
 }
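Stripped of the RCU and reference juggling, the rewritten throttle ioctl above reduces to: for each context in the file's xarray, for each engine, scan that engine's timeline newest-to-oldest for the first request that is both unfinished and older than the throttle window, then wait on it. Roughly (illustrative only; the real code above must drop rcu_read_lock and take a kref before sleeping):

	/*
	 * Condensation of the walk in i915_gem_throttle_ioctl() above;
	 * locking/ref details are deliberately simplified versus the hunk.
	 */
	static long throttle_walk(struct drm_i915_file_private *file_priv,
				  unsigned long recent_enough)
	{
		struct i915_gem_context *ctx;
		unsigned long idx;
		long ret = 0;

		xa_for_each(&file_priv->context_xa, idx, ctx) {
			struct i915_gem_engines_iter it;
			struct intel_context *ce;

			for_each_gem_engine(ce,
					    i915_gem_context_lock_engines(ctx),
					    it) {
				struct i915_request *rq, *target = NULL;

				if (!ce->timeline)
					continue;

				mutex_lock(&ce->timeline->mutex);
				list_for_each_entry_reverse(rq,
							    &ce->timeline->requests,
							    link) {
					if (i915_request_completed(rq))
						break; /* older ones retired */
					if (time_after(rq->emitted_jiffies,
						       recent_enough))
						continue; /* still too young */
					target = i915_request_get(rq);
					break;
				}
				mutex_unlock(&ce->timeline->mutex);
				if (!target)
					continue;

				/* Throttle: sleep until it retires. */
				ret = i915_request_wait(target,
							I915_WAIT_INTERRUPTIBLE,
							MAX_SCHEDULE_TIMEOUT);
				i915_request_put(target);
				if (ret < 0)
					break;
			}
			i915_gem_context_unlock_engines(ctx);
			if (ret < 0)
				break;
		}
		return ret < 0 ? ret : 0;
	}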
@@ -249,7 +249,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
	 * whilst executing a fenced command for an untiled object.
	 */
 
-	i915_gem_object_lock(obj);
+	i915_gem_object_lock(obj, NULL);
 	if (i915_gem_object_is_framebuffer(obj)) {
 		i915_gem_object_unlock(obj);
 		return -EBUSY;
@@ -393,7 +393,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
	 */
 
 	for (i = 1; i < BIT(ARRAY_SIZE(page_sizes)); i++) {
-		unsigned int combination = 0;
+		unsigned int combination = SZ_4K; /* Required for ppGTT */
 
 		for (j = 0; j < ARRAY_SIZE(page_sizes); j++) {
 			if (i & BIT(j))
@@ -947,7 +947,7 @@ static int gpu_write(struct intel_context *ce,
 {
 	int err;
 
-	i915_gem_object_lock(vma->obj);
+	i915_gem_object_lock(vma->obj, NULL);
 	err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
 	i915_gem_object_unlock(vma->obj);
 	if (err)
@@ -964,9 +964,10 @@ __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
 	unsigned long n;
 	int err;
 
+	i915_gem_object_lock(obj, NULL);
 	err = i915_gem_object_prepare_read(obj, &needs_flush);
 	if (err)
-		return err;
+		goto err_unlock;
 
 	for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
 		u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n));
@@ -986,6 +987,8 @@ __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
 	}
 
 	i915_gem_object_finish_access(obj);
+err_unlock:
+	i915_gem_object_unlock(obj);
 
 	return err;
 }
@@ -75,7 +75,7 @@ static int __igt_client_fill(struct intel_engine_cs *engine)
 	if (err)
 		goto err_unpin;
 
-	i915_gem_object_lock(obj);
+	i915_gem_object_lock(obj, NULL);
 	err = i915_gem_object_set_to_cpu_domain(obj, false);
 	i915_gem_object_unlock(obj);
 	if (err)
@@ -27,9 +27,10 @@ static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
 	u32 *cpu;
 	int err;
 
+	i915_gem_object_lock(ctx->obj, NULL);
 	err = i915_gem_object_prepare_write(ctx->obj, &needs_clflush);
 	if (err)
-		return err;
+		goto out;
 
 	page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
 	map = kmap_atomic(page);
@@ -46,7 +47,9 @@ static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
 	kunmap_atomic(map);
 	i915_gem_object_finish_access(ctx->obj);
 
-	return 0;
+out:
+	i915_gem_object_unlock(ctx->obj);
+	return err;
 }
 
 static int cpu_get(struct context *ctx, unsigned long offset, u32 *v)
@@ -57,9 +60,10 @@ static int cpu_get(struct context *ctx, unsigned long offset, u32 *v)
 	u32 *cpu;
 	int err;
 
+	i915_gem_object_lock(ctx->obj, NULL);
 	err = i915_gem_object_prepare_read(ctx->obj, &needs_clflush);
 	if (err)
-		return err;
+		goto out;
 
 	page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
 	map = kmap_atomic(page);
@@ -73,7 +77,9 @@ static int cpu_get(struct context *ctx, unsigned long offset, u32 *v)
 	kunmap_atomic(map);
 	i915_gem_object_finish_access(ctx->obj);
 
-	return 0;
+out:
+	i915_gem_object_unlock(ctx->obj);
+	return err;
 }
 
 static int gtt_set(struct context *ctx, unsigned long offset, u32 v)
@@ -82,7 +88,7 @@ static int gtt_set(struct context *ctx, unsigned long offset, u32 v)
 	u32 __iomem *map;
 	int err = 0;
 
-	i915_gem_object_lock(ctx->obj);
+	i915_gem_object_lock(ctx->obj, NULL);
 	err = i915_gem_object_set_to_gtt_domain(ctx->obj, true);
 	i915_gem_object_unlock(ctx->obj);
 	if (err)
@@ -115,7 +121,7 @@ static int gtt_get(struct context *ctx, unsigned long offset, u32 *v)
 	u32 __iomem *map;
 	int err = 0;
 
-	i915_gem_object_lock(ctx->obj);
+	i915_gem_object_lock(ctx->obj, NULL);
 	err = i915_gem_object_set_to_gtt_domain(ctx->obj, false);
 	i915_gem_object_unlock(ctx->obj);
 	if (err)
@@ -147,7 +153,7 @@ static int wc_set(struct context *ctx, unsigned long offset, u32 v)
 	u32 *map;
 	int err;
 
-	i915_gem_object_lock(ctx->obj);
+	i915_gem_object_lock(ctx->obj, NULL);
 	err = i915_gem_object_set_to_wc_domain(ctx->obj, true);
 	i915_gem_object_unlock(ctx->obj);
 	if (err)
@@ -170,7 +176,7 @@ static int wc_get(struct context *ctx, unsigned long offset, u32 *v)
 	u32 *map;
 	int err;
 
-	i915_gem_object_lock(ctx->obj);
+	i915_gem_object_lock(ctx->obj, NULL);
 	err = i915_gem_object_set_to_wc_domain(ctx->obj, false);
 	i915_gem_object_unlock(ctx->obj);
 	if (err)
@@ -193,27 +199,27 @@ static int gpu_set(struct context *ctx, unsigned long offset, u32 v)
 	u32 *cs;
 	int err;
 
-	i915_gem_object_lock(ctx->obj);
+	i915_gem_object_lock(ctx->obj, NULL);
 	err = i915_gem_object_set_to_gtt_domain(ctx->obj, true);
-	i915_gem_object_unlock(ctx->obj);
 	if (err)
-		return err;
+		goto out_unlock;
 
 	vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, 0);
-	if (IS_ERR(vma))
-		return PTR_ERR(vma);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto out_unlock;
+	}
 
 	rq = intel_engine_create_kernel_request(ctx->engine);
 	if (IS_ERR(rq)) {
-		i915_vma_unpin(vma);
-		return PTR_ERR(rq);
+		err = PTR_ERR(rq);
+		goto out_unpin;
 	}
 
 	cs = intel_ring_begin(rq, 4);
 	if (IS_ERR(cs)) {
-		i915_request_add(rq);
-		i915_vma_unpin(vma);
-		return PTR_ERR(cs);
+		err = PTR_ERR(cs);
+		goto out_rq;
 	}
 
 	if (INTEL_GEN(ctx->engine->i915) >= 8) {
@@ -234,14 +240,16 @@ static int gpu_set(struct context *ctx, unsigned long offset, u32 v)
 	}
 	intel_ring_advance(rq, cs);
 
-	i915_vma_lock(vma);
 	err = i915_request_await_object(rq, vma->obj, true);
 	if (err == 0)
 		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-	i915_vma_unlock(vma);
-	i915_vma_unpin(vma);
 
+out_rq:
 	i915_request_add(rq);
+out_unpin:
+	i915_vma_unpin(vma);
+out_unlock:
+	i915_gem_object_unlock(ctx->obj);
 
 	return err;
 }
@@ -461,9 +461,10 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
 	unsigned int n, m, need_flush;
 	int err;
 
+	i915_gem_object_lock(obj, NULL);
 	err = i915_gem_object_prepare_write(obj, &need_flush);
 	if (err)
-		return err;
+		goto out;
 
 	for (n = 0; n < real_page_count(obj); n++) {
 		u32 *map;
@@ -479,7 +480,9 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
 	i915_gem_object_finish_access(obj);
 	obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
 	obj->write_domain = 0;
-	return 0;
+out:
+	i915_gem_object_unlock(obj);
+	return err;
 }
 
 static noinline int cpu_check(struct drm_i915_gem_object *obj,
@@ -488,9 +491,10 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj,
 	unsigned int n, m, needs_flush;
 	int err;
 
+	i915_gem_object_lock(obj, NULL);
 	err = i915_gem_object_prepare_read(obj, &needs_flush);
 	if (err)
-		return err;
+		goto out_unlock;
 
 	for (n = 0; n < real_page_count(obj); n++) {
 		u32 *map;
@@ -527,6 +531,8 @@ out_unmap:
 	}
 
 	i915_gem_object_finish_access(obj);
+out_unlock:
+	i915_gem_object_unlock(obj);
 	return err;
 }
 
@@ -887,24 +893,15 @@ out_file:
 	return err;
 }
 
-static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
+static int rpcs_query_batch(struct drm_i915_gem_object *rpcs, struct i915_vma *vma)
 {
-	struct drm_i915_gem_object *obj;
 	u32 *cmd;
-	int err;
 
-	if (INTEL_GEN(vma->vm->i915) < 8)
-		return ERR_PTR(-EINVAL);
+	GEM_BUG_ON(INTEL_GEN(vma->vm->i915) < 8);
 
-	obj = i915_gem_object_create_internal(vma->vm->i915, PAGE_SIZE);
-	if (IS_ERR(obj))
-		return ERR_CAST(obj);
-
-	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
-	if (IS_ERR(cmd)) {
-		err = PTR_ERR(cmd);
-		goto err;
-	}
+	cmd = i915_gem_object_pin_map(rpcs, I915_MAP_WB);
+	if (IS_ERR(cmd))
+		return PTR_ERR(cmd);
 
 	*cmd++ = MI_STORE_REGISTER_MEM_GEN8;
 	*cmd++ = i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE);
@@ -912,26 +909,12 @@ out_file:
 	*cmd++ = upper_32_bits(vma->node.start);
 	*cmd = MI_BATCH_BUFFER_END;
 
-	__i915_gem_object_flush_map(obj, 0, 64);
-	i915_gem_object_unpin_map(obj);
+	__i915_gem_object_flush_map(rpcs, 0, 64);
+	i915_gem_object_unpin_map(rpcs);
 
 	intel_gt_chipset_flush(vma->vm->gt);
 
-	vma = i915_vma_instance(obj, vma->vm, NULL);
-	if (IS_ERR(vma)) {
-		err = PTR_ERR(vma);
-		goto err;
-	}
-
-	err = i915_vma_pin(vma, 0, 0, PIN_USER);
-	if (err)
-		goto err;
-
-	return vma;
-
-err:
-	i915_gem_object_put(obj);
-	return ERR_PTR(err);
+	return 0;
 }
 
 static int
@@ -939,52 +922,68 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
 		struct intel_context *ce,
 		struct i915_request **rq_out)
 {
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct i915_request *rq;
+	struct i915_gem_ww_ctx ww;
 	struct i915_vma *batch;
 	struct i915_vma *vma;
+	struct drm_i915_gem_object *rpcs;
 	int err;
 
 	GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
 
+	if (INTEL_GEN(i915) < 8)
+		return -EINVAL;
+
 	vma = i915_vma_instance(obj, ce->vm, NULL);
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);
 
-	i915_gem_object_lock(obj);
-	err = i915_gem_object_set_to_gtt_domain(obj, false);
-	i915_gem_object_unlock(obj);
-	if (err)
-		return err;
+	rpcs = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	if (IS_ERR(rpcs))
+		return PTR_ERR(rpcs);
 
-	err = i915_vma_pin(vma, 0, 0, PIN_USER);
-	if (err)
-		return err;
-
-	batch = rpcs_query_batch(vma);
+	batch = i915_vma_instance(rpcs, ce->vm, NULL);
 	if (IS_ERR(batch)) {
 		err = PTR_ERR(batch);
-		goto err_vma;
+		goto err_put;
 	}
 
+	i915_gem_ww_ctx_init(&ww, false);
+retry:
+	err = i915_gem_object_lock(obj, &ww);
+	if (!err)
+		err = i915_gem_object_lock(rpcs, &ww);
+	if (!err)
+		err = i915_gem_object_set_to_gtt_domain(obj, false);
+	if (!err)
+		err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
+	if (err)
+		goto err_put;
+
+	err = i915_vma_pin_ww(batch, &ww, 0, 0, PIN_USER);
+	if (err)
+		goto err_vma;
+
+	err = rpcs_query_batch(rpcs, vma);
+	if (err)
+		goto err_batch;
+
 	rq = i915_request_create(ce);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		goto err_batch;
 	}
 
-	i915_vma_lock(batch);
 	err = i915_request_await_object(rq, batch->obj, false);
 	if (err == 0)
 		err = i915_vma_move_to_active(batch, rq, 0);
-	i915_vma_unlock(batch);
 	if (err)
 		goto skip_request;
 
-	i915_vma_lock(vma);
 	err = i915_request_await_object(rq, vma->obj, true);
 	if (err == 0)
 		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-	i915_vma_unlock(vma);
 	if (err)
 		goto skip_request;
 
@@ -1000,23 +999,24 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
 	if (err)
 		goto skip_request;
 
-	i915_vma_unpin_and_release(&batch, 0);
-	i915_vma_unpin(vma);
-
 	*rq_out = i915_request_get(rq);
 
-	i915_request_add(rq);
-
-	return 0;
-
 skip_request:
-	i915_request_set_error_once(rq, err);
+	if (err)
+		i915_request_set_error_once(rq, err);
 	i915_request_add(rq);
 err_batch:
-	i915_vma_unpin_and_release(&batch, 0);
+	i915_vma_unpin(batch);
 err_vma:
 	i915_vma_unpin(vma);
-
+err_put:
+	if (err == -EDEADLK) {
+		err = i915_gem_ww_ctx_backoff(&ww);
+		if (!err)
+			goto retry;
+	}
+	i915_gem_ww_ctx_fini(&ww);
+	i915_gem_object_put(rpcs);
 	return err;
 }
 
@@ -1709,7 +1709,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
 
 	i915_request_add(rq);
 
-	i915_gem_object_lock(obj);
+	i915_gem_object_lock(obj, NULL);
 	err = i915_gem_object_set_to_cpu_domain(obj, false);
 	i915_gem_object_unlock(obj);
 	if (err)
@@ -1748,7 +1748,7 @@ static int check_scratch_page(struct i915_gem_context *ctx, u32 *out)
 	if (!vm)
 		return -ENODEV;
 
-	page = vm->scratch[0].base.page;
+	page = __px_page(vm->scratch[0]);
 	if (!page) {
 		pr_err("No scratch page!\n");
 		return -EINVAL;
@@ -1914,8 +1914,8 @@ static int mock_context_barrier(void *arg)
 		return -ENOMEM;
 
 	counter = 0;
-	err = context_barrier_task(ctx, 0,
-				   NULL, NULL, mock_barrier_task, &counter);
+	err = context_barrier_task(ctx, 0, NULL, NULL, NULL,
				   mock_barrier_task, &counter);
 	if (err) {
 		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
 		goto out;
@@ -1927,11 +1927,8 @@ static int mock_context_barrier(void *arg)
 	}
 
 	counter = 0;
-	err = context_barrier_task(ctx, ALL_ENGINES,
-				   skip_unused_engines,
-				   NULL,
-				   mock_barrier_task,
-				   &counter);
+	err = context_barrier_task(ctx, ALL_ENGINES, skip_unused_engines,
+				   NULL, NULL, mock_barrier_task, &counter);
 	if (err) {
 		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
 		goto out;
@@ -1951,8 +1948,8 @@ static int mock_context_barrier(void *arg)
 
 	counter = 0;
 	context_barrier_inject_fault = BIT(RCS0);
-	err = context_barrier_task(ctx, ALL_ENGINES,
-				   NULL, NULL, mock_barrier_task, &counter);
+	err = context_barrier_task(ctx, ALL_ENGINES, NULL, NULL, NULL,
+				   mock_barrier_task, &counter);
 	context_barrier_inject_fault = 0;
 	if (err == -ENXIO)
 		err = 0;
@@ -1966,11 +1963,8 @@ static int mock_context_barrier(void *arg)
 		goto out;
 
 	counter = 0;
-	err = context_barrier_task(ctx, ALL_ENGINES,
-				   skip_unused_engines,
-				   NULL,
-				   mock_barrier_task,
-				   &counter);
+	err = context_barrier_task(ctx, ALL_ENGINES, skip_unused_engines,
+				   NULL, NULL, mock_barrier_task, &counter);
 	if (err) {
 		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
 		goto out;
@@ -32,46 +32,39 @@ static int __igt_gpu_reloc(struct i915_execbuffer *eb,
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);
 
-	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
+	err = i915_gem_object_lock(obj, &eb->ww);
 	if (err)
 		return err;
 
+	err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, PIN_USER | PIN_HIGH);
+	if (err)
+		return err;
+
 	/* 8-Byte aligned */
-	if (!__reloc_entry_gpu(eb, vma,
-			       offsets[0] * sizeof(u32),
-			       0)) {
-		err = -EIO;
-		goto unpin_vma;
-	}
+	err = __reloc_entry_gpu(eb, vma, offsets[0] * sizeof(u32), 0);
+	if (err <= 0)
+		goto reloc_err;
 
 	/* !8-Byte aligned */
-	if (!__reloc_entry_gpu(eb, vma,
-			       offsets[1] * sizeof(u32),
-			       1)) {
-		err = -EIO;
-		goto unpin_vma;
-	}
+	err = __reloc_entry_gpu(eb, vma, offsets[1] * sizeof(u32), 1);
+	if (err <= 0)
+		goto reloc_err;
 
 	/* Skip to the end of the cmd page */
-	i = PAGE_SIZE / sizeof(u32) - RELOC_TAIL - 1;
+	i = PAGE_SIZE / sizeof(u32) - 1;
 	i -= eb->reloc_cache.rq_size;
 	memset32(eb->reloc_cache.rq_cmd + eb->reloc_cache.rq_size,
 		 MI_NOOP, i);
 	eb->reloc_cache.rq_size += i;
 
-	/* Force batch chaining */
-	if (!__reloc_entry_gpu(eb, vma,
-			       offsets[2] * sizeof(u32),
-			       2)) {
-		err = -EIO;
-		goto unpin_vma;
-	}
+	/* Force next batch */
+	err = __reloc_entry_gpu(eb, vma, offsets[2] * sizeof(u32), 2);
+	if (err <= 0)
+		goto reloc_err;
 
 	GEM_BUG_ON(!eb->reloc_cache.rq);
 	rq = i915_request_get(eb->reloc_cache.rq);
-	err = reloc_gpu_flush(&eb->reloc_cache);
-	if (err)
-		goto put_rq;
+	reloc_gpu_flush(eb, &eb->reloc_cache);
 	GEM_BUG_ON(eb->reloc_cache.rq);
 
 	err = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE, HZ / 2);
@@ -103,6 +96,11 @@ put_rq:
 unpin_vma:
 	i915_vma_unpin(vma);
 	return err;
+
+reloc_err:
+	if (!err)
+		err = -EIO;
+	goto unpin_vma;
 }
 
 static int igt_gpu_reloc(void *arg)
@@ -124,6 +122,8 @@ static int igt_gpu_reloc(void *arg)
 		goto err_scratch;
 	}
 
+	intel_gt_pm_get(&eb.i915->gt);
+
 	for_each_uabi_engine(eb.engine, eb.i915) {
 		reloc_cache_init(&eb.reloc_cache, eb.i915);
 		memset(map, POISON_INUSE, 4096);
@@ -134,15 +134,29 @@ static int igt_gpu_reloc(void *arg)
 			err = PTR_ERR(eb.context);
 			goto err_pm;
 		}
+		eb.reloc_pool = NULL;
+		eb.reloc_context = NULL;
 
-		err = intel_context_pin(eb.context);
-		if (err)
-			goto err_put;
+		i915_gem_ww_ctx_init(&eb.ww, false);
+retry:
+		err = intel_context_pin_ww(eb.context, &eb.ww);
+		if (!err) {
+			err = __igt_gpu_reloc(&eb, scratch);
 
-		err = __igt_gpu_reloc(&eb, scratch);
+			intel_context_unpin(eb.context);
+		}
+		if (err == -EDEADLK) {
+			err = i915_gem_ww_ctx_backoff(&eb.ww);
+			if (!err)
+				goto retry;
+		}
+		i915_gem_ww_ctx_fini(&eb.ww);
+
+		if (eb.reloc_pool)
+			intel_gt_buffer_pool_put(eb.reloc_pool);
+		if (eb.reloc_context)
+			intel_context_put(eb.reloc_context);
 
-		intel_context_unpin(eb.context);
-err_put:
 		intel_context_put(eb.context);
 err_pm:
 		intel_engine_pm_put(eb.engine);
@@ -153,6 +167,7 @@ err_pm:
 	if (igt_flush_test(eb.i915))
 		err = -EIO;
 
+	intel_gt_pm_put(&eb.i915->gt);
 err_scratch:
 	i915_gem_object_put(scratch);
 	return err;
@@ -103,7 +103,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
 	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
 	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
 
-	i915_gem_object_lock(obj);
+	i915_gem_object_lock(obj, NULL);
 	err = i915_gem_object_set_to_gtt_domain(obj, true);
 	i915_gem_object_unlock(obj);
 	if (err) {
@@ -188,7 +188,7 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
 	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
 	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
 
-	i915_gem_object_lock(obj);
+	i915_gem_object_lock(obj, NULL);
 	err = i915_gem_object_set_to_gtt_domain(obj, true);
 	i915_gem_object_unlock(obj);
 	if (err) {
@@ -528,31 +528,42 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
 	for_each_uabi_engine(engine, i915) {
 		struct i915_request *rq;
 		struct i915_vma *vma;
+		struct i915_gem_ww_ctx ww;
 		int err;
 
 		vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
 		if (IS_ERR(vma))
 			return PTR_ERR(vma);
 
-		err = i915_vma_pin(vma, 0, 0, PIN_USER);
+		i915_gem_ww_ctx_init(&ww, false);
+retry:
+		err = i915_gem_object_lock(obj, &ww);
+		if (!err)
+			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
 		if (err)
-			return err;
+			goto err;
 
 		rq = intel_engine_create_kernel_request(engine);
 		if (IS_ERR(rq)) {
-			i915_vma_unpin(vma);
-			return PTR_ERR(rq);
+			err = PTR_ERR(rq);
+			goto err_unpin;
 		}
 
-		i915_vma_lock(vma);
 		err = i915_request_await_object(rq, vma->obj, true);
 		if (err == 0)
 			err = i915_vma_move_to_active(vma, rq,
 						      EXEC_OBJECT_WRITE);
-		i915_vma_unlock(vma);
 
 		i915_request_add(rq);
+err_unpin:
 		i915_vma_unpin(vma);
+err:
+		if (err == -EDEADLK) {
+			err = i915_gem_ww_ctx_backoff(&ww);
+			if (!err)
+				goto retry;
+		}
+		i915_gem_ww_ctx_fini(&ww);
 		if (err)
 			return err;
 	}
@@ -1123,6 +1134,7 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915,
 	for_each_uabi_engine(engine, i915) {
 		struct i915_request *rq;
 		struct i915_vma *vma;
+		struct i915_gem_ww_ctx ww;
 
 		vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL);
 		if (IS_ERR(vma)) {
@@ -1130,9 +1142,13 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915,
 			goto out_unmap;
 		}
 
-		err = i915_vma_pin(vma, 0, 0, PIN_USER);
+		i915_gem_ww_ctx_init(&ww, false);
+retry:
+		err = i915_gem_object_lock(obj, &ww);
+		if (!err)
+			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
 		if (err)
-			goto out_unmap;
+			goto out_ww;
 
 		rq = i915_request_create(engine->kernel_context);
 		if (IS_ERR(rq)) {
@@ -1140,11 +1156,9 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915,
 			goto out_unpin;
 		}
 
-		i915_vma_lock(vma);
 		err = i915_request_await_object(rq, vma->obj, false);
 		if (err == 0)
 			err = i915_vma_move_to_active(vma, rq, 0);
-		i915_vma_unlock(vma);
 
 		err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
 		i915_request_get(rq);
@@ -1166,6 +1180,13 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915,
 
 out_unpin:
 		i915_vma_unpin(vma);
+out_ww:
+		if (err == -EDEADLK) {
+			err = i915_gem_ww_ctx_backoff(&ww);
+			if (!err)
+				goto retry;
+		}
+		i915_gem_ww_ctx_fini(&ww);
 		if (err)
 			goto out_unmap;
 	}
@@ -44,7 +44,7 @@ static int mock_phys_object(void *arg)
 	}
 
 	/* Make the object dirty so that put_pages must do copy back the data */
-	i915_gem_object_lock(obj);
+	i915_gem_object_lock(obj, NULL);
 	err = i915_gem_object_set_to_gtt_domain(obj, true);
 	i915_gem_object_unlock(obj);
 	if (err) {