drm/i915: Keep contexts pinned until after the next kernel context switch

We need to keep the context image pinned in memory until after the GPU
has finished writing into it. Since it continues to write as we signal
the final breadcrumb, we need to keep it pinned until the request after
it is complete. Currently we rely on knowing the order in which requests
execute on each engine; to remove that presumption we need to identify a
request/context-switch that we know must occur after our completion. Any
request queued after the signal must imply a context switch, so for
simplicity we use a fresh request from the kernel context.

The sequence of operations for keeping the context pinned until it has
been saved is (a simplified sketch follows the list):

 - On context activation, we preallocate a node for each physical engine
   the context may operate on. This is to avoid allocations during
   unpinning, which may be from inside FS_RECLAIM context (aka the
   shrinker)

 - On context deactivation, i.e. on retirement of the last active request
   (which is before we know the context has been saved), we add the
   preallocated node onto a barrier list on each engine

 - On engine idling, we emit a switch to kernel context. When this
   switch completes, we know that all previous contexts must have been
   saved, and so on retiring this request we can finally unpin all the
   contexts that were marked as deactivated prior to the switch.
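
A minimal, self-contained sketch of steps 2 and 3 above (illustrative C
only, not the i915 code: barrier_node, engine_add_barrier() and
kernel_context_retired() are invented names; the driver itself keeps an
llist of struct i915_active_request nodes on engine->barrier_tasks, as
seen in the diff below):

        #include <stddef.h>

        /* One node per physical engine, preallocated when the context is pinned. */
        struct barrier_node {
                struct barrier_node *next;
                void (*retire)(struct barrier_node *node); /* drops the context pin */
        };

        struct engine {
                struct barrier_node *barriers; /* the real driver uses a lock-free llist */
        };

        /* Step 2: the last active request has retired; queue the preallocated node. */
        static void engine_add_barrier(struct engine *e, struct barrier_node *node)
        {
                node->next = e->barriers;
                e->barriers = node;
        }

        /*
         * Step 3: the switch to the kernel context has retired, so every context
         * submitted before it must have been saved; run the accumulated barriers.
         */
        static void kernel_context_retired(struct engine *e)
        {
                struct barrier_node *node = e->barriers;

                e->barriers = NULL;
                while (node) {
                        struct barrier_node *next = node->next;

                        node->retire(node); /* finally unpin the context image */
                        node = next;
                }
        }

The preallocation in step 1 is what lets the deactivation step run without
allocating, which matters because it can be reached from the shrinker
(FS_RECLAIM) path.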

We can enhance this in future by flushing all the idle contexts on a
regular heartbeat pulse (a switch to the kernel context), which will also
be used to check for hung engines.

v2: intel_context_active_acquire/_release

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190614164606.15633-1-chris@chris-wilson.co.uk
commit ce476c80b8 (parent 58a111f03a)
Author: Chris Wilson
Date:   2019-06-14 17:46:04 +01:00

20 changed files with 219 additions and 195 deletions


@@ -692,17 +692,6 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
         return 0;
 }
 
-void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
-{
-        struct intel_engine_cs *engine;
-        enum intel_engine_id id;
-
-        lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
-        for_each_engine(engine, dev_priv, id)
-                intel_engine_lost_context(engine);
-}
-
 void i915_gem_contexts_fini(struct drm_i915_private *i915)
 {
         lockdep_assert_held(&i915->drm.struct_mutex);
@@ -1203,10 +1192,6 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
         if (ret)
                 goto out_add;
 
-        ret = gen8_emit_rpcs_config(rq, ce, sseu);
-        if (ret)
-                goto out_add;
-
         /*
          * Guarantee context image and the timeline remains pinned until the
          * modifying request is retired by setting the ce activity tracker.
@@ -1214,9 +1199,12 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
          * But we only need to take one pin on the account of it. Or in other
          * words transfer the pinned ce object to tracked active request.
          */
-        if (!i915_active_request_isset(&ce->active_tracker))
-                __intel_context_pin(ce);
-        __i915_active_request_set(&ce->active_tracker, rq);
+        GEM_BUG_ON(i915_active_is_idle(&ce->active));
+        ret = i915_active_ref(&ce->active, rq->fence.context, rq);
+        if (ret)
+                goto out_add;
+
+        ret = gen8_emit_rpcs_config(rq, ce, sseu);
 
 out_add:
         i915_request_add(rq);


@@ -134,7 +134,6 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
 
 /* i915_gem_context.c */
 int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
-void i915_gem_contexts_lost(struct drm_i915_private *dev_priv);
 void i915_gem_contexts_fini(struct drm_i915_private *dev_priv);
 
 int i915_gem_context_open(struct drm_i915_private *i915,


@@ -10,6 +10,22 @@
 #include "i915_drv.h"
 #include "i915_globals.h"
 
+static void call_idle_barriers(struct intel_engine_cs *engine)
+{
+        struct llist_node *node, *next;
+
+        llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
+                struct i915_active_request *active =
+                        container_of((struct list_head *)node,
+                                     typeof(*active), link);
+
+                INIT_LIST_HEAD(&active->link);
+                RCU_INIT_POINTER(active->request, NULL);
+
+                active->retire(active, NULL);
+        }
+}
+
 static void i915_gem_park(struct drm_i915_private *i915)
 {
         struct intel_engine_cs *engine;
@@ -17,8 +33,10 @@ static void i915_gem_park(struct drm_i915_private *i915)
 
         lockdep_assert_held(&i915->drm.struct_mutex);
 
-        for_each_engine(engine, i915, id)
+        for_each_engine(engine, i915, id) {
+                call_idle_barriers(engine); /* cleanup after wedging */
                 i915_gem_batch_pool_fini(&engine->batch_pool);
+        }
 
         i915_timelines_park(i915);
         i915_vma_parked(i915);


@@ -160,18 +160,13 @@ i915_gem_shrink(struct drm_i915_private *i915,
                 return 0;
 
         /*
-         * When shrinking the active list, also consider active contexts.
-         * Active contexts are pinned until they are retired, and so can
-         * not be simply unbound to retire and unpin their pages. To shrink
-         * the contexts, we must wait until the gpu is idle.
-         *
-         * We don't care about errors here; if we cannot wait upon the GPU,
-         * we will free as much as we can and hope to get a second chance.
+         * When shrinking the active list, we should also consider active
+         * contexts. Active contexts are pinned until they are retired, and
+         * so can not be simply unbound to retire and unpin their pages. To
+         * shrink the contexts, we must wait until the gpu is idle and
+         * completed its switch to the kernel context. In short, we do
+         * not have a good mechanism for idling a specific context.
          */
-        if (shrink & I915_SHRINK_ACTIVE)
-                i915_gem_wait_for_idle(i915,
-                                       I915_WAIT_LOCKED,
-                                       MAX_SCHEDULE_TIMEOUT);
 
         trace_i915_gem_shrink(i915, target, shrink);
         i915_retire_requests(i915);