drm/i915: Remove has-kernel-context
We can no longer assume execution ordering, and in particular we cannot assume which context will execute last. One side-effect of this is that we cannot determine if the kernel-context is resident on the GPU, so remove the routines that claimed to do so.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190308093657.8640-4-chris@chris-wilson.co.uk
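The core of the change is easiest to see in the eviction path: the old ggtt_is_idle() walked every engine and asked whether the kernel context was the last one it executed, while the new version trusts only the global active-request count. Below is a condensed before/after sketch assembled from the hunks that follow; the __old/__new suffixes are illustrative only, and the surrounding i915 types are assumed from the driver headers.

/* Sketch only: the __old/__new names do not exist in the tree. */
static bool ggtt_is_idle__old(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (i915->gt.active_requests)
		return false;			/* requests still in flight */

	for_each_engine(engine, i915, id) {
		if (!intel_engine_has_kernel_context(engine))
			return false;		/* another context may still be resident */
	}

	return true;
}

static bool ggtt_is_idle__new(struct drm_i915_private *i915)
{
	/* Execution ordering is no longer guaranteed, so only the count is trusted. */
	return !i915->gt.active_requests;
}

The per-engine check relied on intel_engine_has_kernel_context(), which the rest of the patch removes.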
@@ -108,19 +108,6 @@ i915_active_request_set_retire_fn(struct i915_active_request *active,
 	active->retire = fn ?: i915_active_retire_noop;
 }
 
-static inline struct i915_request *
-__i915_active_request_peek(const struct i915_active_request *active)
-{
-	/*
-	 * Inside the error capture (running with the driver in an unknown
-	 * state), we want to bend the rules slightly (a lot).
-	 *
-	 * Work is in progress to make it safer, in the meantime this keeps
-	 * the known issue from spamming the logs.
-	 */
-	return rcu_dereference_protected(active->request, 1);
-}
-
 /**
  * i915_active_request_raw - return the active request
  * @active - the active tracker
@@ -2828,23 +2828,6 @@ i915_gem_retire_work_handler(struct work_struct *work)
 				   round_jiffies_up_relative(HZ));
 }
 
-static void assert_kernel_context_is_current(struct drm_i915_private *i915)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
-	if (i915_reset_failed(i915))
-		return;
-
-	i915_retire_requests(i915);
-
-	for_each_engine(engine, i915, id) {
-		GEM_BUG_ON(__i915_active_request_peek(&engine->timeline.last_request));
-		GEM_BUG_ON(engine->last_retired_context !=
-			   to_intel_context(i915->kernel_context, engine));
-	}
-}
-
 static bool switch_to_kernel_context_sync(struct drm_i915_private *i915,
 					  unsigned long mask)
 {
@@ -2864,9 +2847,7 @@ static bool switch_to_kernel_context_sync(struct drm_i915_private *i915,
 		     I915_GEM_IDLE_TIMEOUT))
 		result = false;
 
-	if (result) {
-		assert_kernel_context_is_current(i915);
-	} else {
+	if (!result) {
 		/* Forcibly cancel outstanding work and leave the gpu quiet. */
 		dev_err(i915->drm.dev,
 			"Failed to idle engines, declaring wedged!\n");
@@ -38,25 +38,15 @@ I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
 
 static bool ggtt_is_idle(struct drm_i915_private *i915)
 {
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
-	if (i915->gt.active_requests)
-		return false;
-
-	for_each_engine(engine, i915, id) {
-		if (!intel_engine_has_kernel_context(engine))
-			return false;
-	}
-
-	return true;
+	return !i915->gt.active_requests;
 }
 
 static int ggtt_flush(struct drm_i915_private *i915)
 {
 	int err;
 
-	/* Not everything in the GGTT is tracked via vma (otherwise we
+	/*
+	 * Not everything in the GGTT is tracked via vma (otherwise we
 	 * could evict as required with minimal stalling) so we are forced
 	 * to idle the GPU and explicitly retire outstanding requests in
 	 * the hopes that we can then remove contexts and the like only
@@ -1090,37 +1090,6 @@ bool intel_engines_are_idle(struct drm_i915_private *i915)
 	return true;
 }
 
-/**
- * intel_engine_has_kernel_context:
- * @engine: the engine
- *
- * Returns true if the last context to be executed on this engine, or has been
- * executed if the engine is already idle, is the kernel context
- * (#i915.kernel_context).
- */
-bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
-{
-	const struct intel_context *kernel_context =
-		to_intel_context(engine->i915->kernel_context, engine);
-	struct i915_request *rq;
-
-	lockdep_assert_held(&engine->i915->drm.struct_mutex);
-
-	if (!engine->context_size)
-		return true;
-
-	/*
-	 * Check the last context seen by the engine. If active, it will be
-	 * the last request that remains in the timeline. When idle, it is
-	 * the last executed context as tracked by retirement.
-	 */
-	rq = __i915_active_request_peek(&engine->timeline.last_request);
-	if (rq)
-		return rq->hw_context == kernel_context;
-	else
-		return engine->last_retired_context == kernel_context;
-}
-
 void intel_engines_reset_default_submission(struct drm_i915_private *i915)
 {
 	struct intel_engine_cs *engine;
@@ -935,7 +935,6 @@ void intel_engines_sanitize(struct drm_i915_private *i915, bool force);
 bool intel_engine_is_idle(struct intel_engine_cs *engine);
 bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
 
-bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);
 void intel_engine_lost_context(struct intel_engine_cs *engine);
 
 void intel_engines_park(struct drm_i915_private *i915);