drm/i915: Reduce presumption of request ordering for barriers

Currently we assume that we know the order in which requests run and so
can determine if we need to reissue a switch-to-kernel-context prior to
idling. That assumption does not hold for the future, so instead of
tracking which barriers have been used, simply track whether each engine
has ever switched away from the kernel context, and before idling ensure
that every engine used since the last idle is synchronously switched back
to the kernel context, both for safety and to allow shrinking memory
while idle.

v2: Use intel_engine_mask_t and ALL_ENGINES

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190308093657.8640-3-chris@chris-wilson.co.uk
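
The commit message above describes the bookkeeping only in prose. Below is a
minimal standalone sketch of the idea, not the driver's actual code: the names
(engine_mask_t, struct gt_state, mark_engine_active, park) are illustrative
inventions rather than i915 symbols, and the real driver emits and waits on
requests instead of printing.

/*
 * Sketch: record which engines have been used since the last idle in a
 * bitmask, and on parking switch only those engines back to the kernel
 * context. Loading the power context instead covers every engine.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(n)		(1u << (n))
#define NUM_ENGINES	4
#define ALL_ENGINES	((engine_mask_t)~0u)

typedef uint32_t engine_mask_t;

struct gt_state {
	engine_mask_t active_engines;	/* engines used since the last idle */
};

/* Called whenever a request runs on @engine: remember that it was used. */
static void mark_engine_active(struct gt_state *gt, unsigned int engine)
{
	gt->active_engines |= BIT(engine);
}

/* Switch every engine selected by @mask back to the kernel context. */
static void switch_engines_to_kernel_context(engine_mask_t mask)
{
	for (unsigned int i = 0; i < NUM_ENGINES; i++)
		if (mask & BIT(i))
			printf("engine %u: switch back to kernel context\n", i);
}

/* Before parking, only the engines used since the last idle need flushing. */
static void park(struct gt_state *gt)
{
	switch_engines_to_kernel_context(gt->active_engines);
	gt->active_engines = 0;
}

int main(void)
{
	struct gt_state gt = { .active_engines = 0 };

	mark_engine_active(&gt, 0);
	mark_engine_active(&gt, 2);
	park(&gt);		/* touches engines 0 and 2 only */

	/* Loading the power context, by contrast, covers every engine. */
	switch_engines_to_kernel_context(ALL_ENGINES);
	return 0;
}
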
Author: Chris Wilson
Date:   2019-03-08 09:36:56 +00:00
parent 604c37d766
commit c6eeb4797e
10 changed files with 27 additions and 72 deletions

@@ -2845,7 +2845,8 @@ static void assert_kernel_context_is_current(struct drm_i915_private *i915)
 	}
 }
 
-static bool switch_to_kernel_context_sync(struct drm_i915_private *i915)
+static bool switch_to_kernel_context_sync(struct drm_i915_private *i915,
+					  unsigned long mask)
 {
 	bool result = true;
@@ -2854,7 +2855,7 @@ static bool switch_to_kernel_context_sync(struct drm_i915_private *i915)
 	 * to save itself before we report the failure. Yes, this may be a
 	 * false positive due to e.g. ENOMEM, caveat emptor!
 	 */
-	if (i915_gem_switch_to_kernel_context(i915))
+	if (i915_gem_switch_to_kernel_context(i915, mask))
 		result = false;
 
 	if (i915_gem_wait_for_idle(i915,
@@ -2879,7 +2880,8 @@ static bool switch_to_kernel_context_sync(struct drm_i915_private *i915)
 
 static bool load_power_context(struct drm_i915_private *i915)
 {
-	if (!switch_to_kernel_context_sync(i915))
+	/* Force loading the kernel context on all engines */
+	if (!switch_to_kernel_context_sync(i915, ALL_ENGINES))
 		return false;
 
 	/*
@@ -2927,7 +2929,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
 	    !i915->gt.active_requests) {
 		++i915->gt.active_requests; /* don't requeue idle */
 
-		switch_to_kernel_context_sync(i915);
+		switch_to_kernel_context_sync(i915, i915->gt.active_engines);
 
 		if (!--i915->gt.active_requests) {
 			__i915_gem_park(i915);
@@ -4380,7 +4382,7 @@ void i915_gem_suspend(struct drm_i915_private *i915)
 	 * state. Fortunately, the kernel_context is disposable and we do
 	 * not rely on its state.
 	 */
-	switch_to_kernel_context_sync(i915);
+	switch_to_kernel_context_sync(i915, i915->gt.active_engines);
 
 	mutex_unlock(&i915->drm.struct_mutex);
 	i915_reset_flush(i915);