drm/i915: Try harder to finish the idle-worker

If a worker requeues itself, it may switch to a different kworker pool,
which flush_work() considers as complete. To be strict, we then need to
keep flushing the work until it is no longer pending.

References: https://bugs.freedesktop.org/show_bug.cgi?id=102456
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171006104038.22337-1-chris@chris-wilson.co.uk
This commit is contained in:
@@ -4259,8 +4259,7 @@ fault_irq_set(struct drm_i915_private *i915,
|
|||||||
mutex_unlock(&i915->drm.struct_mutex);
|
mutex_unlock(&i915->drm.struct_mutex);
|
||||||
|
|
||||||
/* Flush idle worker to disarm irq */
|
/* Flush idle worker to disarm irq */
|
||||||
while (flush_delayed_work(&i915->gt.idle_work))
|
drain_delayed_work(&i915->gt.idle_work);
|
||||||
;
|
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
|
@@ -4547,8 +4547,7 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
|
|||||||
/* As the idle_work is rearming if it detects a race, play safe and
|
/* As the idle_work is rearming if it detects a race, play safe and
|
||||||
* repeat the flush until it is definitely idle.
|
* repeat the flush until it is definitely idle.
|
||||||
*/
|
*/
|
||||||
while (flush_delayed_work(&dev_priv->gt.idle_work))
|
drain_delayed_work(&dev_priv->gt.idle_work);
|
||||||
;
|
|
||||||
|
|
||||||
/* Assert that we sucessfully flushed all the work and
|
/* Assert that we sucessfully flushed all the work and
|
||||||
* reset the GPU back to its idle, low power state.
|
* reset the GPU back to its idle, low power state.
|
||||||
|
@@ -124,4 +124,17 @@ static inline void __list_del_many(struct list_head *head,
|
|||||||
WRITE_ONCE(head->next, first);
|
WRITE_ONCE(head->next, first);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
 * Wait until the work is finally complete, even if it tries to postpone
 * by requeueing itself. Note, that if the worker never cancels itself,
 * we will spin forever.
 */
static inline void drain_delayed_work(struct delayed_work *dw)
{
	/* flush_delayed_work() returns true while there was work to flush;
	 * a self-requeueing worker may hop to another kworker pool, which
	 * flush_work() treats as complete, so re-check pending and repeat
	 * until the work is truly idle.
	 */
	do {
		while (flush_delayed_work(dw))
			;
	} while (delayed_work_pending(dw));
}
|
||||||
|
|
||||||
#endif /* !__I915_UTILS_H */
|
#endif /* !__I915_UTILS_H */
|
||||||
|
Reference in New Issue
Block a user