drm/i915: Only spin whilst waiting on the current request
Limit busywaiting only to the request currently being processed by the GPU. If the request is not currently being processed by the GPU, there is a very low likelihood of it being completed within the 2 microsecond spin timeout and so we will just be wasting CPU cycles.

v2: Check for logical inversion when rebasing - we were incorrectly checking for this request being active, and instead busywaiting for when the GPU was not yet processing the request of interest.

v3: Try another colour for the seqno names.
v4: Another colour for the function names.

v5: Remove the forced coherency when checking for the active request. On reflection and plenty of recent experimentation, the issue is not a cache coherency problem - but an irq/seqno ordering problem (timing issue). Here, we do not need the w/a to force ordering of the read with an interrupt.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: "Rogozhkin, Dmitry V" <dmitry.v.rogozhkin@intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: Eero Tamminen <eero.t.tamminen@intel.com>
Cc: "Rantala, Valtteri" <valtteri.rantala@intel.com>
Cc: stable@vger.kernel.org
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1449833608-22125-4-git-send-email-chris@chris-wilson.co.uk
Committed by: Daniel Vetter
Parent: ca5b721e23
Commit: 821485dc2a
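The gating logic the commit message describes boils down to a small decision: spin only when nobody is already sleeping on an interrupt and the hardware has actually begun executing this request, since a request only starts once its predecessor's seqno has been reported. The following standalone C model is a minimal sketch of that decision, not the driver code; the names (model_request, seqno_passed, should_spin) and the plain-integer seqno state are illustrative stand-ins for the i915 request and ring structures.

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct model_request {
	uint32_t seqno;          /* seqno that signals completion of this request */
	uint32_t previous_seqno; /* seqno of the request submitted just before it */
};

/* Wrap-safe seqno comparison, in the spirit of i915_seqno_passed(). */
static bool seqno_passed(uint32_t hw_seqno, uint32_t target)
{
	return (int32_t)(hw_seqno - target) >= 0;
}

/*
 * Decide whether a ~2us busywait is worthwhile. Returns 0 to spin,
 * -EBUSY if an irq waiter already exists, -EAGAIN if the GPU has not
 * yet started executing this request.
 */
static int should_spin(const struct model_request *req,
		       uint32_t hw_seqno, unsigned int irq_refcount)
{
	if (irq_refcount)
		return -EBUSY;

	/* The GPU only starts on a request once its predecessor has completed. */
	if (!seqno_passed(hw_seqno, req->previous_seqno))
		return -EAGAIN;

	return 0;
}

int main(void)
{
	struct model_request req = { .seqno = 12, .previous_seqno = 11 };

	printf("hw at 10 -> %d (do not spin)\n", should_spin(&req, 10, 0));
	printf("hw at 11 -> %d (spin)\n", should_spin(&req, 11, 0));
	return 0;
}

In the first call the GPU has not yet retired the preceding request (seqno 11), so spinning would burn CPU for the full timeout; in the second the request of interest is on the hardware, so a short spin has a realistic chance of observing completion.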
@@ -1193,9 +1193,13 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
 	 * takes to sleep on a request, on the order of a microsecond.
 	 */
 
-	if (i915_gem_request_get_ring(req)->irq_refcount)
+	if (req->ring->irq_refcount)
 		return -EBUSY;
 
+	/* Only spin if we know the GPU is processing this request */
+	if (!i915_gem_request_started(req, true))
+		return -EAGAIN;
+
 	timeout = local_clock_us(&cpu) + 5;
 	while (!need_resched()) {
 		if (i915_gem_request_completed(req, true))
@@ -1209,6 +1213,7 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
 
 		cpu_relax_lowlatency();
 	}
+
 	if (i915_gem_request_completed(req, false))
 		return 0;
 
@@ -2600,6 +2605,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	request->batch_obj = obj;
 
 	request->emitted_jiffies = jiffies;
+	request->previous_seqno = ring->last_submitted_seqno;
 	ring->last_submitted_seqno = request->seqno;
 	list_add_tail(&request->list, &ring->request_list);
 
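The hunks above call i915_gem_request_started() but this excerpt does not show its definition. A plausible sketch, assuming the previous_seqno field recorded in __i915_add_request() above and the i915_seqno_passed()/ring->get_seqno() helpers of that kernel era, would look roughly like this (not verbatim from the patch):

/* Sketch only: treat a request as "started" once the seqno of the request
 * submitted before it has been reported by the ring. Whether lazy
 * coherency is acceptable for this read is exactly what v5 settles: the
 * earlier failures were an irq/seqno ordering issue, not a caching one,
 * so no forced coherency workaround is needed here.
 */
static inline bool i915_gem_request_started(struct drm_i915_gem_request *req,
					    bool lazy_coherency)
{
	u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);

	return i915_seqno_passed(seqno, req->previous_seqno);
}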