drm/i915: argument to control retiring behavior

Sometimes, when we idle the GPU or wait on something, we do not
actually want to process the retire list. This patch lets callers
choose the behavior.
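For illustration, a sketch of the two behaviors at a call site (the
function signatures match the ones changed below; the surrounding
context and variable names are hypothetical):

	/* Wait for the seqno, but leave completed requests on the
	 * list; the caller will retire them later (or not at all).
	 */
	ret = i915_wait_request(ring, seqno, false);

	/* Idle the GPU and also retire everything that completed. */
	ret = i915_gpu_idle(dev, true);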

Reviewed-by: Keith Packard <keithp@keithp.com>
Reviewed-by: Eugeni Dodonov <eugeni.dodonov@intel.com>
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
commit b93f9cf14e
parent fc74d8e011
Author:    Ben Widawsky
Date:      2012-01-25 15:39:34 -08:00
Committer: Daniel Vetter

7 changed files with 27 additions and 20 deletions

--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c

@@ -1943,7 +1943,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
  */
 int
 i915_wait_request(struct intel_ring_buffer *ring,
-		  uint32_t seqno)
+		  uint32_t seqno,
+		  bool do_retire)
 {
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	u32 ier;
@@ -2027,7 +2028,7 @@ i915_wait_request(struct intel_ring_buffer *ring,
 	 * buffer to have made it to the inactive list, and we would need
 	 * a separate wait queue to handle that.
 	 */
-	if (ret == 0)
+	if (ret == 0 && do_retire)
 		i915_gem_retire_requests_ring(ring);

 	return ret;
@@ -2051,7 +2052,8 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
 	 * it.
 	 */
 	if (obj->active) {
-		ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
+		ret = i915_wait_request(obj->ring, obj->last_rendering_seqno,
+					true);
 		if (ret)
 			return ret;
 	}
@@ -2172,7 +2174,7 @@ i915_gem_flush_ring(struct intel_ring_buffer *ring,
 	return 0;
 }

-static int i915_ring_idle(struct intel_ring_buffer *ring)
+static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
 {
 	int ret;

@@ -2186,18 +2188,18 @@ static int i915_ring_idle(struct intel_ring_buffer *ring)
 			return ret;
 	}

-	return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
+	return i915_wait_request(ring, i915_gem_next_request_seqno(ring),
+				 do_retire);
 }

-int
-i915_gpu_idle(struct drm_device *dev)
+int i915_gpu_idle(struct drm_device *dev, bool do_retire)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret, i;

 	/* Flush everything onto the inactive list. */
 	for (i = 0; i < I915_NUM_RINGS; i++) {
-		ret = i915_ring_idle(&dev_priv->ring[i]);
+		ret = i915_ring_idle(&dev_priv->ring[i], do_retire);
 		if (ret)
 			return ret;
 	}
@@ -2400,7 +2402,8 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
 		if (!ring_passed_seqno(obj->last_fenced_ring,
 				       obj->last_fenced_seqno)) {
 			ret = i915_wait_request(obj->last_fenced_ring,
-						obj->last_fenced_seqno);
+						obj->last_fenced_seqno,
+						true);
 			if (ret)
 				return ret;
 		}
@@ -2541,7 +2544,8 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
 		if (!ring_passed_seqno(obj->last_fenced_ring,
 				       reg->setup_seqno)) {
 			ret = i915_wait_request(obj->last_fenced_ring,
-						reg->setup_seqno);
+						reg->setup_seqno,
+						true);
 			if (ret)
 				return ret;
 		}
@@ -3710,7 +3714,7 @@ i915_gem_idle(struct drm_device *dev)
 		return 0;
 	}

-	ret = i915_gpu_idle(dev);
+	ret = i915_gpu_idle(dev, true);
 	if (ret) {
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
@@ -4201,7 +4205,7 @@ rescan:
 		 * This has a dramatic impact to reduce the number of
 		 * OOM-killer events whilst running the GPU aggressively.
 		 */
-		if (i915_gpu_idle(dev) == 0)
+		if (i915_gpu_idle(dev, true) == 0)
 			goto rescan;
 	}
 	mutex_unlock(&dev->struct_mutex);