drm/i915: Preallocate next seqno before touching the ring

Based on the work by Mika Kuoppala, we realised that we need to handle
seqno wraparound prior to committing our changes to the ring. The most
obvious point then is to grab the seqno inside intel_ring_begin(), and
then to reuse that seqno for all ring operations until the next request.
As intel_ring_begin() can fail, the callers must already be prepared to
handle such failure and so we can safely add further checks.
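
For illustration, here is a minimal sketch of the resulting flow,
condensed from the intel_ringbuffer.c hunks below (error paths and the
space/wrap checks are elided):

static int
intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
{
        /* Reuse the seqno already reserved for the current request */
        if (ring->outstanding_lazy_request)
                return 0;

        return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
}

int intel_ring_begin(struct intel_ring_buffer *ring, int num_dwords)
{
        int ret;

        /* Preallocate the olr before touching the ring, so that any
         * failure (including the seqno wraparound handling behind
         * i915_gem_get_seqno()) happens before the ring is modified. */
        ret = intel_ring_alloc_seqno(ring);
        if (ret)
                return ret;

        /* ... wait for space for num_dwords, wrapping the tail if necessary ... */
        return 0;
}

The *_add_request() implementations then simply emit
ring->outstanding_lazy_request instead of fetching a fresh seqno
themselves.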

This patch looks like it should be split up into the interface
changes and the tweaks to move seqno wrapping from the execbuffer into
the core seqno increment. However, I found no easy way to break it into
incremental steps without introducing further broken behaviour.

v2: Mika found a silly mistake and a subtle error in the existing code;
inside i915_gem_retire_requests() we were resetting the sync_seqno of
the target ring based on the seqno from this ring - but the two are
only related by the order of their allocation, not of their retirement.
Hence we were applying the optimisation of treating the rings as
already synchronised too early; fortunately, the only real casualty
there is the handling of seqno wrapping.
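
(For context, the wraparound handling that the preallocation feeds into
amounts to roughly the following. It lives on the i915_gem.c side and is
not among the hunks reproduced below, so treat the helper name and exact
shape as illustrative only:

static int i915_gem_handle_seqno_wrap(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
        int ret, i, j;

        /* Quiesce the GPU before reusing small seqno values ... */
        ret = i915_gpu_idle(dev);
        if (ret)
                return ret;

        /* ... and forget stale inter-ring bookkeeping: sync_seqno[]
         * values recorded before the wrap would otherwise make the
         * rings look synchronised when they are not. */
        i915_gem_retire_requests(dev);
        for_each_ring(ring, dev_priv, i)
                for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
                        ring->sync_seqno[j] = 0;

        return 0;
}
)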

v3: Do not forget to reset the sync_seqno upon module reinitialisation,
ala resume.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=863861
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com> [v2]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Author:     Chris Wilson
Date:       2012-11-27 16:22:52 +00:00
Committer:  Daniel Vetter
Parent:     45e2b5f640
Commit:     9d7730914f

6 changed files with 89 additions and 82 deletions

drivers/gpu/drm/i915/intel_ringbuffer.c

@@ -555,12 +555,11 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
 static void
 update_mboxes(struct intel_ring_buffer *ring,
-              u32 seqno,
-              u32 mmio_offset)
+              u32 mmio_offset)
 {
         intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
         intel_ring_emit(ring, mmio_offset);
-        intel_ring_emit(ring, seqno);
+        intel_ring_emit(ring, ring->outstanding_lazy_request);
 }

 /**
@@ -573,8 +572,7 @@ update_mboxes(struct intel_ring_buffer *ring,
  * This acts like a signal in the canonical semaphore.
  */
 static int
-gen6_add_request(struct intel_ring_buffer *ring,
-                 u32 *seqno)
+gen6_add_request(struct intel_ring_buffer *ring)
 {
         u32 mbox1_reg;
         u32 mbox2_reg;
@@ -587,13 +585,11 @@ gen6_add_request(struct intel_ring_buffer *ring,
         mbox1_reg = ring->signal_mbox[0];
         mbox2_reg = ring->signal_mbox[1];
-        *seqno = i915_gem_next_request_seqno(ring);
-        update_mboxes(ring, *seqno, mbox1_reg);
-        update_mboxes(ring, *seqno, mbox2_reg);
+        update_mboxes(ring, mbox1_reg);
+        update_mboxes(ring, mbox2_reg);
         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-        intel_ring_emit(ring, *seqno);
+        intel_ring_emit(ring, ring->outstanding_lazy_request);
         intel_ring_emit(ring, MI_USER_INTERRUPT);
         intel_ring_advance(ring);
@@ -650,10 +646,8 @@ do { \
 } while (0)

 static int
-pc_render_add_request(struct intel_ring_buffer *ring,
-                      u32 *result)
+pc_render_add_request(struct intel_ring_buffer *ring)
 {
-        u32 seqno = i915_gem_next_request_seqno(ring);
         struct pipe_control *pc = ring->private;
         u32 scratch_addr = pc->gtt_offset + 128;
         int ret;
@@ -674,7 +668,7 @@ pc_render_add_request(struct intel_ring_buffer *ring,
                         PIPE_CONTROL_WRITE_FLUSH |
                         PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
         intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-        intel_ring_emit(ring, seqno);
+        intel_ring_emit(ring, ring->outstanding_lazy_request);
         intel_ring_emit(ring, 0);
         PIPE_CONTROL_FLUSH(ring, scratch_addr);
         scratch_addr += 128; /* write to separate cachelines */
@@ -693,11 +687,10 @@ pc_render_add_request(struct intel_ring_buffer *ring,
                         PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                         PIPE_CONTROL_NOTIFY);
         intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-        intel_ring_emit(ring, seqno);
+        intel_ring_emit(ring, ring->outstanding_lazy_request);
         intel_ring_emit(ring, 0);
         intel_ring_advance(ring);
-        *result = seqno;
         return 0;
 }
@@ -885,25 +878,20 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
 }

 static int
-i9xx_add_request(struct intel_ring_buffer *ring,
-                 u32 *result)
+i9xx_add_request(struct intel_ring_buffer *ring)
 {
-        u32 seqno;
         int ret;

         ret = intel_ring_begin(ring, 4);
         if (ret)
                 return ret;

-        seqno = i915_gem_next_request_seqno(ring);
         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-        intel_ring_emit(ring, seqno);
+        intel_ring_emit(ring, ring->outstanding_lazy_request);
         intel_ring_emit(ring, MI_USER_INTERRUPT);
         intel_ring_advance(ring);

-        *result = seqno;
         return 0;
 }
@@ -1110,6 +1098,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
         INIT_LIST_HEAD(&ring->active_list);
         INIT_LIST_HEAD(&ring->request_list);
         ring->size = 32 * PAGE_SIZE;
+        memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));

         init_waitqueue_head(&ring->irq_queue);
@@ -1338,6 +1327,15 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
         return -EBUSY;
 }

+static int
+intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
+{
+        if (ring->outstanding_lazy_request)
+                return 0;
+
+        return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
+}
+
 int intel_ring_begin(struct intel_ring_buffer *ring,
                      int num_dwords)
 {
@@ -1349,6 +1347,11 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
         if (ret)
                 return ret;

+        /* Preallocate the olr before touching the ring */
+        ret = intel_ring_alloc_seqno(ring);
+        if (ret)
+                return ret;
+
         if (unlikely(ring->tail + n > ring->effective_size)) {
                 ret = intel_wrap_ring_buffer(ring);
                 if (unlikely(ret))