drm/amdgpu: remove v_seq handling from the scheduler v2

The v_seq is simply not used any more; only the 32-bit atomic for fence sequence numbering is kept.

v2: trivial rebase

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com> (v1)
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com> (v1)
Reviewed-by: Chunming Zhou <david1.zhou@amd.com> (v1)
Author:    Christian König <christian.koenig@amd.com>
Date:      2015-08-19 15:00:55 +02:00
Committer: Alex Deucher
Parent:    4ce9891ee1
Commit:    ce882e6dc2

9 changed files with 21 additions and 57 deletions
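
The "32-bit atomic" the message refers to is the per-entity fence sequence counter the scheduler keeps once v_seq is gone. As a rough sketch of that numbering scheme (the names entity_model, fence_seq and next_fence_seq are illustrative stand-ins, not the driver's exact identifiers):

	/* Rough model of the sequence numbering the commit keeps: one
	 * 32-bit atomic counter per scheduler entity, bumped once per
	 * fence. Illustrative names, not the driver's exact fields. */
	#include <stdatomic.h>
	#include <stdint.h>

	struct entity_model {
		atomic_uint fence_seq;	/* 32-bit fence sequence counter */
	};

	static inline uint32_t next_fence_seq(struct entity_model *e)
	{
		/* fetch_add returns the previous value; +1 is the new seqno */
		return (uint32_t)atomic_fetch_add(&e->fence_seq, 1) + 1;
	}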

drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -1047,7 +1047,7 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
 struct amdgpu_ctx *amdgpu_ctx_get_ref(struct amdgpu_ctx *ctx);
 
 uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
-			      struct fence *fence, uint64_t queued_seq);
+			      struct fence *fence);
 struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 				   struct amdgpu_ring *ring, uint64_t seq);
 

drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

@@ -866,11 +866,9 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 			kfree(job);
 			goto out;
 		}
-		job->ibs[parser->num_ibs - 1].sequence =
-			amdgpu_ctx_add_fence(job->ctx, ring,
-					     &job->base.s_fence->base,
-					     job->base.s_fence->v_seq);
-		cs->out.handle = job->base.s_fence->v_seq;
+		cs->out.handle =
+			amdgpu_ctx_add_fence(job->ctx, ring,
+					     &job->base.s_fence->base);
 		list_sort(NULL, &parser->validated, cmp_size_smaller_first);
 		ttm_eu_fence_buffer_objects(&parser->ticket,
 				&parser->validated,
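
The shape of the change is clearest here: amdgpu_ctx_add_fence() now assigns the sequence number itself and returns it, and that return value becomes the handle given back to userspace, so the scheduler's separate v_seq has nothing left to do. A minimal userspace model of that bookkeeping, with illustrative names standing in for the driver's structures:

	/* Minimal model of the new submission bookkeeping:
	 * ctx_add_fence_model() assigns the next per-ring sequence
	 * number, parks the fence in a fixed-size window, and returns
	 * the number, which the ioctl hands back as cs->out.handle.
	 * All names here are illustrative, not the driver's. */
	#include <stdint.h>

	#define MAX_CS_PENDING 16	/* stands in for AMDGPU_CTX_MAX_CS_PENDING */

	struct ctx_ring_model {
		uint64_t sequence;		/* next seq to hand out */
		void *fences[MAX_CS_PENDING];	/* pending fence window */
	};

	static uint64_t ctx_add_fence_model(struct ctx_ring_model *cring,
					    void *fence)
	{
		uint64_t seq = cring->sequence;

		cring->fences[seq % MAX_CS_PENDING] = fence;
		cring->sequence++;

		return seq;	/* caller: cs->out.handle = returned seq */
	}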

drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c

@@ -236,17 +236,13 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
 }
 
 uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
-			      struct fence *fence, uint64_t queued_seq)
+			      struct fence *fence)
 {
 	struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
-	uint64_t seq = 0;
+	uint64_t seq = cring->sequence;
 	unsigned idx = 0;
 	struct fence *other = NULL;
 
-	if (amdgpu_enable_scheduler)
-		seq = queued_seq;
-	else
-		seq = cring->sequence;
 	idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
 	other = cring->fences[idx];
 	if (other) {
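
The fence window has AMDGPU_CTX_MAX_CS_PENDING slots, so the fence about to be displaced from slot seq % N is the one registered N submissions earlier, and the driver waits for it before overwriting the slot. A sketch of that invariant, with hypothetical types:

	/* Sketch of the slot-reuse invariant in the fixed fence window:
	 * the previous occupant of slot (seq % N) carries sequence
	 * seq - N and must have signaled before the slot is recycled.
	 * Hypothetical types, not the driver's. */
	#include <stdbool.h>
	#include <stdint.h>

	#define MAX_CS_PENDING 16

	struct fence_model {
		uint64_t seq;
		bool signaled;
	};

	/* Returns the fence the caller must wait on before storing a new
	 * fence under sequence number 'seq', or NULL if the slot is free. */
	static struct fence_model *
	slot_to_recycle(struct fence_model *fences[MAX_CS_PENDING], uint64_t seq)
	{
		struct fence_model *other = fences[seq % MAX_CS_PENDING];

		if (other && !other->signaled)
			return other;	/* still pending: wait, then overwrite */

		return NULL;		/* slot free or long signaled */
	}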
@@ -260,8 +256,7 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
 
 	spin_lock(&ctx->ring_lock);
 	cring->fences[idx] = fence;
-	if (!amdgpu_enable_scheduler)
-		cring->sequence++;
+	cring->sequence++;
 	spin_unlock(&ctx->ring_lock);
 
 	fence_put(other);
@@ -274,21 +269,16 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 				   struct amdgpu_ring *ring, uint64_t seq)
 {
 	struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
 	struct fence *fence;
-	uint64_t queued_seq;
 
 	spin_lock(&ctx->ring_lock);
-	if (amdgpu_enable_scheduler)
-		queued_seq = amd_sched_next_queued_seq(&cring->entity);
-	else
-		queued_seq = cring->sequence;
 
-	if (seq >= queued_seq) {
+	if (seq >= cring->sequence) {
 		spin_unlock(&ctx->ring_lock);
 		return ERR_PTR(-EINVAL);
 	}
 
-	if (seq + AMDGPU_CTX_MAX_CS_PENDING < queued_seq) {
+	if (seq + AMDGPU_CTX_MAX_CS_PENDING < cring->sequence) {
 		spin_unlock(&ctx->ring_lock);
 		return NULL;
 	}
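
With queued_seq gone, cring->sequence alone defines the window of handles a lookup can serve: anything at or above it was never issued, and anything more than AMDGPU_CTX_MAX_CS_PENDING behind it has been recycled out of the window. A compact model of that check (illustrative names only):

	/* Model of the lookup window in amdgpu_ctx_get_fence():
	 * 'sequence' is the next unissued number, so live handles lie
	 * in [sequence - MAX_CS_PENDING, sequence - 1]. Illustrative. */
	#include <errno.h>
	#include <stddef.h>
	#include <stdint.h>

	#define MAX_CS_PENDING 16

	/* Returns -EINVAL for a never-issued seq, 0 with *out == NULL for
	 * a seq that aged out of the window (its fence signaled long ago),
	 * or 0 with *out set to the still-tracked fence. */
	static int ctx_get_fence_model(void *fences[MAX_CS_PENDING],
				       uint64_t sequence, uint64_t seq,
				       void **out)
	{
		if (seq >= sequence)
			return -EINVAL;		/* handle never handed out */

		if (seq + MAX_CS_PENDING < sequence) {
			*out = NULL;		/* recycled: treat as signaled */
			return 0;
		}

		*out = fences[seq % MAX_CS_PENDING];
		return 0;
	}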

drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c

@@ -126,7 +126,6 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
 	struct amdgpu_ring *ring;
 	struct amdgpu_ctx *ctx, *old_ctx;
 	struct amdgpu_vm *vm;
-	uint64_t sequence;
 	unsigned i;
 	int r = 0;
 
@@ -199,12 +198,9 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
 		return r;
 	}
 
-	sequence = amdgpu_enable_scheduler ? ib->sequence : 0;
-
 	if (!amdgpu_enable_scheduler && ib->ctx)
 		ib->sequence = amdgpu_ctx_add_fence(ib->ctx, ring,
-						    &ib->fence->base,
-						    sequence);
+						    &ib->fence->base);
 
 	/* wrap the last IB with fence */
 	if (ib->user) {
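
For completeness, a tiny demo of the two models above working together, the way the non-scheduler IB path and the CS ioctl now share a single per-ring counter. This reuses ctx_add_fence_model() and ctx_get_fence_model() from the earlier sketches and is purely hypothetical:

	/* Demo: register one fence, then look it up by the returned
	 * handle. Reuses the model functions from the sketches above. */
	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		struct ctx_ring_model cring = { .sequence = 1 };
		int dummy_fence;
		void *found = NULL;

		uint64_t handle = ctx_add_fence_model(&cring, &dummy_fence);
		int r = ctx_get_fence_model(cring.fences, cring.sequence,
					    handle, &found);

		assert(r == 0 && found == &dummy_fence);
		printf("handle %llu resolved\n", (unsigned long long)handle);
		return 0;
	}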

drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c

@@ -435,8 +435,8 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
 		if (a_fence)
 			seq_printf(m, " protected by 0x%016llx on ring %d",
 				   a_fence->seq, a_fence->ring->idx);
 		if (s_fence)
-			seq_printf(m, " protected by 0x%016llx on ring %d",
-				   s_fence->v_seq,
+			seq_printf(m, " protected by 0x%016x on ring %d",
+				   s_fence->base.seqno,
 				   s_fence->entity->scheduler->ring_id);
 	}
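
The format change follows from the type change: v_seq was a 64-bit value printed with %016llx, while the fence's seqno is 32-bit, so the plain unsigned conversion is used instead; keeping the field width at 16 preserves the debugfs column alignment. A quick standalone illustration:

	/* Quick illustration of the format switch: a 32-bit seqno printed
	 * with field width 16 still lines up with the 64-bit column. */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t v_seq = 0x2aULL;	/* old: 64-bit scheduler v_seq */
		uint32_t seqno = 0x2a;		/* new: 32-bit fence seqno     */

		printf(" protected by 0x%016llx on ring %d\n",
		       (unsigned long long)v_seq, 0);
		printf(" protected by 0x%016x on ring %d\n",
		       (unsigned)seqno, 0);
		return 0;
	}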

drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c

@@ -111,7 +111,6 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
 			kfree(job);
 			return r;
 		}
-		ibs[num_ibs - 1].sequence = job->base.s_fence->v_seq;
 		*f = fence_get(&job->base.s_fence->base);
 		mutex_unlock(&job->job_lock);
 	} else {