drm/scheduler: modify API to avoid redundancy
The entity already has a scheduler field, so we don't need the sched argument in any of the functions where an entity is provided.

Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Committed by: Alex Deucher
Parent: bf314ca3f1
Commit: cdc5017659
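
For orientation, the call sites in the diff below imply the following before/after prototypes for the scheduler entry points being changed. This is a sketch reconstructed from the hunks, not quoted from the kernel headers; the parameter names and the long timeout type are assumptions.

/* Sketch: prototypes as implied by the call sites below (parameter names
 * and the long timeout type are assumptions, not quotes from the tree). */

/* before: callers pass the scheduler explicitly */
int  drm_sched_job_init(struct drm_sched_job *job,
			struct drm_gpu_scheduler *sched,
			struct drm_sched_entity *entity, void *owner);
long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
			    struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
			   struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_gpu_scheduler *sched,
			      struct drm_sched_entity *entity);

/* after: the scheduler comes from the entity's own field */
int  drm_sched_job_init(struct drm_sched_job *job,
			struct drm_sched_entity *entity, void *owner);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);

On the caller side the change is mechanical: every hunk below simply drops the scheduler argument.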
@@ -1232,7 +1232,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	job = p->job;
 	p->job = NULL;

-	r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);
+	r = drm_sched_job_init(&job->base, entity, p->filp);
 	if (r) {
 		amdgpu_job_free(job);
 		amdgpu_mn_unlock(p->mn);

@@ -104,8 +104,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,

 failed:
 	for (j = 0; j < i; j++)
-		drm_sched_entity_destroy(&adev->rings[j]->sched,
-					 &ctx->rings[j].entity);
+		drm_sched_entity_destroy(&ctx->rings[j].entity);
 	kfree(ctx->fences);
 	ctx->fences = NULL;
 	return r;

@@ -178,8 +177,7 @@ static void amdgpu_ctx_do_release(struct kref *ref)
 		if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
 			continue;

-		drm_sched_entity_destroy(&ctx->adev->rings[i]->sched,
-					 &ctx->rings[i].entity);
+		drm_sched_entity_destroy(&ctx->rings[i].entity);
 	}

 	amdgpu_ctx_fini(ref);

@@ -466,8 +464,8 @@ void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
 			if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
 				continue;

-			max_wait = drm_sched_entity_flush(&ctx->adev->rings[i]->sched,
-							  &ctx->rings[i].entity, max_wait);
+			max_wait = drm_sched_entity_flush(&ctx->rings[i].entity,
+							  max_wait);
 		}
 	}
 	mutex_unlock(&mgr->lock);

@@ -492,8 +490,7 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
 				continue;

 			if (kref_read(&ctx->refcount) == 1)
-				drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
-						      &ctx->rings[i].entity);
+				drm_sched_entity_fini(&ctx->rings[i].entity);
 			else
 				DRM_ERROR("ctx %p is still alive\n", ctx);
 		}

@@ -133,7 +133,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
 	if (!f)
 		return -EINVAL;

-	r = drm_sched_job_init(&job->base, entity->sched, entity, owner);
+	r = drm_sched_job_init(&job->base, entity, owner);
 	if (r)
 		return r;

@@ -1925,8 +1925,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 			return;
 		}
 	} else {
-		drm_sched_entity_destroy(adev->mman.entity.sched,
-					 &adev->mman.entity);
+		drm_sched_entity_destroy(&adev->mman.entity);
 		dma_fence_put(man->move);
 		man->move = NULL;
 	}

@@ -305,8 +305,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
 	int i, j;

-	drm_sched_entity_destroy(&adev->uvd.inst->ring.sched,
-				 &adev->uvd.entity);
+	drm_sched_entity_destroy(&adev->uvd.entity);

 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
 		kfree(adev->uvd.inst[j].saved_bo);

@@ -221,7 +221,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
 	if (adev->vce.vcpu_bo == NULL)
 		return 0;

-	drm_sched_entity_destroy(&adev->vce.ring[0].sched, &adev->vce.entity);
+	drm_sched_entity_destroy(&adev->vce.entity);

 	amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
 			      (void **)&adev->vce.cpu_addr);

@@ -2642,7 +2642,7 @@ error_free_root:
 	vm->root.base.bo = NULL;

 error_free_sched_entity:
-	drm_sched_entity_destroy(&ring->sched, &vm->entity);
+	drm_sched_entity_destroy(&vm->entity);

 	return r;
 }

@@ -2779,7 +2779,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
 	}

-	drm_sched_entity_destroy(vm->entity.sched, &vm->entity);
+	drm_sched_entity_destroy(&vm->entity);

 	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
 		dev_err(adev->dev, "still active bo inside vm\n");
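
Outside the kernel context, the pattern the commit applies is simply: when a struct already carries a back-pointer to its owner, callees should look the owner up through the struct rather than take it as a second, redundant parameter. A minimal standalone C sketch with hypothetical names (scheduler, entity, job_init_*), purely for illustration:

/* Hypothetical, self-contained illustration of the refactoring pattern;
 * none of these names come from the DRM scheduler. */
#include <stdio.h>

struct scheduler { const char *name; };

struct entity {
	struct scheduler *sched;   /* back-pointer, set when the entity is created */
};

/* old style: redundant sched argument that must always match e->sched */
static void job_init_old(struct scheduler *sched, struct entity *e)
{
	printf("old: submitting on %s\n", sched->name);
}

/* new style: derive the scheduler from the entity itself */
static void job_init_new(struct entity *e)
{
	printf("new: submitting on %s\n", e->sched->name);
}

int main(void)
{
	struct scheduler gfx = { "gfx" };
	struct entity e = { &gfx };

	job_init_old(&gfx, &e);
	job_init_new(&e);
	return 0;
}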