drm/scheduler: improve job distribution with multiple queues
Use a per-scheduler score instead of num_jobs when selecting a drm scheduler, which gives better load balance across multiple drm schedulers. Below are test results after running amdgpu_test ~10 times.

Before this patch:

sched_name     num of times it got scheduled
==========     =============================
sdma0          1463
sdma1          198
comp_1.0.1     280

After this patch:

sched_name     num of times it got scheduled
==========     =============================
sdma0          925
sdma1          928
comp_1.0.1     177
comp_1.1.1     44
comp_1.2.1     43
comp_1.3.1     44

Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/373000/
Signed-off-by: Christian König <christian.koenig@amd.com>
Commit d41a39dda1, parent 0dc9b286b8, committed by Christian König.
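For illustration, the selection policy described in the commit message (prefer the scheduler with the lowest score) can be sketched in plain C. This is a minimal userspace sketch assuming a simplified toy_sched type and a made-up pick_least_loaded() helper; it is not the kernel's drm_sched implementation.

/*
 * Minimal userspace sketch of score-based scheduler selection.
 * toy_sched and pick_least_loaded() are made-up names for illustration;
 * they are not part of the kernel's drm_gpu_scheduler API.
 */
#include <limits.h>
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct toy_sched {
	const char *name;
	atomic_int score;	/* lower score == less loaded */
};

/* Return the scheduler with the lowest score, i.e. the least loaded one. */
static struct toy_sched *pick_least_loaded(struct toy_sched **scheds, size_t n)
{
	struct toy_sched *best = NULL;
	int best_score = INT_MAX;

	for (size_t i = 0; i < n; i++) {
		int s = atomic_load(&scheds[i]->score);

		if (s < best_score) {
			best_score = s;
			best = scheds[i];
		}
	}
	return best;
}

int main(void)
{
	struct toy_sched sdma0 = { .name = "sdma0", .score = 3 };
	struct toy_sched sdma1 = { .name = "sdma1", .score = 1 };
	struct toy_sched *scheds[] = { &sdma0, &sdma1 };

	/* sdma1 has the lower score, so it is picked. */
	printf("picked: %s\n", pick_least_loaded(scheds, 2)->name);
	return 0;
}

The counter being compared corresponds to the atomic_t score field added to struct drm_gpu_scheduler in the diff below.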
include/drm/gpu_scheduler.h

@@ -263,7 +263,7 @@ struct drm_sched_backend_ops {
  * @job_list_lock: lock to protect the ring_mirror_list.
  * @hang_limit: once the hangs by a job crosses this limit then it is marked
  *              guilty and it will be considered for scheduling further.
- * @num_jobs: the number of jobs in queue in the scheduler
+ * @score: score to help loadbalancer pick a idle sched
  * @ready: marks if the underlying HW is ready to work
  * @free_guilty: A hit to time out handler to free the guilty job.
  *
@@ -284,8 +284,8 @@ struct drm_gpu_scheduler {
 	struct list_head		ring_mirror_list;
 	spinlock_t			job_list_lock;
 	int				hang_limit;
-	atomic_t			num_jobs;
-	bool			ready;
+	atomic_t			score;
+	bool				ready;
 	bool				free_guilty;
 };
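The hunks above show only the header side of the change: atomic_t num_jobs becomes atomic_t score, and the kernel-doc is updated to match. As a rough, assumed sketch of how such an atomic counter could be kept up to date so the picker sees current load, consider the following; toy_sched_queue_job() and toy_sched_job_done() are hypothetical helpers, and the exact places where the real scheduler adjusts score are not shown in this diff.

/*
 * Sketch of the bookkeeping side of an atomic load score. The toy_sched
 * type is repeated here so the snippet stands alone; the helper names are
 * hypothetical and do not correspond to drm_sched functions.
 */
#include <stdatomic.h>

struct toy_sched {
	const char *name;
	atomic_int score;
};

/* Work is queued: the scheduler looks busier to the load balancer. */
static void toy_sched_queue_job(struct toy_sched *sched)
{
	atomic_fetch_add(&sched->score, 1);
}

/* Work retired: the scheduler becomes a better pick again. */
static void toy_sched_job_done(struct toy_sched *sched)
{
	atomic_fetch_sub(&sched->score, 1);
}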