@@ -359,7 +359,7 @@ add_queue_to_list:
 	list_add(&q->list, &qpd->queues_list);
 	qpd->queue_count++;
 	if (q->properties.is_active)
-		dqm->queue_count++;
+		dqm->active_queue_count++;
 
 	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
 		dqm->sdma_queue_count++;
@@ -494,7 +494,7 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
 	}
 	qpd->queue_count--;
 	if (q->properties.is_active)
-		dqm->queue_count--;
+		dqm->active_queue_count--;
 
 	return retval;
 }
@@ -563,13 +563,13 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
 	/*
 	 * check active state vs. the previous state and modify
 	 * counter accordingly. map_queues_cpsch uses the
-	 * dqm->queue_count to determine whether a new runlist must be
+	 * dqm->active_queue_count to determine whether a new runlist must be
 	 * uploaded.
 	 */
 	if (q->properties.is_active && !prev_active)
-		dqm->queue_count++;
+		dqm->active_queue_count++;
 	else if (!q->properties.is_active && prev_active)
-		dqm->queue_count--;
+		dqm->active_queue_count--;
 
 	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
 		retval = map_queues_cpsch(dqm);
@@ -618,7 +618,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
 		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
 				q->properties.type)];
 		q->properties.is_active = false;
-		dqm->queue_count--;
+		dqm->active_queue_count--;
 
 		if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
 			continue;
@@ -662,7 +662,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
 			continue;
 
 		q->properties.is_active = false;
-		dqm->queue_count--;
+		dqm->active_queue_count--;
 	}
 	retval = execute_queues_cpsch(dqm,
 				qpd->is_debug ?
@@ -731,7 +731,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
 		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
 				q->properties.type)];
 		q->properties.is_active = true;
-		dqm->queue_count++;
+		dqm->active_queue_count++;
 
 		if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
 			continue;
@@ -786,7 +786,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
 			continue;
 
 		q->properties.is_active = true;
-		dqm->queue_count++;
+		dqm->active_queue_count++;
 	}
 	retval = execute_queues_cpsch(dqm,
 				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
@@ -899,7 +899,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
 
 	mutex_init(&dqm->lock_hidden);
 	INIT_LIST_HEAD(&dqm->queues);
-	dqm->queue_count = dqm->next_pipe_to_allocate = 0;
+	dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
 	dqm->sdma_queue_count = 0;
 	dqm->xgmi_sdma_queue_count = 0;
 
@@ -924,7 +924,7 @@ static void uninitialize(struct device_queue_manager *dqm)
 {
 	int i;
 
-	WARN_ON(dqm->queue_count > 0 || dqm->processes_count > 0);
+	WARN_ON(dqm->active_queue_count > 0 || dqm->processes_count > 0);
 
 	kfree(dqm->allocated_queues);
 	for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
@@ -1064,7 +1064,7 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
 
 	mutex_init(&dqm->lock_hidden);
 	INIT_LIST_HEAD(&dqm->queues);
-	dqm->queue_count = dqm->processes_count = 0;
+	dqm->active_queue_count = dqm->processes_count = 0;
 	dqm->sdma_queue_count = 0;
 	dqm->xgmi_sdma_queue_count = 0;
 	dqm->active_runlist = false;
@@ -1158,7 +1158,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
 			dqm->total_queue_count);
 
 	list_add(&kq->list, &qpd->priv_queue_list);
-	dqm->queue_count++;
+	dqm->active_queue_count++;
 	qpd->is_debug = true;
 	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
 	dqm_unlock(dqm);
@@ -1172,7 +1172,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
 {
 	dqm_lock(dqm);
 	list_del(&kq->list);
-	dqm->queue_count--;
+	dqm->active_queue_count--;
 	qpd->is_debug = false;
 	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
 	/*
@@ -1244,7 +1244,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 		dqm->xgmi_sdma_queue_count++;
 
 	if (q->properties.is_active) {
-		dqm->queue_count++;
+		dqm->active_queue_count++;
 		retval = execute_queues_cpsch(dqm,
 				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
 	}
@@ -1319,7 +1319,7 @@ static int map_queues_cpsch(struct device_queue_manager *dqm)
 
 	if (!dqm->sched_running)
 		return 0;
-	if (dqm->queue_count <= 0 || dqm->processes_count <= 0)
+	if (dqm->active_queue_count <= 0 || dqm->processes_count <= 0)
 		return 0;
 	if (dqm->active_runlist)
 		return 0;
@@ -1438,7 +1438,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
 	list_del(&q->list);
 	qpd->queue_count--;
 	if (q->properties.is_active) {
-		dqm->queue_count--;
+		dqm->active_queue_count--;
 		retval = execute_queues_cpsch(dqm,
 				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
 		if (retval == -ETIME)
@@ -1648,7 +1648,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
 	/* Clean all kernel queues */
 	list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
 		list_del(&kq->list);
-		dqm->queue_count--;
+		dqm->active_queue_count--;
 		qpd->is_debug = false;
 		dqm->total_queue_count--;
 		filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
@@ -1665,7 +1665,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
 		}
 
 		if (q->properties.is_active)
-			dqm->queue_count--;
+			dqm->active_queue_count--;
 
 		dqm->total_queue_count--;
 	}