msm: eva: fix deadlock when acquiring core lock

Fix a deadlock on the core lock in the message handling path by
removing DCVS support for FD.

Change-Id: Iace13914014b4305553729ac305c141433b79a8f
Signed-off-by: George Shen <quic_sqiao@quicinc.com>
George Shen
2023-06-07 22:13:12 -07:00
parent 887ec8b2a2
commit efc1db7f27
6 changed files with 44 additions and 303 deletions
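In plain terms: msm_cvp_update_power() used to call adjust_bw_freqs(), which looked up the "cvp-ddr" bus and programmed clocks and bandwidth while core->clk_lock was held. After this change, the bus lookup and clock-table validation happen before the lock is taken, adjust_bw_freqs() only computes the new frequency and bandwidth under the lock, and the hardware is programmed after unlock. A condensed sketch of the resulting pattern (paraphrased from the diff below; declarations and error prints elided):

	mutex_lock(&core->clk_lock);
	rc = adjust_bw_freqs(max_bw, min_bw);	/* compute core->curr_freq, core->bw_sum only */
	mutex_unlock(&core->clk_lock);
	if (rc)
		goto adjust_exit;

	rc = msm_cvp_set_clocks(core);		/* potentially blocking; now outside clk_lock */
	if (!rc)
		rc = msm_cvp_set_bw(bus, core->bw_sum);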

Show file

@@ -112,16 +112,6 @@ static int read_platform_resources(struct msm_cvp_core *core,
 	return rc;
 }
 
-static void init_cycle_info(struct cvp_cycle_info *info)
-{
-	memset(info->sum_fps, 0, HFI_MAX_HW_THREADS*sizeof(u32));
-	memset(info->hi_ctrl_lim, 0, HFI_MAX_HW_THREADS*sizeof(u32));
-	memset(info->lo_ctrl_lim, 0, HFI_MAX_HW_THREADS*sizeof(u32));
-	memset(info->cycle, 0,
-		HFI_MAX_HW_THREADS*sizeof(struct cvp_cycle_stat));
-	info->conf_freq = 0;
-}
-
 static int msm_cvp_initialize_core(struct platform_device *pdev,
 	struct msm_cvp_core *core)
 {
@@ -147,7 +137,6 @@ static int msm_cvp_initialize_core(struct platform_device *pdev,
 	}
 
 	INIT_WORK(&core->ssr_work, msm_cvp_ssr_handler);
-	init_cycle_info(&core->dyn_clk);
 	core->ssr_count = 0;
 
 	return rc;

Show file

@@ -46,9 +46,6 @@ static void aggregate_power_update(struct msm_cvp_core *core,
 	unsigned long op_blocks_max[2][HFI_MAX_HW_THREADS] = {0};
 	unsigned long op_fw_max[2] = {0}, bw_sum[2] = {0}, op_bw_max[2] = {0};
 
-	for (j = 0; j < HFI_MAX_HW_THREADS; j++)
-		core->dyn_clk.sum_fps[j] = 0;
-
 	list_for_each_entry(inst, &core->instances, list) {
 		if (inst->state == MSM_CVP_CORE_INVALID ||
 			inst->state == MSM_CVP_CORE_UNINIT ||
@@ -97,13 +94,8 @@ static void aggregate_power_update(struct msm_cvp_core *core,
 			if (inst->prop.fps[j])
 				dprintk(CVP_PWR, "fps %s %d ", hw_names[j],
 					inst->prop.fps[j]);
-			core->dyn_clk.sum_fps[j] += inst->prop.fps[j];
 		}
 
-		for (j = 0; j < HFI_MAX_HW_THREADS; j++)
-			if (core->dyn_clk.sum_fps[j])
-				dprintk(CVP_PWR, "sum_fps %s %d ", hw_names[j],
-					core->dyn_clk.sum_fps[j]);
 	}
 
 	for (i = 0; i < 2; i++) {
@@ -140,43 +132,25 @@ static void aggregate_power_update(struct msm_cvp_core *core,
  *
  * Ensure caller acquires clk_lock!
  */
-static int adjust_bw_freqs(void)
+static int adjust_bw_freqs(unsigned int max_bw, unsigned int min_bw)
 {
 	struct msm_cvp_core *core;
 	struct iris_hfi_device *hdev;
-	struct bus_info *bus = NULL;
-	struct clock_set *clocks;
-	struct clock_info *cl;
 	struct allowed_clock_rates_table *tbl = NULL;
 	unsigned int tbl_size;
-	unsigned int cvp_min_rate, cvp_max_rate, max_bw = 0, min_bw = 0;
+	unsigned int cvp_min_rate, cvp_max_rate;
 	struct cvp_power_level rt_pwr = {0}, nrt_pwr = {0};
 	unsigned long tmp, core_sum, op_core_sum, bw_sum;
-	int i, rc = 0, bus_count = 0;
-	unsigned long ctrl_freq;
+	int i;
 
 	core = cvp_driver->cvp_core;
 	hdev = core->device->hfi_device_data;
-	clocks = &core->resources.clock_set;
-	cl = &clocks->clock_tbl[clocks->count - 1];
 	tbl = core->resources.allowed_clks_tbl;
 	tbl_size = core->resources.allowed_clks_tbl_size;
 	cvp_min_rate = tbl[0].clock_rate;
 	cvp_max_rate = tbl[tbl_size - 1].clock_rate;
-
-	for (bus_count = 0; bus_count < core->resources.bus_set.count; bus_count++) {
-		if (!strcmp(core->resources.bus_set.bus_tbl[bus_count].name, "cvp-ddr")) {
-			bus = &core->resources.bus_set.bus_tbl[bus_count];
-			max_bw = bus->range[1];
-			min_bw = max_bw/10;
-		}
-	}
-	if (!bus) {
-		dprintk(CVP_ERR, "bus node is NULL for cvp-ddr\n");
-		return -EINVAL;
-	}
 
 	aggregate_power_update(core, &nrt_pwr, &rt_pwr, cvp_max_rate);
 	dprintk(CVP_PWR, "PwrUpdate nrt %u %u rt %u %u\n",
 		nrt_pwr.core_sum, nrt_pwr.op_core_sum,
@@ -213,36 +187,15 @@ static int adjust_bw_freqs(void)
 	dprintk(CVP_PWR, "%s %lld %lld\n", __func__,
 		core_sum, bw_sum);
 
-	if (!cl->has_scaling) {
-		dprintk(CVP_ERR, "Cannot scale CVP clock\n");
-		return -EINVAL;
-	}
-
 	tmp = core->curr_freq;
 	core->curr_freq = core_sum;
-	core->orig_core_sum = core_sum;
-	rc = msm_cvp_set_clocks(core);
-	if (rc) {
-		dprintk(CVP_ERR,
-			"Failed to set clock rate %u %s: %d %s\n",
-			core_sum, cl->name, rc, __func__);
-		core->curr_freq = tmp;
-		return rc;
-	}
-
-	ctrl_freq = (core->curr_freq*3)>>1;
-	core->dyn_clk.conf_freq = core->curr_freq;
-	for (i = 0; i < HFI_MAX_HW_THREADS; ++i) {
-		core->dyn_clk.hi_ctrl_lim[i] = core->dyn_clk.sum_fps[i] ?
-			ctrl_freq/core->dyn_clk.sum_fps[i] : 0;
-		core->dyn_clk.lo_ctrl_lim[i] =
-			core->dyn_clk.hi_ctrl_lim[i];
-	}
+	core->orig_core_sum = tmp;
 
 	hdev->clk_freq = core->curr_freq;
-	rc = msm_cvp_set_bw(bus, bw_sum);
+	core->bw_sum = bw_sum;
 
-	return rc;
+	return 0;
 }
 
 int msm_cvp_update_power(struct msm_cvp_inst *inst)
@@ -250,6 +203,11 @@ int msm_cvp_update_power(struct msm_cvp_inst *inst)
 	int rc = 0;
 	struct msm_cvp_core *core;
 	struct msm_cvp_inst *s;
+	struct bus_info *bus = NULL;
+	struct clock_set *clocks;
+	struct clock_info *cl;
+	int bus_count = 0;
+	unsigned int max_bw = 0, min_bw = 0;
 
 	if (!inst) {
 		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
@@ -261,179 +219,43 @@ int msm_cvp_update_power(struct msm_cvp_inst *inst)
 		return -ECONNRESET;
 
 	core = inst->core;
+	clocks = &core->resources.clock_set;
+	cl = &clocks->clock_tbl[clocks->count - 1];
+	if (!cl->has_scaling) {
+		dprintk(CVP_ERR, "Cannot scale CVP clock\n");
+		rc = -EINVAL;
+		goto adjust_exit;
+	}
+
+	for (bus_count = 0; bus_count < core->resources.bus_set.count; bus_count++) {
+		if (!strcmp(core->resources.bus_set.bus_tbl[bus_count].name, "cvp-ddr")) {
+			bus = &core->resources.bus_set.bus_tbl[bus_count];
+			max_bw = bus->range[1];
+			min_bw = max_bw/10;
+		}
+	}
+	if (!bus) {
+		dprintk(CVP_ERR, "bus node is NULL for cvp-ddr\n");
+		rc = -EINVAL;
+		goto adjust_exit;
+	}
 
 	mutex_lock(&core->clk_lock);
-	rc = adjust_bw_freqs();
+	rc = adjust_bw_freqs(max_bw, min_bw);
 	mutex_unlock(&core->clk_lock);
-	cvp_put_inst(s);
+	if (rc)
+		goto adjust_exit;
 
-	return rc;
-}
-
-static int cvp_readjust_clock(struct msm_cvp_core *core,
-	u32 avg_cycles, enum hfi_hw_thread i)
-{
-	int rc = 0;
-	struct allowed_clock_rates_table *tbl = NULL;
-	unsigned int tbl_size = 0;
-	unsigned int cvp_min_rate = 0, cvp_max_rate = 0;
-	unsigned long tmp = core->curr_freq;
-	unsigned long lo_freq = 0;
-	u32 j;
-
-	tbl = core->resources.allowed_clks_tbl;
-	tbl_size = core->resources.allowed_clks_tbl_size;
-	cvp_min_rate = tbl[0].clock_rate;
-	cvp_max_rate = tbl[tbl_size - 1].clock_rate;
-
-	if (!((avg_cycles > core->dyn_clk.hi_ctrl_lim[i] &&
-		core->curr_freq != cvp_max_rate) ||
-		(avg_cycles <= core->dyn_clk.lo_ctrl_lim[i] &&
-		core->curr_freq != cvp_min_rate))) {
-		return rc;
-	}
-
-	core->curr_freq = ((avg_cycles * core->dyn_clk.sum_fps[i]) << 1)/3;
-	dprintk(CVP_PWR,
-		"%s - cycles tot %u, avg %u. sum_fps %u, cur_freq %u\n",
-		__func__,
-		core->dyn_clk.cycle[i].total,
-		avg_cycles,
-		core->dyn_clk.sum_fps[i],
-		core->curr_freq);
-
-	if (core->curr_freq > cvp_max_rate) {
-		core->curr_freq = cvp_max_rate;
-		lo_freq = (tbl_size > 1) ?
-			tbl[tbl_size - 2].clock_rate :
-			cvp_min_rate;
-	} else if (core->curr_freq <= cvp_min_rate) {
-		core->curr_freq = cvp_min_rate;
-		lo_freq = cvp_min_rate;
-	} else {
-		for (j = 1; j < tbl_size; j++)
-			if (core->curr_freq <= tbl[j].clock_rate)
-				break;
-		core->curr_freq = tbl[j].clock_rate;
-		lo_freq = tbl[j-1].clock_rate;
-	}
-
-	if (core->orig_core_sum > core->curr_freq) {
-		dprintk(CVP_PWR,
-			"%s - %d - Cancel readjust, core %u, freq %u\n",
-			__func__, i, core->orig_core_sum, core->curr_freq);
-		core->curr_freq = tmp;
-		return rc;
-	}
-
-	dprintk(CVP_PWR,
-		"%s:%d - %d - Readjust to %u\n",
-		__func__, __LINE__, i, core->curr_freq);
 	rc = msm_cvp_set_clocks(core);
 	if (rc) {
 		dprintk(CVP_ERR,
-			"Failed to set clock rate %u: %d %s\n",
-			core->curr_freq, rc, __func__);
-		core->curr_freq = tmp;
-	} else {
-		lo_freq = (lo_freq < core->dyn_clk.conf_freq) ?
-			core->dyn_clk.conf_freq : lo_freq;
-		core->dyn_clk.hi_ctrl_lim[i] = core->dyn_clk.sum_fps[i] ?
-			((core->curr_freq*3)>>1)/core->dyn_clk.sum_fps[i] : 0;
-		core->dyn_clk.lo_ctrl_lim[i] =
-			core->dyn_clk.sum_fps[i] ?
-			((lo_freq*3)>>1)/core->dyn_clk.sum_fps[i] : 0;
-
-		dprintk(CVP_PWR,
-			"%s - Readjust clk to %u. New lim [%d] hi %u lo %u\n",
-			__func__, core->curr_freq, i,
-			core->dyn_clk.hi_ctrl_lim[i],
-			core->dyn_clk.lo_ctrl_lim[i]);
-	}
+			"Failed to set clock rate %u %s: %d %s\n",
+			core->curr_freq, cl->name, rc, __func__);
+		core->curr_freq = core->orig_core_sum;
+		goto adjust_exit;
+	}
+	rc = msm_cvp_set_bw(bus, core->bw_sum);
 
-	return rc;
-}
-
-int cvp_check_clock(struct msm_cvp_inst *inst,
-	struct cvp_hfi_msg_session_hdr_ext *hdr)
-{
-	int rc = 0;
-	u32 i, j;
-	u32 hw_cycles[HFI_MAX_HW_THREADS] = {0};
-	u32 fw_cycles = 0;
-	struct msm_cvp_core *core = inst->core;
-
-	for (i = 0; i < HFI_MAX_HW_ACTIVATIONS_PER_FRAME; ++i)
-		fw_cycles += hdr->fw_cycles[i];
-
-	for (i = 0; i < HFI_MAX_HW_THREADS; ++i)
-		for (j = 0; j < HFI_MAX_HW_ACTIVATIONS_PER_FRAME; ++j)
-			hw_cycles[i] += hdr->hw_cycles[i][j];
-
-	dprintk(CVP_PWR, "%s - cycles fw %u. FDU %d MPU %d ODU %d ICA %d\n",
-		__func__, fw_cycles, hw_cycles[0],
-		hw_cycles[1], hw_cycles[2], hw_cycles[3]);
-
-	mutex_lock(&core->clk_lock);
-	for (i = 0; i < HFI_MAX_HW_THREADS; ++i) {
-		dprintk(CVP_PWR, "%s - %d: hw_cycles %u, tens_thresh %u\n",
-			__func__, i, hw_cycles[i],
-			core->dyn_clk.hi_ctrl_lim[i]);
-		if (core->dyn_clk.hi_ctrl_lim[i]) {
-			if (core->dyn_clk.cycle[i].size < CVP_CYCLE_STAT_SIZE)
-				core->dyn_clk.cycle[i].size++;
-			else
-				core->dyn_clk.cycle[i].total -=
-					core->dyn_clk.cycle[i].busy[
-						core->dyn_clk.cycle[i].idx];
-			if (hw_cycles[i]) {
-				core->dyn_clk.cycle[i].busy[
-					core->dyn_clk.cycle[i].idx]
-					= hw_cycles[i] + fw_cycles;
-				core->dyn_clk.cycle[i].total
-					+= hw_cycles[i] + fw_cycles;
-				dprintk(CVP_PWR,
-					"%s: busy (hw + fw) cycles = %u\n",
-					__func__,
-					core->dyn_clk.cycle[i].busy[
-						core->dyn_clk.cycle[i].idx]);
-				dprintk(CVP_PWR, "total cycles %u\n",
-					core->dyn_clk.cycle[i].total);
-			} else {
-				core->dyn_clk.cycle[i].busy[
-					core->dyn_clk.cycle[i].idx] =
-					hdr->busy_cycles;
-				core->dyn_clk.cycle[i].total +=
-					hdr->busy_cycles;
-				dprintk(CVP_PWR,
-					"%s - busy cycles = %u total %u\n",
-					__func__,
-					core->dyn_clk.cycle[i].busy[
-						core->dyn_clk.cycle[i].idx],
-					core->dyn_clk.cycle[i].total);
-			}
-			core->dyn_clk.cycle[i].idx =
-				(core->dyn_clk.cycle[i].idx ==
-					CVP_CYCLE_STAT_SIZE-1) ?
-				0 : core->dyn_clk.cycle[i].idx+1;
-
-			dprintk(CVP_PWR, "%s - %d: size %u, tens_thresh %u\n",
-				__func__, i, core->dyn_clk.cycle[i].size,
-				core->dyn_clk.hi_ctrl_lim[i]);
-			if (core->dyn_clk.cycle[i].size == CVP_CYCLE_STAT_SIZE
-				&& core->dyn_clk.hi_ctrl_lim[i] != 0) {
-				u32 avg_cycles =
-					core->dyn_clk.cycle[i].total>>3;
-
-				rc = cvp_readjust_clock(core,
-					avg_cycles,
-					i);
-			}
-		}
-	}
-	mutex_unlock(&core->clk_lock);
+adjust_exit:
+	cvp_put_inst(s);
 
 	return rc;
 }
@@ -514,36 +336,3 @@ unsigned int msm_cvp_get_hw_aggregate_cycles(enum hfi_hw_thread hwblk)
 	return (unsigned int)cycles_sum;
 }
-
-bool check_clock_required(struct msm_cvp_inst *inst,
-	struct eva_kmd_hfi_packet *hdr)
-{
-	struct cvp_hfi_msg_session_hdr_ext *ehdr =
-		(struct cvp_hfi_msg_session_hdr_ext *)hdr;
-	bool clock_check = false;
-
-	if (!msm_cvp_dcvs_disable &&
-		ehdr->packet_type == HFI_MSG_SESSION_CVP_FD) {
-		if (ehdr->size == sizeof(struct cvp_hfi_msg_session_hdr_ext)
-			+ sizeof(struct cvp_hfi_buf_type)) {
-			struct msm_cvp_core *core = inst->core;
-
-			dprintk(CVP_PWR, "busy cycle %d, total %d\n",
-				ehdr->busy_cycles, ehdr->total_cycles);
-
-			if (core->dyn_clk.sum_fps[HFI_HW_FDU] ||
-				core->dyn_clk.sum_fps[HFI_HW_MPU] ||
-				core->dyn_clk.sum_fps[HFI_HW_OD] ||
-				core->dyn_clk.sum_fps[HFI_HW_ICA]) {
-				clock_check = true;
-			}
-		} else {
-			dprintk(CVP_WARN, "dcvs is disabled, %d != %d + %d\n",
-				ehdr->size, sizeof(struct cvp_hfi_msg_session_hdr_ext),
-				sizeof(struct cvp_hfi_buf_type));
-		}
-	}
-	return clock_check;
-}
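Note the signature change above: adjust_bw_freqs() no longer walks the bus table itself. The caller resolves the "cvp-ddr" bus and passes its bandwidth bounds in, so the table walk and validation happen before clk_lock is taken. Caller-side pattern, condensed from the msm_cvp_update_power() hunk:

	/* resolve DDR bandwidth bounds before taking clk_lock */
	for (bus_count = 0; bus_count < core->resources.bus_set.count; bus_count++) {
		if (!strcmp(core->resources.bus_set.bus_tbl[bus_count].name, "cvp-ddr")) {
			bus = &core->resources.bus_set.bus_tbl[bus_count];
			max_bw = bus->range[1];	/* upper bound from the bus node */
			min_bw = max_bw / 10;	/* floor at 10% of max */
		}
	}

	mutex_lock(&core->clk_lock);
	rc = adjust_bw_freqs(max_bw, min_bw);
	mutex_unlock(&core->clk_lock);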

Show file

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only
  *
- * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef _CVP_POWER_H_
@@ -20,8 +20,4 @@ struct cvp_power_level {
 
 int msm_cvp_update_power(struct msm_cvp_inst *inst);
 unsigned int msm_cvp_get_hw_aggregate_cycles(enum hfi_hw_thread hwblk);
-int cvp_check_clock(struct msm_cvp_inst *inst,
-	struct cvp_hfi_msg_session_hdr_ext *hdr);
-bool check_clock_required(struct msm_cvp_inst *inst,
-	struct eva_kmd_hfi_packet *hdr);
 #endif

Show file

@@ -138,7 +138,6 @@ static int msm_cvp_session_receive_hfi(struct msm_cvp_inst *inst,
 	struct cvp_session_queue *sq;
 	struct msm_cvp_inst *s;
 	int rc = 0;
-	bool clock_check = false;
 
 	if (!inst) {
 		dprintk(CVP_ERR, "%s invalid session\n", __func__);
@@ -155,11 +154,6 @@ static int msm_cvp_session_receive_hfi(struct msm_cvp_inst *inst,
 	rc = cvp_wait_process_message(inst, sq, NULL, wait_time, out_pkt);
 
-	clock_check = check_clock_required(inst, out_pkt);
-	if (clock_check)
-		cvp_check_clock(inst,
-			(struct cvp_hfi_msg_session_hdr_ext *)out_pkt);
-
 	cvp_put_inst(inst);
 	return rc;
 }
@@ -311,7 +305,6 @@ static int cvp_fence_proc(struct msm_cvp_inst *inst,
 	struct cvp_session_queue *sq;
 	u32 hfi_err = HFI_ERR_NONE;
 	struct cvp_hfi_msg_session_hdr_ext hdr;
-	bool clock_check = false;
 
 	dprintk(CVP_SYNX, "%s %s\n", current->comm, __func__);
@@ -343,10 +336,6 @@ static int cvp_fence_proc(struct msm_cvp_inst *inst,
 	rc = cvp_wait_process_message(inst, sq, &ktid, timeout,
 		(struct eva_kmd_hfi_packet *)&hdr);
 
-	/* Only FD support dcvs at certain FW */
-	clock_check = check_clock_required(inst,
-		(struct eva_kmd_hfi_packet *)&hdr);
 	hfi_err = hdr.error_type;
 	if (rc) {
 		dprintk(CVP_ERR, "%s %s: cvp_wait_process_message rc %d\n",
@@ -371,9 +360,6 @@ static int cvp_fence_proc(struct msm_cvp_inst *inst,
 exit:
 	rc = inst->core->synx_ftbl->cvp_synx_ops(inst, CVP_OUTPUT_SYNX,
 		fc, &synx_state);
-	if (clock_check)
-		cvp_check_clock(inst,
-			(struct cvp_hfi_msg_session_hdr_ext *)&hdr);
 	return rc;
 }
@@ -1421,7 +1407,7 @@ int cvp_clean_session_queues(struct msm_cvp_inst *inst)
 	q = &inst->fence_cmd_queue;
 	mutex_lock(&q->lock);
-	if (q->state == QUEUE_START) {
+	if (q->state == QUEUE_START || q->state == QUEUE_ACTIVE) {
 		mutex_unlock(&q->lock);
 		cvp_clean_fence_queue(inst, SYNX_STATE_SIGNALED_CANCEL);
 	} else {
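For reference, the DCVS hook deleted from the message-handling path looked like this (condensed from the hunks above). cvp_check_clock() acquired core->clk_lock, which, per the commit message, could deadlock against acquisition of the core lock on this path; with DCVS for FD removed, no lock is taken during message handling:

	rc = cvp_wait_process_message(inst, sq, NULL, wait_time, out_pkt);

	/* removed: FD DCVS check ran here and took core->clk_lock */
	clock_check = check_clock_required(inst, out_pkt);
	if (clock_check)
		cvp_check_clock(inst,
			(struct cvp_hfi_msg_session_hdr_ext *)out_pkt);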

Show file

@@ -1107,7 +1107,7 @@ static int eva_fastrpc_dev_unmap_dma(struct fastrpc_device *frpc_device,
 	rc = __fastrpc_driver_invoke(frpc_device, FASTRPC_DEV_UNMAP_DMA,
 		(unsigned long)(&frpc_unmap_buf));
 	if (rc) {
-		dprintk(CVP_ERR, "%s Failed to unmap buffer 0x%x\n",
+		dprintk_rl(CVP_ERR, "%s Failed to unmap buffer %d\n",
 			__func__, rc);
 		return rc;
 	}
@@ -2015,9 +2015,6 @@ static void __dsp_cvp_mem_free(struct cvp_dsp_cmd_msg *cmd)
 
 	rc = eva_fastrpc_dev_unmap_dma(frpc_device, buf);
 	if (rc) {
-		dprintk_rl(CVP_ERR,
-			"%s Failed to unmap buffer 0x%x\n",
-			__func__, rc);
 		cmd->ret = -1;
 		goto fail_fastrpc_dev_unmap_dma;
 	}
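Two logging fixes ride along in this file: the print in eva_fastrpc_dev_unmap_dma() switches to the driver's rate-limited dprintk_rl() so a repeatedly failing unmap cannot flood the log, its format changes from 0x%x to %d (the appropriate conversion for a negative errno-style rc), and the now-redundant print in the __dsp_cvp_mem_free() caller is dropped since the callee already logs the failure. Resulting call site, from the hunk above:

	/* rc is a negative errno on failure; dprintk_rl() rate-limits repeats */
	dprintk_rl(CVP_ERR, "%s Failed to unmap buffer %d\n", __func__, rc);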

Show file

@@ -212,22 +212,6 @@ struct cvp_session_queue {
 	wait_queue_head_t wq;
 };
 
-#define CVP_CYCLE_STAT_SIZE	8
-struct cvp_cycle_stat {
-	u32 busy[CVP_CYCLE_STAT_SIZE];
-	u32 total;
-	u32 idx;
-	u32 size;
-};
-
-struct cvp_cycle_info {
-	u32 sum_fps[HFI_MAX_HW_THREADS];
-	u32 hi_ctrl_lim[HFI_MAX_HW_THREADS];
-	u32 lo_ctrl_lim[HFI_MAX_HW_THREADS];
-	struct cvp_cycle_stat cycle[HFI_MAX_HW_THREADS];
-	unsigned long conf_freq;
-};
-
 struct cvp_session_prop {
 	u32 type;
 	u32 kernel_mask;
@@ -356,7 +340,7 @@ struct msm_cvp_core {
 	bool trigger_ssr;
 	unsigned long curr_freq;
 	unsigned long orig_core_sum;
-	struct cvp_cycle_info dyn_clk;
+	unsigned long bw_sum;
 	atomic64_t kernel_trans_id;
 	struct cvp_debug_log log;
 };