diff --git a/drivers/cam_ope/cam_ope_context.c b/drivers/cam_ope/cam_ope_context.c
index 83dd07be9d..0ff45243ed 100644
--- a/drivers/cam_ope/cam_ope_context.c
+++ b/drivers/cam_ope/cam_ope_context.c
@@ -40,7 +40,7 @@ static int cam_ope_context_dump_active_request(void *data, unsigned long iova,
 	mutex_lock(&ctx->ctx_mutex);
 	if (ctx->state < CAM_CTX_ACQUIRED || ctx->state > CAM_CTX_ACTIVATED) {
-		CAM_ERR(CAM_ICP, "Invalid state icp ctx %d state %d",
+		CAM_ERR(CAM_OPE, "Invalid state ope ctx %d state %d",
 			ctx->ctx_id, ctx->state);
 		goto end;
 	}
diff --git a/drivers/cam_ope/ope_hw_mgr/cam_ope_hw_mgr.c b/drivers/cam_ope/ope_hw_mgr/cam_ope_hw_mgr.c
index bb73190b14..dfa457adf3 100644
--- a/drivers/cam_ope/ope_hw_mgr/cam_ope_hw_mgr.c
+++ b/drivers/cam_ope/ope_hw_mgr/cam_ope_hw_mgr.c
@@ -125,11 +125,30 @@ static int cam_ope_mgr_reset_hw(void)
 	return rc;
 }
 
+static void cam_ope_device_timer_stop(struct cam_ope_hw_mgr *hw_mgr)
+{
+	if (hw_mgr->clk_info.watch_dog) {
+		hw_mgr->clk_info.watch_dog_reset_counter = 0;
+		crm_timer_exit(&hw_mgr->clk_info.watch_dog);
+		hw_mgr->clk_info.watch_dog = NULL;
+	}
+}
+
+static void cam_ope_device_timer_reset(struct cam_ope_hw_mgr *hw_mgr)
+{
+	if (hw_mgr->clk_info.watch_dog) {
+		CAM_DBG(CAM_OPE, "reset timer");
+		crm_timer_reset(hw_mgr->clk_info.watch_dog);
+		hw_mgr->clk_info.watch_dog_reset_counter++;
+	}
+}
+
 static int cam_ope_req_timer_modify(struct cam_ope_ctx *ctx_data,
 	int32_t expires)
 {
 	if (ctx_data->req_watch_dog) {
-		CAM_DBG(CAM_ICP, "stop timer : ctx_id = %d", ctx_data->ctx_id);
+		CAM_DBG(CAM_OPE, "stop timer : ctx_id = %d", ctx_data->ctx_id);
 		crm_timer_modify(ctx_data->req_watch_dog, expires);
 	}
 	return 0;
@@ -138,7 +157,8 @@ static int cam_ope_req_timer_modify(struct cam_ope_ctx *ctx_data,
 static int cam_ope_req_timer_stop(struct cam_ope_ctx *ctx_data)
 {
 	if (ctx_data->req_watch_dog) {
-		CAM_DBG(CAM_ICP, "stop timer : ctx_id = %d", ctx_data->ctx_id);
+		CAM_DBG(CAM_OPE, "stop timer : ctx_id = %d", ctx_data->ctx_id);
+		ctx_data->req_watch_dog_reset_counter = 0;
 		crm_timer_exit(&ctx_data->req_watch_dog);
 		ctx_data->req_watch_dog = NULL;
 	}
@@ -147,8 +167,13 @@ static int cam_ope_req_timer_stop(struct cam_ope_ctx *ctx_data)
 
 static int cam_ope_req_timer_reset(struct cam_ope_ctx *ctx_data)
 {
-	if (ctx_data && ctx_data->req_watch_dog)
+	if (ctx_data && ctx_data->req_watch_dog) {
+		ctx_data->req_watch_dog_reset_counter++;
+		CAM_DBG(CAM_OPE, "reset timer : ctx_id = %d, counter=%d",
+			ctx_data->ctx_id,
+			ctx_data->req_watch_dog_reset_counter);
 		crm_timer_reset(ctx_data->req_watch_dog);
+	}
 
 	return 0;
 }
@@ -192,14 +217,123 @@ static int32_t cam_ope_process_request_timer(void *priv, void *data)
 {
 	struct ope_clk_work_data *task_data = (struct ope_clk_work_data *)data;
 	struct cam_ope_ctx *ctx_data = (struct cam_ope_ctx *)task_data->data;
+	struct cam_ope_hw_mgr *hw_mgr = ope_hw_mgr;
+	uint32_t id;
+	struct cam_hw_intf *dev_intf = NULL;
+	struct cam_ope_clk_info *clk_info;
+	struct cam_ope_dev_bw_update clk_update;
+	int i = 0;
+	int device_share_ratio = 1;
+	int path_index;
+
+	if (!ctx_data) {
+		CAM_ERR(CAM_OPE, "ctx_data is NULL, failed to update clk");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx_data->ctx_mutex);
+	if ((ctx_data->ctx_state != OPE_CTX_STATE_ACQUIRED) ||
+		(ctx_data->req_watch_dog_reset_counter == 0)) {
+		CAM_DBG(CAM_OPE, "state %d counter = %d", ctx_data->ctx_state,
+			ctx_data->req_watch_dog_reset_counter);
+		mutex_unlock(&ctx_data->ctx_mutex);
+		return 0;
+	}
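+	/*
+	 * If requests are still pending, assume the hardware is stuck:
+	 * trigger CDM error handling and re-arm the request timer.
+	 * Otherwise the context is idle, so park the timer and drop this
+	 * context's bandwidth vote below.
+	 */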
with HW"); cam_cdm_handle_error(ctx_data->ope_cdm.cdm_handle); cam_ope_req_timer_reset(ctx_data); - } else { - cam_ope_req_timer_modify(ctx_data, ~0); + mutex_unlock(&ctx_data->ctx_mutex); + return 0; } + + cam_ope_req_timer_modify(ctx_data, ~0); + /* Remove context BW */ + dev_intf = hw_mgr->ope_dev_intf[0]; + if (!dev_intf) { + CAM_ERR(CAM_OPE, "OPE dev intf is NULL"); + mutex_unlock(&ctx_data->ctx_mutex); + return -EINVAL; + } + + clk_info = &hw_mgr->clk_info; + id = OPE_HW_BW_UPDATE; + device_share_ratio = hw_mgr->num_ope; + + clk_update.ahb_vote.type = CAM_VOTE_DYNAMIC; + clk_update.ahb_vote.vote.freq = 0; + clk_update.ahb_vote_valid = false; + + /* + * Remove previous vote of this context from hw mgr first. + * hw_mgr_clk_info has all valid paths, with each path in its + * own index. BW that we wanted to vote now is after removing + * current context's vote from hw mgr consolidated vote + */ + for (i = 0; i < ctx_data->clk_info.num_paths; i++) { + path_index = ctx_data->clk_info.axi_path[i] + .path_data_type - + CAM_AXI_PATH_DATA_OPE_START_OFFSET; + + if (path_index >= CAM_OPE_MAX_PER_PATH_VOTES) { + CAM_WARN(CAM_OPE, + "Invalid path %d, start offset=%d, max=%d", + ctx_data->clk_info.axi_path[i] + .path_data_type, + CAM_AXI_PATH_DATA_OPE_START_OFFSET, + CAM_OPE_MAX_PER_PATH_VOTES); + continue; + } + + clk_info->axi_path[path_index].camnoc_bw -= + ctx_data->clk_info.axi_path[i].camnoc_bw; + clk_info->axi_path[path_index].mnoc_ab_bw -= + ctx_data->clk_info.axi_path[i].mnoc_ab_bw; + clk_info->axi_path[path_index].mnoc_ib_bw -= + ctx_data->clk_info.axi_path[i].mnoc_ib_bw; + clk_info->axi_path[path_index].ddr_ab_bw -= + ctx_data->clk_info.axi_path[i].ddr_ab_bw; + clk_info->axi_path[path_index].ddr_ib_bw -= + ctx_data->clk_info.axi_path[i].ddr_ib_bw; + } + + memset(&ctx_data->clk_info.axi_path[0], 0, + CAM_OPE_MAX_PER_PATH_VOTES * + sizeof(struct cam_axi_per_path_bw_vote)); + ctx_data->clk_info.curr_fc = 0; + ctx_data->clk_info.base_clk = 0; + + clk_update.axi_vote.num_paths = clk_info->num_paths; + memcpy(&clk_update.axi_vote.axi_path[0], + &clk_info->axi_path[0], + clk_update.axi_vote.num_paths * + sizeof(struct cam_axi_per_path_bw_vote)); + + if (device_share_ratio > 1) { + for (i = 0; i < clk_update.axi_vote.num_paths; i++) { + clk_update.axi_vote.axi_path[i].camnoc_bw /= + device_share_ratio; + clk_update.axi_vote.axi_path[i].mnoc_ab_bw /= + device_share_ratio; + clk_update.axi_vote.axi_path[i].mnoc_ib_bw /= + device_share_ratio; + clk_update.axi_vote.axi_path[i].ddr_ab_bw /= + device_share_ratio; + clk_update.axi_vote.axi_path[i].ddr_ib_bw /= + device_share_ratio; + } + } + + clk_update.axi_vote_valid = true; + dev_intf->hw_ops.process_cmd(dev_intf->hw_priv, id, + &clk_update, sizeof(clk_update)); + + CAM_DBG(CAM_OPE, "X :ctx_id = %d curr_fc = %u bc = %u", + ctx_data->ctx_id, ctx_data->clk_info.curr_fc, + ctx_data->clk_info.base_clk); + mutex_unlock(&ctx_data->ctx_mutex); + return 0; } @@ -235,7 +369,153 @@ static int cam_ope_start_req_timer(struct cam_ope_ctx *ctx_data) rc = crm_timer_init(&ctx_data->req_watch_dog, 200, ctx_data, &cam_ope_req_timer_cb); if (rc) - CAM_ERR(CAM_ICP, "Failed to start timer"); + CAM_ERR(CAM_OPE, "Failed to start timer"); + + ctx_data->req_watch_dog_reset_counter = 0; + + return rc; +} + +static int cam_ope_supported_clk_rates(struct cam_ope_hw_mgr *hw_mgr, + struct cam_ope_ctx *ctx_data) +{ + int i; + struct cam_hw_soc_info *soc_info; + struct cam_hw_intf *dev_intf = NULL; + struct cam_hw_info *dev = NULL; + + dev_intf = hw_mgr->ope_dev_intf[0]; + if 
+static int cam_ope_supported_clk_rates(struct cam_ope_hw_mgr *hw_mgr,
+	struct cam_ope_ctx *ctx_data)
+{
+	int i;
+	struct cam_hw_soc_info *soc_info;
+	struct cam_hw_intf *dev_intf = NULL;
+	struct cam_hw_info *dev = NULL;
+
+	dev_intf = hw_mgr->ope_dev_intf[0];
+	if (!dev_intf) {
+		CAM_ERR(CAM_OPE, "dev_intf is invalid");
+		return -EINVAL;
+	}
+
+	dev = (struct cam_hw_info *)dev_intf->hw_priv;
+	soc_info = &dev->soc_info;
+
+	for (i = 0; i < CAM_MAX_VOTE; i++) {
+		ctx_data->clk_info.clk_rate[i] =
+			soc_info->clk_rate[i][soc_info->src_clk_idx];
+		CAM_DBG(CAM_OPE, "clk_info[%d] = %d",
+			i, ctx_data->clk_info.clk_rate[i]);
+	}
+
+	return 0;
+}
+
+static int cam_ope_ctx_clk_info_init(struct cam_ope_ctx *ctx_data)
+{
+	int i;
+
+	ctx_data->clk_info.curr_fc = 0;
+	ctx_data->clk_info.base_clk = 0;
+	ctx_data->clk_info.uncompressed_bw = 0;
+	ctx_data->clk_info.compressed_bw = 0;
+
+	for (i = 0; i < CAM_OPE_MAX_PER_PATH_VOTES; i++) {
+		ctx_data->clk_info.axi_path[i].camnoc_bw = 0;
+		ctx_data->clk_info.axi_path[i].mnoc_ab_bw = 0;
+		ctx_data->clk_info.axi_path[i].mnoc_ib_bw = 0;
+	}
+
+	cam_ope_supported_clk_rates(ope_hw_mgr, ctx_data);
+
+	return 0;
+}
+
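+/*
+ * Idle worker scheduled by the device watchdog: if no context has a
+ * pending request, clear every acquired context's clock bookkeeping
+ * and ask the OPE core to gate its clocks; if any context is busy,
+ * re-arm the watchdog and bail out with -EBUSY.
+ */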
+static int32_t cam_ope_deinit_idle_clk(void *priv, void *data)
+{
+	struct cam_ope_hw_mgr *hw_mgr = (struct cam_ope_hw_mgr *)priv;
+	struct ope_clk_work_data *task_data = (struct ope_clk_work_data *)data;
+	struct cam_ope_clk_info *clk_info =
+		(struct cam_ope_clk_info *)task_data->data;
+	uint32_t id;
+	uint32_t i;
+	struct cam_ope_ctx *ctx_data;
+	struct cam_hw_intf *dev_intf = NULL;
+	int rc = 0;
+	bool busy = false;
+
+	clk_info->base_clk = 0;
+	clk_info->curr_clk = 0;
+	clk_info->over_clked = 0;
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+
+	for (i = 0; i < OPE_CTX_MAX; i++) {
+		ctx_data = &hw_mgr->ctx[i];
+		mutex_lock(&ctx_data->ctx_mutex);
+		if (ctx_data->ctx_state == OPE_CTX_STATE_ACQUIRED) {
+			busy = cam_ope_is_pending_request(ctx_data);
+			if (busy) {
+				mutex_unlock(&ctx_data->ctx_mutex);
+				break;
+			}
+			cam_ope_ctx_clk_info_init(ctx_data);
+		}
+		mutex_unlock(&ctx_data->ctx_mutex);
+	}
+
+	if (busy) {
+		cam_ope_device_timer_reset(hw_mgr);
+		rc = -EBUSY;
+		goto done;
+	}
+
+	dev_intf = hw_mgr->ope_dev_intf[0];
+	id = OPE_HW_CLK_DISABLE;
+
+	CAM_DBG(CAM_OPE, "Disable %d", clk_info->hw_type);
+
+	dev_intf->hw_ops.process_cmd(dev_intf->hw_priv, id, NULL, 0);
+
+done:
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	return rc;
+}
+
+static void cam_ope_device_timer_cb(struct timer_list *timer_data)
+{
+	unsigned long flags;
+	struct crm_workq_task *task;
+	struct ope_clk_work_data *task_data;
+	struct cam_req_mgr_timer *timer =
+		container_of(timer_data, struct cam_req_mgr_timer, sys_timer);
+
+	spin_lock_irqsave(&ope_hw_mgr->hw_mgr_lock, flags);
+	task = cam_req_mgr_workq_get_task(ope_hw_mgr->timer_work);
+	if (!task) {
+		CAM_ERR(CAM_OPE, "no empty task");
+		spin_unlock_irqrestore(&ope_hw_mgr->hw_mgr_lock, flags);
+		return;
+	}
+
+	task_data = (struct ope_clk_work_data *)task->payload;
+	task_data->data = timer->parent;
+	task_data->type = OPE_WORKQ_TASK_MSG_TYPE;
+	task->process_cb = cam_ope_deinit_idle_clk;
+	cam_req_mgr_workq_enqueue_task(task, ope_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	spin_unlock_irqrestore(&ope_hw_mgr->hw_mgr_lock, flags);
+}
+
+static int cam_ope_device_timer_start(struct cam_ope_hw_mgr *hw_mgr)
+{
+	int rc = 0;
+	int i;
+
+	for (i = 0; i < CLK_HW_MAX; i++) {
+		if (!hw_mgr->clk_info.watch_dog) {
+			rc = crm_timer_init(&hw_mgr->clk_info.watch_dog,
+				OPE_DEVICE_IDLE_TIMEOUT, &hw_mgr->clk_info,
+				&cam_ope_device_timer_cb);
+
+			if (rc)
+				CAM_ERR(CAM_OPE, "Failed to start timer %d", i);
+
+			hw_mgr->clk_info.watch_dog_reset_counter = 0;
+		}
+	}
 
 	return rc;
 }
@@ -245,7 +525,6 @@ static int cam_get_valid_ctx_id(void)
 	struct cam_ope_hw_mgr *hw_mgr = ope_hw_mgr;
 	int i;
 
-
 	for (i = 0; i < OPE_CTX_MAX; i++) {
 		if (hw_mgr->ctx[i].ctx_state == OPE_CTX_STATE_ACQUIRED)
 			break;
@@ -254,6 +533,551 @@ static int cam_get_valid_ctx_id(void)
 	return i;
 }
 
+static int cam_ope_get_actual_clk_rate_idx(
+	struct cam_ope_ctx *ctx_data, uint32_t base_clk)
+{
+	int i;
+
+	for (i = 0; i < CAM_MAX_VOTE; i++)
+		if (ctx_data->clk_info.clk_rate[i] >= base_clk)
+			return i;
+
+	/*
+	 * Caller has to ensure returned index is within array
+	 * size bounds while accessing that index.
+	 */
+
+	return i;
+}
+
+static bool cam_ope_is_over_clk(struct cam_ope_hw_mgr *hw_mgr,
+	struct cam_ope_ctx *ctx_data,
+	struct cam_ope_clk_info *hw_mgr_clk_info)
+{
+	int base_clk_idx;
+	int curr_clk_idx;
+
+	base_clk_idx = cam_ope_get_actual_clk_rate_idx(ctx_data,
+		hw_mgr_clk_info->base_clk);
+
+	curr_clk_idx = cam_ope_get_actual_clk_rate_idx(ctx_data,
+		hw_mgr_clk_info->curr_clk);
+
+	CAM_DBG(CAM_OPE, "bc_idx = %d cc_idx = %d %d %d",
+		base_clk_idx, curr_clk_idx, hw_mgr_clk_info->base_clk,
+		hw_mgr_clk_info->curr_clk);
+
+	if (curr_clk_idx > base_clk_idx)
+		return true;
+
+	return false;
+}
+
+static int cam_ope_get_lower_clk_rate(struct cam_ope_hw_mgr *hw_mgr,
+	struct cam_ope_ctx *ctx_data, uint32_t base_clk)
+{
+	int i;
+
+	i = cam_ope_get_actual_clk_rate_idx(ctx_data, base_clk);
+
+	if (i > 0)
+		return ctx_data->clk_info.clk_rate[i - 1];
+
+	CAM_DBG(CAM_OPE, "Already clk at lower level");
+
+	return base_clk;
+}
+
+static int cam_ope_get_next_clk_rate(struct cam_ope_hw_mgr *hw_mgr,
+	struct cam_ope_ctx *ctx_data, uint32_t base_clk)
+{
+	int i;
+
+	i = cam_ope_get_actual_clk_rate_idx(ctx_data, base_clk);
+
+	if (i < CAM_MAX_VOTE - 1)
+		return ctx_data->clk_info.clk_rate[i + 1];
+
+	CAM_DBG(CAM_OPE, "Already clk at higher level");
+
+	return base_clk;
+}
+
+static int cam_ope_get_actual_clk_rate(struct cam_ope_hw_mgr *hw_mgr,
+	struct cam_ope_ctx *ctx_data, uint32_t base_clk)
+{
+	int i;
+
+	for (i = 0; i < CAM_MAX_VOTE; i++)
+		if (ctx_data->clk_info.clk_rate[i] >= base_clk)
+			return ctx_data->clk_info.clk_rate[i];
+
+	return base_clk;
+}
+
+static int cam_ope_calc_total_clk(struct cam_ope_hw_mgr *hw_mgr,
+	struct cam_ope_clk_info *hw_mgr_clk_info, uint32_t dev_type)
+{
+	int i;
+	struct cam_ope_ctx *ctx_data;
+
+	hw_mgr_clk_info->base_clk = 0;
+	for (i = 0; i < OPE_CTX_MAX; i++) {
+		ctx_data = &hw_mgr->ctx[i];
+		if (ctx_data->ctx_state == OPE_CTX_STATE_ACQUIRED &&
+			ctx_data->ope_acquire.dev_type == dev_type)
+			hw_mgr_clk_info->base_clk +=
+				ctx_data->clk_info.base_clk;
+	}
+
+	return 0;
+}
+
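+/*
+ * base_clk = frame_cycles * 1e9 / budget_ns. For example (values
+ * purely illustrative), a 6000000-cycle frame with a 16666666 ns
+ * (60 fps) budget needs 6000000 * 1e9 / 16666666 ~= 360 MHz.
+ */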
+static uint32_t cam_ope_mgr_calc_base_clk(uint32_t frame_cycles,
+	uint64_t budget)
+{
+	uint64_t base_clk;
+	uint64_t mul = 1000000000;
+
+	base_clk = (frame_cycles * mul) / budget;
+
+	CAM_DBG(CAM_OPE, "budget = %lld fc = %d ib = %lld base_clk = %lld",
+		budget, frame_cycles,
+		(long long)(frame_cycles * mul), base_clk);
+
+	return base_clk;
+}
+
+static bool cam_ope_update_clk_overclk_free(struct cam_ope_hw_mgr *hw_mgr,
+	struct cam_ope_ctx *ctx_data,
+	struct cam_ope_clk_info *hw_mgr_clk_info,
+	struct cam_ope_clk_bw_request *clk_info,
+	uint32_t base_clk)
+{
+	int rc = false;
+
+	/*
+	 * In case of no pending packets:
+	 * 1. If the overclk count is less than the threshold, increase
+	 *    the overclk count and do not update the clock rate.
+	 * 2. If the overclk count is greater than or equal to the
+	 *    threshold, lower the clock rate by one level and update the
+	 *    hw mgr current clock value.
+	 *    a. If the new clock rate is still greater than the sum of
+	 *       base clock rates, reset the overclk count to zero only
+	 *       if it remains over-clocked.
+	 *    b. If it is less than the sum of base clocks, go to the
+	 *       next clock level and reset the overclk count to zero.
+	 *    c. If it equals the sum of base clock rates, reset the
+	 *       overclk count to zero.
+	 */
+	if (hw_mgr_clk_info->over_clked < hw_mgr_clk_info->threshold) {
+		hw_mgr_clk_info->over_clked++;
+		rc = false;
+	} else {
+		hw_mgr_clk_info->curr_clk =
+			cam_ope_get_lower_clk_rate(hw_mgr, ctx_data,
+			hw_mgr_clk_info->curr_clk);
+		if (hw_mgr_clk_info->curr_clk > hw_mgr_clk_info->base_clk) {
+			if (cam_ope_is_over_clk(hw_mgr, ctx_data,
+				hw_mgr_clk_info))
+				hw_mgr_clk_info->over_clked = 0;
+		} else if (hw_mgr_clk_info->curr_clk <
+			hw_mgr_clk_info->base_clk) {
+			hw_mgr_clk_info->curr_clk =
+				cam_ope_get_next_clk_rate(hw_mgr, ctx_data,
+				hw_mgr_clk_info->curr_clk);
+			hw_mgr_clk_info->over_clked = 0;
+		} else if (hw_mgr_clk_info->curr_clk ==
+			hw_mgr_clk_info->base_clk) {
+			hw_mgr_clk_info->over_clked = 0;
+		}
+		rc = true;
+	}
+
+	return rc;
+}
+
+static bool cam_ope_update_clk_free(struct cam_ope_hw_mgr *hw_mgr,
+	struct cam_ope_ctx *ctx_data,
+	struct cam_ope_clk_info *hw_mgr_clk_info,
+	struct cam_ope_clk_bw_request *clk_info,
+	uint32_t base_clk)
+{
+	int rc = false;
+	bool over_clocked = false;
+
+	ctx_data->clk_info.curr_fc = clk_info->frame_cycles;
+	ctx_data->clk_info.base_clk = base_clk;
+	cam_ope_calc_total_clk(hw_mgr, hw_mgr_clk_info,
+		ctx_data->ope_acquire.dev_type);
+
+	/*
+	 * The current clock is not always the sum of base clocks: clock
+	 * scaling moves it to the next higher or lower level, so it always
+	 * equals one of the discrete clock values supported by hardware.
+	 * Even when the current clock is higher than the sum of base
+	 * clocks, it is not necessarily over-clocked; it is considered
+	 * over-clocked only when it is above the discrete level needed
+	 * for the base clock.
+	 * 1. Handle the over clock case
+	 * 2. If the current clock is less than the sum of base clocks,
+	 *    update the current clock
+	 * 3. If the current clock equals the sum of base clocks, no action
+	 */
+
+	over_clocked = cam_ope_is_over_clk(hw_mgr, ctx_data,
+		hw_mgr_clk_info);
+
+	if (hw_mgr_clk_info->curr_clk > hw_mgr_clk_info->base_clk &&
+		over_clocked) {
+		rc = cam_ope_update_clk_overclk_free(hw_mgr, ctx_data,
+			hw_mgr_clk_info, clk_info, base_clk);
+	} else if (hw_mgr_clk_info->curr_clk > hw_mgr_clk_info->base_clk) {
+		hw_mgr_clk_info->over_clked = 0;
+		rc = false;
+	} else if (hw_mgr_clk_info->curr_clk < hw_mgr_clk_info->base_clk) {
+		hw_mgr_clk_info->curr_clk = cam_ope_get_actual_clk_rate(hw_mgr,
+			ctx_data, hw_mgr_clk_info->base_clk);
+		rc = true;
+	}
+
+	return rc;
+}
+
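+/*
+ * Both the free and busy update paths return true only when the
+ * consolidated clock vote actually changed, so the caller knows
+ * whether a new rate must be pushed to the hardware.
+ */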
+static bool cam_ope_update_clk_busy(struct cam_ope_hw_mgr *hw_mgr,
+	struct cam_ope_ctx *ctx_data,
+	struct cam_ope_clk_info *hw_mgr_clk_info,
+	struct cam_ope_clk_bw_request *clk_info,
+	uint32_t base_clk)
+{
+	uint32_t next_clk_level;
+	uint32_t actual_clk;
+	bool rc = false;
+
+	/*
+	 * 1. If the current request's frame cycles (fc) are more than the
+	 *    previous frame's fc, calculate the new base clock.
+	 *    If the sum of base clocks is more than the next available
+	 *    clock level, set curr_clk_rate to the sum of base clock
+	 *    rates and reset the overclk count to zero.
+	 *    Otherwise move the clock rate to the next level, update
+	 *    curr_clk_rate and reset the overclk count to zero.
+	 * 2. If the current fc is less than or equal to the previous
+	 *    frame's fc, still bump the clock to the next available
+	 *    level if one exists and reset the overclk count to zero.
+	 *    If the clock is already at the highest rate, no update is
+	 *    needed.
+	 */
+	ctx_data->clk_info.base_clk = base_clk;
+	hw_mgr_clk_info->over_clked = 0;
+	if (clk_info->frame_cycles > ctx_data->clk_info.curr_fc) {
+		cam_ope_calc_total_clk(hw_mgr, hw_mgr_clk_info,
+			ctx_data->ope_acquire.dev_type);
+		actual_clk = cam_ope_get_actual_clk_rate(hw_mgr,
+			ctx_data, base_clk);
+		if (hw_mgr_clk_info->base_clk > actual_clk) {
+			hw_mgr_clk_info->curr_clk = hw_mgr_clk_info->base_clk;
+		} else {
+			next_clk_level = cam_ope_get_next_clk_rate(hw_mgr,
+				ctx_data, hw_mgr_clk_info->curr_clk);
+			hw_mgr_clk_info->curr_clk = next_clk_level;
+		}
+		rc = true;
+	} else {
+		next_clk_level =
+			cam_ope_get_next_clk_rate(hw_mgr, ctx_data,
+				hw_mgr_clk_info->curr_clk);
+		if (hw_mgr_clk_info->curr_clk < next_clk_level) {
+			hw_mgr_clk_info->curr_clk = next_clk_level;
+			rc = true;
+		}
+	}
+	ctx_data->clk_info.curr_fc = clk_info->frame_cycles;
+
+	return rc;
+}
+
+static bool cam_ope_check_clk_update(struct cam_ope_hw_mgr *hw_mgr,
+	struct cam_ope_ctx *ctx_data, int idx)
+{
+	bool busy = false, rc = false;
+	uint32_t base_clk;
+	struct cam_ope_clk_bw_request *clk_info;
+	uint64_t req_id;
+	struct cam_ope_clk_info *hw_mgr_clk_info;
+
+	/* TODO: Have default clock rates update */
+	/* TODO: Add support for debug clock updates */
+	cam_ope_req_timer_reset(ctx_data);
+	cam_ope_device_timer_reset(hw_mgr);
+	hw_mgr_clk_info = &hw_mgr->clk_info;
+	req_id = ctx_data->req_list[idx]->request_id;
+	if (ctx_data->req_cnt > 1)
+		busy = true;
+
+	CAM_DBG(CAM_OPE, "busy = %d req_id = %lld", busy, req_id);
+
+	clk_info = &ctx_data->req_list[idx]->clk_info;
+
+	/* Calculate base clk rate */
+	base_clk = cam_ope_mgr_calc_base_clk(
+		clk_info->frame_cycles, clk_info->budget_ns);
+	ctx_data->clk_info.rt_flag = clk_info->rt_flag;
+
+	if (busy)
+		rc = cam_ope_update_clk_busy(hw_mgr, ctx_data,
+			hw_mgr_clk_info, clk_info, base_clk);
+	else
+		rc = cam_ope_update_clk_free(hw_mgr, ctx_data,
+			hw_mgr_clk_info, clk_info, base_clk);
+
+	CAM_DBG(CAM_OPE, "bc = %d cc = %d busy = %d overclk = %d uc = %d",
+		hw_mgr_clk_info->base_clk, hw_mgr_clk_info->curr_clk,
+		busy, hw_mgr_clk_info->over_clked, rc);
+
+	return rc;
+}
+
+static int cam_ope_mgr_update_clk_rate(struct cam_ope_hw_mgr *hw_mgr,
+	struct cam_ope_ctx *ctx_data)
+{
+	struct cam_ope_dev_clk_update clk_upd_cmd;
+	int i;
+
+	clk_upd_cmd.clk_rate = hw_mgr->clk_info.curr_clk;
+
+	CAM_DBG(CAM_PERF, "clk_rate %u for dev_type %d", clk_upd_cmd.clk_rate,
+		ctx_data->ope_acquire.dev_type);
+
+	for (i = 0; i < ope_hw_mgr->num_ope; i++) {
+		hw_mgr->ope_dev_intf[i]->hw_ops.process_cmd(
+			hw_mgr->ope_dev_intf[i]->hw_priv,
+			OPE_HW_CLK_UPDATE,
+			&clk_upd_cmd, sizeof(clk_upd_cmd));
+	}
+
+	return 0;
+}
+
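+/*
+ * Consolidated BW voting: each context keeps its last per-path vote,
+ * and the hw mgr keeps the sum across contexts indexed by path. An
+ * update first subtracts this context's old vote, then adds the new
+ * one, so hw_mgr_clk_info always holds the running total.
+ */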
&& + (clk_info->axi_path[i].path_data_type == + ctx_data->clk_info.axi_path[i].path_data_type) && + (clk_info->axi_path[i].camnoc_bw == + ctx_data->clk_info.axi_path[i].camnoc_bw) && + (clk_info->axi_path[i].mnoc_ab_bw == + ctx_data->clk_info.axi_path[i].mnoc_ab_bw)) { + continue; + } else { + update_required = true; + break; + } + } + } + + if (!update_required) { + CAM_DBG(CAM_OPE, + "Incoming BW hasn't changed, no update required"); + return false; + } + + if (busy) { + for (i = 0; i < clk_info->num_paths; i++) { + if (ctx_data->clk_info.axi_path[i].camnoc_bw > + clk_info->axi_path[i].camnoc_bw) + return false; + } + } + + /* + * Remove previous vote of this context from hw mgr first. + * hw_mgr_clk_info has all valid paths, with each path in its own index + */ + for (i = 0; i < ctx_data->clk_info.num_paths; i++) { + path_index = + ctx_data->clk_info.axi_path[i].path_data_type - + CAM_AXI_PATH_DATA_OPE_START_OFFSET; + + if (path_index >= CAM_OPE_MAX_PER_PATH_VOTES) { + CAM_WARN(CAM_OPE, + "Invalid path %d, start offset=%d, max=%d", + ctx_data->clk_info.axi_path[i].path_data_type, + CAM_AXI_PATH_DATA_OPE_START_OFFSET, + CAM_OPE_MAX_PER_PATH_VOTES); + continue; + } + + hw_mgr_clk_info->axi_path[path_index].camnoc_bw -= + ctx_data->clk_info.axi_path[i].camnoc_bw; + hw_mgr_clk_info->axi_path[path_index].mnoc_ab_bw -= + ctx_data->clk_info.axi_path[i].mnoc_ab_bw; + hw_mgr_clk_info->axi_path[path_index].mnoc_ib_bw -= + ctx_data->clk_info.axi_path[i].mnoc_ib_bw; + hw_mgr_clk_info->axi_path[path_index].ddr_ab_bw -= + ctx_data->clk_info.axi_path[i].ddr_ab_bw; + hw_mgr_clk_info->axi_path[path_index].ddr_ib_bw -= + ctx_data->clk_info.axi_path[i].ddr_ib_bw; + } + + ctx_data->clk_info.num_paths = clk_info->num_paths; + + memcpy(&ctx_data->clk_info.axi_path[0], + &clk_info->axi_path[0], + clk_info->num_paths * sizeof(struct cam_axi_per_path_bw_vote)); + + /* + * Add new vote of this context in hw mgr. 
+	ctx_data->clk_info.num_paths = clk_info->num_paths;
+
+	memcpy(&ctx_data->clk_info.axi_path[0],
+		&clk_info->axi_path[0],
+		clk_info->num_paths * sizeof(struct cam_axi_per_path_bw_vote));
+
+	/*
+	 * Add new vote of this context in hw mgr.
+	 * hw_mgr_clk_info has all paths, with each path in its own index
+	 */
+	for (i = 0; i < ctx_data->clk_info.num_paths; i++) {
+		path_index =
+			ctx_data->clk_info.axi_path[i].path_data_type -
+			CAM_AXI_PATH_DATA_OPE_START_OFFSET;
+
+		if (path_index >= CAM_OPE_MAX_PER_PATH_VOTES) {
+			CAM_WARN(CAM_OPE,
+				"Invalid path %d, start offset=%d, max=%d",
+				ctx_data->clk_info.axi_path[i].path_data_type,
+				CAM_AXI_PATH_DATA_OPE_START_OFFSET,
+				CAM_OPE_MAX_PER_PATH_VOTES);
+			continue;
+		}
+
+		hw_mgr_clk_info->axi_path[path_index].path_data_type =
+			ctx_data->clk_info.axi_path[i].path_data_type;
+		hw_mgr_clk_info->axi_path[path_index].transac_type =
+			ctx_data->clk_info.axi_path[i].transac_type;
+		hw_mgr_clk_info->axi_path[path_index].camnoc_bw +=
+			ctx_data->clk_info.axi_path[i].camnoc_bw;
+		hw_mgr_clk_info->axi_path[path_index].mnoc_ab_bw +=
+			ctx_data->clk_info.axi_path[i].mnoc_ab_bw;
+		hw_mgr_clk_info->axi_path[path_index].mnoc_ib_bw +=
+			ctx_data->clk_info.axi_path[i].mnoc_ib_bw;
+		hw_mgr_clk_info->axi_path[path_index].ddr_ab_bw +=
+			ctx_data->clk_info.axi_path[i].ddr_ab_bw;
+		hw_mgr_clk_info->axi_path[path_index].ddr_ib_bw +=
+			ctx_data->clk_info.axi_path[i].ddr_ib_bw;
+		CAM_DBG(CAM_OPE,
+			"Consolidate Path Vote : Dev[%s] i[%d] path_idx[%d] : [%s %s] [%lld %lld]",
+			ctx_data->ope_acquire.dev_name,
+			i, path_index,
+			cam_cpas_axi_util_trans_type_to_string(
+			hw_mgr_clk_info->axi_path[path_index].transac_type),
+			cam_cpas_axi_util_path_type_to_string(
+			hw_mgr_clk_info->axi_path[path_index].path_data_type),
+			hw_mgr_clk_info->axi_path[path_index].camnoc_bw,
+			hw_mgr_clk_info->axi_path[path_index].mnoc_ab_bw);
+	}
+
+	if (hw_mgr_clk_info->num_paths < ctx_data->clk_info.num_paths)
+		hw_mgr_clk_info->num_paths = ctx_data->clk_info.num_paths;
+
+	return true;
+}
+
+static bool cam_ope_check_bw_update(struct cam_ope_hw_mgr *hw_mgr,
+	struct cam_ope_ctx *ctx_data, int idx)
+{
+	bool busy = false, bw_updated = false;
+	int i;
+	struct cam_ope_clk_bw_req_internal_v2 *clk_info_v2;
+	struct cam_ope_clk_info *hw_mgr_clk_info;
+	uint64_t req_id;
+
+	hw_mgr_clk_info = &hw_mgr->clk_info;
+	req_id = ctx_data->req_list[idx]->request_id;
+	if (ctx_data->req_cnt > 1)
+		busy = true;
+
+	clk_info_v2 = &ctx_data->req_list[idx]->clk_info_v2;
+
+	bw_updated = cam_ope_update_bw_v2(hw_mgr, ctx_data,
+		hw_mgr_clk_info, clk_info_v2, busy);
+
+	for (i = 0; i < hw_mgr_clk_info->num_paths; i++) {
+		CAM_DBG(CAM_OPE,
+			"Final path_type: %s, transac_type: %s, camnoc_bw = %lld mnoc_ab_bw = %lld, mnoc_ib_bw = %lld, device: %s",
+			cam_cpas_axi_util_path_type_to_string(
+			hw_mgr_clk_info->axi_path[i].path_data_type),
+			cam_cpas_axi_util_trans_type_to_string(
+			hw_mgr_clk_info->axi_path[i].transac_type),
+			hw_mgr_clk_info->axi_path[i].camnoc_bw,
+			hw_mgr_clk_info->axi_path[i].mnoc_ab_bw,
+			hw_mgr_clk_info->axi_path[i].mnoc_ib_bw,
+			ctx_data->ope_acquire.dev_name);
+	}
+
+	return bw_updated;
+}
+
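+/*
+ * Push the consolidated hw mgr AXI vote to every OPE device via
+ * OPE_HW_BW_UPDATE; the AHB vote is sent as zero and marked invalid.
+ */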
+static int cam_ope_update_cpas_vote(struct cam_ope_hw_mgr *hw_mgr,
+	struct cam_ope_ctx *ctx_data)
+{
+	int i = 0;
+	struct cam_ope_clk_info *clk_info;
+	struct cam_ope_dev_bw_update bw_update = {{0}, {0}, 0, 0};
+
+	clk_info = &hw_mgr->clk_info;
+
+	bw_update.ahb_vote.type = CAM_VOTE_DYNAMIC;
+	bw_update.ahb_vote.vote.freq = 0;
+	bw_update.ahb_vote_valid = false;
+
+	bw_update.axi_vote.num_paths = clk_info->num_paths;
+	memcpy(&bw_update.axi_vote.axi_path[0],
+		&clk_info->axi_path[0],
+		bw_update.axi_vote.num_paths *
+		sizeof(struct cam_axi_per_path_bw_vote));
+
+	bw_update.axi_vote_valid = true;
+	for (i = 0; i < ope_hw_mgr->num_ope; i++) {
+		hw_mgr->ope_dev_intf[i]->hw_ops.process_cmd(
+			hw_mgr->ope_dev_intf[i]->hw_priv,
+			OPE_HW_BW_UPDATE,
+			&bw_update, sizeof(bw_update));
+	}
+
+	return 0;
+}
+
+static int cam_ope_mgr_ope_clk_update(struct cam_ope_hw_mgr *hw_mgr,
+	struct cam_ope_ctx *ctx_data, int idx)
+{
+	int rc = 0;
+
+	if (cam_ope_check_clk_update(hw_mgr, ctx_data, idx))
+		rc = cam_ope_mgr_update_clk_rate(hw_mgr, ctx_data);
+
+	if (cam_ope_check_bw_update(hw_mgr, ctx_data, idx))
+		rc |= cam_ope_update_cpas_vote(hw_mgr, ctx_data);
+
+	return rc;
+}
 
 static void cam_ope_ctx_cdm_callback(uint32_t handle, void *userdata,
 	enum cam_cdm_cb_status status, uint64_t cookie)
@@ -289,6 +1113,7 @@ static void cam_ope_ctx_cdm_callback(uint32_t handle, void *userdata,
 			handle, userdata, status, cookie,
 			ope_req->request_id, ctx->ctx_id);
 		cam_ope_req_timer_reset(ctx);
+		cam_ope_device_timer_reset(ope_hw_mgr);
 	} else if (status == CAM_CDM_CB_STATUS_HW_RESUBMIT) {
 		CAM_INFO(CAM_OPE, "After reset of CDM and OPE, reapply req");
 		rc = cam_ope_mgr_reapply_config(ope_hw_mgr, ctx, ope_req);
@@ -1344,10 +2169,12 @@ static int cam_ope_mgr_acquire_hw(void *hw_priv, void *hw_acquire_args)
 	}
 
 	cam_ope_start_req_timer(ctx);
+	cam_ope_device_timer_start(hw_mgr);
 	hw_mgr->ope_ctx_cnt++;
 	ctx->context_priv = args->context_data;
 	args->ctxt_to_hw_map = ctx;
 	ctx->ctxt_event_cb = args->event_cb;
+	cam_ope_ctx_clk_info_init(ctx);
 	ctx->ctx_state = OPE_CTX_STATE_ACQUIRED;
 	mutex_unlock(&ctx->ctx_mutex);
 
@@ -1502,29 +2329,26 @@ static int cam_ope_mgr_release_hw(void *hw_priv, void *hw_release_args)
 	rc = cam_ope_mgr_release_ctx(hw_mgr, ctx_id);
 	if (!hw_mgr->ope_ctx_cnt) {
 		CAM_DBG(CAM_OPE, "Last Release");
-		if (!hw_mgr->ope_ctx_cnt) {
-			for (i = 0; i < ope_hw_mgr->num_ope; i++) {
-				dev_intf = hw_mgr->ope_dev_intf[i];
-				irq_cb.ope_hw_mgr_cb = NULL;
-				irq_cb.data = NULL;
-				rc = dev_intf->hw_ops.process_cmd(
-					hw_mgr->ope_dev_intf[i]->hw_priv,
-					OPE_HW_SET_IRQ_CB,
-					&irq_cb, sizeof(irq_cb));
-				if (rc)
-					CAM_ERR(CAM_OPE, "IRQ dereg failed: %d",
-						rc);
-			}
-			for (i = 0; i < ope_hw_mgr->num_ope; i++) {
-				dev_intf = hw_mgr->ope_dev_intf[i];
-				rc = dev_intf->hw_ops.deinit(
-					hw_mgr->ope_dev_intf[i]->hw_priv,
-					NULL, 0);
-				if (rc)
-					CAM_ERR(CAM_OPE, "deinit failed: %d",
-						rc);
-			}
+		for (i = 0; i < ope_hw_mgr->num_ope; i++) {
+			dev_intf = hw_mgr->ope_dev_intf[i];
+			irq_cb.ope_hw_mgr_cb = NULL;
+			irq_cb.data = NULL;
+			rc = dev_intf->hw_ops.process_cmd(
+				hw_mgr->ope_dev_intf[i]->hw_priv,
+				OPE_HW_SET_IRQ_CB,
+				&irq_cb, sizeof(irq_cb));
+			if (rc)
+				CAM_ERR(CAM_OPE, "IRQ dereg failed: %d", rc);
 		}
+		for (i = 0; i < ope_hw_mgr->num_ope; i++) {
+			dev_intf = hw_mgr->ope_dev_intf[i];
+			rc = dev_intf->hw_ops.deinit(
+				hw_mgr->ope_dev_intf[i]->hw_priv,
+				NULL, 0);
+			if (rc)
+				CAM_ERR(CAM_OPE, "deinit failed: %d", rc);
+		}
+		cam_ope_device_timer_stop(hw_mgr);
 	}
 
 	mutex_unlock(&hw_mgr->hw_mgr_mutex);
@@ -1533,6 +2357,123 @@ static int cam_ope_mgr_release_hw(void *hw_priv, void *hw_release_args)
 	return rc;
 }
 
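+/*
+ * Parse generic cmd-buffer blobs. Only the v2 clock/BW blob is
+ * handled: validate the declared size, bound num_paths, guard the
+ * flexible-array size computation against integer overflow, then
+ * stash the vote in the request and mirror the clock fields into the
+ * v1 structure used by the clock scaler.
+ */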
+static int cam_ope_packet_generic_blob_handler(void *user_data,
+	uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data)
+{
+	struct cam_ope_clk_bw_request *clk_info;
+	struct ope_clk_bw_request_v2 *soc_req_v2;
+	struct cam_ope_clk_bw_req_internal_v2 *clk_info_v2;
+	struct ope_cmd_generic_blob *blob;
+	struct cam_ope_ctx *ctx_data;
+	uint32_t index;
+	size_t clk_update_size;
+	int rc = 0;
+
+	if (!blob_data || (blob_size == 0)) {
+		CAM_ERR(CAM_OPE, "Invalid blob info %pK %d", blob_data,
+			blob_size);
+		return -EINVAL;
+	}
+
+	blob = (struct ope_cmd_generic_blob *)user_data;
+	ctx_data = blob->ctx;
+	index = blob->req_idx;
+
+	switch (blob_type) {
+	case OPE_CMD_GENERIC_BLOB_CLK_V2:
+		if (blob_size < sizeof(struct ope_clk_bw_request_v2)) {
+			CAM_ERR(CAM_OPE, "Mismatch blob size %d expected %lu",
+				blob_size,
+				sizeof(struct ope_clk_bw_request_v2));
+			return -EINVAL;
+		}
+
+		soc_req_v2 = (struct ope_clk_bw_request_v2 *)blob_data;
+		if (soc_req_v2->num_paths > CAM_OPE_MAX_PER_PATH_VOTES) {
+			CAM_ERR(CAM_OPE, "Invalid num paths: %d",
+				soc_req_v2->num_paths);
+			return -EINVAL;
+		}
+
+		/* Check for integer overflow */
+		if (soc_req_v2->num_paths != 1) {
+			if (sizeof(struct cam_axi_per_path_bw_vote) >
+				((UINT_MAX -
+				sizeof(struct ope_clk_bw_request_v2)) /
+				(soc_req_v2->num_paths - 1))) {
+				CAM_ERR(CAM_OPE,
+					"Size exceeds limit paths:%u size per path:%lu",
+					soc_req_v2->num_paths - 1,
+					sizeof(
+					struct cam_axi_per_path_bw_vote));
+				return -EINVAL;
+			}
+		}
+
+		clk_update_size = sizeof(struct ope_clk_bw_request_v2) +
+			((soc_req_v2->num_paths - 1) *
+			sizeof(struct cam_axi_per_path_bw_vote));
+		if (blob_size < clk_update_size) {
+			CAM_ERR(CAM_OPE, "Invalid blob size: %u",
+				blob_size);
+			return -EINVAL;
+		}
+
+		clk_info = &ctx_data->req_list[index]->clk_info;
+		clk_info_v2 = &ctx_data->req_list[index]->clk_info_v2;
+
+		memcpy(clk_info_v2, soc_req_v2, clk_update_size);
+
+		/* Use v1 structure for clk fields */
+		clk_info->budget_ns = clk_info_v2->budget_ns;
+		clk_info->frame_cycles = clk_info_v2->frame_cycles;
+		clk_info->rt_flag = clk_info_v2->rt_flag;
+
+		CAM_DBG(CAM_OPE, "budget=%llu, frame_cycle=%llu, rt_flag=%d",
+			clk_info_v2->budget_ns, clk_info_v2->frame_cycles,
+			clk_info_v2->rt_flag);
+		break;
+
+	default:
+		CAM_WARN(CAM_OPE, "Invalid blob type %d", blob_type);
+		break;
+	}
+	return rc;
+}
+
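+/*
+ * Walk the packet's cmd buffers and feed every GENERIC_BLOB buffer
+ * through cam_packet_util_process_generic_cmd_buffer(), which invokes
+ * the blob handler above for each blob.
+ */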
+static int cam_ope_process_generic_cmd_buffer(
+	struct cam_packet *packet,
+	struct cam_ope_ctx *ctx_data,
+	int32_t index,
+	uint64_t *io_buf_addr)
+{
+	int i, rc = 0;
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	struct ope_cmd_generic_blob cmd_generic_blob;
+
+	cmd_generic_blob.ctx = ctx_data;
+	cmd_generic_blob.req_idx = index;
+	cmd_generic_blob.io_buf_addr = io_buf_addr;
+
+	cmd_desc = (struct cam_cmd_buf_desc *)
+		((uint32_t *) &packet->payload + packet->cmd_buf_offset/4);
+
+	for (i = 0; i < packet->num_cmd_buf; i++) {
+		if (!cmd_desc[i].length)
+			continue;
+
+		if (cmd_desc[i].meta_data != OPE_CMD_META_GENERIC_BLOB)
+			continue;
+
+		rc = cam_packet_util_process_generic_cmd_buffer(&cmd_desc[i],
+			cam_ope_packet_generic_blob_handler, &cmd_generic_blob);
+		if (rc)
+			CAM_ERR(CAM_OPE, "Failed in processing blobs %d", rc);
+	}
+
+	return rc;
+}
+
 static int cam_ope_mgr_prepare_hw_update(void *hw_priv,
 	void *hw_prepare_update_args)
 {
@@ -1641,6 +2582,13 @@ static int cam_ope_mgr_prepare_hw_update(void *hw_priv,
 		goto end;
 	}
 
+	rc = cam_ope_process_generic_cmd_buffer(packet, ctx_data,
+		request_idx, NULL);
+	if (rc) {
+		mutex_unlock(&ctx_data->ctx_mutex);
+		CAM_ERR(CAM_OPE, "Failed: %d", rc);
+		goto end;
+	}
 	prepare_args->num_hw_update_entries = 1;
 	prepare_args->hw_update_entries[0].addr =
 		(uintptr_t)ctx_data->req_list[request_idx]->cdm_cmd;
@@ -1755,6 +2703,8 @@ static int cam_ope_mgr_config_hw(void *hw_priv, void *hw_config_args)
 		config_args->hw_update_entries->addr;
 	cdm_cmd->cookie = ope_req->req_idx;
 
+	cam_ope_mgr_ope_clk_update(hw_mgr, ctx_data, ope_req->req_idx);
+
 	rc = cam_ope_mgr_enqueue_config(hw_mgr, ctx_data, config_args);
 	if (rc)
 		goto config_err;
diff --git a/drivers/cam_ope/ope_hw_mgr/cam_ope_hw_mgr.h b/drivers/cam_ope/ope_hw_mgr/cam_ope_hw_mgr.h
index bd88ceb3d7..f062c61706 100644
--- a/drivers/cam_ope/ope_hw_mgr/cam_ope_hw_mgr.h
+++ b/drivers/cam_ope/ope_hw_mgr/cam_ope_hw_mgr.h
@@ -52,6 +52,112 @@
 
 #define OPE_MAX_CDM_BLS 16
 
+#define CAM_OPE_MAX_PER_PATH_VOTES 6
+#define CAM_OPE_BW_CONFIG_UNKNOWN 0
+#define CAM_OPE_BW_CONFIG_V2 2
+
+#define CLK_HW_OPE 0x0
+#define CLK_HW_MAX 0x1
+
+#define OPE_DEVICE_IDLE_TIMEOUT 400
+
+/**
+ * struct cam_ope_clk_bw_req_internal_v2
+ * @budget_ns: Time required to process frame
+ * @frame_cycles: Frame cycles needed to process the frame
+ * @rt_flag: Flag to indicate real time stream
+ * @num_paths: Number of paths for per path bw vote
+ * @axi_path: Per path vote info for OPE
+ */
+struct cam_ope_clk_bw_req_internal_v2 {
+	uint64_t budget_ns;
+	uint32_t frame_cycles;
+	uint32_t rt_flag;
+	uint32_t num_paths;
+	struct cam_axi_per_path_bw_vote axi_path[CAM_OPE_MAX_PER_PATH_VOTES];
+};
+
+/**
+ * struct cam_ope_clk_bw_request
+ * @budget_ns: Time required to process frame
+ * @frame_cycles: Frame cycles needed to process the frame
+ * @rt_flag: Flag to indicate real time stream
+ * @uncompressed_bw: Bandwidth required to process frame
+ * @compressed_bw: Compressed bandwidth to process frame
+ */
+struct cam_ope_clk_bw_request {
+	uint64_t budget_ns;
+	uint32_t frame_cycles;
+	uint32_t rt_flag;
+	uint64_t uncompressed_bw;
+	uint64_t compressed_bw;
+};
+
+/**
+ * struct cam_ctx_clk_info
+ * @curr_fc: Context latest request frame cycles
+ * @rt_flag: Flag to indicate real time request
+ * @base_clk: Base clock to process the request
+ * @reserved: Reserved field
+ * @uncompressed_bw: Current bandwidth voting
+ * @compressed_bw: Current compressed bandwidth voting
+ * @clk_rate: Supported clock rates for the context
+ * @num_paths: Number of valid AXI paths
+ * @axi_path: ctx based per path bw vote
+ */
+struct cam_ctx_clk_info {
+	uint32_t curr_fc;
+	uint32_t rt_flag;
+	uint32_t base_clk;
+	uint32_t reserved;
+	uint64_t uncompressed_bw;
+	uint64_t compressed_bw;
+	int32_t clk_rate[CAM_MAX_VOTE];
+	uint32_t num_paths;
+	struct cam_axi_per_path_bw_vote axi_path[CAM_OPE_MAX_PER_PATH_VOTES];
+};
+
+/**
+ * struct ope_cmd_generic_blob
+ * @ctx: Current context info
+ * @req_idx: Index used for request
+ * @io_buf_addr: Pointer to io buffer address
+ */
+struct ope_cmd_generic_blob {
+	struct cam_ope_ctx *ctx;
+	uint32_t req_idx;
+	uint64_t *io_buf_addr;
+};
+
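+/*
+ * Unlike cam_ctx_clk_info above, which tracks a single context's
+ * vote, cam_ope_clk_info below is the hw-mgr-level consolidation of
+ * all acquired contexts and is what actually gets voted to CPAS.
+ */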
+/**
+ * struct cam_ope_clk_info
+ * @base_clk: Base clock to process request
+ * @curr_clk: Current clock of hardware
+ * @threshold: Threshold for overclk count
+ * @over_clked: Over clock count
+ * @uncompressed_bw: Current bandwidth voting
+ * @compressed_bw: Current compressed bandwidth voting
+ * @num_paths: Number of AXI vote paths
+ * @axi_path: Current per path bw vote info
+ * @hw_type: OPE device type
+ * @watch_dog: Watchdog timer handle
+ * @watch_dog_reset_counter: Counter for watchdog reset
+ */
+struct cam_ope_clk_info {
+	uint32_t base_clk;
+	uint32_t curr_clk;
+	uint32_t threshold;
+	uint32_t over_clked;
+	uint64_t uncompressed_bw;
+	uint64_t compressed_bw;
+	uint32_t num_paths;
+	struct cam_axi_per_path_bw_vote axi_path[CAM_OPE_MAX_PER_PATH_VOTES];
+	uint32_t hw_type;
+	struct cam_req_mgr_timer *watch_dog;
+	uint32_t watch_dog_reset_counter;
+};
+
 /**
  * struct ope_cmd_work_data
  *
@@ -273,6 +379,8 @@ struct ope_io_buf {
  * @ope_debug_buf: Debug buffer
  * @io_buf: IO config info of a request
  * @cdm_cmd: CDM command for OPE CDM
+ * @clk_info: Clock Info V1
+ * @clk_info_v2: Clock Info V2
  */
 struct cam_ope_request {
 	uint64_t request_id;
@@ -290,6 +398,8 @@ struct cam_ope_request {
 	struct ope_debug_buffer ope_debug_buf;
 	struct ope_io_buf io_buf[OPE_MAX_BATCH_SIZE][OPE_MAX_IO_BUFS];
 	struct cam_cdm_bl_request *cdm_cmd;
+	struct cam_ope_clk_bw_request clk_info;
+	struct cam_ope_clk_bw_req_internal_v2 clk_info_v2;
 };
 
 /**
@@ -320,6 +430,10 @@ struct cam_ope_cdm {
 * @req_list: Request List
 * @ope_cdm: OPE CDM info
 * @req_watch_dog: Watchdog for requests
+ * @req_watch_dog_reset_counter: Request watchdog reset counter
+ * @clk_info: OPE Ctx clock info
+ * @clk_watch_dog: Clock watchdog
+ * @clk_watch_dog_reset_counter: Clock watchdog reset counter
 */
 struct cam_ope_ctx {
 	void *context_priv;
@@ -336,6 +450,10 @@ struct cam_ope_ctx {
 	struct cam_ope_request *req_list[CAM_CTX_REQ_MAX];
 	struct cam_ope_cdm ope_cdm;
 	struct cam_req_mgr_timer *req_watch_dog;
+	uint32_t req_watch_dog_reset_counter;
+	struct cam_ctx_clk_info clk_info;
+	struct cam_req_mgr_timer *clk_watch_dog;
+	uint32_t clk_watch_dog_reset_counter;
 };
 
 /**
@@ -366,6 +484,7 @@ struct cam_ope_ctx {
 * @timer_work_data: Timer work data
 * @ope_dev_intf: OPE device interface
 * @cdm_reg_map: OPE CDM register map
+ * @clk_info: OPE clock info for HW manager
 */
 struct cam_ope_hw_mgr {
 	int32_t open_cnt;
@@ -394,6 +513,7 @@ struct cam_ope_hw_mgr {
 	struct ope_clk_work_data *timer_work_data;
 	struct cam_hw_intf *ope_dev_intf[OPE_DEV_MAX];
 	struct cam_soc_reg_map *cdm_reg_map[OPE_DEV_MAX][OPE_BASE_MAX];
+	struct cam_ope_clk_info clk_info;
 };
 
 #endif /* CAM_OPE_HW_MGR_H */
diff --git a/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_core.c b/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_core.c
index d2af4ebd2f..bc16bded1f 100644
--- a/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_core.c
+++ b/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_core.c
@@ -1711,11 +1711,28 @@ int cam_ope_process_cmd(void *device_priv, uint32_t cmd_type,
 		struct cam_ope_dev_clk_update *clk_upd_cmd =
 			(struct cam_ope_dev_clk_update *)cmd_args;
 
+		if (core_info->clk_enable == false) {
+			rc = cam_soc_util_clk_enable_default(soc_info,
+				CAM_SVS_VOTE);
+			if (rc) {
+				CAM_ERR(CAM_OPE, "Clock enable failed");
+				return rc;
+			}
+			core_info->clk_enable = true;
+		}
+
 		rc = cam_ope_update_clk_rate(soc_info, clk_upd_cmd->clk_rate);
 		if (rc)
 			CAM_ERR(CAM_OPE, "Failed to update clk: %d", rc);
 	}
 	break;
+	case OPE_HW_CLK_DISABLE: {
+		if (core_info->clk_enable == true)
+			cam_soc_util_clk_disable_default(soc_info);
+
+		core_info->clk_enable = false;
+	}
+	break;
 	case OPE_HW_BW_UPDATE: {
 		struct cam_ope_dev_bw_update *cpas_vote = cmd_args;
diff --git a/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_dev_intf.h b/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_dev_intf.h
index 462752bd7f..9e35fb8061 100644
--- a/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_dev_intf.h
+++ b/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_dev_intf.h
@@ -12,20 +12,22 @@
 
 #include "cam_cpas_api.h"
 
-#define OPE_HW_INIT 0x1
-#define OPE_HW_DEINIT 0x2
-#define OPE_HW_ACQUIRE 0x3
-#define OPE_HW_RELEASE 0x4
-#define OPE_HW_START 0x5
-#define OPE_HW_STOP 0x6
-#define OPE_HW_FLUSH 0x7
-#define OPE_HW_PREPARE 0x8
-#define OPE_HW_ISR 0x9
-#define OPE_HW_PROBE 0xA
-#define OPE_HW_CLK_UPDATE 0xB
-#define OPE_HW_BW_UPDATE 0xC
-#define OPE_HW_RESET 0xD
-#define OPE_HW_SET_IRQ_CB 0xE
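+/*
+ * Opcodes 0xF/0x10 are new: OPE_HW_CLK_DISABLE lets the idle worker
+ * gate the OPE clocks; OPE_HW_CLK_ENABLE is defined for symmetry (in
+ * this patch the enable happens lazily in the CLK_UPDATE handler).
+ */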
+#define OPE_HW_INIT          0x1
+#define OPE_HW_DEINIT        0x2
+#define OPE_HW_ACQUIRE       0x3
+#define OPE_HW_RELEASE       0x4
+#define OPE_HW_START         0x5
+#define OPE_HW_STOP          0x6
+#define OPE_HW_FLUSH         0x7
+#define OPE_HW_PREPARE       0x8
+#define OPE_HW_ISR           0x9
+#define OPE_HW_PROBE         0xA
+#define OPE_HW_CLK_UPDATE    0xB
+#define OPE_HW_BW_UPDATE     0xC
+#define OPE_HW_RESET         0xD
+#define OPE_HW_SET_IRQ_CB    0xE
+#define OPE_HW_CLK_DISABLE   0xF
+#define OPE_HW_CLK_ENABLE    0x10
 
 /**
  * struct cam_ope_dev_probe
diff --git a/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_soc.c b/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_soc.c
index 7f66abfe45..ca83bb31db 100644
--- a/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_soc.c
+++ b/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_soc.c
@@ -8,7 +8,7 @@
 #include
 #include
 #include
-#include
+#include
 #include "ope_soc.h"
 #include "cam_soc_util.h"
 #include "cam_debug_util.h"
diff --git a/include/uapi/camera/media/cam_cpas.h b/include/uapi/camera/media/cam_cpas.h
index 2a44d93497..d78b384ce7 100644
--- a/include/uapi/camera/media/cam_cpas.h
+++ b/include/uapi/camera/media/cam_cpas.h
@@ -52,6 +52,15 @@
 #define CAM_AXI_PATH_DATA_IPE_MAX_OFFSET \
 	(CAM_AXI_PATH_DATA_IPE_START_OFFSET + 31)
 
+#define CAM_AXI_PATH_DATA_OPE_START_OFFSET 64
+#define CAM_AXI_PATH_DATA_OPE_RD_IN (CAM_AXI_PATH_DATA_OPE_START_OFFSET + 0)
+#define CAM_AXI_PATH_DATA_OPE_RD_REF (CAM_AXI_PATH_DATA_OPE_START_OFFSET + 1)
+#define CAM_AXI_PATH_DATA_OPE_WR_VID (CAM_AXI_PATH_DATA_OPE_START_OFFSET + 2)
+#define CAM_AXI_PATH_DATA_OPE_WR_DISP (CAM_AXI_PATH_DATA_OPE_START_OFFSET + 3)
+#define CAM_AXI_PATH_DATA_OPE_WR_REF (CAM_AXI_PATH_DATA_OPE_START_OFFSET + 4)
+#define CAM_AXI_PATH_DATA_OPE_MAX_OFFSET \
+	(CAM_AXI_PATH_DATA_OPE_START_OFFSET + 31)
+
 #define CAM_AXI_PATH_DATA_ALL 256
 
 /**