diff --git a/drivers/cam_core/cam_hw_mgr_intf.h b/drivers/cam_core/cam_hw_mgr_intf.h index 2389327863..9b6942acab 100644 --- a/drivers/cam_core/cam_hw_mgr_intf.h +++ b/drivers/cam_core/cam_hw_mgr_intf.h @@ -243,14 +243,16 @@ struct cam_hw_stream_setttings { /** * struct cam_hw_config_args - Payload for config command * - * @ctxt_to_hw_map: HW context from the acquire - * @num_hw_update_entries: Number of hardware update entries - * @hw_update_entries: Hardware update list - * @out_map_entries: Out map info - * @num_out_map_entries: Number of out map entries - * @priv: Private pointer - * @request_id: Request ID - * @reapply True if reapplying after bubble + * @ctxt_to_hw_map: HW context from the acquire + * @num_hw_update_entries: Number of hardware update entries + * @hw_update_entries: Hardware update list + * @out_map_entries: Out map info + * @num_out_map_entries: Number of out map entries + * @priv: Private pointer + * @request_id: Request ID + * @reapply: True if reapplying after bubble + * @cdm_reset_before_apply: True if CDM needs to be reset before re-applying the bubble + * request + * */ struct cam_hw_config_args { @@ -263,6 +265,7 @@ struct cam_hw_config_args { uint64_t request_id; bool init_packet; bool reapply; + bool cdm_reset_before_apply; }; /** diff --git a/drivers/cam_isp/cam_isp_context.c b/drivers/cam_isp/cam_isp_context.c index 890520c78e..998a9fa77e 100644 --- a/drivers/cam_isp/cam_isp_context.c +++ b/drivers/cam_isp/cam_isp_context.c @@ -823,6 +823,8 @@ static int __cam_isp_ctx_handle_buf_done_for_req_list( req_isp->bubble_detected = false; list_del_init(&req->list); atomic_set(&ctx_isp->process_bubble, 0); + req_isp->cdm_reset_before_apply = false; + ctx_isp->bubble_frame_cnt = 0; if (buf_done_req_id <= ctx->last_flush_req) { for (i = 0; i < req_isp->num_fence_map_out; i++) @@ -855,6 +857,7 @@ static int __cam_isp_ctx_handle_buf_done_for_req_list( list_del_init(&req->list); list_add_tail(&req->list, &ctx->free_req_list); req_isp->reapply = false; + 
req_isp->cdm_reset_before_apply = false; CAM_DBG(CAM_REQ, "Move active request %lld to free list(cnt = %d) [all fences done], ctx %u", @@ -1551,6 +1554,9 @@ static int __cam_isp_ctx_notify_sof_in_activated_state( struct cam_context *ctx = ctx_isp->base; struct cam_ctx_request *req; struct cam_isp_ctx_req *req_isp; + struct cam_hw_cmd_args hw_cmd_args; + struct cam_isp_hw_cmd_args isp_hw_cmd_args; + uint64_t last_cdm_done_req = 0; struct cam_isp_hw_epoch_event_data *epoch_done_event_data = (struct cam_isp_hw_epoch_event_data *)evt_data; @@ -1561,6 +1567,82 @@ static int __cam_isp_ctx_notify_sof_in_activated_state( ctx_isp->frame_id_meta = epoch_done_event_data->frame_id_meta; + if (atomic_read(&ctx_isp->process_bubble)) { + if (list_empty(&ctx->active_req_list)) { + CAM_ERR(CAM_ISP, + "No available active req in bubble"); + atomic_set(&ctx_isp->process_bubble, 0); + ctx_isp->bubble_frame_cnt = 0; + rc = -EINVAL; + return rc; + } + + if (ctx_isp->last_sof_timestamp == + ctx_isp->sof_timestamp_val) { + CAM_DBG(CAM_ISP, + "Tasklet delay detected! 
Bubble frame check skipped, sof_timestamp: %lld", + ctx_isp->sof_timestamp_val); + goto notify_only; + } + + req = list_first_entry(&ctx->active_req_list, + struct cam_ctx_request, list); + req_isp = (struct cam_isp_ctx_req *) req->req_priv; + + if (ctx_isp->bubble_frame_cnt >= 1 && + req_isp->bubble_detected) { + hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx; + hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL; + isp_hw_cmd_args.cmd_type = + CAM_ISP_HW_MGR_GET_LAST_CDM_DONE; + hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args; + rc = ctx->hw_mgr_intf->hw_cmd( + ctx->hw_mgr_intf->hw_mgr_priv, + &hw_cmd_args); + if (rc) { + CAM_ERR(CAM_ISP, "HW command failed"); + return rc; + } + + last_cdm_done_req = isp_hw_cmd_args.u.last_cdm_done; + CAM_DBG(CAM_ISP, "last_cdm_done req: %d", + last_cdm_done_req); + + if (last_cdm_done_req >= req->request_id) { + CAM_DBG(CAM_ISP, + "CDM callback detected for req: %lld, possible buf_done delay, waiting for buf_done", + req->request_id); + ctx_isp->bubble_frame_cnt = 0; + } else { + CAM_DBG(CAM_ISP, + "CDM callback not happened for req: %lld, possible CDM stuck or workqueue delay", + req->request_id); + req_isp->num_acked = 0; + ctx_isp->bubble_frame_cnt = 0; + req_isp->bubble_detected = false; + req_isp->cdm_reset_before_apply = true; + list_del_init(&req->list); + list_add(&req->list, &ctx->pending_req_list); + atomic_set(&ctx_isp->process_bubble, 0); + ctx_isp->active_req_cnt--; + CAM_DBG(CAM_REQ, + "Move active req: %lld to pending list(cnt = %d) [bubble re-apply],ctx %u", + req->request_id, + ctx_isp->active_req_cnt, ctx->ctx_id); + } + } else if (req_isp->bubble_detected) { + ctx_isp->bubble_frame_cnt++; + CAM_DBG(CAM_ISP, + "Waiting on bufdone for bubble req: %lld, since frame_cnt = %lld", + req->request_id, + ctx_isp->bubble_frame_cnt); + } else { + CAM_DBG(CAM_ISP, "Delayed bufdone for req: %lld", + req->request_id); + } + } + +notify_only: /* * notify reqmgr with sof signal. 
Note, due to scheduling delay * we can run into situation that two active requests has already @@ -1611,7 +1693,7 @@ static int __cam_isp_ctx_notify_sof_in_activated_state( ctx->ctx_id); rc = -EFAULT; } - + ctx_isp->last_sof_timestamp = ctx_isp->sof_timestamp_val; return 0; } @@ -1776,6 +1858,7 @@ static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp, req_isp = (struct cam_isp_ctx_req *)req->req_priv; req_isp->bubble_detected = true; req_isp->reapply = true; + req_isp->cdm_reset_before_apply = false; CAM_INFO_RATE_LIMIT(CAM_ISP, "ctx:%d Report Bubble flag %d req id:%lld", ctx->ctx_id, req_isp->bubble_report, req->request_id); @@ -1990,6 +2073,7 @@ static int __cam_isp_ctx_epoch_in_bubble_applied( CAM_INFO_RATE_LIMIT(CAM_ISP, "Ctx:%d Report Bubble flag %d req id:%lld", ctx->ctx_id, req_isp->bubble_report, req->request_id); req_isp->reapply = true; + req_isp->cdm_reset_before_apply = false; if (req_isp->bubble_report && ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_err) { @@ -2914,14 +2998,10 @@ static int __cam_isp_ctx_apply_req_in_activated_state( cfg.priv = &req_isp->hw_update_data; cfg.init_packet = 0; cfg.reapply = req_isp->reapply; + cfg.cdm_reset_before_apply = req_isp->cdm_reset_before_apply; - rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, - &cfg); - if (rc) { - CAM_ERR_RATE_LIMIT(CAM_ISP, - "ctx_id:%d ,Can not apply the configuration", - ctx->ctx_id); - } else { + rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg); + if (!rc) { spin_lock_bh(&ctx->lock); ctx_isp->substate_activated = next_state; ctx_isp->last_applied_req_id = apply->request_id; @@ -2937,6 +3017,22 @@ static int __cam_isp_ctx_apply_req_in_activated_state( req->request_id); __cam_isp_ctx_update_event_record(ctx_isp, CAM_ISP_CTX_EVENT_APPLY, req); + } else if (rc == -EALREADY) { + spin_lock_bh(&ctx->lock); + req_isp->bubble_detected = true; + req_isp->cdm_reset_before_apply = false; + atomic_set(&ctx_isp->process_bubble, 1); + 
list_del_init(&req->list); + list_add(&req->list, &ctx->active_req_list); + ctx_isp->active_req_cnt++; + spin_unlock_bh(&ctx->lock); + CAM_DBG(CAM_REQ, + "move request %lld to active list(cnt = %d), ctx %u", + req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id); + } else { + CAM_ERR_RATE_LIMIT(CAM_ISP, + "ctx_id:%d ,Can not apply (req %lld) the configuration, rc %d", + ctx->ctx_id, apply->request_id, rc); } end: return rc; @@ -3274,6 +3370,7 @@ static int __cam_isp_ctx_flush_req(struct cam_context *ctx, } } req_isp->reapply = false; + req_isp->cdm_reset_before_apply = false; list_del_init(&req->list); list_add_tail(&req->list, &ctx->free_req_list); } @@ -3373,6 +3470,7 @@ static int __cam_isp_ctx_flush_req_in_top_state( } end: + ctx_isp->bubble_frame_cnt = 0; atomic_set(&ctx_isp->process_bubble, 0); atomic_set(&ctx_isp->rxd_epoch, 0); return rc; @@ -3638,6 +3736,7 @@ static int __cam_isp_ctx_rdi_only_sof_in_bubble_applied( CAM_INFO_RATE_LIMIT(CAM_ISP, "Ctx:%d Report Bubble flag %d req id:%lld", ctx->ctx_id, req_isp->bubble_report, req->request_id); req_isp->reapply = true; + req_isp->cdm_reset_before_apply = false; if (req_isp->bubble_report && ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_err) { @@ -4251,6 +4350,7 @@ static int __cam_isp_ctx_config_dev_in_top_state( req_isp->num_fence_map_in = cfg.num_in_map_entries; req_isp->num_acked = 0; req_isp->bubble_detected = false; + req_isp->cdm_reset_before_apply = false; req_isp->hw_update_data.packet = packet; for (i = 0; i < req_isp->num_fence_map_out; i++) { @@ -5008,6 +5108,7 @@ static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx, start_isp.hw_config.priv = &req_isp->hw_update_data; start_isp.hw_config.init_packet = 1; start_isp.hw_config.reapply = 0; + start_isp.hw_config.cdm_reset_before_apply = false; ctx_isp->last_applied_req_id = req->request_id; @@ -5021,6 +5122,7 @@ static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx, ctx_isp->frame_id = 0; ctx_isp->active_req_cnt = 0; 
ctx_isp->reported_req_id = 0; + ctx_isp->bubble_frame_cnt = 0; ctx_isp->substate_activated = ctx_isp->rdi_only_context ? CAM_ISP_CTX_ACTIVATED_APPLIED : (req_isp->num_fence_map_out) ? CAM_ISP_CTX_ACTIVATED_EPOCH : @@ -5200,6 +5302,7 @@ static int __cam_isp_ctx_stop_dev_in_activated_unlock( ctx_isp->reported_req_id = 0; ctx_isp->last_applied_req_id = 0; ctx_isp->req_info.last_bufdone_req_id = 0; + ctx_isp->bubble_frame_cnt = 0; atomic_set(&ctx_isp->process_bubble, 0); atomic_set(&ctx_isp->rxd_epoch, 0); atomic64_set(&ctx_isp->state_monitor_head, -1); @@ -5715,6 +5818,7 @@ int cam_isp_context_init(struct cam_isp_context *ctx, ctx->use_frame_header_ts = false; ctx->active_req_cnt = 0; ctx->reported_req_id = 0; + ctx->bubble_frame_cnt = 0; ctx->req_info.last_bufdone_req_id = 0; ctx->hw_ctx = NULL; diff --git a/drivers/cam_isp/cam_isp_context.h b/drivers/cam_isp/cam_isp_context.h index 8ee0ff1d52..a0c9f7bc54 100644 --- a/drivers/cam_isp/cam_isp_context.h +++ b/drivers/cam_isp/cam_isp_context.h @@ -138,21 +138,23 @@ struct cam_isp_ctx_irq_ops { /** * struct cam_isp_ctx_req - ISP context request object * - * @base: Common request object ponter - * @cfg: ISP hardware configuration array - * @num_cfg: Number of ISP hardware configuration entries - * @fence_map_out: Output fence mapping array - * @num_fence_map_out: Number of the output fence map - * @fence_map_in: Input fence mapping array - * @num_fence_map_in: Number of input fence map - * @num_acked: Count to track acked entried for output. - * If count equals the number of fence out, it means - * the request has been completed. 
- * @bubble_report: Flag to track if bubble report is active on - current request - * @hw_update_data: HW update data for this request - * @event_timestamp: Timestamp for different stage of request - * @reapply: True if reapplying after bubble + * @base: Common request object pointer + * @cfg: ISP hardware configuration array + * @num_cfg: Number of ISP hardware configuration entries + * @fence_map_out: Output fence mapping array + * @num_fence_map_out: Number of the output fence map + * @fence_map_in: Input fence mapping array + * @num_fence_map_in: Number of input fence map + * @num_acked: Count to track acked entries for output. + * If count equals the number of fence out, it means + * the request has been completed. + * @bubble_report: Flag to track if bubble report is active on + * current request + * @hw_update_data: HW update data for this request + * @event_timestamp: Timestamp for different stage of request + * @reapply: True if reapplying after bubble + * @cdm_reset_before_apply: Set to True for bubble re-apply when buf done is + * not coming + * */ struct cam_isp_ctx_req { @@ -171,6 +173,7 @@ struct cam_isp_ctx_req { [CAM_ISP_CTX_EVENT_MAX]; bool bubble_detected; bool reapply; + bool cdm_reset_before_apply; }; /** @@ -242,6 +245,8 @@ struct cam_isp_context { * @subscribe_event: The irq event mask that CRM subscribes to, IFE * will invoke CRM cb at those event. 
* @last_applied_req_id: Last applied request id + * @last_sof_timestamp: SOF timestamp of the last frame + * @bubble_frame_cnt: Count of the frame after bubble * @state_monitor_head: Write index to the state monitoring array * @req_info Request id information about last buf done * @cam_isp_ctx_state_monitor: State monitoring array @@ -284,6 +289,8 @@ struct cam_isp_context { int64_t reported_req_id; uint32_t subscribe_event; int64_t last_applied_req_id; + uint64_t last_sof_timestamp; + uint32_t bubble_frame_cnt; atomic64_t state_monitor_head; struct cam_isp_context_state_monitor cam_isp_ctx_state_monitor[ CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES]; diff --git a/drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c index 40f1d24701..3811512209 100644 --- a/drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c +++ b/drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c @@ -2831,6 +2831,7 @@ void cam_ife_cam_cdm_callback(uint32_t handle, void *userdata, complete_all(&ctx->config_done_complete); reg_dump_done = atomic_read(&ctx->cdm_done); atomic_set(&ctx->cdm_done, 1); + ctx->last_cdm_done_req = cookie; if ((g_ife_hw_mgr.debug_cfg.per_req_reg_dump) && (!reg_dump_done)) cam_ife_mgr_handle_reg_dump(ctx, @@ -3233,6 +3234,7 @@ static int cam_ife_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args) if (cdm_acquire.id == CAM_CDM_IFE) ife_ctx->internal_cdm = true; atomic_set(&ife_ctx->cdm_done, 1); + ife_ctx->last_cdm_done_req = 0; acquire_args->support_consumed_addr = g_ife_hw_mgr.support_consumed_addr; @@ -3511,6 +3513,7 @@ static int cam_ife_mgr_acquire_dev(void *hw_mgr_priv, void *acquire_hw_args) ife_ctx->cdm_handle = cdm_acquire.handle; ife_ctx->cdm_id = cdm_acquire.id; atomic_set(&ife_ctx->cdm_done, 1); + ife_ctx->last_cdm_done_req = 0; acquire_args->ctxt_to_hw_map = ife_ctx; ife_ctx->ctx_in_use = 1; @@ -3847,6 +3850,7 @@ static int cam_ife_mgr_config_hw(void *hw_mgr_priv, struct cam_ife_hw_mgr_ctx *ctx; struct cam_isp_prepare_hw_update_data 
*hw_update_data; unsigned long rem_jiffies = 0; + bool cdm_hang_detect = false; if (!hw_mgr_priv || !config_hw_args) { CAM_ERR(CAM_ISP, @@ -3882,6 +3886,31 @@ static int cam_ife_mgr_config_hw(void *hw_mgr_priv, CAM_DBG(CAM_ISP, "Ctx[%pK][%d] : Applying Req %lld, init_packet=%d", ctx, ctx->ctx_index, cfg->request_id, cfg->init_packet); + if (cfg->reapply && cfg->cdm_reset_before_apply) { + if (ctx->last_cdm_done_req < cfg->request_id) { + cdm_hang_detect = + cam_cdm_detect_hang_error(ctx->cdm_handle); + CAM_ERR_RATE_LIMIT(CAM_ISP, + "CDM callback not received for req: %lld, last_cdm_done_req: %lld, cdm_hang_detect: %d", + cfg->request_id, ctx->last_cdm_done_req, + cdm_hang_detect); + rc = cam_cdm_reset_hw(ctx->cdm_handle); + if (rc) { + CAM_ERR_RATE_LIMIT(CAM_ISP, + "CDM reset unsuccessful for req: %lld. ctx: %d, rc: %d", + cfg->request_id, ctx->ctx_index, rc); + ctx->last_cdm_done_req = 0; + return rc; + } + } else { + CAM_ERR_RATE_LIMIT(CAM_ISP, + "CDM callback received, should wait for buf done for req: %lld", + cfg->request_id); + return -EALREADY; + } + ctx->last_cdm_done_req = 0; + } + for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) { if (hw_update_data->bw_config_valid[i] == true) { @@ -4779,6 +4808,7 @@ static int cam_ife_mgr_release_hw(void *hw_mgr_priv, ctx->is_fe_enabled = false; ctx->is_offline = false; ctx->pf_mid_found = false; + ctx->last_cdm_done_req = 0; atomic_set(&ctx->overflow_pending, 0); for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) { ctx->sof_cnt[i] = 0; @@ -7004,6 +7034,10 @@ static int cam_ife_mgr_cmd(void *hw_mgr_priv, void *cmd_args) isp_hw_cmd_args->u.packet_op_code = CAM_ISP_PACKET_UPDATE_DEV; break; + case CAM_ISP_HW_MGR_GET_LAST_CDM_DONE: + isp_hw_cmd_args->u.last_cdm_done = + ctx->last_cdm_done_req; + break; default: CAM_ERR(CAM_ISP, "Invalid HW mgr command:0x%x", hw_cmd_args->cmd_type); diff --git a/drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h b/drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h index f7f5461124..a0bbf78b19 100644 --- 
a/drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h +++ b/drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h @@ -85,6 +85,7 @@ struct cam_ife_hw_mgr_debug { * context * @cdm_done flag to indicate cdm has finished writing shadow * registers + * @last_cdm_done_req: Last cdm done request * @is_rdi_only_context flag to specify the context has only rdi resource * @config_done_complete indicator for configuration complete * @reg_dump_buf_desc: cmd buffer descriptors for reg dump @@ -138,6 +139,7 @@ struct cam_ife_hw_mgr_ctx { uint32_t eof_cnt[CAM_IFE_HW_NUM_MAX]; atomic_t overflow_pending; atomic_t cdm_done; + uint64_t last_cdm_done_req; uint32_t is_rdi_only_context; struct completion config_done_complete; uint32_t hw_version; diff --git a/drivers/cam_isp/isp_hw_mgr/cam_tfe_hw_mgr.c b/drivers/cam_isp/isp_hw_mgr/cam_tfe_hw_mgr.c index ebe69a1eac..646a575d73 100644 --- a/drivers/cam_isp/isp_hw_mgr/cam_tfe_hw_mgr.c +++ b/drivers/cam_isp/isp_hw_mgr/cam_tfe_hw_mgr.c @@ -1774,6 +1774,7 @@ void cam_tfe_cam_cdm_callback(uint32_t handle, void *userdata, (struct cam_tfe_hw_mgr_ctx *)hw_update_data->isp_mgr_ctx; complete_all(&ctx->config_done_complete); atomic_set(&ctx->cdm_done, 1); + ctx->last_cdm_done_req = cookie; if (g_tfe_hw_mgr.debug_cfg.per_req_reg_dump) cam_tfe_mgr_handle_reg_dump(ctx, hw_update_data->reg_dump_buf_desc, @@ -1891,6 +1892,7 @@ static int cam_tfe_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args) tfe_ctx->cdm_handle = cdm_acquire.handle; tfe_ctx->cdm_ops = cdm_acquire.ops; atomic_set(&tfe_ctx->cdm_done, 1); + tfe_ctx->last_cdm_done_req = 0; acquire_hw_info = (struct cam_isp_tfe_acquire_hw_info *) acquire_args->acquire_info; @@ -2127,6 +2129,7 @@ static int cam_tfe_mgr_acquire_dev(void *hw_mgr_priv, void *acquire_hw_args) tfe_ctx->cdm_handle = cdm_acquire.handle; tfe_ctx->cdm_ops = cdm_acquire.ops; atomic_set(&tfe_ctx->cdm_done, 1); + tfe_ctx->last_cdm_done_req = 0; isp_resource = (struct cam_isp_resource *)acquire_args->acquire_info; @@ -2440,6 +2443,7 @@ static int 
cam_tfe_mgr_config_hw(void *hw_mgr_priv, struct cam_cdm_bl_request *cdm_cmd; struct cam_tfe_hw_mgr_ctx *ctx; struct cam_isp_prepare_hw_update_data *hw_update_data; + bool cdm_hang_detect = false; if (!hw_mgr_priv || !config_hw_args) { CAM_ERR(CAM_ISP, "Invalid arguments"); @@ -2463,6 +2467,31 @@ static int cam_tfe_mgr_config_hw(void *hw_mgr_priv, hw_update_data = (struct cam_isp_prepare_hw_update_data *) cfg->priv; hw_update_data->isp_mgr_ctx = ctx; + if (cfg->reapply && cfg->cdm_reset_before_apply) { + if (ctx->last_cdm_done_req < cfg->request_id) { + cdm_hang_detect = + cam_cdm_detect_hang_error(ctx->cdm_handle); + CAM_ERR_RATE_LIMIT(CAM_ISP, + "CDM callback not received for req: %lld, last_cdm_done_req: %lld, cdm_hang_detect: %d", + cfg->request_id, ctx->last_cdm_done_req, + cdm_hang_detect); + rc = cam_cdm_reset_hw(ctx->cdm_handle); + if (rc) { + CAM_ERR_RATE_LIMIT(CAM_ISP, + "CDM reset unsuccessful for req: %lld, ctx: %d, rc: %d", + cfg->request_id, ctx->ctx_index, rc); + ctx->last_cdm_done_req = 0; + return rc; + } + } else { + CAM_ERR_RATE_LIMIT(CAM_ISP, + "CDM callback received, should wait for buf done for req: %lld", + cfg->request_id); + return -EALREADY; + } + ctx->last_cdm_done_req = 0; + } + for (i = 0; i < CAM_TFE_HW_NUM_MAX; i++) { if (hw_update_data->bw_config_valid[i] == true) { @@ -3426,6 +3455,7 @@ static int cam_tfe_mgr_release_hw(void *hw_mgr_priv, ctx->is_tpg = false; ctx->num_reg_dump_buf = 0; ctx->res_list_tpg.res_type = CAM_ISP_RESOURCE_MAX; + ctx->last_cdm_done_req = 0; atomic_set(&ctx->overflow_pending, 0); for (i = 0; i < ctx->last_submit_bl_cmd.bl_count; i++) { @@ -4718,6 +4748,10 @@ static int cam_tfe_mgr_cmd(void *hw_mgr_priv, void *cmd_args) isp_hw_cmd_args->u.packet_op_code = CAM_ISP_TFE_PACKET_CONFIG_DEV; break; + case CAM_ISP_HW_MGR_GET_LAST_CDM_DONE: + isp_hw_cmd_args->u.last_cdm_done = + ctx->last_cdm_done_req; + break; default: CAM_ERR(CAM_ISP, "Invalid HW mgr command:0x%x", hw_cmd_args->cmd_type); diff --git 
a/drivers/cam_isp/isp_hw_mgr/cam_tfe_hw_mgr.h b/drivers/cam_isp/isp_hw_mgr/cam_tfe_hw_mgr.h index 6ea17d74ec..b4b75d9110 100644 --- a/drivers/cam_isp/isp_hw_mgr/cam_tfe_hw_mgr.h +++ b/drivers/cam_isp/isp_hw_mgr/cam_tfe_hw_mgr.h @@ -70,6 +70,7 @@ struct cam_tfe_hw_mgr_debug { * context * @cdm_done flag to indicate cdm has finished writing shadow * registers + * @last_cdm_done_req: Last CDM done request * @is_rdi_only_context flag to specify the context has only rdi resource * @reg_dump_buf_desc: cmd buffer descriptors for reg dump * @num_reg_dump_buf: count of descriptors in reg_dump_buf_desc @@ -112,6 +113,7 @@ struct cam_tfe_hw_mgr_ctx { atomic_t overflow_pending; atomic_t cdm_done; + uint64_t last_cdm_done_req; uint32_t is_rdi_only_context; struct cam_cmd_buf_desc reg_dump_buf_desc[ CAM_REG_DUMP_MAX_BUF_ENTRIES]; diff --git a/drivers/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h b/drivers/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h index 6811777370..4508752dba 100644 --- a/drivers/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h +++ b/drivers/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h @@ -234,6 +234,7 @@ enum cam_isp_hw_mgr_command { CAM_ISP_HW_MGR_CMD_SOF_DEBUG, CAM_ISP_HW_MGR_CMD_CTX_TYPE, CAM_ISP_HW_MGR_GET_PACKET_OPCODE, + CAM_ISP_HW_MGR_GET_LAST_CDM_DONE, CAM_ISP_HW_MGR_CMD_MAX, }; @@ -247,11 +248,12 @@ enum cam_isp_ctx_type { /** * struct cam_isp_hw_cmd_args - Payload for hw manager command * - * @cmd_type HW command type - * @cmd_data command data - * @sof_irq_enable To debug if SOF irq is enabled - * @ctx_type RDI_ONLY, PIX and RDI, or FS2 - * @packet_op_code packet opcode + * @cmd_type: HW command type + * @cmd_data: Command data + * @sof_irq_enable: To debug if SOF irq is enabled + * @ctx_type: RDI_ONLY, PIX and RDI, or FS2 + * @packet_op_code: Packet opcode + * @last_cdm_done: Last cdm done request */ struct cam_isp_hw_cmd_args { uint32_t cmd_type; @@ -260,6 +262,7 @@ struct cam_isp_hw_cmd_args { uint32_t sof_irq_enable; uint32_t 
ctx_type; uint32_t packet_op_code; + uint64_t last_cdm_done; } u; };