diff --git a/drivers/cam_core/cam_context.c b/drivers/cam_core/cam_context.c index 68e6b28eac..b742152775 100644 --- a/drivers/cam_core/cam_context.c +++ b/drivers/cam_core/cam_context.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. */ #include @@ -182,6 +182,33 @@ int cam_context_handle_crm_apply_req(struct cam_context *ctx, return rc; } +int cam_context_handle_crm_state_change(struct cam_context *ctx, + struct cam_req_mgr_request_change_state *state_info) +{ + int rc; + + if (!ctx->state_machine) { + CAM_ERR(CAM_CORE, "Context is not ready"); + return -EINVAL; + } + + if (!state_info) { + CAM_ERR(CAM_CORE, "Invalid change state payload"); + return -EINVAL; + } + + if (ctx->state_machine[ctx->state].crm_ops.change_state) { + rc = ctx->state_machine[ctx->state].crm_ops.change_state(ctx, + state_info); + } else { + CAM_ERR(CAM_CORE, "No crm change state req in dev %d, state %d", + ctx->dev_hdl, ctx->state); + rc = -EPROTO; + } + + return rc; +} + int cam_context_handle_crm_notify_frame_skip( struct cam_context *ctx, struct cam_req_mgr_apply_request *apply) diff --git a/drivers/cam_core/cam_context.h b/drivers/cam_core/cam_context.h index 1a3c66b821..1b3f1bb58f 100644 --- a/drivers/cam_core/cam_context.h +++ b/drivers/cam_core/cam_context.h @@ -129,6 +129,7 @@ struct cam_ctx_ioctl_ops { * @flush_req: Flush request to remove request ids * @process_evt: Handle event notification from CRM.(optional) * @dump_req: Dump information for the issue request + * @change_state: Change sub-state of hw context layer to bubble * */ struct cam_ctx_crm_ops { @@ -148,6 +149,8 @@ struct cam_ctx_crm_ops { struct cam_req_mgr_link_evt_data *evt_data); int (*dump_req)(struct cam_context *ctx, struct cam_req_mgr_dump_info *dump); + int (*change_state)(struct cam_context *ctx, + struct cam_req_mgr_request_change_state *change_state); }; 
@@ -327,6 +330,18 @@ int cam_context_handle_crm_unlink(struct cam_context *ctx, int cam_context_handle_crm_apply_req(struct cam_context *ctx, struct cam_req_mgr_apply_request *apply); +/** + * cam_context_handle_crm_state_change() + * + * @brief: Handle state change request + * + * @ctx: Object pointer for cam_context + * @state_info: State change request command payload + * + */ +int cam_context_handle_crm_state_change(struct cam_context *ctx, + struct cam_req_mgr_request_change_state *state_info); + /** * cam_context_handle_crm_notify_frame_skip() * diff --git a/drivers/cam_core/cam_node.c b/drivers/cam_core/cam_node.c index 5e5ea9eac0..11ba559748 100644 --- a/drivers/cam_core/cam_node.c +++ b/drivers/cam_core/cam_node.c @@ -619,6 +619,24 @@ static int __cam_node_crm_flush_req(struct cam_req_mgr_flush_request *flush) return cam_context_handle_crm_flush_req(ctx, flush); } +static int __cam_node_crm_state_change_req( + struct cam_req_mgr_request_change_state *state_info) +{ + struct cam_context *ctx = NULL; + + if (!state_info) + return -EINVAL; + + ctx = (struct cam_context *) cam_get_device_priv(state_info->dev_hdl); + if (!ctx) { + CAM_ERR(CAM_CORE, "Can not get context for handle %d", + state_info->dev_hdl); + return -EINVAL; + } + + return cam_context_handle_crm_state_change(ctx, state_info); +} + static int __cam_node_crm_process_evt( struct cam_req_mgr_link_evt_data *evt_data) { @@ -715,6 +733,7 @@ int cam_node_init(struct cam_node *node, struct cam_hw_mgr_intf *hw_mgr_intf, node->crm_node_intf.dump_req = __cam_node_crm_dump_req; node->crm_node_intf.notify_frame_skip = __cam_node_crm_notify_frame_skip; + node->crm_node_intf.change_state = __cam_node_crm_state_change_req; mutex_init(&node->list_mutex); INIT_LIST_HEAD(&node->free_ctx_list); diff --git a/drivers/cam_cust/cam_custom_context.c b/drivers/cam_cust/cam_custom_context.c index f9098e791e..a38748d392 100644 --- a/drivers/cam_cust/cam_custom_context.c +++ b/drivers/cam_cust/cam_custom_context.c @@ 
-549,6 +549,7 @@ static int __cam_custom_ctx_get_dev_info_in_acquired(struct cam_context *ctx, dev_info->dev_id = CAM_REQ_MGR_DEVICE_CUSTOM_HW; dev_info->p_delay = 1; dev_info->trigger = CAM_TRIGGER_POINT_SOF; + dev_info->sof_ts_cb = NULL; return 0; } diff --git a/drivers/cam_isp/cam_isp_context.c b/drivers/cam_isp/cam_isp_context.c index 825e99f1ff..3bf6429a5a 100644 --- a/drivers/cam_isp/cam_isp_context.c +++ b/drivers/cam_isp/cam_isp_context.c @@ -749,8 +749,8 @@ static void __cam_isp_ctx_send_sof_timestamp( req_msg.u.frame_msg.frame_id_meta = ctx_isp->frame_id_meta; CAM_DBG(CAM_ISP, - "request id:%lld frame number:%lld SOF time stamp:0x%llx status:%u", - request_id, ctx_isp->frame_id, + "link hdl 0x%x request id:%lld frame number:%lld SOF time stamp:%lld status:%u", + ctx_isp->base->link_hdl, request_id, ctx_isp->frame_id, ctx_isp->sof_timestamp_val, sof_event_status); if (cam_req_mgr_notify_message(&req_msg, @@ -1677,6 +1677,7 @@ static int __cam_isp_ctx_reg_upd_in_applied_state( } req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request, list); + list_del_init(&req->list); req_isp = (struct cam_isp_ctx_req *) req->req_priv; @@ -1685,8 +1686,9 @@ static int __cam_isp_ctx_reg_upd_in_applied_state( ctx_isp->active_req_cnt++; request_id = req->request_id; CAM_DBG(CAM_REQ, - "move request %lld to active list(cnt = %d), ctx %u", - req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id); + "move request %lld to active list(cnt = %d), ctx %u link %x", + req->request_id, ctx_isp->active_req_cnt, + ctx->ctx_id, ctx->link_hdl); __cam_isp_ctx_update_event_record(ctx_isp, CAM_ISP_CTX_EVENT_RUP, req); } else { @@ -1828,6 +1830,7 @@ notify_only: notify.trigger = CAM_TRIGGER_POINT_SOF; notify.req_id = ctx_isp->req_info.last_bufdone_req_id; notify.sof_timestamp_val = ctx_isp->sof_timestamp_val; + notify.sof_boottime = ctx_isp->boot_timestamp; notify.trigger_id = ctx_isp->trigger_id; ctx->ctx_crm_intf->notify_trigger(¬ify); @@ -1949,8 +1952,9 @@ static int 
__cam_isp_ctx_sof_in_activated_state( __cam_isp_ctx_update_state_monitor_array(ctx_isp, CAM_ISP_STATE_CHANGE_TRIGGER_SOF, request_id); - CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx, ctx %u", - ctx_isp->frame_id, ctx_isp->sof_timestamp_val, ctx->ctx_id); + CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx, ctx %u link %x", + ctx_isp->frame_id, ctx_isp->sof_timestamp_val, + ctx->ctx_id, ctx->link_hdl); return rc; } @@ -1995,6 +1999,7 @@ end: static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp, void *evt_data) { + bool rc = false; uint64_t request_id = 0; uint32_t sof_event_status = CAM_REQ_MGR_SOF_EVENT_SUCCESS; struct cam_req_mgr_trigger_notify notify; @@ -2036,8 +2041,9 @@ static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp, req_isp->reapply = true; req_isp->cdm_reset_before_apply = false; - CAM_INFO_RATE_LIMIT(CAM_ISP, "ctx:%d Report Bubble flag %d req id:%lld", - ctx->ctx_id, req_isp->bubble_report, req->request_id); + CAM_INFO_RATE_LIMIT(CAM_ISP, "ctx:%d link %x Report Bubble flag %d req id:%lld", + ctx->ctx_id, ctx->link_hdl, + req_isp->bubble_report, req->request_id); if (req_isp->bubble_report && ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_err) { struct cam_req_mgr_error_notify notify; @@ -2051,12 +2057,25 @@ static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp, notify.trigger = CAM_TRIGGER_POINT_SOF; notify.frame_id = ctx_isp->frame_id; notify.sof_timestamp_val = ctx_isp->sof_timestamp_val; + notify.sof_boottime_val = ctx_isp->boot_timestamp; + notify.need_recovery = true; CAM_WARN_RATE_LIMIT(CAM_ISP, "Notify CRM about Bubble req %lld frame %lld, ctx %u", req->request_id, ctx_isp->frame_id, ctx->ctx_id); trace_cam_log_event("Bubble", "Rcvd epoch in applied state", req->request_id, ctx->ctx_id); - ctx->ctx_crm_intf->notify_err(¬ify); + rc = ctx->ctx_crm_intf->notify_err(¬ify); + + CAM_DBG(CAM_CRM, "Need bubble recovery %d", rc); + + if (rc) { + req_isp->bubble_detected = false; + 
req_isp->reapply = false; + CAM_DBG(CAM_ISP, "Disable bubble for ctx %d link %d", + ctx->ctx_id, ctx->link_hdl); + return 0; + } + atomic_set(&ctx_isp->process_bubble, 1); } else { req_isp->bubble_report = 0; @@ -2230,6 +2249,7 @@ static int __cam_isp_ctx_buf_done_in_bubble( static int __cam_isp_ctx_epoch_in_bubble_applied( struct cam_isp_context *ctx_isp, void *evt_data) { + int rc = 0; uint64_t request_id = 0; struct cam_req_mgr_trigger_notify notify; struct cam_ctx_request *req; @@ -2287,10 +2307,17 @@ static int __cam_isp_ctx_epoch_in_bubble_applied( notify.trigger = CAM_TRIGGER_POINT_SOF; notify.frame_id = ctx_isp->frame_id; notify.sof_timestamp_val = ctx_isp->sof_timestamp_val; + notify.sof_boottime_val = ctx_isp->boot_timestamp; + notify.need_recovery = true; CAM_WARN_RATE_LIMIT(CAM_REQ, "Notify CRM about Bubble req_id %llu frame %lld, ctx %u", req->request_id, ctx_isp->frame_id, ctx->ctx_id); - ctx->ctx_crm_intf->notify_err(¬ify); + rc = ctx->ctx_crm_intf->notify_err(¬ify); + if (rc) { + req_isp->bubble_detected = false; + req_isp->reapply = false; + return 0; + } atomic_set(&ctx_isp->process_bubble, 1); } else { req_isp->bubble_report = 0; @@ -3167,10 +3194,10 @@ static int __cam_isp_ctx_apply_req_in_activated_state( goto end; } - CAM_DBG(CAM_REQ, "Apply request %lld in Substate[%s] ctx %u", + CAM_DBG(CAM_REQ, "Apply request %lld in Substate[%s] ctx %u link %x", req->request_id, __cam_isp_ctx_substate_val_to_type(ctx_isp->substate_activated), - ctx->ctx_id); + ctx->ctx_id, ctx->link_hdl); req_isp = (struct cam_isp_ctx_req *) req->req_priv; if (ctx_isp->active_req_cnt >= 2) { @@ -3253,6 +3280,81 @@ end: return rc; } +static int __cam_isp_ctx_change_substate( + struct cam_context *ctx, + struct cam_req_mgr_request_change_state *state_info) +{ + int rc = 0; + uint64_t request_id = 0; + struct cam_ctx_request *req = NULL; + struct cam_ctx_request *req_temp = NULL; + struct cam_ctx_request *bubble_req = NULL; + struct cam_isp_ctx_req *req_isp = NULL; + struct 
cam_isp_context *ctx_isp = + (struct cam_isp_context *) ctx->ctx_priv; + + if (!list_empty(&ctx->wait_req_list)) { + req = list_first_entry(&ctx->wait_req_list, + struct cam_ctx_request, + list); + if (req->request_id == state_info->req_id) { + req_isp = (struct cam_isp_ctx_req *)req->req_priv; + req_isp->bubble_detected = true; + req_isp->reapply = true; + bubble_req = req; + list_del_init(&req->list); + list_add_tail(&req->list, &ctx->active_req_list); + goto end; + } + } else { + CAM_ERR(CAM_ISP, "Ctx:%d No wait request", ctx->ctx_id); + } + + if (!bubble_req) { + list_for_each_entry_safe(req, req_temp, + &ctx->active_req_list, list) { + if (req->request_id == state_info->req_id) { + req_isp = + (struct cam_isp_ctx_req *)req->req_priv; + req_isp->bubble_detected = true; + req_isp->reapply = true; + bubble_req = req; + break; + } + } + } + + if (!bubble_req) { + CAM_ERR(CAM_ISP, "Req %lld not in active list ctx : %d", + state_info->req_id, + ctx->ctx_id); + goto done; + } + +end: + + if (req_isp->bubble_report) + atomic_set(&ctx_isp->process_bubble, 1); + + if ((req->request_id > ctx_isp->reported_req_id) + && !req_isp->bubble_report) { + request_id = req->request_id; + ctx_isp->reported_req_id = request_id; + } + + __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id, + CAM_REQ_MGR_SOF_EVENT_ERROR); + + ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE; + + CAM_DBG(CAM_ISP, "next Substate[%s] ctx : %d", + __cam_isp_ctx_substate_val_to_type( + ctx_isp->substate_activated), ctx->ctx_id); + +done: + return rc; +} + static int __cam_isp_ctx_apply_req_in_sof( struct cam_context *ctx, struct cam_req_mgr_apply_request *apply) { @@ -3313,8 +3415,8 @@ static int __cam_isp_ctx_apply_req_in_bubble( ctx_isp->substate_activated)); rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply, CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED); - CAM_DBG(CAM_ISP, "new Substate[%s]", - __cam_isp_ctx_substate_val_to_type( + CAM_DBG(CAM_ISP, "ctx %d link %x new Substate[%s]", + 
ctx->ctx_id, ctx->link_hdl, __cam_isp_ctx_substate_val_to_type( ctx_isp->substate_activated)); if (rc) @@ -3704,6 +3806,7 @@ static int __cam_isp_ctx_flush_req_in_top_state( flush_req); ctx_isp->active_req_cnt = 0; + spin_unlock_bh(&ctx->lock); reset_args.ctxt_to_hw_map = ctx_isp->hw_ctx; @@ -3753,13 +3856,16 @@ static struct cam_ctx_ops .apply_req = __cam_isp_ctx_apply_req_in_sof, .notify_frame_skip = __cam_isp_ctx_apply_default_req_settings, + .change_state = __cam_isp_ctx_change_substate, }, .irq_ops = NULL, }, /* APPLIED */ { .ioctl_ops = {}, - .crm_ops = {}, + .crm_ops = { + .change_state = __cam_isp_ctx_change_substate, + }, .irq_ops = NULL, }, /* EPOCH */ @@ -3769,6 +3875,7 @@ static struct cam_ctx_ops .apply_req = __cam_isp_ctx_apply_req_in_epoch, .notify_frame_skip = __cam_isp_ctx_apply_default_req_settings, + .change_state = __cam_isp_ctx_change_substate, }, .irq_ops = NULL, }, @@ -3779,13 +3886,16 @@ static struct cam_ctx_ops .apply_req = __cam_isp_ctx_apply_req_in_bubble, .notify_frame_skip = __cam_isp_ctx_apply_default_req_settings, + .change_state = __cam_isp_ctx_change_substate, }, .irq_ops = NULL, }, /* Bubble Applied */ { .ioctl_ops = {}, - .crm_ops = {}, + .crm_ops = { + .change_state = __cam_isp_ctx_change_substate, + }, .irq_ops = NULL, }, /* HW ERROR */ @@ -5506,6 +5616,33 @@ static int __cam_isp_ctx_unlink_in_acquired(struct cam_context *ctx, return rc; } +static int __cam_isp_ctx_get_isp_info(int32_t dev_hdl, void *data) +{ + int rc = 0; + struct cam_context *ctx; + struct cam_req_mgr_dev_info *isp_dev = data; + struct cam_isp_context *isp_ctx = NULL; + + ctx = (struct cam_context *)cam_get_device_priv(dev_hdl); + + isp_ctx = (struct cam_isp_context *)ctx->ctx_priv; + + isp_dev->state = isp_ctx->substate_activated; + isp_dev->timestamp = isp_ctx->sof_timestamp_val; + isp_dev->boot_time = isp_ctx->boot_timestamp; + isp_dev->frame_id = isp_ctx->frame_id; + + if ((isp_ctx->substate_activated == + CAM_ISP_CTX_ACTIVATED_APPLIED) || + 
(isp_ctx->substate_activated == + CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED)) + isp_dev->is_applied = true; + else + isp_dev->is_applied = false; + + return rc; +} + static int __cam_isp_ctx_get_dev_info_in_acquired(struct cam_context *ctx, struct cam_req_mgr_device_info *dev_info) { @@ -5517,6 +5654,7 @@ static int __cam_isp_ctx_get_dev_info_in_acquired(struct cam_context *ctx, dev_info->p_delay = 1; dev_info->trigger = CAM_TRIGGER_POINT_SOF; dev_info->trigger_on = true; + dev_info->sof_ts_cb = &__cam_isp_ctx_get_isp_info; return rc; } @@ -5574,6 +5712,8 @@ static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx, atomic_set(&ctx_isp->process_bubble, 0); atomic_set(&ctx_isp->rxd_epoch, 0); ctx_isp->frame_id = 0; + ctx_isp->sof_timestamp_val = 0; + ctx_isp->boot_timestamp = 0; ctx_isp->active_req_cnt = 0; ctx_isp->reported_req_id = 0; ctx_isp->bubble_frame_cnt = 0; @@ -5923,6 +6063,33 @@ static int __cam_isp_ctx_unlink_in_activated(struct cam_context *ctx, return rc; } +static int __cam_isp_ctx_change_state_req(struct cam_context *ctx, + struct cam_req_mgr_request_change_state *state_info) +{ + int rc = 0; + struct cam_ctx_ops *ctx_ops = NULL; + struct cam_isp_context *ctx_isp = + (struct cam_isp_context *) ctx->ctx_priv; + + CAM_DBG(CAM_ISP, "Enter: changes state ctx id %d link 0x%x", + ctx->ctx_id, ctx->link_hdl); + ctx_ops = &ctx_isp->substate_machine[ctx_isp->substate_activated]; + if (ctx_ops->crm_ops.change_state) { + rc = ctx_ops->crm_ops.change_state(ctx, state_info); + } else { + CAM_WARN_RATE_LIMIT(CAM_ISP, + "No handle function in activated Substate[%s]", + __cam_isp_ctx_substate_val_to_type( + ctx_isp->substate_activated)); + rc = -EFAULT; + } + + if (rc) + CAM_WARN_RATE_LIMIT(CAM_ISP, + "changes state failed"); + return rc; +} + static int __cam_isp_ctx_apply_req(struct cam_context *ctx, struct cam_req_mgr_apply_request *apply) { @@ -6109,6 +6276,7 @@ static struct cam_ctx_ops .flush_req = __cam_isp_ctx_flush_req_in_top_state, .process_evt = 
__cam_isp_ctx_process_evt, .dump_req = __cam_isp_ctx_dump_in_top_state, + .change_state = __cam_isp_ctx_change_state_req, }, .irq_ops = __cam_isp_ctx_handle_irq_in_activated, .pagefault_ops = cam_isp_context_dump_requests, @@ -6305,6 +6473,8 @@ int cam_isp_context_init(struct cam_isp_context *ctx, ctx->base = ctx_base; ctx->frame_id = 0; + ctx->sof_timestamp_val = 0; + ctx->boot_timestamp = 0; ctx->custom_enabled = false; ctx->use_frame_header_ts = false; ctx->use_default_apply = false; diff --git a/drivers/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h b/drivers/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h index c8faf282e8..9322cd4fe8 100644 --- a/drivers/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h +++ b/drivers/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h @@ -284,15 +284,17 @@ enum cam_isp_ctx_type { * @ctx_type: RDI_ONLY, PIX and RDI, or FS2 * @packet_op_code: Packet opcode * @last_cdm_done: Last cdm done request + * @cam_isp_hw_sof_event_data sof event timestamp */ struct cam_isp_hw_cmd_args { uint32_t cmd_type; void *cmd_data; union { - uint32_t sof_irq_enable; - uint32_t ctx_type; - uint32_t packet_op_code; - uint64_t last_cdm_done; + uint32_t sof_irq_enable; + uint32_t ctx_type; + uint32_t packet_op_code; + uint64_t last_cdm_done; + struct cam_isp_hw_sof_event_data sof_done_event_data; } u; }; diff --git a/drivers/cam_req_mgr/cam_req_mgr_core.c b/drivers/cam_req_mgr/cam_req_mgr_core.c index 06469b0515..68fe95964a 100644 --- a/drivers/cam_req_mgr/cam_req_mgr_core.c +++ b/drivers/cam_req_mgr/cam_req_mgr_core.c @@ -18,9 +18,14 @@ #include "cam_req_mgr_debug.h" #include "cam_common_util.h" +#define THRESHOLD_FACTOR 3 + static struct cam_req_mgr_core_device *g_crm_core_dev; static struct cam_req_mgr_core_link g_links[MAXIMUM_LINKS_PER_SESSION]; +static char *__cam_req_mgr_dev_handle_to_name( + int32_t dev_hdl, struct cam_req_mgr_core_link *link); + void cam_req_mgr_core_link_reset(struct cam_req_mgr_core_link *link) { uint32_t pd = 0; @@ -43,6 
+48,7 @@ void cam_req_mgr_core_link_reset(struct cam_req_mgr_core_link *link) link->open_req_cnt = 0; link->last_flush_id = 0; link->initial_sync_req = -1; + link->modified_init_sync_req = -1; link->dual_trigger = false; link->trigger_cnt[0][0] = 0; link->trigger_cnt[0][1] = 0; @@ -51,13 +57,18 @@ void cam_req_mgr_core_link_reset(struct cam_req_mgr_core_link *link) link->in_msync_mode = false; link->retry_cnt = 0; link->is_shutdown = false; - link->initial_skip = true; + link->initial_skip = 0; link->sof_timestamp = 0; link->prev_sof_timestamp = 0; link->skip_init_frame = false; - link->num_sync_links = 0; link->last_sof_trigger_jiffies = 0; link->wq_congestion = false; + link->num_sync_link = 0; + link->activate_seq = -1; + link->bubble_skip = 0; + link->sync_frame_id = 0; + link->is_sync_req = true; + link->skip_sync_apply = false; atomic_set(&link->eof_event_cnt, 0); for (pd = 0; pd < CAM_PIPELINE_DELAY_MAX; pd++) { @@ -201,6 +212,33 @@ static int __cam_req_mgr_inject_delay( return rc; } +/** + * cam_req_mgr_get_dev_hdl() + * + * @brief : Finds device handle based on the device name + * @dev_name : Device name whose dev handle is to be found + * @link : Link on which the device is connected + * @return : device handle + * + */ +static int32_t cam_req_mgr_get_dev_hdl( + char *dev_name, struct cam_req_mgr_core_link *link) +{ + struct cam_req_mgr_connected_device *dev = NULL; + int i = 0; + + for (i = 0; i < link->num_devs; i++) { + dev = &link->l_dev[i]; + CAM_DBG(CAM_CRM, "dev name %s, iterate dev name %s", + dev_name, dev->dev_info.name); + if (strcmp(dev_name, dev->dev_info.name) == 0) + goto end; + } + return -EINVAL; +end: + return dev->dev_hdl; +} + /** * __cam_req_mgr_find_dev_name() * @@ -385,10 +423,10 @@ static int __cam_req_mgr_notify_error_on_link( * @return: 0 for success, negative for failure * */ -static int __cam_req_mgr_traverse(struct cam_req_mgr_traverse *traverse_data) +static int __cam_req_mgr_traverse( + struct cam_req_mgr_traverse 
*traverse_data) { int rc = 0; - int32_t next_idx = traverse_data->idx; int32_t curr_idx = traverse_data->idx; struct cam_req_mgr_req_tbl *tbl; struct cam_req_mgr_apply *apply_data; @@ -404,24 +442,18 @@ static int __cam_req_mgr_traverse(struct cam_req_mgr_traverse *traverse_data) tbl = traverse_data->tbl; apply_data = traverse_data->apply_data; slot = &tbl->slot[curr_idx]; - CAM_DBG(CAM_CRM, - "Enter pd %d idx %d state %d skip %d status %d skip_idx %d", - tbl->pd, curr_idx, tbl->slot[curr_idx].state, - tbl->skip_traverse, traverse_data->in_q->slot[curr_idx].status, - traverse_data->in_q->slot[curr_idx].skip_idx); - /* Check if req is ready or in skip mode or pd tbl is in skip mode */ - if (tbl->slot[curr_idx].state == CRM_REQ_STATE_READY || - traverse_data->in_q->slot[curr_idx].skip_idx == 1 || - tbl->skip_traverse > 0) { - if (tbl->next) { - __cam_req_mgr_dec_idx(&next_idx, tbl->pd_delta, - tbl->num_slots); - traverse_data->idx = next_idx; - traverse_data->tbl = tbl->next; - rc = __cam_req_mgr_traverse(traverse_data); - } - if (rc >= 0) { + while (tbl) { + CAM_DBG(CAM_CRM, + "Enter pd %d idx %d state %d skip %d status %d skip_idx %d", + tbl->pd, curr_idx, tbl->slot[curr_idx].state, + tbl->skip_traverse, + traverse_data->in_q->slot[curr_idx].status, + traverse_data->in_q->slot[curr_idx].skip_idx); + if (tbl->slot[curr_idx].state == CRM_REQ_STATE_READY || + traverse_data->in_q->slot[curr_idx].skip_idx == 1 || + tbl->skip_traverse > 0) { + SET_SUCCESS_BIT(traverse_data->result, tbl->pd); if (traverse_data->validate_only == false) { @@ -432,33 +464,34 @@ static int __cam_req_mgr_traverse(struct cam_req_mgr_traverse *traverse_data) apply_data[tbl->pd].idx = curr_idx; CAM_DBG(CAM_CRM, "req_id: %lld with pd of %d", - apply_data[tbl->pd].req_id, - apply_data[tbl->pd].pd); - /* - * If traverse is successful decrement - * traverse skip - */ + apply_data[tbl->pd].req_id, + apply_data[tbl->pd].pd); + if (tbl->skip_traverse > 0) { apply_data[tbl->pd].req_id = -1; 
tbl->skip_traverse--; } } + rc = 0; } else { - /* linked pd table is not ready for this traverse yet */ - return rc; + traverse_data->result_data.req_id = + CRM_GET_REQ_ID(traverse_data->in_q, curr_idx); + traverse_data->result_data.pd = tbl->pd; + traverse_data->result_data.masked_value = + (tbl->dev_mask & slot->req_ready_map); + SET_FAILURE_BIT(traverse_data->result, tbl->pd); + rc = -EAGAIN; + break; } - } else { - /* This pd table is not ready to proceed with asked idx */ - traverse_data->result_data.req_id = - CRM_GET_REQ_ID(traverse_data->in_q, curr_idx); - traverse_data->result_data.pd = tbl->pd; - traverse_data->result_data.masked_value = - (tbl->dev_mask & slot->req_ready_map); - SET_FAILURE_BIT(traverse_data->result, tbl->pd); - return -EAGAIN; + if (tbl->next) { + __cam_req_mgr_dec_idx(&curr_idx, tbl->pd_delta, + tbl->num_slots); + traverse_data->idx = curr_idx; + traverse_data->tbl = tbl->next; + } + tbl = tbl->next; } - - return 0; + return rc; } /** @@ -739,6 +772,51 @@ static int __cam_req_mgr_check_for_lower_pd_devices( return -EAGAIN; } +/** + * cam_req_mgr_dump_link_info() + * @brief : Dump link info on failure + * + * @dump_info : payload to dump data + */ +static void cam_req_mgr_dump_link_info( + struct cam_req_mgr_dump_link_data *dump_info) +{ + int i = 0; + struct cam_req_mgr_core_link *m_link; + struct cam_req_mgr_dev_info dev_data; + + m_link = dump_info->m_link; + dev_data = dump_info->dev_data; + + CAM_DBG(CAM_CRM, + "Master link req_id %lld 0x%x Slave link 0x%x req_id %lld frame id %lld csid time %lld boottime %lld", + m_link->link_hdl, dump_info->s_link->link_hdl, + dump_info->m_req_id, dump_info->s_req_id, + dev_data.frame_id, dev_data.timestamp, + dev_data.boot_time); + + CAM_DBG(CAM_CRM, + "Master link 0x%x csid time %lld bootime %lld frame id %lld sync frame id %lld init sync req %lld", + m_link->link_hdl, + m_link->sof_timestamp, + m_link->sof_boottime, + m_link->frame_id, + m_link->sync_frame_id, + m_link->initial_sync_req); + + 
for (i = 0; i < m_link->num_sync_link; i++) { + CAM_DBG(CAM_CRM, + "slave link 0x%x csid time %lld bootime %lld frame id %lld sync frame id %lld init sync req %lld", + m_link->sync_link[i]->link_hdl, + m_link->sync_link[i]->sof_timestamp, + m_link->sync_link[i]->sof_boottime, + m_link->sync_link[i]->frame_id, + m_link->sync_link[i]->sync_frame_id, + m_link->sync_link[i]->initial_sync_req); + } +} + + /** * __cam_req_mgr_check_next_req_slot() * @@ -1094,19 +1172,6 @@ static int __cam_req_mgr_check_link_is_ready(struct cam_req_mgr_core_link *link, traverse_data.validate_only = validate_only; traverse_data.open_req_cnt = link->open_req_cnt; - /* - * Some no-sync mode requests are processed after link config, - * then process the sync mode requests after no-sync mode requests - * are handled, the initial_skip should be false when processing - * the sync mode requests. - */ - if (link->initial_skip) { - CAM_DBG(CAM_CRM, - "Set initial_skip to false for link %x", - link->link_hdl); - link->initial_skip = false; - } - /* * Traverse through all pd tables, if result is success, * apply the settings @@ -1161,429 +1226,37 @@ static int32_t __cam_req_mgr_find_slot_for_req( } __cam_req_mgr_dec_idx(&idx, 1, in_q->num_slots); } - if (i >= in_q->num_slots) + + if ((i >= in_q->num_slots) || (idx >= in_q->num_slots)) idx = -1; return idx; } /** - * __cam_req_mgr_check_sync_for_mslave() + * cam_req_mgr_get_device_info() * - * @brief : Processes requests during sync mode [master-slave] - * Here master corresponds to the link having a higher - * max_delay (pd) compared to the slave link. 
- * @link : Pointer to link whose input queue and req tbl are - * traversed through - * @slot : Pointer to the current slot being processed - * @return : 0 for success, negative for failure + * @brief : Get state and qtime info from device + * @link : pointer to link + * @dev_info : pointer to dev_info payload + * + * @return : void * */ -static int __cam_req_mgr_check_sync_for_mslave( + +static void cam_req_mgr_get_device_info( struct cam_req_mgr_core_link *link, - struct cam_req_mgr_core_link *sync_link, - struct cam_req_mgr_slot *slot) + struct cam_req_mgr_dev_info *dev_info) { - struct cam_req_mgr_slot *sync_slot = NULL; - int sync_slot_idx = 0, prev_idx, next_idx, rd_idx, sync_rd_idx, rc = 0; - int64_t req_id = 0, sync_req_id = 0; - int32_t sync_num_slots = 0; + int i; + struct cam_req_mgr_connected_device *dev; - if (!sync_link || !link) { - CAM_ERR(CAM_CRM, "Sync link or link is null"); - return -EINVAL; + for (i = 0; i < link->num_devs; i++) { + dev = &link->l_dev[i]; + dev_info->link_hdl = link->link_hdl; + if (dev && dev->dev_info.sof_ts_cb) + dev->dev_info.sof_ts_cb(dev->dev_hdl, dev_info); } - - req_id = slot->req_id; - - if (!sync_link->req.in_q) { - CAM_ERR(CAM_CRM, "Link hdl %x in_q is NULL", - sync_link->link_hdl); - return -EINVAL; - } - - sync_num_slots = sync_link->req.in_q->num_slots; - sync_rd_idx = sync_link->req.in_q->rd_idx; - - CAM_DBG(CAM_CRM, - "link_hdl %x req %lld frame_skip_flag %d open_req_cnt:%u initial_sync_req [%lld,%lld] is_master:%d", - link->link_hdl, req_id, link->sync_link_sof_skip, - link->open_req_cnt, link->initial_sync_req, - sync_link->initial_sync_req, link->is_master); - - if (sync_link->sync_link_sof_skip) { - CAM_DBG(CAM_CRM, - "No req applied on corresponding SOF on sync link: %x", - sync_link->link_hdl); - sync_link->sync_link_sof_skip = false; - return -EAGAIN; - } - - if (link->in_msync_mode && - sync_link->in_msync_mode && - (req_id - sync_link->req.in_q->slot[sync_rd_idx].req_id > - link->max_delay - 
sync_link->max_delay)) { - CAM_DBG(CAM_CRM, - "Req: %lld on link:%x need to hold for link: %x req:%d", - req_id, - link->link_hdl, - sync_link->link_hdl, - sync_link->req.in_q->slot[sync_rd_idx].req_id); - return -EINVAL; - } - - if (link->is_master) { - if (sync_link->initial_skip) { - CAM_DBG(CAM_CRM, "Link 0x%x [slave] not streamed on", - sync_link->link_hdl); - return -EAGAIN; - } - - rc = __cam_req_mgr_check_link_is_ready(link, slot->idx, true); - if (rc) { - CAM_DBG(CAM_CRM, - "Req: %lld [master] not ready on link: %x, rc=%d", - req_id, link->link_hdl, rc); - link->sync_link_sof_skip = true; - return rc; - } - - prev_idx = slot->idx; - __cam_req_mgr_dec_idx(&prev_idx, - (link->max_delay - sync_link->max_delay), - link->req.in_q->num_slots); - - rd_idx = sync_link->req.in_q->rd_idx; - sync_req_id = link->req.in_q->slot[prev_idx].req_id; - if ((sync_link->initial_sync_req != -1) && - (sync_link->initial_sync_req <= sync_req_id)) { - sync_slot_idx = __cam_req_mgr_find_slot_for_req( - sync_link->req.in_q, sync_req_id); - if (sync_slot_idx == -1) { - CAM_DBG(CAM_CRM, - "Prev Req: %lld [master] not found on link: %x [slave]", - sync_req_id, sync_link->link_hdl); - link->sync_link_sof_skip = true; - return -EINVAL; - } - - if ((sync_link->req.in_q->slot[sync_slot_idx].status != - CRM_SLOT_STATUS_REQ_APPLIED) && - (((sync_slot_idx - rd_idx + sync_num_slots) % - sync_num_slots) >= 1) && - (sync_link->req.in_q->slot[rd_idx].status != - CRM_SLOT_STATUS_REQ_APPLIED)) { - CAM_DBG(CAM_CRM, - "Prev Req: %lld [master] not next on link: %x [slave]", - sync_req_id, - sync_link->link_hdl); - return -EINVAL; - } - - rc = __cam_req_mgr_check_link_is_ready(sync_link, - sync_slot_idx, true); - if (rc && - (sync_link->req.in_q->slot[sync_slot_idx].status - != CRM_SLOT_STATUS_REQ_APPLIED)) { - CAM_DBG(CAM_CRM, - "Req: %lld not ready on [slave] link: %x, rc=%d", - sync_req_id, sync_link->link_hdl, rc); - link->sync_link_sof_skip = true; - return rc; - } - } - } else { - if 
(link->initial_skip) - link->initial_skip = false; - - rc = __cam_req_mgr_check_link_is_ready(link, slot->idx, true); - if (rc) { - CAM_DBG(CAM_CRM, - "Req: %lld [slave] not ready on link: %x, rc=%d", - req_id, link->link_hdl, rc); - link->sync_link_sof_skip = true; - return rc; - } - - next_idx = link->req.in_q->rd_idx; - rd_idx = sync_link->req.in_q->rd_idx; - __cam_req_mgr_inc_idx(&next_idx, - (sync_link->max_delay - link->max_delay), - link->req.in_q->num_slots); - - sync_req_id = link->req.in_q->slot[next_idx].req_id; - - if ((sync_link->initial_sync_req != -1) && - (sync_link->initial_sync_req <= sync_req_id)) { - sync_slot_idx = __cam_req_mgr_find_slot_for_req( - sync_link->req.in_q, sync_req_id); - if (sync_slot_idx == -1) { - CAM_DBG(CAM_CRM, - "Next Req: %lld [slave] not found on link: %x [master]", - sync_req_id, sync_link->link_hdl); - link->sync_link_sof_skip = true; - return -EINVAL; - } - - if ((sync_link->req.in_q->slot[sync_slot_idx].status != - CRM_SLOT_STATUS_REQ_APPLIED) && - (((sync_slot_idx - rd_idx + sync_num_slots) % - sync_num_slots) >= 1) && - (sync_link->req.in_q->slot[rd_idx].status != - CRM_SLOT_STATUS_REQ_APPLIED)) { - CAM_DBG(CAM_CRM, - "Next Req: %lld [slave] not next on link: %x [master]", - sync_req_id, sync_link->link_hdl); - return -EINVAL; - } - - sync_slot = &sync_link->req.in_q->slot[sync_slot_idx]; - rc = __cam_req_mgr_check_link_is_ready(sync_link, - sync_slot_idx, true); - if (rc && (sync_slot->status != - CRM_SLOT_STATUS_REQ_APPLIED)) { - CAM_DBG(CAM_CRM, - "Next Req: %lld [slave] not ready on [master] link: %x, rc=%d", - sync_req_id, sync_link->link_hdl, rc); - link->sync_link_sof_skip = true; - return rc; - } - } - } - - CAM_DBG(CAM_REQ, - "Req: %lld ready to apply on link: %x [validation successful]", - req_id, link->link_hdl); - - return 0; -} - - -/** - * __cam_req_mgr_check_sync_request_is_ready() - * - * @brief : processes requests during sync mode - * @link : pointer to link whose input queue and req tbl are - * 
traversed through - * @slot : pointer to the current slot being processed - * @return : 0 for success, negative for failure - * - */ -static int __cam_req_mgr_check_sync_req_is_ready( - struct cam_req_mgr_core_link *link, - struct cam_req_mgr_core_link *sync_link, - struct cam_req_mgr_slot *slot, - uint32_t trigger) -{ - struct cam_req_mgr_slot *sync_rd_slot = NULL; - int64_t req_id = 0, sync_req_id = 0; - int sync_slot_idx = 0, sync_rd_idx = 0, rc = 0; - int32_t sync_num_slots = 0; - uint64_t sync_frame_duration = 0; - uint64_t sof_timestamp_delta = 0; - uint64_t master_slave_diff = 0; - bool ready = true, sync_ready = true; - int slot_idx_diff = 0; - - if (!sync_link || !link) { - CAM_ERR(CAM_CRM, "Sync link null"); - return -EINVAL; - } - - req_id = slot->req_id; - - if (!sync_link->req.in_q) { - CAM_ERR(CAM_CRM, "Link hdl %x in_q is NULL", - sync_link->link_hdl); - return -EINVAL; - } - - sync_num_slots = sync_link->req.in_q->num_slots; - sync_rd_idx = sync_link->req.in_q->rd_idx; - sync_rd_slot = &sync_link->req.in_q->slot[sync_rd_idx]; - sync_req_id = sync_rd_slot->req_id; - - CAM_DBG(CAM_REQ, - "link_hdl %x sync link_hdl %x req %lld", - link->link_hdl, sync_link->link_hdl, req_id); - - if (sync_link->initial_skip) { - link->initial_skip = false; - CAM_DBG(CAM_CRM, - "sync link %x not streamed on", - sync_link->link_hdl); - return -EAGAIN; - } - - if (sync_link->prev_sof_timestamp) - sync_frame_duration = sync_link->sof_timestamp - - sync_link->prev_sof_timestamp; - else - sync_frame_duration = DEFAULT_FRAME_DURATION; - - sof_timestamp_delta = - link->sof_timestamp >= sync_link->sof_timestamp - ? 
link->sof_timestamp - sync_link->sof_timestamp - : sync_link->sof_timestamp - link->sof_timestamp; - - CAM_DBG(CAM_CRM, - "sync link %x last frame_duration is %d ns", - sync_link->link_hdl, sync_frame_duration); - - if (link->initial_skip) { - link->initial_skip = false; - - if ((link->sof_timestamp > sync_link->sof_timestamp) && - (sync_link->sof_timestamp > 0) && - (link->sof_timestamp - sync_link->sof_timestamp) < - (sync_frame_duration / 2)) { - /* - * If this frame sync with the previous frame of sync - * link, then we need to skip this frame, since the - * previous frame of sync link is also skipped. - */ - CAM_DBG(CAM_CRM, - "This frame sync with previous sync_link %x frame", - sync_link->link_hdl); - return -EAGAIN; - } else if (link->sof_timestamp <= sync_link->sof_timestamp) { - /* - * Sometimes, link receives the SOF event is eariler - * than sync link in IFE CSID side, but link's SOF - * event is processed later than sync link's, then - * we need to skip this SOF event since the sync - * link's SOF event is also skipped. 
- */ - CAM_DBG(CAM_CRM, - "The previous frame of sync link is skipped"); - return -EAGAIN; - } - } - - if (sync_link->sync_link_sof_skip) { - CAM_DBG(CAM_REQ, - "No req applied on corresponding SOF on sync link: %x", - sync_link->link_hdl); - sync_link->sync_link_sof_skip = false; - return -EAGAIN; - } - - sync_slot_idx = __cam_req_mgr_find_slot_for_req( - sync_link->req.in_q, req_id); - if (sync_slot_idx == -1) { - CAM_DBG(CAM_CRM, "Req: %lld not found on link: %x [other link]", - req_id, sync_link->link_hdl); - sync_ready = false; - return -EAGAIN; - } - - slot_idx_diff = (sync_slot_idx - sync_rd_idx + sync_num_slots) % - sync_num_slots; - if ((sync_link->req.in_q->slot[sync_slot_idx].status != - CRM_SLOT_STATUS_REQ_APPLIED) && - ((slot_idx_diff > 1) || - ((slot_idx_diff == 1) && - (sync_rd_slot->status != - CRM_SLOT_STATUS_REQ_APPLIED)))) { - CAM_DBG(CAM_CRM, - "Req: %lld [other link] not next req to be applied on link: %x", - req_id, sync_link->link_hdl); - return -EAGAIN; - } - - rc = __cam_req_mgr_check_link_is_ready(link, slot->idx, true); - if (rc) { - CAM_DBG(CAM_CRM, - "Req: %lld [My link] not ready on link: %x, rc=%d", - req_id, link->link_hdl, rc); - ready = false; - } - - if (sync_link->req.in_q) { - rc = __cam_req_mgr_check_link_is_ready(sync_link, - sync_slot_idx, true); - if (rc && (sync_link->req.in_q->slot[sync_slot_idx].status != - CRM_SLOT_STATUS_REQ_APPLIED)) { - CAM_DBG(CAM_CRM, - "Req: %lld not ready on link: %x, rc=%d", - req_id, sync_link->link_hdl, rc); - sync_ready = false; - } - } else { - CAM_ERR(CAM_CRM, "Link hdl %x in_q is NULL", - sync_link->link_hdl); - return -EINVAL; - } - - /* - * If both of them are ready or not ready, then just - * skip this sof and don't skip sync link next SOF. - */ - if (sync_ready != ready) { - CAM_DBG(CAM_CRM, - "Req: %lld ready %d sync_ready %d, ignore sync link next SOF", - req_id, ready, sync_ready); - - /* - * Only skip the frames if current frame sync with - * next frame of sync link. 
- */ - if (link->sof_timestamp - sync_link->sof_timestamp > - sync_frame_duration / 2) - link->sync_link_sof_skip = true; - return -EINVAL; - } else if (ready == false) { - CAM_DBG(CAM_CRM, - "Req: %lld not ready on link: %x", - req_id, link->link_hdl); - return -EINVAL; - } - - /* - * Do the self-correction when the frames are sync, - * we consider that the frames are synced if the - * difference of two SOF timestamp less than - * (sync_frame_duration / 5). - */ - master_slave_diff = sync_frame_duration; - do_div(master_slave_diff, 5); - if ((trigger == CAM_TRIGGER_POINT_SOF) && - (sync_link->sof_timestamp > 0) && - (sof_timestamp_delta < master_slave_diff) && - (sync_rd_slot->sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC)) { - - /* - * This means current frame should sync with next - * frame of sync link, then the request id of in - * rd slot of two links should be same. - */ - CAM_DBG(CAM_CRM, - "link %x req_id %lld, sync_link %x req_id %lld", - link->link_hdl, req_id, - sync_link->link_hdl, sync_req_id); - - if (req_id > sync_req_id) { - CAM_DBG(CAM_CRM, - "link %x too quickly, skip this frame", - link->link_hdl); - return -EAGAIN; - } else if (req_id < sync_req_id) { - CAM_DBG(CAM_CRM, - "sync link %x too quickly, skip next frame of sync link", - sync_link->link_hdl); - link->sync_link_sof_skip = true; - } else if (sync_link->req.in_q->slot[sync_slot_idx].status != - CRM_SLOT_STATUS_REQ_APPLIED) { - CAM_DBG(CAM_CRM, - "link %x other not applied", link->link_hdl); - return -EAGAIN; - } - } - CAM_DBG(CAM_REQ, - "Req: %lld ready to apply on link: %x [validation successful]", - req_id, link->link_hdl); - - return 0; } static int __cam_req_mgr_check_multi_sync_link_ready( @@ -1591,61 +1264,16 @@ static int __cam_req_mgr_check_multi_sync_link_ready( struct cam_req_mgr_slot *slot, uint32_t trigger) { - int i, rc = 0; + int rc = 0; if (link->state == CAM_CRM_LINK_STATE_IDLE) { CAM_ERR(CAM_CRM, "link hdl %x is in idle state", - link->link_hdl); + link->link_hdl); return 
-EINVAL; } - for (i = 0; i < link->num_sync_links; i++) { - if (link->sync_link[i]) { - if (link->sync_link[i]->state == - CAM_CRM_LINK_STATE_IDLE) { - CAM_ERR(CAM_CRM, "sync link hdl %x is idle", - link->sync_link[i]->link_hdl); - return -EINVAL; - } - if (link->max_delay == link->sync_link[i]->max_delay) { - rc = __cam_req_mgr_check_sync_req_is_ready( - link, link->sync_link[i], - slot, trigger); - if (rc < 0) { - CAM_DBG(CAM_CRM, "link %x not ready", - link->link_hdl); - return rc; - } - } else if (link->max_delay > - link->sync_link[i]->max_delay) { - link->is_master = true; - link->sync_link[i]->is_master = false; - rc = __cam_req_mgr_check_sync_for_mslave( - link, link->sync_link[i], slot); - if (rc < 0) { - CAM_DBG(CAM_CRM, "link%x not ready", - link->link_hdl); - return rc; - } - } else { - link->is_master = false; - link->sync_link[i]->is_master = true; - rc = __cam_req_mgr_check_sync_for_mslave( - link, link->sync_link[i], slot); - if (rc < 0) { - CAM_DBG(CAM_CRM, "link %x not ready", - link->link_hdl); - return rc; - } - } - } else { - CAM_ERR(CAM_REQ, "Sync link is null"); - return -EINVAL; - } - } - rc = __cam_req_mgr_inject_delay(link->req.l_tbl, - slot->idx, trigger); + slot->idx, trigger); if (rc < 0) { CAM_DBG(CAM_CRM, "Req: %lld needs to inject delay at %s", @@ -1730,7 +1358,7 @@ enum crm_req_eof_trigger_type __cam_req_mgr_check_for_eof( static int __cam_req_mgr_process_req(struct cam_req_mgr_core_link *link, struct cam_req_mgr_trigger_notify *trigger_data) { - int rc = 0, idx, i; + int i = 0, rc = 0, idx; int reset_step = 0; uint32_t trigger = trigger_data->trigger; struct cam_req_mgr_slot *slot = NULL; @@ -1805,8 +1433,14 @@ static int __cam_req_mgr_process_req(struct cam_req_mgr_core_link *link, if (slot->status != CRM_SLOT_STATUS_REQ_READY) { if (slot->sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC) { + if (!link->in_msync_mode) { + CAM_DBG(CAM_CRM, + "Settings master-slave sync mode for link 0x%x", + link->link_hdl); + link->in_msync_mode = true; + 
} rc = __cam_req_mgr_check_multi_sync_link_ready( - link, slot, trigger); + link, slot, trigger); } else { if (link->in_msync_mode) { CAM_DBG(CAM_CRM, @@ -1814,9 +1448,10 @@ static int __cam_req_mgr_process_req(struct cam_req_mgr_core_link *link, link->link_hdl); link->in_msync_mode = false; link->initial_sync_req = -1; - for (i = 0; i < link->num_sync_links; i++) { - if (link->sync_link[i]) { - tmp_link = link->sync_link[i]; + for (i = 0; i < MAXIMUM_LINKS_PER_SESSION - 1; + i++) { + tmp_link = link->sync_link[i]; + if (tmp_link) { tmp_link->initial_sync_req = -1; tmp_link->in_msync_mode = false; } @@ -1886,6 +1521,13 @@ static int __cam_req_mgr_process_req(struct cam_req_mgr_core_link *link, } } + if (link->num_sync_link && + (link->initial_sync_req == slot->req_id)) { + link->sync_frame_id = trigger_data->frame_id; + CAM_DBG(CAM_CRM, "link %x sync frame %lld", + link->link_hdl, link->sync_frame_id); + } + rc = __cam_req_mgr_send_req(link, link->req.in_q, trigger, &dev); if (rc < 0) { /* Apply req failed retry at next sof */ @@ -1954,15 +1596,10 @@ static int __cam_req_mgr_process_req(struct cam_req_mgr_core_link *link, link->link_hdl); idx = in_q->rd_idx; reset_step = link->max_delay; - - for (i = 0; i < link->num_sync_links; i++) { - if (link->sync_link[i]) { - if ((link->in_msync_mode) && - (link->sync_link[i]->max_delay > - reset_step)) - reset_step = + for (i = 0; i < link->num_sync_link - 1; i++) { + if (reset_step < link->sync_link[i]->max_delay) + reset_step = link->sync_link[i]->max_delay; - } } if (slot->req_id > 0) @@ -2521,8 +2158,9 @@ static void __cam_req_mgr_unreserve_link( } } - for (j = 0; j < MAXIMUM_LINKS_PER_SESSION - 1; j++) - link->sync_link[j] = NULL; + for (i = 0; i < MAXIMUM_LINKS_PER_SESSION - 1; i++) + link->sync_link[i] = NULL; + session->num_links--; CAM_DBG(CAM_CRM, "Active session links (%d)", session->num_links); mutex_unlock(&session->lock); @@ -2652,13 +2290,6 @@ int cam_req_mgr_process_sched_req(void *priv, void *data) sched_req 
= (struct cam_req_mgr_sched_request *)&task_data->u; in_q = link->req.in_q; - CAM_DBG(CAM_CRM, - "link_hdl %x req_id %lld at slot %d sync_mode %d is_master %d exp_timeout_val %d ms", - sched_req->link_hdl, sched_req->req_id, - in_q->wr_idx, sched_req->sync_mode, - link->is_master, - sched_req->additional_timeout); - mutex_lock(&link->req.lock); slot = &in_q->slot[in_q->wr_idx]; @@ -2668,9 +2299,10 @@ int cam_req_mgr_process_sched_req(void *priv, void *data) slot->status = CRM_SLOT_STATUS_REQ_ADDED; slot->req_id = sched_req->req_id; - slot->sync_mode = sched_req->sync_mode; slot->skip_idx = 0; slot->recover = sched_req->bubble_enable; + slot->sync_mode = sched_req->sync_mode; + slot->real_sync_mode = CAM_REQ_MGR_SYNC_MODE_NO_SYNC; if (sched_req->additional_timeout < 0) { CAM_WARN(CAM_CRM, "Requested timeout is invalid [%dms]", @@ -2694,16 +2326,63 @@ int cam_req_mgr_process_sched_req(void *priv, void *data) __cam_req_mgr_inc_idx(&in_q->wr_idx, 1, in_q->num_slots); if (slot->sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC) { - if (link->initial_sync_req == -1) + if (link->initial_sync_req == -1) { link->initial_sync_req = slot->req_id; + link->modified_init_sync_req = slot->req_id; + } } else { link->initial_sync_req = -1; - for (i = 0; i < link->num_sync_links; i++) { - if (link->sync_link[i]) + for (i = 0; i < link->num_sync_link; i++) { + if (link->sync_link[i]) { link->sync_link[i]->initial_sync_req = -1; + link->modified_init_sync_req = -1; + } } } + CAM_DBG(CAM_CRM, + "link_hdl %x req_id %lld at slot %d schedule_mode %d slot mode %d is_master %d exp_timeout_val %d ms", + sched_req->link_hdl, sched_req->req_id, + in_q->wr_idx, sched_req->sync_mode, + slot->sync_mode, + link->is_master, + sched_req->additional_timeout); + /* + * This logic is to make first few requests in non-sync mode + * so that these requests should apply irrespective of threshold + * time condition as difference in sensor stream on may be more than + * threshold value at start. 
For sw sync logic to work, first few + * requests needs to apply in same frame duration. + */ + + if (link->is_sync_req && slot->sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC + && ((link->initial_sync_req + 5) > slot->req_id)) { + slot->sync_mode = CAM_REQ_MGR_SYNC_MODE_NO_SYNC; + slot->real_sync_mode = CAM_REQ_MGR_SYNC_MODE_SYNC; + } else if (slot->sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC) { + if (link->is_sync_req && (link->initial_skip == 0)) { + link->is_sync_req = false; + link->initial_sync_req = slot->req_id + 1; + CAM_DBG(CAM_CRM, "Initial sync req %lld link %x", + link->initial_sync_req, link->link_hdl); + } + + if (link->initial_skip && link->initial_sync_req) { + link->initial_skip--; + slot->sync_mode = CAM_REQ_MGR_SYNC_MODE_NO_SYNC; + slot->real_sync_mode = CAM_REQ_MGR_SYNC_MODE_SYNC; + CAM_DBG(CAM_CRM, "Initial sync req %lld link %x", + link->initial_sync_req, link->link_hdl); + } + } + CAM_DBG(CAM_CRM, + "link_hdl %x req_id %lld at slot %d schedule_mode %d slot mode %d is_master %d exp_timeout_val %d ms init sync %d", + sched_req->link_hdl, sched_req->req_id, + in_q->wr_idx, sched_req->sync_mode, + slot->sync_mode, + link->is_master, + sched_req->additional_timeout, + link->initial_sync_req); mutex_unlock(&link->req.lock); end: @@ -2722,12 +2401,19 @@ end: */ int cam_req_mgr_process_add_req(void *priv, void *data) { - int rc = 0, i = 0, idx; + int rc = 0, i = 0, j = 0; + int idx, idx2; struct cam_req_mgr_add_request *add_req = NULL; struct cam_req_mgr_core_link *link = NULL; struct cam_req_mgr_connected_device *device = NULL; + struct cam_req_mgr_connected_device *device2 = NULL; struct cam_req_mgr_req_tbl *tbl = NULL; + struct cam_req_mgr_req_tbl *tbl2 = NULL; struct cam_req_mgr_tbl_slot *slot = NULL; + struct cam_req_mgr_tbl_slot *sync_slot = NULL; + int32_t sync_dev_hdl = -1; + char *dev_name = NULL; + struct cam_req_mgr_slot *link_slot = NULL; struct crm_task_payload *task_data = NULL; if (!data || !priv) { @@ -2755,6 +2441,7 @@ int 
cam_req_mgr_process_add_req(void *priv, void *data) rc = -EINVAL; goto end; } + /* * Go through request table and add * request id to proper table @@ -2774,6 +2461,7 @@ int cam_req_mgr_process_add_req(void *priv, void *data) goto end; } + link_slot = &link->req.in_q->slot[idx]; slot = &tbl->slot[idx]; slot->ops.is_applied = false; @@ -2784,6 +2472,62 @@ int cam_req_mgr_process_add_req(void *priv, void *data) add_req->req_id, slot->inject_delay_at_sof, device->dev_info.name); + + if (link_slot->sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC) { + for (i = 0; i < link->num_sync_link; i++) { + mutex_lock(&link->sync_link[i]->req.lock); + idx2 = __cam_req_mgr_find_slot_for_req( + link->sync_link[i]->req.in_q, + add_req->req_id); + if (idx2 < 0) { + CAM_ERR(CAM_CRM, + "req %lld not found in in_q for dev %s on link 0x%x", + add_req->req_id, + device->dev_info.name, + link->sync_link[i]->link_hdl); + rc = -EBADSLT; + mutex_unlock( + &link->sync_link[i]->req.lock); + mutex_unlock(&link->req.lock); + goto end; + } + dev_name = __cam_req_mgr_dev_handle_to_name( + add_req->dev_hdl, link); + sync_dev_hdl = cam_req_mgr_get_dev_hdl(dev_name, + link->sync_link[i]); + for (j = 0; + j < link->sync_link[i]->num_devs; j++) { + device2 = &link->sync_link[i]->l_dev[j]; + CAM_ERR(CAM_CRM, "device hdl %x : sync link dev hdl %x", + device2->dev_hdl, sync_dev_hdl); + if (device2->dev_hdl == sync_dev_hdl) { + tbl2 = device2->pd_tbl; + break; + } + } + if (!tbl2) { + CAM_ERR_RATE_LIMIT(CAM_CRM, + "Sync link dev hdl %x not found", + sync_dev_hdl); + rc = -EINVAL; + mutex_unlock( + &link->sync_link[i]->req.lock); + mutex_unlock(&link->req.lock); + goto end; + } + sync_slot = &tbl2->slot[idx2]; + sync_slot->ops.dev_hdl = sync_dev_hdl; + sync_slot->inject_delay_at_sof = + (add_req->skip_at_sof & 0xFF); + CAM_DBG(CAM_CRM, + "link %x Req_id %llu injecting delay %llu frame at SOF by %s", + link->sync_link[i]->link_hdl, + add_req->req_id, + slot->inject_delay_at_sof, + device->dev_info.name); + 
mutex_unlock(&link->sync_link[i]->req.lock); + } + } } if ((add_req->skip_at_eof & 0xFF) > slot->inject_delay_at_eof) { @@ -2793,14 +2537,123 @@ int cam_req_mgr_process_add_req(void *priv, void *data) add_req->req_id, slot->inject_delay_at_eof, device->dev_info.name); + + if (link_slot->sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC) { + for (i = 0; i < link->num_sync_link; i++) { + mutex_lock(&link->sync_link[i]->req.lock); + idx2 = __cam_req_mgr_find_slot_for_req( + link->sync_link[i]->req.in_q, + add_req->req_id); + if (idx2 < 0) { + CAM_ERR(CAM_CRM, + "req %lld not found in in_q for dev %s on link 0x%x", + add_req->req_id, + device->dev_info.name, + link->sync_link[i]->link_hdl); + rc = -EBADSLT; + mutex_unlock( + &link->sync_link[i]->req.lock); + mutex_unlock(&link->req.lock); + goto end; + } + dev_name = __cam_req_mgr_dev_handle_to_name( + add_req->dev_hdl, link); + sync_dev_hdl = cam_req_mgr_get_dev_hdl(dev_name, + link->sync_link[i]); + for (j = 0; + j < link->sync_link[i]->num_devs; j++) { + device2 = &link->sync_link[i]->l_dev[j]; + CAM_ERR(CAM_CRM, "device hdl %x : sync link dev hdl %x", + device2->dev_hdl, sync_dev_hdl); + if (device2->dev_hdl == sync_dev_hdl) { + tbl2 = device2->pd_tbl; + break; + } + } + if (!tbl2) { + CAM_ERR_RATE_LIMIT(CAM_CRM, + "Synclink dev hdl %x not found", + sync_dev_hdl); + rc = -EINVAL; + mutex_unlock( + &link->sync_link[i]->req.lock); + mutex_unlock(&link->req.lock); + goto end; + } + sync_slot = &tbl2->slot[idx2]; + sync_slot->ops.dev_hdl = sync_dev_hdl; + sync_slot->inject_delay_at_eof = + (add_req->skip_at_eof & 0xFF); + CAM_DBG(CAM_CRM, + "link %x Req_id %llu injecting delay %llu frame at EOF by %s", + link->sync_link[i]->link_hdl, + add_req->req_id, + slot->inject_delay_at_sof, + device->dev_info.name); + mutex_unlock(&link->sync_link[i]->req.lock); + } + } } if (add_req->trigger_eof) { slot->ops.apply_at_eof = true; slot->ops.dev_hdl = add_req->dev_hdl; CAM_DBG(CAM_REQ, - "Req_id %llu slot:%d added for EOF tigger for Device: 
%s", + "link %x Req_id %llu slot:%d added for EOF tigger for Device: %s", + link->link_hdl, add_req->req_id, idx, device->dev_info.name); + + if (link_slot->sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC) { + for (i = 0; i < link->num_sync_link; i++) { + mutex_lock(&link->sync_link[i]->req.lock); + idx2 = __cam_req_mgr_find_slot_for_req( + link->sync_link[i]->req.in_q, + add_req->req_id); + if (idx2 < 0) { + CAM_ERR(CAM_CRM, + "req %lld not found in in_q for dev %s on link 0x%x", + add_req->req_id, + device->dev_info.name, + link->sync_link[i]->link_hdl); + rc = -EBADSLT; + mutex_unlock( + &link->sync_link[i]->req.lock); + mutex_unlock(&link->req.lock); + goto end; + } + dev_name = __cam_req_mgr_dev_handle_to_name( + add_req->dev_hdl, link); + sync_dev_hdl = cam_req_mgr_get_dev_hdl(dev_name, + link->sync_link[i]); + for (j = 0; + j < link->sync_link[i]->num_devs; j++) { + device2 = &link->sync_link[i]->l_dev[j]; + CAM_ERR(CAM_CRM, + "device hdl %x : sync link dev hdl %x", + device2->dev_hdl, sync_dev_hdl); + if (device2->dev_hdl == sync_dev_hdl) { + tbl2 = device2->pd_tbl; + break; + } + } + if (!tbl2) { + CAM_ERR_RATE_LIMIT(CAM_CRM, + "Sync link dev hdl %x not found", + sync_dev_hdl); + rc = -EINVAL; + mutex_unlock( + &link->sync_link[i]->req.lock); + mutex_unlock(&link->req.lock); + goto end; + } + sync_slot = &tbl2->slot[idx2]; + sync_slot->ops.is_applied = false; + sync_slot->ops.apply_at_eof = true; + sync_slot->ops.dev_hdl = sync_dev_hdl; + mutex_unlock(&link->sync_link[i]->req.lock); + } + } + } if (slot->state != CRM_REQ_STATE_PENDING && @@ -2814,8 +2667,8 @@ int cam_req_mgr_process_add_req(void *priv, void *data) slot->state = CRM_REQ_STATE_PENDING; slot->req_ready_map |= (1 << device->dev_bit); - CAM_DBG(CAM_CRM, "idx %d dev_hdl %x req_id %lld pd %d ready_map %x", - idx, add_req->dev_hdl, add_req->req_id, tbl->pd, + CAM_DBG(CAM_CRM, "link %x idx %d dev_hdl %x req_id %lld pd %d ready_map %x", + link->link_hdl, idx, add_req->dev_hdl, add_req->req_id, tbl->pd, 
slot->req_ready_map); trace_cam_req_mgr_add_req(link, idx, add_req, tbl, device); @@ -3034,7 +2887,8 @@ static int cam_req_mgr_process_trigger(void *priv, void *data) mutex_lock(&link->req.lock); - if (trigger_data->trigger == CAM_TRIGGER_POINT_SOF) { + if (trigger_data->trigger == CAM_TRIGGER_POINT_SOF + && !link->num_sync_link) { idx = __cam_req_mgr_find_slot_for_req(in_q, trigger_data->req_id); if (idx >= 0) { @@ -3069,6 +2923,8 @@ static int cam_req_mgr_process_trigger(void *priv, void *data) in_q->slot[in_q->rd_idx].status); spin_unlock_bh(&link->link_state_spin_lock); + CAM_DBG(CAM_CRM, "Link %x req id %lld", + link->link_hdl, in_q->slot[in_q->rd_idx].req_id); /* * Move to next req at SOF only in case @@ -3113,7 +2969,7 @@ end: * @return : String containing the device name * */ -static const char *__cam_req_mgr_dev_handle_to_name( +static char *__cam_req_mgr_dev_handle_to_name( int32_t dev_hdl, struct cam_req_mgr_core_link *link) { struct cam_req_mgr_connected_device *dev = NULL; @@ -3142,11 +2998,12 @@ static const char *__cam_req_mgr_dev_handle_to_name( */ static int cam_req_mgr_cb_add_req(struct cam_req_mgr_add_request *add_req) { - int rc = 0, idx; + int i, rc = 0, idx; struct crm_workq_task *task = NULL; struct cam_req_mgr_core_link *link = NULL; struct cam_req_mgr_add_request *dev_req; struct crm_task_payload *task_data; + struct cam_req_mgr_slot *slot = NULL; if (!add_req) { CAM_ERR(CAM_CRM, "sof_data is NULL"); @@ -3185,6 +3042,8 @@ static int cam_req_mgr_cb_add_req(struct cam_req_mgr_add_request *add_req) goto end; } + slot = &link->req.in_q->slot[idx]; + task = cam_req_mgr_workq_get_task(link->workq); if (!task) { CAM_ERR_RATE_LIMIT(CAM_CRM, "no empty task dev %x req %lld", @@ -3206,6 +3065,10 @@ static int cam_req_mgr_cb_add_req(struct cam_req_mgr_add_request *add_req) atomic_inc(&link->eof_event_cnt); CAM_DBG(CAM_REQ, "Req_id: %llu, eof_event_cnt: %d", dev_req->req_id, link->eof_event_cnt); + if (slot->sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC) { + 
for (i = 0; i < link->num_sync_link; i++) + atomic_inc(&link->sync_link[i]->eof_event_cnt); + } } task->process_cb = &cam_req_mgr_process_add_req; @@ -3222,23 +3085,30 @@ end: * cam_req_mgr_cb_notify_err() * * @brief : Error received from device, sends bubble recovery + * * @err_info : contains information about error occurred like bubble/overflow * - * @return : 0 on success, negative in case of failure + * @return : true on skipping bubble processing, otherwise, false * */ -static int cam_req_mgr_cb_notify_err( +static bool cam_req_mgr_cb_notify_err( struct cam_req_mgr_error_notify *err_info) { - int rc = 0; + bool rc = false; + int i, j; + uint32_t idx; struct crm_workq_task *task = NULL; struct cam_req_mgr_core_link *link = NULL; struct cam_req_mgr_error_notify *notify_err; struct crm_task_payload *task_data; + struct cam_req_mgr_slot *slot = NULL; + struct cam_req_mgr_slot *tmp_slot = NULL; + struct cam_req_mgr_connected_device *dev = NULL; + struct cam_req_mgr_request_change_state state_info; if (!err_info) { CAM_ERR(CAM_CRM, "err_info is NULL"); - rc = -EINVAL; + rc = false; goto end; } @@ -3246,37 +3116,142 @@ static int cam_req_mgr_cb_notify_err( cam_get_device_priv(err_info->link_hdl); if (!link) { CAM_DBG(CAM_CRM, "link ptr NULL %x", err_info->link_hdl); - rc = -EINVAL; + rc = false; goto end; } + if (link->req.in_q->num_slots > MAX_REQ_SLOTS) + { + CAM_ERR(CAM_CRM, "invalid MAX slot %d", + link->req.in_q->num_slots); + rc = false; + goto end; + } + + idx = __cam_req_mgr_find_slot_for_req( + link->req.in_q, err_info->req_id); + if (idx < 0) { + CAM_ERR(CAM_CRM, + "req %lld not found in in_q link %x", + err_info->req_id, link->link_hdl); + rc = false; + goto end; + } + + if ((err_info->error != CRM_KMD_ERR_BUBBLE) || + (!err_info->req_id)) { + CAM_ERR(CAM_CRM, "invalid param: Err %d req %lld", + err_info->error, err_info->req_id); + rc = false; + goto end; + } + tmp_slot = &link->req.in_q->slot[idx]; + + /* Incrementing slot idx to find if request in the 
slot + * is applied in sync mode or non-sync mode. + */ + __cam_req_mgr_inc_idx( + &idx, 1, link->req.in_q->num_slots); + + slot = &link->req.in_q->slot[idx]; + + /* Condition to check if crm need to process bubble notified by + * crm or isp. If bubble is reported for slave link then don't + * process bubble recovery. Checking previous slot with tmp_slot + * to handle condition where bubble is reported for last request. + */ + if ((slot->sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC || + tmp_slot->sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC) && + !link->is_master) { + crm_timer_reset(link->watchdog); + CAM_DBG(CAM_CRM, "Not processing bubble as it is slave link %x", + link->link_hdl); + return true; + } + spin_lock_bh(&link->link_state_spin_lock); if (link->state != CAM_CRM_LINK_STATE_READY) { CAM_WARN(CAM_CRM, "invalid link state:%d", link->state); spin_unlock_bh(&link->link_state_spin_lock); - rc = -EPERM; + rc = false; goto end; } crm_timer_reset(link->watchdog); spin_unlock_bh(&link->link_state_spin_lock); - task = cam_req_mgr_workq_get_task(link->workq); - if (!task) { - CAM_ERR(CAM_CRM, "no empty task req_id %lld", err_info->req_id); - rc = -EBUSY; - goto end; + if (link->is_master && + slot->sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC) { + for (i = 0; i < link->num_sync_link; i++) { + CAM_DBG(CAM_CRM, "Bubble for sync link %x", + link->sync_link[i]->link_hdl); + state_info.link_hdl = link->sync_link[i]->link_hdl; + state_info.req_id = err_info->req_id; + for (j = 0; j < link->sync_link[i]->num_devs; j++) { + dev = &link->sync_link[i]->l_dev[j]; + if (dev->ops && dev->ops->change_state) { + state_info.dev_hdl = dev->dev_hdl; + + if (!(dev->ops->change_state( + &state_info))) { + CAM_DBG(CAM_CRM, + "Err on bubble notify"); + rc = false; + } + + task = cam_req_mgr_workq_get_task( + link->workq); + if (!task) { + CAM_ERR(CAM_CRM, + "no empty task req_id %lld", + err_info->req_id); + rc = false; + goto end; + } + + task_data = (struct crm_task_payload *) + task->payload; + 
task_data->type = + CRM_WORKQ_TASK_NOTIFY_ERR; + notify_err = + (struct cam_req_mgr_error_notify *)& + task_data->u; + notify_err->req_id = err_info->req_id; + notify_err->link_hdl = + link->sync_link[i]->link_hdl; + notify_err->dev_hdl = dev->dev_hdl; + notify_err->error = err_info->error; + task->process_cb = + &cam_req_mgr_process_error; + cam_req_mgr_workq_enqueue_task( + task, + link->sync_link[i], + CRM_TASK_PRIORITY_0); + } + } + } } - task_data = (struct crm_task_payload *)task->payload; - task_data->type = CRM_WORKQ_TASK_NOTIFY_ERR; - notify_err = (struct cam_req_mgr_error_notify *)&task_data->u; - notify_err->req_id = err_info->req_id; - notify_err->link_hdl = err_info->link_hdl; - notify_err->dev_hdl = err_info->dev_hdl; - notify_err->error = err_info->error; - notify_err->trigger = err_info->trigger; - task->process_cb = &cam_req_mgr_process_error; - rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0); + if (err_info->need_recovery) { + task = cam_req_mgr_workq_get_task(link->workq); + if (!task) { + CAM_ERR(CAM_CRM, "no empty task req_id %lld", + err_info->req_id); + rc = false; + goto end; + } + + task_data = (struct crm_task_payload *)task->payload; + task_data->type = CRM_WORKQ_TASK_NOTIFY_ERR; + notify_err = (struct cam_req_mgr_error_notify *)&task_data->u; + notify_err->req_id = err_info->req_id; + notify_err->link_hdl = err_info->link_hdl; + notify_err->dev_hdl = err_info->dev_hdl; + notify_err->error = err_info->error; + notify_err->trigger = err_info->trigger; + task->process_cb = &cam_req_mgr_process_error; + rc = cam_req_mgr_workq_enqueue_task( + task, link, CRM_TASK_PRIORITY_0); + } end: return rc; @@ -3432,7 +3407,51 @@ end: return rc; } +static int cam_req_mgr_send_to_bubble( + struct cam_req_mgr_core_link *link, + uint64_t req_id) +{ + int rc = 0, i; + struct cam_req_mgr_error_notify notify; + struct cam_req_mgr_connected_device *dev = NULL; + struct cam_req_mgr_request_change_state state_info; + struct 
cam_req_mgr_dev_info dev_data; + notify.link_hdl = link->link_hdl; + notify.req_id = req_id; + notify.error = CRM_KMD_ERR_BUBBLE; + notify.trigger = CAM_TRIGGER_POINT_SOF; + notify.frame_id = link->frame_id; + notify.sof_timestamp_val = link->sof_timestamp; + notify.need_recovery = false; + + for (i = 0; i < link->num_devs; i++) { + dev = &link->l_dev[i]; + if (dev->ops && dev->ops->change_state) { + notify.dev_hdl = dev->dev_hdl; + if (dev->ops && dev->ops->change_state) { + cam_req_mgr_get_device_info(link, &dev_data); + if (dev_data.is_applied) { + state_info.link_hdl = link->link_hdl; + state_info.req_id = req_id; + state_info.dev_hdl = dev->dev_hdl; + rc = dev->ops->change_state( + &state_info); + if (rc < 0) + CAM_INFO(CAM_CRM, + "Failed to change state"); + } else { + link->bubble_skip = link->max_delay; + } + } + rc = cam_req_mgr_cb_notify_err(¬ify); + if (!rc) + CAM_INFO(CAM_CRM, + "Bubble recovery failed"); + } + } + return rc; +} /** * cam_req_mgr_cb_notify_trigger() @@ -3446,12 +3465,27 @@ end: static int cam_req_mgr_cb_notify_trigger( struct cam_req_mgr_trigger_notify *trigger_data) { - int32_t rc = 0, trigger_id = 0; - uint32_t trigger; - struct crm_workq_task *task = NULL; - struct cam_req_mgr_core_link *link = NULL; + int32_t i = 0, rc = 0, trigger_id = 0; + int32_t sync_dev_hdl = -1, sync_id = 0; + int64_t req_diff = 0; + uint32_t trigger; + struct crm_workq_task *task = NULL; + struct cam_req_mgr_core_link *link = NULL; struct cam_req_mgr_trigger_notify *notify_trigger; - struct crm_task_payload *task_data; + struct crm_task_payload *task_data; + struct cam_req_mgr_slot *slot = NULL; + struct cam_req_mgr_slot *tmp_slot = NULL; + struct cam_req_mgr_req_queue *in_q; + struct timespec64 ts; + struct cam_req_mgr_dev_info dev_data; + struct cam_req_mgr_slot *sync_slot = NULL; + struct cam_req_mgr_req_queue *sync_in_q; + int32_t sync_rd_idx = 0; + char *dev_name = NULL; + uint64_t curr_sync_time = 0; + uint64_t frame_duration = 0; + uint64_t 
curr_boot_timestamp; + uint64_t threshold = 0; if (!trigger_data) { CAM_ERR(CAM_CRM, "trigger_data is NULL"); @@ -3474,6 +3508,10 @@ static int cam_req_mgr_cb_notify_trigger( trigger_id = trigger_data->trigger_id; trigger = trigger_data->trigger; + ktime_get_boottime_ts64(&ts); + curr_boot_timestamp = + (uint64_t)((ts.tv_sec * 1000000000) + + ts.tv_nsec); /* * Reduce the workq overhead when there is @@ -3517,8 +3555,416 @@ static int cam_req_mgr_cb_notify_trigger( if (trigger_data->trigger == CAM_TRIGGER_POINT_SOF) crm_timer_reset(link->watchdog); + link->prev_sof_timestamp = link->sof_timestamp; + link->sof_timestamp = trigger_data->sof_timestamp_val; + link->frame_id = trigger_data->frame_id; + link->sof_boottime = trigger_data->sof_boottime; spin_unlock_bh(&link->link_state_spin_lock); + in_q = link->req.in_q; + slot = &in_q->slot[in_q->rd_idx]; + tmp_slot = &in_q->slot[in_q->rd_idx]; + + CAM_DBG(CAM_CRM, + "link %x req %lld idx %d state %d real sync mode %d", + link->link_hdl, slot->req_id, sync_id, slot->status, + slot->real_sync_mode); + + if (link->initial_sync_req != -1 && + slot->real_sync_mode != CAM_REQ_MGR_SYNC_MODE_SYNC && + link->num_sync_link) { + sync_id = slot->idx; + __cam_req_mgr_inc_idx( + &sync_id, 1, link->req.in_q->num_slots); + slot = &in_q->slot[sync_id]; + CAM_DBG(CAM_CRM, + "E link %x req %lld idx %d state %d", + link->link_hdl, slot->req_id, sync_id, slot->status); + } + + + CAM_DBG(CAM_CRM, + "link %x req %lld slot mode %d tmp slot mode %d init sync %lld", + link->link_hdl, slot->req_id, + slot->sync_mode, tmp_slot->sync_mode, + link->initial_sync_req); + + if (trigger == CAM_TRIGGER_POINT_SOF && + (slot->sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC || + (tmp_slot->sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC && + (link->initial_sync_req != -1)))) { + /* If in sync and slave link then don't apply */ + if (!link->is_master) { + + CAM_DBG(CAM_CRM, + "Skipping slave link 0x%x apply sof timestamp %llx frame %lld slot req %lld", + link->link_hdl, + 
link->sof_timestamp, + trigger_data->frame_id, + slot->req_id); + + CAM_DBG(CAM_CRM, + "[Slave %x] epoch time %lld sof boottime %lld frame id %lld open cnt %d req id %lld", + link->link_hdl, + curr_boot_timestamp, + link->sof_boottime, + trigger_data->frame_id, + link->open_req_cnt, + slot->req_id); + + goto end; + } else { + + frame_duration = + (curr_boot_timestamp - link->sof_boottime) * 2; + CAM_DBG(CAM_CRM, + "[Master %x] epoch time %lld sof boottime %lld frame id %lld frame duration %d ms open cnt %d req id %lld", + link->link_hdl, + curr_boot_timestamp, + link->sof_boottime, + trigger_data->frame_id, + frame_duration/1000000, + link->open_req_cnt, + slot->req_id); + + rc = __cam_req_mgr_check_next_req_slot(link); + rc = __cam_req_mgr_check_link_is_ready( + link, slot->idx, true); + + if (rc) { + CAM_DBG(CAM_CRM, + "Req:%lld [Master] not ready on link: %x, rc=%d", + slot->req_id, link->link_hdl, rc); + return rc; + } + + /* Checking each sync link if they are ready to apply + * on this epoch, if not then skip apply on this epoch + */ + for (i = 0; i < link->num_sync_link; i++) { + sync_in_q = link->sync_link[i]->req.in_q; + sync_rd_idx = sync_in_q->rd_idx; + __cam_req_mgr_inc_idx( + &sync_rd_idx, 1, + link->sync_link[i]->req.in_q->num_slots); + sync_slot = &sync_in_q->slot[sync_rd_idx]; + rc = __cam_req_mgr_check_next_req_slot( + link->sync_link[i]); + + /* This condition is for SAT usecase, + * if request id to be sync is different + * then this will help to sync + */ + if (link->initial_sync_req != -1 && + (link->sync_link[i]->initial_sync_req != + -1) && (link->initial_sync_req == + slot->req_id) && + (link->sync_link[i]->initial_sync_req > + sync_slot->req_id)) { + CAM_DBG(CAM_CRM, + "Not applying on Master %x as slave %x is not in sync", + link->link_hdl, + link->sync_link[i]->link_hdl); + return rc; + } + + /* checking if packet for synclink is + * available or not + */ + rc = __cam_req_mgr_check_link_is_ready( + link->sync_link[i], sync_rd_idx, true); 
+ + if (rc) { + CAM_DBG(CAM_CRM, + "Req:%lld [sync] not ready on link: %x, rc=%d", + sync_slot->req_id, + link->sync_link[i]->link_hdl, + rc); + return rc; + } + + /* Getting time and state info ISP context */ + cam_req_mgr_get_device_info( + link->sync_link[i], &dev_data); + + /* Checking if frame difference is maintain */ + req_diff = + (link->sync_frame_id - + link->sync_link[i]->sync_frame_id); + if (!link->sync_link_sof_skip && + link->sync_frame_id && + link->initial_sync_req != slot->req_id + && (req_diff != (link->frame_id - + dev_data.frame_id))) { + struct cam_req_mgr_dump_link_data + dump_info; + + CAM_DBG(CAM_CRM, + "Frame diff not matching %d", + link->frame_id - + dev_data.frame_id); + dump_info.m_link = link; + dump_info.s_link = link->sync_link[i]; + dump_info.dev_data = dev_data; + dump_info.s_req_id = sync_slot->req_id; + dump_info.m_req_id = slot->req_id; + cam_req_mgr_dump_link_info(&dump_info); + return rc; + } + + /* Calculating sof time differnce + * of master and slave for current frame + */ + curr_sync_time = + (dev_data.timestamp >= + link->sof_timestamp) ? 
+ dev_data.timestamp - link->sof_timestamp : + link->sof_timestamp - dev_data.timestamp; + + threshold = frame_duration / THRESHOLD_FACTOR; + + /* Checking if master and sync links are in + * same frame duration considering master frame + * duration in calculating threshold value + */ + if (curr_sync_time > threshold) { + struct cam_req_mgr_dump_link_data + dump_info; + CAM_DBG(CAM_CRM, + "Master %x and slave %x are not in same time frame time diff %lld threshold %lld", + link->link_hdl, + link->sync_link[i]->link_hdl, + curr_sync_time/1000000, + threshold/1000000); + + dump_info.m_link = link; + dump_info.s_link = link->sync_link[i]; + dump_info.dev_data = dev_data; + dump_info.s_req_id = sync_slot->req_id; + dump_info.m_req_id = slot->req_id; + cam_req_mgr_dump_link_info(&dump_info); + return 0; + } + + /* If ISP substate is in applied state then + * condition it as bubble condition, + * Trigger bubble recovery + */ + if (dev_data.is_applied && + link->initial_sync_req != slot->req_id) { + CAM_ERR(CAM_CRM, + "Slave link %x req %lld is in applied state, triggering bubble recovery", + link->sync_link[i]->link_hdl, + (sync_slot->req_id - + link->sync_link[i]->max_delay)); + rc = cam_req_mgr_send_to_bubble(link, + (sync_slot->req_id - + link->sync_link[i]->max_delay)); + link->sync_link_sof_skip = true; + CAM_DBG(CAM_CRM, + "Master sof %lld slave sof %lld", + link->sof_timestamp, + dev_data.timestamp); + return rc; + } + + /* Checking if the initial request + * difference is maintained + */ + req_diff = link->initial_sync_req - + link->sync_link[i]->initial_sync_req; + + if (!link->sync_link_sof_skip && + req_diff != + (slot->req_id - sync_slot->req_id)) { + struct cam_req_mgr_dump_link_data + dump_info; + + CAM_DBG(CAM_CRM, + "Req diff %lld Master link %x req %lld slave link %x req %lld", + req_diff, + link->link_hdl, + slot->req_id, + link->sync_link[i]->link_hdl, + sync_slot->req_id); + + dump_info.m_link = link; + dump_info.s_link = link->sync_link[i]; + 
dump_info.dev_data = dev_data; + dump_info.s_req_id = sync_slot->req_id; + dump_info.m_req_id = slot->req_id; + cam_req_mgr_dump_link_info(&dump_info); + return rc; + } + } + } + + if (link->bubble_skip) { + link->bubble_skip--; + if (link->bubble_skip == 0) + link->sync_link_sof_skip = false; + goto slave; + } + + + if (trigger == CAM_TRIGGER_POINT_SOF && + slot->real_sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC) { + int32_t slot_rd_idx; + + slot_rd_idx = link->req.in_q->rd_idx; + slot = &link->req.in_q->slot[slot_rd_idx]; + CAM_DBG(CAM_CRM, + "link %x idx %d req %lld state %d slot idx %d", + link->link_hdl, slot->idx, + slot->req_id, slot->status, slot_rd_idx); + if (slot->status == CRM_SLOT_STATUS_REQ_APPLIED) { + rc = __cam_req_mgr_check_next_req_slot(link); + if (rc) { + CAM_DBG(CAM_REQ, + "No pending req to apply to lower pd devices"); + rc = 0; + goto row; + } + __cam_req_mgr_inc_idx( + &slot_rd_idx, 1, + link->req.in_q->num_slots); + } +row: + slot = &link->req.in_q->slot[slot_rd_idx]; + CAM_DBG(CAM_CRM, + "link %x idx %d req %lld state %d slot idx %d", + link->link_hdl, slot->idx, + slot->req_id, slot->status, slot_rd_idx); + + CAM_DBG(CAM_ISP, + "Start sync cond link %x modified sync req %lld enter", + link->link_hdl, slot->req_id); + + for (i = 0; i < link->num_sync_link; i++) { + int32_t sync_req_idx; + + //Checking if need to skip apply for this epoch + if (link->sync_link[i]->skip_sync_apply) { + CAM_INFO(CAM_ISP, + "Skipping apply on link %x as applied failed on other link %x", + link->link_hdl, + link->sync_link[i]->link_hdl); + link->sync_link[i]->skip_sync_apply = false; + return 0; + } + + //Check if other link is streaming or not + cam_req_mgr_get_device_info( + link->sync_link[i], &dev_data); + if (!dev_data.timestamp) { + CAM_INFO(CAM_ISP, "Link %x not streaming", + link->sync_link[i]->link_hdl); + return 0; + } + + sync_in_q = link->sync_link[i]->req.in_q; + if (slot->req_id == -1) { + sync_req_idx = sync_in_q->rd_idx; + } else { +
sync_req_idx = __cam_req_mgr_find_slot_for_req( + link->sync_link[i]->req.in_q, slot->req_id); + } + + if (sync_req_idx == -1) { + CAM_DBG(CAM_CRM, + "Request %lld not in in_q link %x", + slot->req_id, + link->sync_link[i]->link_hdl); + if (slot->sync_mode != + CAM_REQ_MGR_SYNC_MODE_SYNC) { + CAM_DBG(CAM_CRM, + "slot not in sync mode %lld", + slot->req_id); + __cam_req_mgr_inc_idx( + &sync_req_idx, 1, + link->req.in_q->num_slots); + goto cl; + } + return 0; + } + + __cam_req_mgr_dec_idx( + &sync_req_idx, 1, + link->sync_link[i]->req.in_q->num_slots); +cl: + sync_slot = &sync_in_q->slot[sync_req_idx]; + + //Checking if corresponding req is applied in other link or not + if (link->sync_link[i]->modified_init_sync_req != -1 && + sync_slot->req_id != -1 && + (link->sync_link[i]->modified_init_sync_req != + sync_slot->req_id) && + (link->sync_link[i]->modified_init_sync_req < + sync_slot->req_id) && + (sync_slot->status != + CRM_SLOT_STATUS_REQ_APPLIED)) { + CAM_ERR(CAM_ISP, + "ERROR Req %lld on link %x not applied", + sync_slot->req_id, + link->sync_link[i]->link_hdl); + return 0; + } + + CAM_DBG(CAM_ISP, + "Link %x modified sync req %lld enter state %d", + link->sync_link[i]->link_hdl, + sync_slot->req_id, sync_slot->status); + + __cam_req_mgr_inc_idx( + &sync_req_idx, 1, + link->sync_link[i]->req.in_q->num_slots); + + sync_slot = &sync_in_q->slot[sync_req_idx]; + + CAM_DBG(CAM_CRM, + "Check Req:%lld state %d link: %x, rc=%d", + sync_slot->req_id, sync_slot->status, + link->sync_link[i]->link_hdl, rc); + + if (sync_slot->status != CRM_SLOT_STATUS_REQ_APPLIED) { + rc = __cam_req_mgr_check_next_req_slot( + link->sync_link[i]); + if (rc) { + CAM_DBG(CAM_REQ, + "No pending req to apply to lower pd devices"); + rc = 0; + goto low; + } + rc = __cam_req_mgr_check_link_is_ready( + link->sync_link[i], sync_req_idx, true); + if (rc) { + CAM_DBG(CAM_CRM, + "Req:%lld not ready on link: %x, rc=%d", + sync_slot->req_id, + link->sync_link[i]->link_hdl, + rc); + link->skip_sync_apply = true; +
return 0; + } + } +low: + CAM_DBG(CAM_ISP, + "End sync cond Link %x modified sync req %lld enter", + link->sync_link[i]->link_hdl, + sync_slot->req_id); + } + + rc = __cam_req_mgr_check_link_is_ready( + link, slot_rd_idx, true); + if (rc) { + CAM_DBG(CAM_CRM, + "M Req:%lld not ready on link: %x, rc=%d", + slot->req_id, link->link_hdl, rc); + link->skip_sync_apply = true; + return 0; + } + } + task = cam_req_mgr_workq_get_task(link->workq); if (!task) { CAM_ERR_RATE_LIMIT(CAM_CRM, "no empty task frame %lld", @@ -3543,6 +3989,69 @@ static int cam_req_mgr_cb_notify_trigger( task->process_cb = &cam_req_mgr_process_trigger; rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0); +slave: + + /* If sync mode is on and master epoch then enqueue workqueue to apply + * request for all sync link of master. + */ + + if (trigger == CAM_TRIGGER_POINT_SOF && + ((slot->sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC || + (tmp_slot->sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC && + (link->initial_sync_req != -1))) && + link->is_master && + tmp_slot->real_sync_mode != CAM_REQ_MGR_SYNC_MODE_SYNC)) { + + CAM_DBG(CAM_CRM, + "In sync mode req %lld tmp mode %d real mode %d sync mode %d link %x ", + slot->req_id, tmp_slot->sync_mode, + tmp_slot->real_sync_mode, + slot->sync_mode, link->link_hdl); + + dev_name = __cam_req_mgr_dev_handle_to_name( + trigger_data->dev_hdl, link); + + for (i = 0; i < link->num_sync_link; i++) { + + task = cam_req_mgr_workq_get_task( + link->sync_link[i]->workq); + if (!task) { + CAM_ERR(CAM_CRM, "No empty task frame %lld", + trigger_data->frame_id); + rc = -EBUSY; + goto end; + } + + sync_dev_hdl = cam_req_mgr_get_dev_hdl(dev_name, + link->sync_link[i]); + + if (sync_dev_hdl == -1) { + CAM_ERR(CAM_CRM, "Sync dev hdl is null"); + goto end; + } + CAM_DBG(CAM_CRM, "Sync link 0x%x sync dev hdl %x", + link->sync_link[i]->link_hdl, sync_dev_hdl); + + cam_req_mgr_get_device_info( + link->sync_link[i], &dev_data); + + task_data = (struct crm_task_payload
*)task->payload; + task_data->type = CRM_WORKQ_TASK_NOTIFY_SOF; + notify_trigger = (struct cam_req_mgr_trigger_notify *) + &task_data->u; + notify_trigger->frame_id = dev_data.frame_id; + notify_trigger->link_hdl = link->sync_link[i]->link_hdl; + notify_trigger->dev_hdl = sync_dev_hdl; + notify_trigger->trigger = trigger_data->trigger; + notify_trigger->req_id = trigger_data->req_id; + notify_trigger->sof_timestamp_val = + trigger_data->sof_timestamp_val; + task->process_cb = &cam_req_mgr_process_trigger; + rc = cam_req_mgr_workq_enqueue_task(task, + link->sync_link[i], CRM_TASK_PRIORITY_0); + } + } + end: return rc; } @@ -3889,6 +4398,8 @@ int cam_req_mgr_destroy_session( __cam_req_mgr_free_link(link); } } + for (i = 0; i < MAXIMUM_LINKS_PER_SESSION; i++) + clear_bit(i, g_crm_core_dev->bitmap); list_del(&cam_session->entry); mutex_unlock(&cam_session->lock); mutex_destroy(&cam_session->lock); @@ -4243,11 +4754,66 @@ end: return rc; } +/** + * __cam_req_mgr_set_master_link() + * + * @brief : Select master link based on the activate sequence + * + * @link : pointer to array of links + * @num_of_links : number of links to be sync + * + * @return : void + * + */ +static void __cam_req_mgr_set_master_link( + struct cam_req_mgr_core_link **link, + int32_t num_of_links) +{ + int i = 0, j = 0, k = 0; + int idx = -1; + + idx = find_first_bit(g_crm_core_dev->bitmap, + MAXIMUM_LINKS_PER_SESSION); + + for (i = 0; i < num_of_links; i++) { + + CAM_DBG(CAM_CRM, "idx %d, link%d 0x%x active seq %d", + idx, i, link[i]->link_hdl, + link[i]->activate_seq); + + if (link[i]->activate_seq == idx) { + + link[i]->is_master = true; + CAM_DBG(CAM_CRM, "Master link 0x%x num of links %d ", + link[i]->link_hdl, num_of_links); + + for (j = 0, k = 0; j < num_of_links; j++) { + if (link[i]->link_hdl != + link[j]->link_hdl) { + + link[i]->sync_link[k] = link[j]; + link[i]->num_sync_link++; + + link[j]->sync_link[0] = link[i]; + link[j]->num_sync_link = 1; + link[j]->is_master = false; + + k++; + } 
+ } + } + + link[i]->initial_skip = + g_crm_core_dev->max_delay - link[i]->max_delay; + CAM_DBG(CAM_CRM, "link %x initial skip %d", + link[i]->link_hdl, link[i]->initial_skip); + } +} + int cam_req_mgr_sync_config( struct cam_req_mgr_sync_mode *sync_info) { - int i, j, rc = 0; - int sync_idx = 0; + int i, j, rc = 0; struct cam_req_mgr_core_session *cam_session; struct cam_req_mgr_core_link *link[MAX_LINKS_PER_SESSION]; @@ -4269,10 +4835,12 @@ int cam_req_mgr_sync_config( return -EINVAL; } - if ((!sync_info->link_hdls[0]) || (!sync_info->link_hdls[1])) { - CAM_WARN(CAM_CRM, "Invalid link handles 0x%x 0x%x", - sync_info->link_hdls[0], sync_info->link_hdls[1]); - return -EINVAL; + for (i = 0; i < sync_info->num_links; i++) { + if (!sync_info->link_hdls[i]) { + CAM_WARN(CAM_CRM, "Invalid link handles 0x%x", + sync_info->link_hdls[i]); + return -EINVAL; + } } mutex_lock(&g_crm_core_dev->crm_lock); @@ -4288,60 +4856,56 @@ int cam_req_mgr_sync_config( mutex_lock(&cam_session->lock); for (i = 0; i < sync_info->num_links; i++) { - - if (!sync_info->link_hdls[i]) { - CAM_ERR(CAM_CRM, "link handle %d is null", i); - rc = -EINVAL; - goto done; - } - link[i] = cam_get_device_priv(sync_info->link_hdls[i]); if (!link[i]) { CAM_ERR(CAM_CRM, "link%d NULL pointer", i); rc = -EINVAL; goto done; } + CAM_DBG(CAM_CRM, "Link hdl %x sync mode %d", + link[i]->link_hdl, + sync_info->sync_mode); link[i]->sync_link_sof_skip = false; + link[i]->is_master = false; + link[i]->in_msync_mode = false; link[i]->initial_sync_req = -1; - link[i]->num_sync_links = 0; + link[i]->num_sync_link = 0; + link[i]->bubble_skip = 0; + link[i]->sync_frame_id = 0; + link[i]->is_sync_req = true; + link[i]->modified_init_sync_req = -1; for (j = 0; j < sync_info->num_links-1; j++) link[i]->sync_link[j] = NULL; + + g_crm_core_dev->max_delay = CAM_PIPELINE_DELAY_0; + + if (g_crm_core_dev->max_delay < link[i]->max_delay) + g_crm_core_dev->max_delay = link[i]->max_delay; } if (sync_info->sync_mode == 
CAM_REQ_MGR_SYNC_MODE_SYNC) { - for (i = 0; i < sync_info->num_links; i++) { - j = 0; - sync_idx = 0; - CAM_DBG(CAM_REQ, "link %x adds sync link:", - link[i]->link_hdl); - while (j < sync_info->num_links) { - if (i != j) { - link[i]->sync_link[sync_idx++] = - link[j]; - link[i]->num_sync_links++; - CAM_DBG(CAM_REQ, "sync_link[%d] : %x", - sync_idx-1, link[j]->link_hdl); - } - j++; - } - link[i]->initial_skip = true; - link[i]->sof_timestamp = 0; - } + __cam_req_mgr_set_master_link(link, sync_info->num_links); + } else { - for (j = 0; j < sync_info->num_links; j++) { - link[j]->initial_skip = true; - link[j]->sof_timestamp = 0; + for (i = 0; i < sync_info->num_links; i++) { + /* + * Reset below info after the mode is configured + * to NO-SYNC mode since they may be overridden + * if the sync config is invoked after SOF comes. + */ + link[i]->initial_skip = 0; + link[i]->sof_timestamp = 0; + link[i]->num_sync_link = 0; + link[i]->initial_sync_req = -1; + link[i]->modified_init_sync_req = -1; } } cam_session->sync_mode = sync_info->sync_mode; - CAM_DBG(CAM_REQ, - "Sync config completed on %d links with sync_mode %d", - sync_info->num_links, sync_info->sync_mode); done: mutex_unlock(&cam_session->lock); @@ -4419,13 +4983,13 @@ end: int cam_req_mgr_link_control(struct cam_req_mgr_link_control *control) { - int rc = 0; - int i, j; - struct cam_req_mgr_core_link *link = NULL; - - struct cam_req_mgr_connected_device *dev = NULL; - struct cam_req_mgr_link_evt_data evt_data; - int init_timeout = 0; + int rc = 0, i, j; + struct cam_req_mgr_core_link *link = NULL; + struct cam_req_mgr_connected_device *dev = NULL; + struct cam_req_mgr_link_evt_data evt_data; + int init_timeout = 0; + long idx; + bool bit; if (!control) { CAM_ERR(CAM_CRM, "Control command is NULL"); @@ -4456,6 +5020,22 @@ int cam_req_mgr_link_control(struct cam_req_mgr_link_control *control) if (control->ops == CAM_REQ_MGR_LINK_ACTIVATE) { spin_lock_bh(&link->link_state_spin_lock); link->state = 
CAM_CRM_LINK_STATE_READY; + do { + idx = find_first_zero_bit( + g_crm_core_dev->bitmap, + MAXIMUM_LINKS_PER_SESSION); + if (idx >= MAXIMUM_LINKS_PER_SESSION) { + CAM_ERR(CAM_SYNC, + "Error: Unable to allocate link bit"); + rc = -ENOMEM; + break; + } + bit = test_and_set_bit( + idx, g_crm_core_dev->bitmap); + } while (bit); + link->activate_seq = idx; + CAM_DBG(CAM_CRM, "link hdl 0x%x activate idx %d", + link->link_hdl, link->activate_seq); spin_unlock_bh(&link->link_state_spin_lock); if (control->init_timeout[i]) link->skip_init_frame = true; @@ -4501,6 +5081,10 @@ int cam_req_mgr_link_control(struct cam_req_mgr_link_control *control) link->state = CAM_CRM_LINK_STATE_IDLE; link->skip_init_frame = false; crm_timer_exit(&link->watchdog); + if ((link->activate_seq >= 0) && + (link->activate_seq < MAXIMUM_LINKS_PER_SESSION)) + clear_bit(link->activate_seq, g_crm_core_dev->bitmap); + link->activate_seq = -1; spin_unlock_bh(&link->link_state_spin_lock); CAM_DBG(CAM_CRM, "De-activate link: 0x%x", link->link_hdl); diff --git a/drivers/cam_req_mgr/cam_req_mgr_core.h b/drivers/cam_req_mgr/cam_req_mgr_core.h index f31f4670e2..9808cb0b31 100644 --- a/drivers/cam_req_mgr/cam_req_mgr_core.h +++ b/drivers/cam_req_mgr/cam_req_mgr_core.h @@ -263,7 +263,8 @@ struct cam_req_mgr_req_tbl { * - members updated due to external events * @recover : if user enabled recovery for this request. 
* @req_id : mask tracking which all devices have request ready - * @sync_mode : Sync mode in which req id in this slot has to applied + * @sync_mode : Modified sync mode in which req id in this slot has to applied + * @real_sync_mode : Actual sync mode in which req id in this slot has to applied * @additional_timeout : Adjusted watchdog timeout value associated with * this request */ @@ -274,6 +275,7 @@ struct cam_req_mgr_slot { int32_t recover; int64_t req_id; int32_t sync_mode; + int32_t real_sync_mode; int32_t additional_timeout; }; @@ -356,7 +358,7 @@ struct cam_req_mgr_connected_device { * @lock : mutex lock to guard link data operations * @link_state_spin_lock : spin lock to protect link state variable * @sync_link : array of pointer to the sync link for synchronization - * @num_sync_links : num of links sync associated with this link + * @num_sync_link : total number of sync links * @sync_link_sof_skip : flag determines if a pkt is not available for a given * frame in a particular link skip corresponding * frame in sync link as well. 
@@ -366,11 +368,14 @@ struct cam_req_mgr_connected_device { * @is_used : 1 if link is in use else 0 * @is_master : Based on pd among links, the link with the highest pd * is assigned as master - * @initial_skip : Flag to determine if slave has started streaming in - * master-slave sync + * @initial_skip : Flag to determine if initial req need to skip for + * diff pd + * @is_sync_req : flag used for deciding sync and non-sync * @in_msync_mode : Flag to determine if a link is in master-slave mode * @initial_sync_req : The initial req which is required to sync with the * other link + * @modified_init_sync_req : Modified initial req which is required to sync + * with the other link * @retry_cnt : Counter that tracks number of attempts to apply * the same req * @is_shutdown : Flag to indicate if link needs to be disconnected @@ -385,6 +390,10 @@ struct cam_req_mgr_connected_device { * case of long exposure use case * @last_sof_trigger_jiffies : Record the jiffies of last sof trigger jiffies * @wq_congestion : Indicates if WQ congestion is detected or not + * @activate_seq : sequence in which link is activated + * @frame_id : current frame id + * @sync_frame_id : current frame id of sync link + * @bubble_skip : req to skip on bubble */ struct cam_req_mgr_core_link { int32_t link_hdl; @@ -401,19 +410,22 @@ struct cam_req_mgr_core_link { void *parent; struct mutex lock; spinlock_t link_state_spin_lock; - struct cam_req_mgr_core_link - *sync_link[MAXIMUM_LINKS_PER_SESSION - 1]; - int32_t num_sync_links; + struct cam_req_mgr_core_link *sync_link[ + MAXIMUM_LINKS_PER_SESSION]; + int32_t num_sync_link; bool sync_link_sof_skip; uint32_t open_req_cnt; uint32_t last_flush_id; atomic_t is_used; bool is_master; - bool initial_skip; + uint32_t initial_skip; + bool is_sync_req; bool in_msync_mode; int64_t initial_sync_req; + int64_t modified_init_sync_req; uint32_t retry_cnt; bool is_shutdown; + uint64_t sof_boottime; uint64_t sof_timestamp; uint64_t prev_sof_timestamp; bool 
dual_trigger; @@ -422,6 +434,11 @@ struct cam_req_mgr_core_link { bool skip_init_frame; uint64_t last_sof_trigger_jiffies; bool wq_congestion; + int32_t activate_seq; + uint64_t frame_id; + uint64_t sync_frame_id; + int32_t bubble_skip; + bool skip_sync_apply; }; /** @@ -455,11 +472,33 @@ struct cam_req_mgr_core_session { * @session_head : list head holding sessions * @crm_lock : mutex lock to protect session creation & destruction * @recovery_on_apply_fail : Recovery on apply failure using debugfs. + * @bitmap : bitmap to store index of link + * @max_delay : max pipeline delay in a session */ struct cam_req_mgr_core_device { struct list_head session_head; struct mutex crm_lock; bool recovery_on_apply_fail; + DECLARE_BITMAP(bitmap, MAXIMUM_LINKS_PER_SESSION); + uint32_t max_delay; +}; + +/** + * struct cam_req_mgr_dump_link_data + * - Dump data + * @m_link : master link handle + * @s_link : slave link handle + * @m_req_id : master req id + * @s_req_id : slave req id + * @dev_info : current timing data of slave link + * + */ +struct cam_req_mgr_dump_link_data { + struct cam_req_mgr_core_link *m_link; + struct cam_req_mgr_core_link *s_link; + uint64_t m_req_id; + uint64_t s_req_id; + struct cam_req_mgr_dev_info dev_data; }; /** diff --git a/drivers/cam_req_mgr/cam_req_mgr_interface.h b/drivers/cam_req_mgr/cam_req_mgr_interface.h index b6807ed4a7..e8e4f941a3 100644 --- a/drivers/cam_req_mgr/cam_req_mgr_interface.h +++ b/drivers/cam_req_mgr/cam_req_mgr_interface.h @@ -22,6 +22,7 @@ struct cam_req_mgr_apply_request; struct cam_req_mgr_flush_request; struct cam_req_mgr_link_evt_data; struct cam_req_mgr_dump_info; +struct cam_req_mgr_request_change_state; /* Request Manager -- camera device driver interface */ /** @@ -33,8 +34,9 @@ struct cam_req_mgr_dump_info; * userspace * @cam_req_mgr_notify_timer : start the timer */ -typedef int (*cam_req_mgr_notify_trigger)(struct cam_req_mgr_trigger_notify *); -typedef int (*cam_req_mgr_notify_err)(struct cam_req_mgr_error_notify 
*); +typedef int (*cam_req_mgr_notify_trigger)( + struct cam_req_mgr_trigger_notify *); +typedef bool (*cam_req_mgr_notify_err)(struct cam_req_mgr_error_notify *); typedef int (*cam_req_mgr_add_req)(struct cam_req_mgr_add_request *); typedef int (*cam_req_mgr_notify_timer)(struct cam_req_mgr_timer_notify *); typedef int (*cam_req_mgr_notify_stop)(struct cam_req_mgr_notify_stop *); @@ -48,8 +50,9 @@ typedef int (*cam_req_mgr_notify_stop)(struct cam_req_mgr_notify_stop *); * @cam_req_mgr_notify_frame_skip: CRM asks device to apply setting for * frame skip * @cam_req_mgr_flush_req : Flush or cancel request - * cam_req_mgr_process_evt : generic events + * @cam_req_mgr_process_evt : generic events * @cam_req_mgr_dump_req : dump request + * @cam_req_mgr_change_state : CRM asks device to change its state */ typedef int (*cam_req_mgr_get_dev_info) (struct cam_req_mgr_device_info *); typedef int (*cam_req_mgr_link_setup)(struct cam_req_mgr_core_dev_link_setup *); @@ -59,6 +62,8 @@ typedef int (*cam_req_mgr_notify_frame_skip)( typedef int (*cam_req_mgr_flush_req)(struct cam_req_mgr_flush_request *); typedef int (*cam_req_mgr_process_evt)(struct cam_req_mgr_link_evt_data *); typedef int (*cam_req_mgr_dump_req)(struct cam_req_mgr_dump_info *); +typedef int (*cam_req_mgr_change_state)( + struct cam_req_mgr_request_change_state *); /** * @brief : cam_req_mgr_crm_cb - func table @@ -87,15 +92,17 @@ struct cam_req_mgr_crm_cb { * @flush_req : payload to flush request * @process_evt : payload to generic event * @dump_req : payload to dump request + * @change_state : payload to change state */ struct cam_req_mgr_kmd_ops { - cam_req_mgr_get_dev_info get_dev_info; - cam_req_mgr_link_setup link_setup; - cam_req_mgr_apply_req apply_req; + cam_req_mgr_get_dev_info get_dev_info; + cam_req_mgr_link_setup link_setup; + cam_req_mgr_apply_req apply_req; cam_req_mgr_notify_frame_skip notify_frame_skip; - cam_req_mgr_flush_req flush_req; - cam_req_mgr_process_evt process_evt; - 
cam_req_mgr_dump_req dump_req; + cam_req_mgr_flush_req flush_req; + cam_req_mgr_process_evt process_evt; + cam_req_mgr_dump_req dump_req; + cam_req_mgr_change_state change_state; }; /** @@ -216,6 +223,7 @@ enum cam_req_mgr_link_evt_type { * @trigger : trigger point of this notification, CRM will send apply * only to the devices which subscribe to this point. * @sof_timestamp_val: Captured time stamp value at sof hw event + * @sof_boottime : Captured boot time stamp value at sof hw event * @req_id : req id which returned buf_done * @trigger_id: ID to differentiate between the trigger devices */ @@ -225,6 +233,7 @@ struct cam_req_mgr_trigger_notify { int64_t frame_id; uint32_t trigger; uint64_t sof_timestamp_val; + uint64_t sof_boottime; uint64_t req_id; int32_t trigger_id; }; @@ -249,6 +258,8 @@ struct cam_req_mgr_timer_notify { * @frame_id : frame id for internal tracking * @trigger : trigger point of this notification, CRM will send apply * @sof_timestamp_val : Captured time stamp value at sof hw event + * @sof_boottime_val : Captured boottime stamp value at sof hw event + * @need_recovery : flag to check if recovery is needed * @error : what error device hit while processing this req */ struct cam_req_mgr_error_notify { @@ -258,6 +269,8 @@ struct cam_req_mgr_error_notify { int64_t frame_id; uint32_t trigger; uint64_t sof_timestamp_val; + uint64_t sof_boottime_val; + bool need_recovery; enum cam_req_mgr_device_error error; }; @@ -302,6 +315,7 @@ struct cam_req_mgr_notify_stop { * @p_delay : delay between time settings applied and take effect * @trigger : Trigger point for the client * @trigger_on : This device provides trigger + * @sof_ts_cb : callback to real time drivers */ struct cam_req_mgr_device_info { int32_t dev_hdl; @@ -310,6 +324,7 @@ struct cam_req_mgr_device_info { enum cam_pipeline_delay p_delay; uint32_t trigger; bool trigger_on; + int32_t (*sof_ts_cb)(int32_t dev_hdl, void *data); }; /** @@ -408,4 +423,37 @@ struct cam_req_mgr_dump_info { int32_t 
link_hdl; int32_t dev_hdl; }; + +/** + * struct cam_req_mgr_dev_info + * @link_hdl : link identifier + * @state : Current substate for the activated state. + * @timestamp : time stamp for the sof event + * @boot_time : boot time stamp for the sof event + * @frame_id : frame id + * @is_applied : if ISP is in applied state + * + */ +struct cam_req_mgr_dev_info { + int32_t link_hdl; + uint32_t state; + uint64_t timestamp; + uint64_t boot_time; + uint64_t frame_id; + bool is_applied; +}; + +/** + * struct cam_req_mgr_request_change_state + * @link_hdl : link identifier + * @dev_hdl : device handle or identifier + * @req_id : request id + * + */ +struct cam_req_mgr_request_change_state { + int32_t link_hdl; + int32_t dev_hdl; + uint64_t req_id; +}; + #endif diff --git a/drivers/cam_sensor_module/cam_actuator/cam_actuator_core.c b/drivers/cam_sensor_module/cam_actuator/cam_actuator_core.c index cb63a7368e..2ccaf1e0d5 100644 --- a/drivers/cam_sensor_module/cam_actuator/cam_actuator_core.c +++ b/drivers/cam_sensor_module/cam_actuator/cam_actuator_core.c @@ -410,6 +410,7 @@ int32_t cam_actuator_publish_dev_info(struct cam_req_mgr_device_info *info) strlcpy(info->name, CAM_ACTUATOR_NAME, sizeof(info->name)); info->p_delay = 1; info->trigger = CAM_TRIGGER_POINT_SOF; + info->sof_ts_cb = NULL; return 0; } diff --git a/drivers/cam_sensor_module/cam_flash/cam_flash_core.c b/drivers/cam_sensor_module/cam_flash/cam_flash_core.c index 3483185bd5..32d7f65cbf 100644 --- a/drivers/cam_sensor_module/cam_flash/cam_flash_core.c +++ b/drivers/cam_sensor_module/cam_flash/cam_flash_core.c @@ -1784,6 +1784,8 @@ int cam_flash_publish_dev_info(struct cam_req_mgr_device_info *info) strlcpy(info->name, CAM_FLASH_NAME, sizeof(info->name)); info->p_delay = CAM_FLASH_PIPELINE_DELAY; info->trigger = CAM_TRIGGER_POINT_SOF; + info->sof_ts_cb = NULL; + return 0; } diff --git a/drivers/cam_sensor_module/cam_sensor/cam_sensor_core.c b/drivers/cam_sensor_module/cam_sensor/cam_sensor_core.c index 
47799af936..bb237f1d56 100644 --- a/drivers/cam_sensor_module/cam_sensor/cam_sensor_core.c +++ b/drivers/cam_sensor_module/cam_sensor/cam_sensor_core.c @@ -1216,6 +1216,7 @@ int cam_sensor_publish_dev_info(struct cam_req_mgr_device_info *info) else info->p_delay = 2; info->trigger = CAM_TRIGGER_POINT_SOF; + info->sof_ts_cb = NULL; return rc; }