msm: camera: reqmgr: Fix request sync issues

Fix several request sync issues.

CRs-Fixed: 3029196
Change-Id: I92b4048b392fa19e2c9748a5ee0efd9ef0b4e680
Signed-off-by: Depeng Shao <depengs@codeaurora.org>
Depeng Shao
2021-09-18 10:39:10 +08:00
parent e589c42a5b
commit c8ac17dee6
16 changed files with 787 additions and 2332 deletions

View file

@@ -182,60 +182,6 @@ int cam_context_handle_crm_apply_req(struct cam_context *ctx,
return rc;
}
int cam_context_handle_crm_signal_buf_done(struct cam_context *ctx,
struct cam_req_mgr_signal_info *state_info)
{
int rc;
if (!ctx->state_machine) {
CAM_ERR(CAM_CORE, "Context is not ready");
return -EINVAL;
}
if (!state_info) {
CAM_ERR(CAM_CORE, "Invalid change state payload");
return -EINVAL;
}
if (ctx->state_machine[ctx->state].crm_ops.signal_buf_done) {
rc = ctx->state_machine[ctx->state].crm_ops.signal_buf_done(ctx,
state_info);
} else {
CAM_ERR(CAM_CORE, "No crm change state req in dev %d, state %d",
ctx->dev_hdl, ctx->state);
rc = -EPROTO;
}
return rc;
}
int cam_context_handle_crm_state_change(struct cam_context *ctx,
struct cam_req_mgr_request_change_state *state_info)
{
int rc;
if (!ctx->state_machine) {
CAM_ERR(CAM_CORE, "Context is not ready");
return -EINVAL;
}
if (!state_info) {
CAM_ERR(CAM_CORE, "Invalid change state payload");
return -EINVAL;
}
if (ctx->state_machine[ctx->state].crm_ops.change_state) {
rc = ctx->state_machine[ctx->state].crm_ops.change_state(ctx,
state_info);
} else {
CAM_ERR(CAM_CORE, "No crm change state req in dev %d, state %d",
ctx->dev_hdl, ctx->state);
rc = -EPROTO;
}
return rc;
}
int cam_context_handle_crm_notify_frame_skip(
struct cam_context *ctx,
struct cam_req_mgr_apply_request *apply)

View file

@@ -130,8 +130,6 @@ struct cam_ctx_ioctl_ops {
* @flush_req: Flush request to remove request ids
* @process_evt: Handle event notification from CRM.(optional)
* @dump_req: Dump information for the issue request
* @change_state: Change sub-state of hw context layer to bubble
* @signal_buf_done Notify device to signal buf done
*
*/
struct cam_ctx_crm_ops {
@@ -151,10 +149,6 @@ struct cam_ctx_crm_ops {
struct cam_req_mgr_link_evt_data *evt_data);
int (*dump_req)(struct cam_context *ctx,
struct cam_req_mgr_dump_info *dump);
int (*change_state)(struct cam_context *ctx,
struct cam_req_mgr_request_change_state *change_state);
int (*signal_buf_done)(struct cam_context *ctx,
struct cam_req_mgr_signal_info *signal_info);
};
@@ -341,30 +335,6 @@ int cam_context_handle_crm_unlink(struct cam_context *ctx,
int cam_context_handle_crm_apply_req(struct cam_context *ctx,
struct cam_req_mgr_apply_request *apply);
/**
* cam_context_handle_crm_state_change()
*
* @brief: Handle state change request
*
* @ctx: Object pointer for cam_context
* @state_info: State change request command payload
*
*/
int cam_context_handle_crm_state_change(struct cam_context *ctx,
struct cam_req_mgr_request_change_state *state_info);
/**
* cam_context_handle_crm_signal_buf_done()
*
* @brief: Handle signal buf done command
*
* @ctx: Object pointer for cam_context
* @signal_info Signal buf done request command payload
*
*/
int cam_context_handle_crm_signal_buf_done(struct cam_context *ctx,
struct cam_req_mgr_signal_info *signal_info);
/**
* cam_context_handle_crm_notify_frame_skip()
*

View file

@@ -619,42 +619,6 @@ static int __cam_node_crm_flush_req(struct cam_req_mgr_flush_request *flush)
return cam_context_handle_crm_flush_req(ctx, flush);
}
static int __cam_req_mgr_signal_buf_done(
struct cam_req_mgr_signal_info *signal_buf_done_info)
{
struct cam_context *ctx = NULL;
if (!signal_buf_done_info)
return -EINVAL;
ctx = (struct cam_context *) cam_get_device_priv(signal_buf_done_info->dev_hdl);
if (!ctx) {
CAM_ERR(CAM_CORE, "Can not get context for handle %d",
signal_buf_done_info->dev_hdl);
return -EINVAL;
}
return cam_context_handle_crm_signal_buf_done(ctx, signal_buf_done_info);
}
static int __cam_node_crm_state_change_req(
struct cam_req_mgr_request_change_state *state_info)
{
struct cam_context *ctx = NULL;
if (!state_info)
return -EINVAL;
ctx = (struct cam_context *) cam_get_device_priv(state_info->dev_hdl);
if (!ctx) {
CAM_ERR(CAM_CORE, "Can not get context for handle %d",
state_info->dev_hdl);
return -EINVAL;
}
return cam_context_handle_crm_state_change(ctx, state_info);
}
static int __cam_node_crm_process_evt(
struct cam_req_mgr_link_evt_data *evt_data)
{
@@ -751,8 +715,6 @@ int cam_node_init(struct cam_node *node, struct cam_hw_mgr_intf *hw_mgr_intf,
node->crm_node_intf.dump_req = __cam_node_crm_dump_req;
node->crm_node_intf.notify_frame_skip =
__cam_node_crm_notify_frame_skip;
node->crm_node_intf.change_state = __cam_node_crm_state_change_req;
node->crm_node_intf.signal_buf_done = __cam_req_mgr_signal_buf_done;
mutex_init(&node->list_mutex);
INIT_LIST_HEAD(&node->free_ctx_list);

View file

@@ -549,7 +549,6 @@ static int __cam_custom_ctx_get_dev_info_in_acquired(struct cam_context *ctx,
dev_info->dev_id = CAM_REQ_MGR_DEVICE_CUSTOM_HW;
dev_info->p_delay = 1;
dev_info->trigger = CAM_TRIGGER_POINT_SOF;
dev_info->sof_ts_cb = NULL;
return 0;
}

View file

@@ -100,87 +100,6 @@ static void __cam_isp_ctx_update_event_record(
ctx_isp->event_record[event][iterator].timestamp = cur_time;
}
static int cam_isp_ctx_sync_signal_on_buf_done_ready(
struct cam_context *ctx,
struct cam_ctx_request *req,
uint32_t status, uint32_t event_cause)
{
struct cam_isp_ctx_req *req_isp;
int k = 0, rc = 0;
req_isp = (struct cam_isp_ctx_req *) req->req_priv;
if (!req_isp->buf_done_mask)
return rc;
for (k = 0; k < req_isp->num_fence_map_out; k++) {
if (req_isp->buf_done_mask & (1 << k)) {
rc = cam_sync_signal(req_isp->fence_map_out[k].sync_id,
status, event_cause);
if (rc) {
CAM_ERR(CAM_ISP,
"ctx[%d] : Sync signal for Req %llu, sync_id %d status=%d failed with rc = %d",
ctx->ctx_id, req->request_id,
req_isp->fence_map_out[k].sync_id,
status, rc);
return rc;
} else {
CAM_DBG(CAM_ISP,
"ctx[%d] : Sync signal success for Req %llu, sync_id %d status=%d",
ctx->ctx_id, req->request_id,
req_isp->fence_map_out[k].sync_id, status);
req_isp->fence_map_out[k].sync_id = -1;
}
}
}
req_isp->buf_done_mask = 0;
return rc;
}
static int cam_isp_ctx_handle_sync_signal(
struct cam_context *ctx,
struct cam_ctx_request *req, int32_t sync_index,
uint32_t status, uint32_t event_cause)
{
struct cam_isp_ctx_req *req_isp;
int32_t buf_done_ready = 0;
int rc = 0;
req_isp = (struct cam_isp_ctx_req *) req->req_priv;
buf_done_ready = atomic_read(&req_isp->buf_done_ready);
if (buf_done_ready != INIT_BUF_DONE) {
rc = cam_isp_ctx_sync_signal_on_buf_done_ready(
ctx, req, status, event_cause);
if (rc) {
CAM_DBG(CAM_ISP,
"Sync failed with rc = %d", rc);
return rc;
}
rc = cam_sync_signal(req_isp->fence_map_out[sync_index].sync_id,
status,
event_cause);
if (rc) {
CAM_DBG(CAM_ISP,
"ctx[%d] :Sync failed Req %llu, sync_id %d status %d with rc = %d",
ctx->ctx_id, req->request_id,
req_isp->fence_map_out[sync_index].sync_id,
status, rc);
return rc;
}
CAM_DBG(CAM_ISP,
"ctx[%d] : Sync signal success for Req %llu, sync_id %d status %d",
ctx->ctx_id, req->request_id,
req_isp->fence_map_out[sync_index].sync_id,
status);
req_isp->fence_map_out[sync_index].sync_id = -1;
} else {
req_isp->buf_done_mask |= 1 << sync_index;
}
return rc;
}
static int __cam_isp_ctx_dump_event_record(
struct cam_isp_context *ctx_isp,
uintptr_t cpu_addr,
@@ -766,7 +685,6 @@ static int __cam_isp_ctx_enqueue_init_request(
req_isp_new->hw_update_data.frame_header_res_id;
req_isp_old->hw_update_data.frame_header_cpu_addr =
hw_update_data->frame_header_cpu_addr;
req_isp_old->hw_update_data.fps = req_isp_new->hw_update_data.fps;
req_old->request_id = req->request_id;
@@ -1001,9 +919,9 @@ static void __cam_isp_ctx_send_sof_timestamp(
req_msg.u.frame_msg.frame_id_meta = ctx_isp->frame_id_meta;
CAM_DBG(CAM_ISP,
"link hdl 0x%x request id:%lld frame number:%lld SOF time stamp:%lld status:%u ctx %d",
ctx_isp->base->link_hdl, request_id, ctx_isp->frame_id,
ctx_isp->sof_timestamp_val, sof_event_status, ctx_isp->base->ctx_id);
"request id:%lld frame number:%lld SOF time stamp:0x%llx status:%u",
request_id, ctx_isp->frame_id,
ctx_isp->sof_timestamp_val, sof_event_status);
if (cam_req_mgr_notify_message(&req_msg,
V4L_EVENT_CAM_REQ_MGR_SOF, V4L_EVENT_CAM_REQ_MGR_EVENT))
@@ -1074,7 +992,6 @@ static int __cam_isp_ctx_handle_buf_done_for_req_list(
atomic_set(&ctx_isp->process_bubble, 0);
req_isp->cdm_reset_before_apply = false;
ctx_isp->bubble_frame_cnt = 0;
atomic_set(&req_isp->buf_done_ready, 0);
if (buf_done_req_id <= ctx->last_flush_req) {
for (i = 0; i < req_isp->num_fence_map_out; i++)
@@ -1105,11 +1022,6 @@ static int __cam_isp_ctx_handle_buf_done_for_req_list(
CAM_REQ_MGR_SOF_EVENT_SUCCESS);
}
}
rc = cam_isp_ctx_sync_signal_on_buf_done_ready(
ctx, req, CAM_SYNC_STATE_SIGNALED_SUCCESS,
CAM_SYNC_COMMON_EVENT_SUCCESS);
list_del_init(&req->list);
list_add_tail(&req->list, &ctx->free_req_list);
req_isp->reapply = false;
@@ -1226,29 +1138,19 @@ static int __cam_isp_ctx_handle_buf_done_for_request(
}
if (!req_isp->bubble_detected) {
if (req_isp->is_sync_mode) {
CAM_DBG(CAM_ISP,
"Hold sync signal: req %lld res 0x%x fd 0x%x, ctx %u",
req->request_id,
req_isp->fence_map_out[j].resource_handle,
req_isp->fence_map_out[j].sync_id,
ctx->ctx_id);
rc = cam_isp_ctx_handle_sync_signal(ctx, req, j,
CAM_SYNC_STATE_SIGNALED_SUCCESS,
CAM_SYNC_COMMON_EVENT_SUCCESS);
} else {
CAM_DBG(CAM_ISP,
"Sync with success: req %lld res 0x%x fd 0x%x, ctx %u",
req->request_id,
req_isp->fence_map_out[j].resource_handle,
req_isp->fence_map_out[j].sync_id,
ctx->ctx_id);
rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
CAM_SYNC_STATE_SIGNALED_SUCCESS,
CAM_SYNC_COMMON_EVENT_SUCCESS);
if (rc)
CAM_DBG(CAM_ISP, "Sync failed with rc = %d", rc);
}
CAM_DBG(CAM_ISP, "Sync failed with rc = %d",
rc);
} else if (!req_isp->bubble_report) {
CAM_DBG(CAM_ISP,
"Sync with failure: req %lld res 0x%x fd 0x%x, ctx %u",
@@ -1280,8 +1182,10 @@ static int __cam_isp_ctx_handle_buf_done_for_request(
CAM_DBG(CAM_ISP, "req %lld, reset sync id 0x%x ctx %u",
req->request_id,
req_isp->fence_map_out[j].sync_id, ctx->ctx_id);
if (!rc)
if (!rc) {
req_isp->num_acked++;
req_isp->fence_map_out[j].sync_id = -1;
}
if ((ctx_isp->use_frame_header_ts) &&
(req_isp->hw_update_data.frame_header_res_id ==
@@ -1348,10 +1252,7 @@ static int __cam_isp_handle_deferred_buf_done(
"ctx[%d] : Req %llu, status=%d res=0x%x should never happen",
ctx->ctx_id, req->request_id, status,
req_isp->fence_map_out[j].resource_handle);
if (req_isp->is_sync_mode) {
rc = cam_isp_ctx_handle_sync_signal(
ctx, req, j, status, event_cause);
} else {
rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
status, event_cause);
if (rc) {
@@ -1364,7 +1265,6 @@ static int __cam_isp_handle_deferred_buf_done(
req_isp->num_acked++;
req_isp->fence_map_out[j].sync_id = -1;
}
}
} else {
req_isp->num_acked++;
}
@@ -1474,27 +1374,16 @@ static int __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
req_isp->fence_map_out[j].sync_id);
continue;
} else if (!req_isp->bubble_detected) {
if (req_isp->is_sync_mode) {
CAM_DBG(CAM_ISP,
"Hold sync signal: req %lld res 0x%x fd 0x%x, ctx %u",
req->request_id,
req_isp->fence_map_out[j].resource_handle,
req_isp->fence_map_out[j].sync_id,
ctx->ctx_id);
rc = cam_isp_ctx_handle_sync_signal(ctx, req, j,
CAM_SYNC_STATE_SIGNALED_SUCCESS,
CAM_SYNC_COMMON_EVENT_SUCCESS);
} else {
CAM_DBG(CAM_ISP,
"Sync with success: req %lld res 0x%x fd 0x%x, ctx %u",
req->request_id,
req_isp->fence_map_out[j].resource_handle,
req_isp->fence_map_out[j].sync_id,
ctx->ctx_id);
rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
CAM_SYNC_STATE_SIGNALED_SUCCESS,
CAM_SYNC_COMMON_EVENT_SUCCESS);
}
if (rc) {
CAM_ERR(CAM_ISP, "Sync = %u for req = %llu failed with rc = %d",
req_isp->fence_map_out[j].sync_id, req->request_id, rc);
@@ -1505,9 +1394,6 @@ static int __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
CAM_SYNC_STATE_SIGNALED_SUCCESS,
CAM_SYNC_COMMON_EVENT_SUCCESS);
}
/* Reset fence */
if (!req_isp->is_sync_mode)
req_isp->fence_map_out[j].sync_id = -1;
} else if (!req_isp->bubble_report) {
CAM_DBG(CAM_ISP,
"Sync with failure: req %lld res 0x%x fd 0x%x, ctx %u",
@@ -1553,7 +1439,12 @@ static int __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
continue;
}
CAM_DBG(CAM_ISP, "req %lld, reset sync id 0x%x ctx %u",
req->request_id,
req_isp->fence_map_out[j].sync_id, ctx->ctx_id);
if (!rc) {
req_isp->num_acked++;
}
if ((ctx_isp->use_frame_header_ts) &&
(req_isp->hw_update_data.frame_header_res_id ==
@@ -1964,7 +1855,6 @@ static int __cam_isp_ctx_reg_upd_in_applied_state(
struct cam_context *ctx = ctx_isp->base;
struct cam_isp_ctx_req *req_isp;
uint64_t request_id = 0;
struct cam_req_mgr_notify_rup notify_rup_info;
if (list_empty(&ctx->wait_req_list)) {
CAM_ERR(CAM_ISP, "Reg upd ack with no waiting request");
@@ -1972,28 +1862,16 @@ static int __cam_isp_ctx_reg_upd_in_applied_state(
}
req = list_first_entry(&ctx->wait_req_list,
struct cam_ctx_request, list);
list_del_init(&req->list);
req_isp = (struct cam_isp_ctx_req *) req->req_priv;
if (req_isp->num_fence_map_out != 0) {
list_add_tail(&req->list, &ctx->active_req_list);
ctx_isp->active_req_cnt++;
if (req_isp->is_sync_mode && !atomic_read(&req_isp->buf_done_ready)) {
request_id = req->request_id;
notify_rup_info.link_hdl = ctx->link_hdl;
notify_rup_info.req_id = request_id;
ctx->ctx_crm_intf->notify_rup(&notify_rup_info);
atomic_set(&req_isp->buf_done_ready, notify_rup_info.state);
}
CAM_DBG(CAM_REQ,
"move request %lld to active list(cnt = %d), state %d sync mode %d ctx %u link %x",
req->request_id, ctx_isp->active_req_cnt,
notify_rup_info.state,
req_isp->is_sync_mode,
ctx->ctx_id, ctx->link_hdl);
"move request %lld to active list(cnt = %d), ctx %u",
req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
__cam_isp_ctx_update_event_record(ctx_isp,
CAM_ISP_CTX_EVENT_RUP, req);
} else {
@@ -2128,7 +2006,6 @@ notify_only:
*/
if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_trigger &&
ctx_isp->active_req_cnt <= 2) {
if (ctx_isp->subscribe_event & CAM_TRIGGER_POINT_SOF) {
notify.link_hdl = ctx->link_hdl;
notify.dev_hdl = ctx->dev_hdl;
@@ -2136,21 +2013,11 @@ notify_only:
notify.trigger = CAM_TRIGGER_POINT_SOF;
notify.req_id = ctx_isp->req_info.last_bufdone_req_id;
notify.sof_timestamp_val = ctx_isp->sof_timestamp_val;
notify.sof_boottime = ctx_isp->boot_timestamp;
notify.trigger_id = ctx_isp->trigger_id;
if (!list_empty(&ctx->active_req_list)) {
req = list_first_entry(&ctx->active_req_list,
struct cam_ctx_request, list);
req_isp = (struct cam_isp_ctx_req *) req->req_priv;
if (req_isp->hw_update_data.fps != -1)
ctx_isp->fps = req_isp->hw_update_data.fps;
}
notify.fps = ctx_isp->fps;
ctx->ctx_crm_intf->notify_trigger(&notify);
CAM_DBG(CAM_ISP, "Notify CRM SOF frame %lld ctx %u fps %d",
ctx_isp->frame_id, ctx->ctx_id, ctx_isp->fps);
CAM_DBG(CAM_ISP, "Notify CRM SOF frame %lld ctx %u",
ctx_isp->frame_id, ctx->ctx_id);
}
list_for_each_entry(req, &ctx->active_req_list, list) {
@@ -2267,9 +2134,8 @@ static int __cam_isp_ctx_sof_in_activated_state(
__cam_isp_ctx_update_state_monitor_array(ctx_isp,
CAM_ISP_STATE_CHANGE_TRIGGER_SOF, request_id);
CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx, ctx %u link %x",
ctx_isp->frame_id, ctx_isp->sof_timestamp_val,
ctx->ctx_id, ctx->link_hdl);
CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx, ctx %u",
ctx_isp->frame_id, ctx_isp->sof_timestamp_val, ctx->ctx_id);
return rc;
}
@@ -2314,7 +2180,6 @@ end:
static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp,
void *evt_data)
{
bool rc = false;
uint64_t request_id = 0;
uint32_t sof_event_status = CAM_REQ_MGR_SOF_EVENT_SUCCESS;
struct cam_req_mgr_trigger_notify notify;
@@ -2356,9 +2221,8 @@ static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp,
req_isp->reapply = true;
req_isp->cdm_reset_before_apply = false;
CAM_INFO_RATE_LIMIT(CAM_ISP, "ctx:%d link %x Report Bubble flag %d req id:%lld",
ctx->ctx_id, ctx->link_hdl,
req_isp->bubble_report, req->request_id);
CAM_INFO_RATE_LIMIT(CAM_ISP, "ctx:%d Report Bubble flag %d req id:%lld",
ctx->ctx_id, req_isp->bubble_report, req->request_id);
if (req_isp->bubble_report && ctx->ctx_crm_intf &&
ctx->ctx_crm_intf->notify_err) {
struct cam_req_mgr_error_notify notify;
@@ -2372,25 +2236,12 @@ static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp,
notify.trigger = CAM_TRIGGER_POINT_SOF;
notify.frame_id = ctx_isp->frame_id;
notify.sof_timestamp_val = ctx_isp->sof_timestamp_val;
notify.sof_boottime_val = ctx_isp->boot_timestamp;
notify.need_recovery = true;
CAM_WARN_RATE_LIMIT(CAM_ISP,
"Notify CRM about Bubble req %lld frame %lld, ctx %u",
req->request_id, ctx_isp->frame_id, ctx->ctx_id);
trace_cam_log_event("Bubble", "Rcvd epoch in applied state",
req->request_id, ctx->ctx_id);
rc = ctx->ctx_crm_intf->notify_err(&notify);
if (rc) {
req_isp->bubble_detected = false;
req_isp->reapply = false;
ctx_isp->substate_activated =
CAM_ISP_CTX_ACTIVATED_APPLIED;
CAM_DBG(CAM_ISP, "Disable bubble for ctx %d link %d",
ctx->ctx_id, ctx->link_hdl);
return 0;
}
ctx->ctx_crm_intf->notify_err(&notify);
atomic_set(&ctx_isp->process_bubble, 1);
} else {
req_isp->bubble_report = 0;
@@ -2532,11 +2383,10 @@ static int __cam_isp_ctx_sof_in_epoch(struct cam_isp_context *ctx_isp,
__cam_isp_ctx_substate_val_to_type(
ctx_isp->substate_activated));
CAM_DBG(CAM_ISP, "SOF in epoch ctx:%d frame_id:%lld next substate:%s time stamp:0x%llx",
CAM_DBG(CAM_ISP, "SOF in epoch ctx:%d frame_id:%lld next substate:%s",
ctx->ctx_id, ctx_isp->frame_id,
__cam_isp_ctx_substate_val_to_type(
ctx_isp->substate_activated),
ctx_isp->sof_timestamp_val);
ctx_isp->substate_activated));
return rc;
}
@@ -2566,7 +2416,6 @@ static int __cam_isp_ctx_buf_done_in_bubble(
static int __cam_isp_ctx_epoch_in_bubble_applied(
struct cam_isp_context *ctx_isp, void *evt_data)
{
int rc = 0;
uint64_t request_id = 0;
struct cam_req_mgr_trigger_notify notify;
struct cam_ctx_request *req;
@@ -2624,17 +2473,10 @@ static int __cam_isp_ctx_epoch_in_bubble_applied(
notify.trigger = CAM_TRIGGER_POINT_SOF;
notify.frame_id = ctx_isp->frame_id;
notify.sof_timestamp_val = ctx_isp->sof_timestamp_val;
notify.sof_boottime_val = ctx_isp->boot_timestamp;
notify.need_recovery = true;
CAM_WARN_RATE_LIMIT(CAM_REQ,
"Notify CRM about Bubble req_id %llu frame %lld, ctx %u",
req->request_id, ctx_isp->frame_id, ctx->ctx_id);
rc = ctx->ctx_crm_intf->notify_err(&notify);
if (rc) {
req_isp->bubble_detected = false;
req_isp->reapply = false;
return 0;
}
ctx->ctx_crm_intf->notify_err(&notify);
atomic_set(&ctx_isp->process_bubble, 1);
} else {
req_isp->bubble_report = 0;
@@ -3514,10 +3356,10 @@ static int __cam_isp_ctx_apply_req_in_activated_state(
goto end;
}
CAM_DBG(CAM_REQ, "Apply request %lld in Substate[%s] ctx %u link %x",
CAM_DBG(CAM_REQ, "Apply request %lld in Substate[%s] ctx %u",
req->request_id,
__cam_isp_ctx_substate_val_to_type(ctx_isp->substate_activated),
ctx->ctx_id, ctx->link_hdl);
ctx->ctx_id);
req_isp = (struct cam_isp_ctx_req *) req->req_priv;
if (ctx_isp->active_req_cnt >= 2) {
@@ -3549,7 +3391,6 @@ static int __cam_isp_ctx_apply_req_in_activated_state(
goto end;
}
req_isp->bubble_report = apply->report_if_bubble;
req_isp->is_sync_mode = apply->is_sync_mode;
cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
cfg.request_id = req->request_id;
@@ -3601,121 +3442,6 @@ end:
return rc;
}
static int __cam_isp_ctx_signal_buf_done(
struct cam_context *ctx,
struct cam_req_mgr_signal_info *signal_buf_done)
{
struct cam_ctx_request *req = NULL;
struct cam_isp_ctx_req *req_isp = NULL;
if (!list_empty(&ctx->wait_req_list)) {
req = list_first_entry(&ctx->wait_req_list,
struct cam_ctx_request,
list);
if (req->request_id == signal_buf_done->req_id) {
req_isp = (struct cam_isp_ctx_req *)req->req_priv;
atomic_set(&req_isp->buf_done_ready, signal_buf_done->state);
goto end;
}
}
if (!list_empty(&ctx->active_req_list)) {
req = list_first_entry(&ctx->active_req_list,
struct cam_ctx_request,
list);
if (req->request_id == signal_buf_done->req_id) {
req_isp = (struct cam_isp_ctx_req *)req->req_priv;
atomic_set(&req_isp->buf_done_ready, signal_buf_done->state);
goto end;
}
}
CAM_WARN(CAM_ISP, "Request %lld not found in ctx : %d",
signal_buf_done->req_id,
ctx->ctx_id);
end:
return 0;
}
static int __cam_isp_ctx_change_substate(
struct cam_context *ctx,
struct cam_req_mgr_request_change_state *state_info)
{
int rc = 0;
uint64_t request_id = 0;
struct cam_ctx_request *req = NULL;
struct cam_ctx_request *req_temp = NULL;
struct cam_ctx_request *bubble_req = NULL;
struct cam_isp_ctx_req *req_isp = NULL;
struct cam_isp_context *ctx_isp =
(struct cam_isp_context *) ctx->ctx_priv;
spin_lock_bh(&ctx->lock);
if (!list_empty(&ctx->wait_req_list)) {
req = list_first_entry(&ctx->wait_req_list,
struct cam_ctx_request,
list);
if (req->request_id == state_info->req_id) {
req_isp = (struct cam_isp_ctx_req *)req->req_priv;
req_isp->bubble_detected = true;
req_isp->reapply = true;
bubble_req = req;
ctx_isp->active_req_cnt++;
list_del_init(&req->list);
list_add_tail(&req->list, &ctx->active_req_list);
spin_unlock_bh(&ctx->lock);
goto end;
}
} else {
CAM_ERR(CAM_ISP, "Ctx:%d No wait request", ctx->ctx_id);
}
spin_unlock_bh(&ctx->lock);
if (!bubble_req) {
list_for_each_entry_safe(req, req_temp,
&ctx->active_req_list, list) {
if (req->request_id == state_info->req_id) {
req_isp =
(struct cam_isp_ctx_req *)req->req_priv;
req_isp->bubble_detected = true;
req_isp->reapply = true;
bubble_req = req;
break;
}
}
}
if (!bubble_req) {
CAM_ERR(CAM_ISP, "Req %lld not in active list ctx : %d",
state_info->req_id,
ctx->ctx_id);
goto done;
}
end:
if (req_isp->bubble_report)
atomic_set(&ctx_isp->process_bubble, 1);
if ((req->request_id > ctx_isp->reported_req_id)
&& !req_isp->bubble_report) {
request_id = req->request_id;
ctx_isp->reported_req_id = request_id;
}
__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
CAM_REQ_MGR_SOF_EVENT_ERROR);
ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
CAM_DBG(CAM_ISP, "next Substate[%s] ctx : %d",
__cam_isp_ctx_substate_val_to_type(
ctx_isp->substate_activated), ctx->ctx_id);
done:
return rc;
}
static int __cam_isp_ctx_apply_req_in_sof(
struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
{
@@ -3776,8 +3502,8 @@ static int __cam_isp_ctx_apply_req_in_bubble(
ctx_isp->substate_activated));
rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED);
CAM_DBG(CAM_ISP, "ctx %d link %x new Substate[%s]",
ctx->ctx_id, ctx->link_hdl, __cam_isp_ctx_substate_val_to_type(
CAM_DBG(CAM_ISP, "new Substate[%s]",
__cam_isp_ctx_substate_val_to_type(
ctx_isp->substate_activated));
if (rc)
@@ -4167,7 +3893,6 @@ static int __cam_isp_ctx_flush_req_in_top_state(
flush_req);
ctx_isp->active_req_cnt = 0;
spin_unlock_bh(&ctx->lock);
reset_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
@@ -4217,18 +3942,13 @@ static struct cam_ctx_ops
.apply_req = __cam_isp_ctx_apply_req_in_sof,
.notify_frame_skip =
__cam_isp_ctx_apply_default_req_settings,
.change_state = __cam_isp_ctx_change_substate,
.signal_buf_done = __cam_isp_ctx_signal_buf_done,
},
.irq_ops = NULL,
},
/* APPLIED */
{
.ioctl_ops = {},
.crm_ops = {
.change_state = __cam_isp_ctx_change_substate,
.signal_buf_done = __cam_isp_ctx_signal_buf_done,
},
.crm_ops = {},
.irq_ops = NULL,
},
/* EPOCH */
@@ -4238,8 +3958,6 @@ static struct cam_ctx_ops
.apply_req = __cam_isp_ctx_apply_req_in_epoch,
.notify_frame_skip =
__cam_isp_ctx_apply_default_req_settings,
.change_state = __cam_isp_ctx_change_substate,
.signal_buf_done = __cam_isp_ctx_signal_buf_done,
},
.irq_ops = NULL,
},
@@ -4250,17 +3968,13 @@ static struct cam_ctx_ops
.apply_req = __cam_isp_ctx_apply_req_in_bubble,
.notify_frame_skip =
__cam_isp_ctx_apply_default_req_settings,
.change_state = __cam_isp_ctx_change_substate,
},
.irq_ops = NULL,
},
/* Bubble Applied */
{
.ioctl_ops = {},
.crm_ops = {
.change_state = __cam_isp_ctx_change_substate,
.signal_buf_done = __cam_isp_ctx_signal_buf_done,
},
.crm_ops = {},
.irq_ops = NULL,
},
/* HW ERROR */
@@ -5171,9 +4885,6 @@ static int __cam_isp_ctx_config_dev_in_top_state(
cfg.num_out_map_entries = 0;
cfg.num_in_map_entries = 0;
memset(&req_isp->hw_update_data, 0, sizeof(req_isp->hw_update_data));
req_isp->hw_update_data.fps = -1;
req_isp->is_sync_mode = false;
req_isp->hw_update_data.packet = packet;
rc = ctx->hw_mgr_intf->hw_prepare_update(
ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
@@ -5190,7 +4901,7 @@ static int __cam_isp_ctx_config_dev_in_top_state(
req_isp->num_deferred_acks = 0;
req_isp->bubble_detected = false;
req_isp->cdm_reset_before_apply = false;
atomic_set(&req_isp->buf_done_ready, INIT_BUF_DONE);
req_isp->hw_update_data.packet = packet;
for (i = 0; i < req_isp->num_fence_map_out; i++) {
rc = cam_sync_get_obj_ref(req_isp->fence_map_out[i].sync_id);
@@ -6002,47 +5713,6 @@ static int __cam_isp_ctx_unlink_in_acquired(struct cam_context *ctx,
return rc;
}
static int __cam_isp_ctx_get_isp_info(int32_t dev_hdl, void *data)
{
int rc = 0;
struct cam_context *ctx;
struct cam_req_mgr_dev_info *isp_dev = data;
struct cam_isp_context *isp_ctx = NULL;
ctx = (struct cam_context *)cam_get_device_priv(dev_hdl);
if (!ctx) {
CAM_ERR(CAM_ISP, "Cannot get context for handle %d", dev_hdl);
return -EINVAL;
}
isp_ctx = (struct cam_isp_context *)ctx->ctx_priv;
isp_dev->state = isp_ctx->substate_activated;
isp_dev->timestamp = isp_ctx->sof_timestamp_val;
isp_dev->boot_time = isp_ctx->boot_timestamp;
isp_dev->frame_id = isp_ctx->frame_id;
if ((isp_ctx->substate_activated ==
CAM_ISP_CTX_ACTIVATED_APPLIED) ||
(isp_ctx->substate_activated ==
CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED)) {
if (!list_empty(&ctx->wait_req_list)) {
struct cam_ctx_request *req;
req = list_first_entry(&ctx->wait_req_list,
struct cam_ctx_request, list);
isp_dev->bubble_req = req->request_id;
}
isp_dev->is_applied = true;
} else {
isp_dev->is_applied = false;
isp_dev->bubble_req = 0;
}
return rc;
}
static int __cam_isp_ctx_get_dev_info_in_acquired(struct cam_context *ctx,
struct cam_req_mgr_device_info *dev_info)
{
@@ -6054,7 +5724,6 @@ static int __cam_isp_ctx_get_dev_info_in_acquired(struct cam_context *ctx,
dev_info->p_delay = 1;
dev_info->trigger = CAM_TRIGGER_POINT_SOF;
dev_info->trigger_on = true;
dev_info->sof_ts_cb = &__cam_isp_ctx_get_isp_info;
return rc;
}
@@ -6112,12 +5781,9 @@ static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
atomic_set(&ctx_isp->process_bubble, 0);
atomic_set(&ctx_isp->rxd_epoch, 0);
ctx_isp->frame_id = 0;
ctx_isp->sof_timestamp_val = 0;
ctx_isp->boot_timestamp = 0;
ctx_isp->active_req_cnt = 0;
ctx_isp->reported_req_id = 0;
ctx_isp->bubble_frame_cnt = 0;
ctx_isp->fps = req_isp->hw_update_data.fps;
ctx_isp->substate_activated = ctx_isp->rdi_only_context ?
CAM_ISP_CTX_ACTIVATED_APPLIED :
(req_isp->num_fence_map_out) ? CAM_ISP_CTX_ACTIVATED_EPOCH :
@@ -6299,7 +5965,6 @@ static int __cam_isp_ctx_stop_dev_in_activated_unlock(
ctx_isp->last_applied_req_id = 0;
ctx_isp->req_info.last_bufdone_req_id = 0;
ctx_isp->bubble_frame_cnt = 0;
ctx_isp->fps = -1;
atomic_set(&ctx_isp->process_bubble, 0);
atomic_set(&ctx_isp->rxd_epoch, 0);
atomic64_set(&ctx_isp->state_monitor_head, -1);
@@ -6484,60 +6149,6 @@ static int __cam_isp_ctx_unlink_in_activated(struct cam_context *ctx,
return rc;
}
static int __cam_isp_ctx_signal_buf_done_req(struct cam_context *ctx,
struct cam_req_mgr_signal_info *signal_buf_done)
{
int rc = 0;
struct cam_ctx_ops *ctx_ops = NULL;
struct cam_isp_context *ctx_isp =
(struct cam_isp_context *) ctx->ctx_priv;
CAM_DBG(CAM_ISP, "Enter: signal buf done ctx id %d link 0x%x",
ctx->ctx_id, ctx->link_hdl);
ctx_ops = &ctx_isp->substate_machine[ctx_isp->substate_activated];
if (ctx_ops->crm_ops.signal_buf_done) {
rc = ctx_ops->crm_ops.signal_buf_done(ctx, signal_buf_done);
} else {
CAM_WARN_RATE_LIMIT(CAM_ISP,
"No handle function in activated Substate[%s]",
__cam_isp_ctx_substate_val_to_type(
ctx_isp->substate_activated));
rc = -EFAULT;
}
if (rc)
CAM_WARN_RATE_LIMIT(CAM_ISP,
"signal buf done failed");
return rc;
}
static int __cam_isp_ctx_change_state_req(struct cam_context *ctx,
struct cam_req_mgr_request_change_state *state_info)
{
int rc = 0;
struct cam_ctx_ops *ctx_ops = NULL;
struct cam_isp_context *ctx_isp =
(struct cam_isp_context *) ctx->ctx_priv;
CAM_DBG(CAM_ISP, "Enter: changes state ctx id %d link 0x%x",
ctx->ctx_id, ctx->link_hdl);
ctx_ops = &ctx_isp->substate_machine[ctx_isp->substate_activated];
if (ctx_ops->crm_ops.change_state) {
rc = ctx_ops->crm_ops.change_state(ctx, state_info);
} else {
CAM_WARN_RATE_LIMIT(CAM_ISP,
"No handle function in activated Substate[%s]",
__cam_isp_ctx_substate_val_to_type(
ctx_isp->substate_activated));
rc = -EFAULT;
}
if (rc)
CAM_WARN_RATE_LIMIT(CAM_ISP,
"changes state failed");
return rc;
}
static int __cam_isp_ctx_apply_req(struct cam_context *ctx,
struct cam_req_mgr_apply_request *apply)
{
@@ -6750,8 +6361,6 @@ static struct cam_ctx_ops
.flush_req = __cam_isp_ctx_flush_req_in_top_state,
.process_evt = __cam_isp_ctx_process_evt,
.dump_req = __cam_isp_ctx_dump_in_top_state,
.change_state = __cam_isp_ctx_change_state_req,
.signal_buf_done = __cam_isp_ctx_signal_buf_done_req,
},
.irq_ops = __cam_isp_ctx_handle_irq_in_activated,
.pagefault_ops = cam_isp_context_dump_requests,
@@ -6951,8 +6560,6 @@ int cam_isp_context_init(struct cam_isp_context *ctx,
ctx->base = ctx_base;
ctx->frame_id = 0;
ctx->sof_timestamp_val = 0;
ctx->boot_timestamp = 0;
ctx->custom_enabled = false;
ctx->use_frame_header_ts = false;
ctx->use_default_apply = false;

View file

@@ -157,9 +157,7 @@ struct cam_isp_ctx_irq_ops {
* @reapply: True if reapplying after bubble
* @cdm_reset_before_apply: For bubble re-apply when buf done not coming set
* to True
* @buf_done_ready Flag to check if ready to signal buf done when in sync mode
* @buf_done_mask Mask used to check number of buf done which is yet to be signaled
* @is_sync_mode If request need to be apply in sync with other link
*
*/
struct cam_isp_ctx_req {
struct cam_ctx_request *base;
@@ -179,9 +177,6 @@ struct cam_isp_ctx_req {
bool bubble_detected;
bool reapply;
bool cdm_reset_before_apply;
atomic_t buf_done_ready;
int32_t buf_done_mask;
bool is_sync_mode;
};
/**
@@ -279,7 +274,6 @@ struct cam_isp_context_event_record {
* @workq: Worker thread for offline ife
* @trigger_id: ID provided by CRM for each ctx on the link
* @last_bufdone_err_apply_req_id: last bufdone error apply request id
* @fps: Current FPS for the activated state.
*
*/
struct cam_isp_context {
@@ -329,7 +323,6 @@ struct cam_isp_context {
struct cam_req_mgr_core_workq *workq;
int32_t trigger_id;
int64_t last_bufdone_err_apply_req_id;
uint32_t fps;
};
/**

View file

@@ -9008,6 +9008,7 @@ static int cam_isp_packet_generic_blob_handler(void *user_data,
case CAM_ISP_GENERIC_BLOB_TYPE_SFE_FE_CONFIG:
case CAM_ISP_GENERIC_BLOB_TYPE_SFE_SCRATCH_BUF_CFG:
case CAM_ISP_GENERIC_BLOB_TYPE_SFE_EXP_ORDER_CFG:
case CAM_ISP_GENERIC_BLOB_TYPE_FPS_CONFIG:
break;
case CAM_ISP_GENERIC_BLOB_TYPE_DYNAMIC_MODE_SWITCH: {
struct cam_isp_mode_switch_info *mup_config;
@@ -9051,28 +9052,6 @@ static int cam_isp_packet_generic_blob_handler(void *user_data,
if (rc)
CAM_ERR(CAM_ISP,
"BW limit update failed for IFE rc: %d", rc);
}
break;
case CAM_ISP_GENERIC_BLOB_TYPE_FPS_CONFIG: {
struct cam_fps_config *fps_config;
struct cam_isp_prepare_hw_update_data *prepare_hw_data;
if (blob_size < sizeof(struct cam_fps_config)) {
CAM_ERR(CAM_ISP, "Invalid fps blob size %u expected %u",
blob_size, sizeof(struct cam_fps_config));
return -EINVAL;
}
fps_config = (struct cam_fps_config *)blob_data;
prepare_hw_data = (struct cam_isp_prepare_hw_update_data *)
prepare->priv;
if (fps_config->fps)
prepare_hw_data->fps = fps_config->fps;
CAM_DBG(CAM_ISP, "FPS value %u ctx %d req id %lld",
fps_config->fps, ife_mgr_ctx->ctx_index,
prepare_hw_data->packet->header.request_id);
}
break;
case CAM_ISP_GENERIC_BLOB_TYPE_INIT_CONFIG: {
@@ -9568,8 +9547,8 @@ static int cam_sfe_packet_generic_blob_handler(void *user_data,
case CAM_ISP_GENERIC_BLOB_TYPE_CSID_QCFA_CONFIG:
case CAM_ISP_GENERIC_BLOB_TYPE_SENSOR_BLANKING_CONFIG:
case CAM_ISP_GENERIC_BLOB_TYPE_DISCARD_INITIAL_FRAMES:
case CAM_ISP_GENERIC_BLOB_TYPE_FPS_CONFIG:
case CAM_ISP_GENERIC_BLOB_TYPE_INIT_CONFIG:
case CAM_ISP_GENERIC_BLOB_TYPE_FPS_CONFIG:
break;
default:
CAM_WARN(CAM_ISP, "Invalid blob type: %u", blob_type);

View file

@@ -194,7 +194,6 @@ struct cam_isp_bw_clk_config_info {
* @num_reg_dump_buf: Count of descriptors in reg_dump_buf_desc
* @packet CSL packet from user mode driver
* @mup_en Flag if dynamic sensor switch is enabled
* @fps: Fps value associated with this packet/request
*
*/
struct cam_isp_prepare_hw_update_data {
@@ -209,7 +208,6 @@ struct cam_isp_prepare_hw_update_data {
uint32_t num_reg_dump_buf;
struct cam_packet *packet;
bool mup_en;
int32_t fps;
};
@@ -324,7 +322,6 @@ enum cam_isp_ctx_type {
* @ctx_type: RDI_ONLY, PIX and RDI, or FS2
* @packet_op_code: Packet opcode
* @last_cdm_done: Last cdm done request
* @cam_isp_hw_sof_event_data sof event timestamp
*/
struct cam_isp_hw_cmd_args {
uint32_t cmd_type;
@@ -334,7 +331,6 @@ struct cam_isp_hw_cmd_args {
uint32_t ctx_type;
uint32_t packet_op_code;
uint64_t last_cdm_done;
struct cam_isp_hw_sof_event_data sof_done_event_data;
} u;
};

Diff not shown because this file is too large. Load diff

View file

@@ -43,10 +43,6 @@
#define VERSION_2 2
#define CAM_REQ_MGR_MAX_TRIGGERS 2
#define CAM_REQ_MGR_INIT_SYNC_REQ_NUM 5
#define CAM_REQ_MGR_SYNC_MISMATCH_THRESHOLD 3
/**
* enum crm_req_eof_trigger_type
* @codes: to identify which type of eof trigger for next slot
@@ -268,7 +264,7 @@ struct cam_req_mgr_req_tbl {
* - members updated due to external events
* @recover : if user enabled recovery for this request.
* @req_id : mask tracking which all devices have request ready
* @sync_mode : Modified sync mode in which req id in this slot has to be applied
* @sync_mode : Sync mode in which req id in this slot has to be applied
* @additional_timeout : Adjusted watchdog timeout value associated with
* this request
*/
@@ -337,50 +333,6 @@ struct cam_req_mgr_connected_device {
void *parent;
};
/**
* struct cam_req_mgr_sof_time
* - Frame sof time in ns
* @csid_timestamp_ns : CSID SOF timestamp value
* @prev_csid_timestamp_ns : Previous CSID SOF timestamp value
* @boottime_ns : SOF Boottime value
* @last_sof_trigger_jiffies : Record the jiffies of last sof trigger jiffies
*/
struct cam_req_mgr_sof_time {
uint64_t boottime_ns;
uint64_t csid_timestamp_ns;
uint64_t prev_csid_timestamp_ns;
uint64_t last_sof_trigger_jiffies;
};
/**
* struct cam_req_mgr_sync_data
* - Sync link data and properties
* @num_sync_link : total number of sync links
* @sync_link : array of pointer to the sync link for synchronization
* @initial_sync_req : The initial req which is required to sync with the
* other link
* @modified_init_sync_req : Modified initial req which is required to sync
* with the other link
* @sync_link_sof_skip : flag determines if a pkt is not available for a given
* frame in a particular link skip corresponding
* frame in sync link as well.
* @sync_frame_id : current frame id of sync link
* @sof_time : sof timing value in different format
* @is_sync_req : flag used for deciding sync and non-sync
* @sync_mismatch_count : counter to store number of frame sync mismatch
*/
struct cam_req_mgr_sync_data {
int32_t num_sync_link;
struct cam_req_mgr_core_link *sync_link[MAXIMUM_LINKS_PER_SESSION];
int64_t initial_sync_req;
int64_t modified_init_sync_req;
bool sync_link_sof_skip;
uint64_t sync_frame_id;
struct cam_req_mgr_sof_time sof_time;
bool is_sync_req;
int32_t sync_mismatch_count;
};
/**
* struct cam_req_mgr_core_link
* - Link Properties
@@ -404,33 +356,36 @@ struct cam_req_mgr_sync_data {
* @parent : pvt data - link's parent is session
* @lock : mutex lock to guard link data operations
* @link_state_spin_lock : spin lock to protect link state variable
* @sync_data : sync data details needed in sync mode
* @sync_link : array of pointer to the sync link for synchronization
* @num_sync_links : num of links sync associated with this link
* @sync_link_sof_skip : flag determines if a pkt is not available for a given
* frame in a particular link skip corresponding
* frame in sync link as well.
* @open_req_cnt : Counter to keep track of open requests that are yet
* to be serviced in the kernel.
* @last_flush_id : Last request to flush
* @is_used : 1 if link is in use else 0
* @is_master : Based on pd among links, the link with the highest pd
* is assigned as master
* @initial_skip : Flag to determine if initial req need to skip for
* diff pd
* @initial_skip : Flag to determine if slave has started streaming in
* master-slave sync
* @in_msync_mode : Flag to determine if a link is in master-slave mode
* @initial_sync_req : The initial req which is required to sync with the
* other link
* @retry_cnt : Counter that tracks number of attempts to apply
* the same req
* @is_shutdown : Flag to indicate if link needs to be disconnected
* as part of shutdown.
* @sof_timestamp_value : SOF timestamp value
* @prev_sof_timestamp : Previous SOF timestamp value
* @dual_trigger : Links needs to wait for two triggers prior to
* applying the settings
* @trigger_cnt : trigger count value per device initiating the trigger
* @eof_event_cnt : Atomic variable to track the number of EOF requests
* @skip_init_frame : skip initial frames crm_wd_timer validation in the
* case of long exposure use case
* @last_sof_trigger_jiffies : Record the jiffies of last sof trigger jiffies
* @wq_congestion : Indicates if WQ congestion is detected or not
* @activate_seq : sequence in which link is activated
* @frame_id : current frame id
* @skip_apply_count : Counter that track number of frames to skip apply request
* @num_isp_dev : number of isp dev in a link
* @retry_threshold : number of times to retry apply on increased threshold
* @fps : current frame rate
*/
struct cam_req_mgr_core_link {
int32_t link_hdl;
@@ -447,28 +402,28 @@ struct cam_req_mgr_core_link {
void *parent;
struct mutex lock;
spinlock_t link_state_spin_lock;
struct cam_req_mgr_sync_data sync_data;
struct cam_req_mgr_core_link
*sync_link[MAXIMUM_LINKS_PER_SESSION - 1];
int32_t num_sync_links;
bool sync_link_sof_skip;
uint32_t open_req_cnt;
uint32_t last_flush_id;
atomic_t is_used;
bool is_master;
uint32_t initial_skip;
bool initial_skip;
bool in_msync_mode;
int64_t initial_sync_req;
uint32_t retry_cnt;
bool is_shutdown;
uint64_t sof_timestamp;
uint64_t prev_sof_timestamp;
bool dual_trigger;
uint32_t trigger_cnt[CAM_REQ_MGR_MAX_TRIGGERS]
[CAM_TRIGGER_MAX_POINTS + 1];
atomic_t eof_event_cnt;
bool skip_init_frame;
uint64_t last_sof_trigger_jiffies;
bool wq_congestion;
int32_t activate_seq;
uint64_t frame_id;
int32_t skip_apply_count;
bool skip_sync_apply;
uint32_t num_isp_dev;
uint32_t retry_threshold;
int32_t fps;
};
/**
@@ -502,33 +457,11 @@ struct cam_req_mgr_core_session {
* @session_head : list head holding sessions
* @crm_lock : mutex lock to protect session creation & destruction
* @recovery_on_apply_fail : Recovery on apply failure using debugfs.
* @bitmap : bitmap to store index of link
* @max_delay : max pipeline delay in a session
*/
struct cam_req_mgr_core_device {
struct list_head session_head;
struct mutex crm_lock;
bool recovery_on_apply_fail;
DECLARE_BITMAP(bitmap, MAXIMUM_LINKS_PER_SESSION);
uint32_t max_delay;
};
/**
* struct cam_req_mgr_dump_link_data
* - Dump data
* @m_link : master link handle
* @s_link : slave link handle
* @m_req_id : master req id
* @s_req_id : slave req id
* @dev_info : current timing data of slave link
*
*/
struct cam_req_mgr_dump_link_data {
struct cam_req_mgr_core_link *m_link;
struct cam_req_mgr_core_link *s_link;
uint64_t m_req_id;
uint64_t s_req_id;
struct cam_req_mgr_dev_info dev_data;
};
/**
@@ -575,7 +508,6 @@ struct cam_req_mgr_req_data_mini_dump {
* struct cam_req_mgr_core_link_mini_dump
* @workq : Work q information
* @req : req data holder.
* @sof_timestamp : SOF timestamp value
* @initial_sync_req : The initial req which is required to sync with the
* @prev_sof_timestamp : Previous SOF timestamp value
* @last_flush_id : Last request to flush
@@ -604,7 +536,6 @@ struct cam_req_mgr_req_data_mini_dump {
struct cam_req_mgr_core_link_mini_dump {
struct cam_req_mgr_core_workq_mini_dump workq;
struct cam_req_mgr_req_data_mini_dump req;
struct cam_req_mgr_sof_time sof_time;
int64_t initial_sync_req;
uint32_t last_flush_id;
uint32_t is_used;

View file

@@ -12,7 +12,6 @@
#include "cam_req_mgr_util.h"
struct cam_req_mgr_trigger_notify;
struct cam_req_mgr_notify_rup;
struct cam_req_mgr_error_notify;
struct cam_req_mgr_add_request;
struct cam_req_mgr_timer_notify;
@@ -23,23 +22,19 @@ struct cam_req_mgr_apply_request;
struct cam_req_mgr_flush_request;
struct cam_req_mgr_link_evt_data;
struct cam_req_mgr_dump_info;
struct cam_req_mgr_request_change_state;
struct cam_req_mgr_signal_info;
/* Request Manager -- camera device driver interface */
/**
* @brief: camera kernel drivers to cam req mgr communication
*
* @cam_req_mgr_notify_trigger: for device which generates trigger to inform CRM
* @cam_req_mgr_notify_rup : for device which generates reg update trigger to inform CRM
* @cam_req_mgr_notify_err : device use this to inform about different errors
* @cam_req_mgr_add_req : to inform CRM about new request received from
* userspace
* @cam_req_mgr_notify_timer : start the timer
*/
typedef int (*cam_req_mgr_notify_trigger)(struct cam_req_mgr_trigger_notify *);
typedef int (*cam_req_mgr_notify_rup)(struct cam_req_mgr_notify_rup *);
typedef bool (*cam_req_mgr_notify_err)(struct cam_req_mgr_error_notify *);
typedef int (*cam_req_mgr_notify_err)(struct cam_req_mgr_error_notify *);
typedef int (*cam_req_mgr_add_req)(struct cam_req_mgr_add_request *);
typedef int (*cam_req_mgr_notify_timer)(struct cam_req_mgr_timer_notify *);
typedef int (*cam_req_mgr_notify_stop)(struct cam_req_mgr_notify_stop *);
@@ -53,9 +48,8 @@ typedef int (*cam_req_mgr_notify_stop)(struct cam_req_mgr_notify_stop *);
* @cam_req_mgr_notify_frame_skip: CRM asks device to apply setting for
* frame skip
* @cam_req_mgr_flush_req : Flush or cancel request
* @cam_req_mgr_process_evt : generic events
* cam_req_mgr_process_evt : generic events
* @cam_req_mgr_dump_req : dump request
* @cam_req_mgr_change_state : CRM asks device to change its state
*/
typedef int (*cam_req_mgr_get_dev_info) (struct cam_req_mgr_device_info *);
typedef int (*cam_req_mgr_link_setup)(struct cam_req_mgr_core_dev_link_setup *);
@@ -65,8 +59,6 @@ typedef int (*cam_req_mgr_notify_frame_skip)(
typedef int (*cam_req_mgr_flush_req)(struct cam_req_mgr_flush_request *);
typedef int (*cam_req_mgr_process_evt)(struct cam_req_mgr_link_evt_data *);
typedef int (*cam_req_mgr_dump_req)(struct cam_req_mgr_dump_info *);
typedef int (*cam_req_mgr_change_state)(struct cam_req_mgr_request_change_state *);
typedef int (*cam_req_mgr_signal_buf_done)(struct cam_req_mgr_signal_info *);
/**
* @brief : cam_req_mgr_crm_cb - func table
@@ -76,7 +68,6 @@ typedef int (*cam_req_mgr_signal_buf_done)(struct cam_req_mgr_signal_info *);
* @add_req : payload to inform which device and what request is received
* @notify_timer : payload for timer start event
* @notify_stop : payload to inform stop event
* @notify_rup : payload to inform reg update ack
*/
struct cam_req_mgr_crm_cb {
cam_req_mgr_notify_trigger notify_trigger;
@@ -84,7 +75,6 @@ struct cam_req_mgr_crm_cb {
cam_req_mgr_add_req add_req;
cam_req_mgr_notify_timer notify_timer;
cam_req_mgr_notify_stop notify_stop;
cam_req_mgr_notify_rup notify_rup;
};
/**
@@ -97,7 +87,6 @@ struct cam_req_mgr_crm_cb {
* @flush_req : payload to flush request
* @process_evt : payload to generic event
* @dump_req : payload to dump request
* @change_state : payload to change state
*/
struct cam_req_mgr_kmd_ops {
cam_req_mgr_get_dev_info get_dev_info;
@@ -107,8 +96,6 @@ struct cam_req_mgr_kmd_ops {
cam_req_mgr_flush_req flush_req;
cam_req_mgr_process_evt process_evt;
cam_req_mgr_dump_req dump_req;
cam_req_mgr_change_state change_state;
cam_req_mgr_signal_buf_done signal_buf_done;
};
/**
@@ -227,17 +214,6 @@ enum cam_req_mgr_link_evt_type {
CAM_REQ_MGR_LINK_EVT_MAX,
};
/**
* enum cam_req_mgr_buf_done_state
* @INIT_BUF_DONE : Initial buf done state of a request
* @SIGNAL_SYNC_BUF_DONE : Ready to signal buf done of a request if in sync mode
* @SIGNAL_NON_SYNC_BUF_DONE : Ready to signal buf done of a request if in non sync mode
*/
enum cam_req_mgr_buf_done_state {
INIT_BUF_DONE,
SIGNAL_SYNC_BUF_DONE,
SIGNAL_NON_SYNC_BUF_DONE,
};
/**
* struct cam_req_mgr_trigger_notify
* @link_hdl : link identifier
@@ -246,10 +222,8 @@ enum cam_req_mgr_buf_done_state {
* @trigger : trigger point of this notification, CRM will send apply
* only to the devices which subscribe to this point.
* @sof_timestamp_val: Captured time stamp value at sof hw event
* @sof_boottime : Captured boot time stamp value at sof hw event
* @req_id : req id which returned buf_done
* @trigger_id: ID to differentiate between the trigger devices
* @fps : Current fps value
*/
struct cam_req_mgr_trigger_notify {
int32_t link_hdl;
@@ -257,22 +231,8 @@ struct cam_req_mgr_trigger_notify {
int64_t frame_id;
uint32_t trigger;
uint64_t sof_timestamp_val;
uint64_t sof_boottime;
uint64_t req_id;
int32_t trigger_id;
int32_t fps;
};
/**
* struct cam_req_mgr_notify_rup
* @link_hdl : link identifier
* @req_id : req id which returned reg update ack
* @state : buf done ready state of the request
*/
struct cam_req_mgr_notify_rup {
int32_t link_hdl;
uint64_t req_id;
int32_t state;
};
/**
@@ -295,8 +255,6 @@ struct cam_req_mgr_timer_notify {
* @frame_id : frame id for internal tracking
* @trigger : trigger point of this notification, CRM will send apply
* @sof_timestamp_val : Captured time stamp value at sof hw event
* @sof_boottime_val : Captured boottime stamp value at sof hw event
* @need_recovery : flag to check if recovery is needed
* @error : what error device hit while processing this req
*/
struct cam_req_mgr_error_notify {
@@ -306,8 +264,6 @@ struct cam_req_mgr_error_notify {
int64_t frame_id;
uint32_t trigger;
uint64_t sof_timestamp_val;
uint64_t sof_boottime_val;
bool need_recovery;
enum cam_req_mgr_device_error error;
};
@@ -352,7 +308,6 @@ struct cam_req_mgr_notify_stop {
* @p_delay : delay between time settings applied and take effect
* @trigger : Trigger point for the client
* @trigger_on : This device provides trigger
* @sof_ts_cb : callback to real time drivers
*/
struct cam_req_mgr_device_info {
int32_t dev_hdl;
@@ -361,7 +316,6 @@ struct cam_req_mgr_device_info {
enum cam_pipeline_delay p_delay;
uint32_t trigger;
bool trigger_on;
int32_t (*sof_ts_cb)(int32_t dev_hdl, void *data);
};
/**
@@ -390,7 +344,6 @@ struct cam_req_mgr_core_dev_link_setup {
* @report_if_bubble : report to crm if failure in applying
* @trigger_point : the trigger point of this apply
* @re_apply : to skip re_apply for buf_done request
* @is_sync_mode : if request need to be apply in sync with other link
*
*/
struct cam_req_mgr_apply_request {
@@ -400,7 +353,6 @@ struct cam_req_mgr_apply_request {
int32_t report_if_bubble;
uint32_t trigger_point;
bool re_apply;
bool is_sync_mode;
};
/**
@@ -462,54 +414,4 @@ struct cam_req_mgr_dump_info {
int32_t link_hdl;
int32_t dev_hdl;
};
/**
* struct cam_req_mgr_dev_info
* @link_hdl : link identifier
* @state : Current substate for the activated state.
* @timestamp : time stamp for the sof event
* @boot_time : boot time stamp for the sof event
* @frame_id : frame id
* @bubble_req : request id for which bubble is detected
* @is_applied : if ISP is in applied state
*
*/
struct cam_req_mgr_dev_info {
int32_t link_hdl;
uint32_t state;
uint64_t timestamp;
uint64_t boot_time;
uint64_t frame_id;
uint64_t bubble_req;
bool is_applied;
};
/**
* struct cam_req_mgr_request_change_state
* @link_hdl : link identifier
* @dev_hdl : device handle or identifier
* @req_id : request id
*
*/
struct cam_req_mgr_request_change_state {
int32_t link_hdl;
int32_t dev_hdl;
uint64_t req_id;
};
/**
* struct cam_req_mgr_signal_info
* @link_hdl : link identifier
* @dev_hdl : device handle or identifier
* req_id : request id to be set for buf done ready
* state : Buf done ready state of a request
*
*/
struct cam_req_mgr_signal_info {
int32_t link_hdl;
int32_t dev_hdl;
uint64_t req_id;
int32_t state;
};
#endif

View file

@@ -410,7 +410,6 @@ int32_t cam_actuator_publish_dev_info(struct cam_req_mgr_device_info *info)
strlcpy(info->name, CAM_ACTUATOR_NAME, sizeof(info->name));
info->p_delay = 1;
info->trigger = CAM_TRIGGER_POINT_SOF;
info->sof_ts_cb = NULL;
return 0;
}

View file

@@ -1781,8 +1781,6 @@ int cam_flash_publish_dev_info(struct cam_req_mgr_device_info *info)
strlcpy(info->name, CAM_FLASH_NAME, sizeof(info->name));
info->p_delay = CAM_FLASH_PIPELINE_DELAY;
info->trigger = CAM_TRIGGER_POINT_SOF;
info->sof_ts_cb = NULL;
return 0;
}

View file

@@ -1258,7 +1258,6 @@ int cam_sensor_publish_dev_info(struct cam_req_mgr_device_info *info)
else
info->p_delay = 2;
info->trigger = CAM_TRIGGER_POINT_SOF;
info->sof_ts_cb = NULL;
return rc;
}

View file

@@ -836,18 +836,6 @@ struct cam_isp_acquire_hw_info {
__u64 data;
};
/**
* struct cam_fps_config - FPS info per request
*
* @fps : Fps value
* @reserved: : Reserved field for alignment
*
*/
struct cam_fps_config {
__u32 fps;
__u32 reserved_params[3];
} __attribute__((packed));
/**
* struct cam_isp_vfe_wm_config - VFE write master config per port
*

View file

@@ -84,13 +84,9 @@
* @CAM_REQ_MGR_SYNC_MODE_NO_SYNC: Req mgr will apply non-sync mode for this
* request.
* @CAM_REQ_MGR_SYNC_MODE_SYNC: Req mgr will apply sync mode for this request.
* @CAM_REQ_MGR_SYNC_MODE_INITIAL_SYNC: Req mgr will apply initial sync
* mode for this request. Only first few request before sync mode will apply
* transition sync mode.
*/
#define CAM_REQ_MGR_SYNC_MODE_NO_SYNC 0
#define CAM_REQ_MGR_SYNC_MODE_SYNC 1
#define CAM_REQ_MGR_SYNC_MODE_INITIAL_SYNC 2
/**
* struct cam_req_mgr_event_data