msm: camera: isp: Add debug info in ISP state machine

Monitor the frame events that drive the state machine.
To better understand what led to the state machine stalling, update
the debug info that is logged on errors.

CRs-Fixed: 3309151
Change-Id: Iccf0efd82069b342e5d4b1731292604d37263b0b
Signed-off-by: Karthik Anantha Ram <quic_kartanan@quicinc.com>
Authored by Karthik Anantha Ram on 2021-10-21 21:08:33 -07:00; committed by Camera Software Integration
parent ba3f945f46, commit 81a980cf07
6 changed files with 300 additions and 97 deletions
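The heart of the change is a per-context ring of frame timing records: an IFE SOF opens a new slot, and the epoch, EOF and secondary-SOF timestamps for that same frame land in the slot the SOF opened. Below is a minimal userspace sketch of that recording pattern, assuming a single writer; the names (frame_record, FRAME_RECORDS_MAX, record_frame_event) are illustrative rather than the driver's identifiers, and CLOCK_MONOTONIC stands in for the driver's boot-timestamp helper.

#define _POSIX_C_SOURCE 199309L
#include <stdint.h>
#include <time.h>

#define FRAME_RECORDS_MAX 10	/* illustrative ring size, not the driver's constant */

enum frame_evt { EVT_SOF, EVT_EPOCH, EVT_EOF };

struct frame_record {
	struct timespec sof_ts;
	struct timespec epoch_ts;
	struct timespec eof_ts;
};

static struct frame_record frame_ring[FRAME_RECORDS_MAX];
static int64_t frame_head = -1;		/* -1 means nothing recorded yet */

/* Stamp one hardware event into the ring. Only SOF opens a new slot;
 * EPOCH and EOF are written into the slot of the most recent SOF. */
static void record_frame_event(enum frame_evt evt)
{
	uint32_t index;

	if (evt == EVT_SOF)
		frame_head++;			/* primary event: advance the head */
	else if (frame_head < 0)
		return;				/* ignore events before the first SOF */

	index = (uint32_t)(frame_head % FRAME_RECORDS_MAX);

	switch (evt) {
	case EVT_SOF:
		clock_gettime(CLOCK_MONOTONIC, &frame_ring[index].sof_ts);
		break;
	case EVT_EPOCH:
		clock_gettime(CLOCK_MONOTONIC, &frame_ring[index].epoch_ts);
		break;
	case EVT_EOF:
		clock_gettime(CLOCK_MONOTONIC, &frame_ring[index].eof_ts);
		break;
	}
}

int main(void)
{
	/* simulate enough frames that the ring wraps at least once */
	for (int f = 0; f < FRAME_RECORDS_MAX + 2; f++) {
		record_frame_event(EVT_SOF);
		record_frame_event(EVT_EPOCH);
		record_frame_event(EVT_EOF);
	}
	return 0;
}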


@@ -102,18 +102,18 @@ static void __cam_isp_ctx_update_event_record(
break;
}
INC_HEAD(&ctx_isp->event_record_head[event],
INC_HEAD(&ctx_isp->dbg_monitors.event_record_head[event],
CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES, &iterator);
cur_time = ktime_get();
if (req) {
req_isp = (struct cam_isp_ctx_req *) req->req_priv;
ctx_isp->event_record[event][iterator].req_id =
ctx_isp->dbg_monitors.event_record[event][iterator].req_id =
req->request_id;
req_isp->event_timestamp[event] = cur_time;
} else {
ctx_isp->event_record[event][iterator].req_id = 0;
ctx_isp->dbg_monitors.event_record[event][iterator].req_id = 0;
}
ctx_isp->event_record[event][iterator].timestamp = cur_time;
ctx_isp->dbg_monitors.event_record[event][iterator].timestamp = cur_time;
}
static int __cam_isp_ctx_handle_sof_freeze_evt(
@@ -169,7 +169,7 @@ static int __cam_isp_ctx_print_event_record(struct cam_isp_context *ctx_isp)
struct cam_context *ctx = ctx_isp->base;
for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++) {
state_head = atomic64_read(&ctx_isp->event_record_head[i]);
state_head = atomic64_read(&ctx_isp->dbg_monitors.event_record_head[i]);
if (state_head == -1) {
return 0;
@@ -188,7 +188,7 @@ static int __cam_isp_ctx_print_event_record(struct cam_isp_context *ctx_isp)
memset(buf, 0, CAM_ISP_CONTEXT_DBG_BUF_LEN);
for (j = 0; j < num_entries; j++) {
record = &ctx_isp->event_record[i][index];
record = &ctx_isp->dbg_monitors.event_record[i][index];
ts = ktime_to_timespec64(record->timestamp);
len += scnprintf(buf + len, CAM_ISP_CONTEXT_DBG_BUF_LEN - len,
"%llu[%lld:%06lld] ", record->req_id, ts.tv_sec,
@@ -222,19 +222,19 @@ static int __cam_isp_ctx_dump_event_record(
}
for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++) {
state_head = atomic64_read(&ctx_isp->event_record_head[i]);
state_head = atomic64_read(&ctx_isp->dbg_monitors.event_record_head[i]);
if (state_head == -1) {
if (state_head == -1)
return 0;
} else if (state_head < CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES) {
else if (state_head < CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES) {
num_entries = state_head + 1;
oldest_entry = 0;
} else {
num_entries = CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES;
div_u64_rem(state_head + 1,
CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES,
div_u64_rem(state_head + 1, CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES,
&oldest_entry);
}
index = oldest_entry;
if (dump_args->buf_len <= dump_args->offset) {
@@ -254,9 +254,8 @@ static int __cam_isp_ctx_dump_event_record(
remain_len, min_len);
return -ENOSPC;
}
for (j = 0; j < num_entries; j++) {
record = &ctx_isp->event_record[i][index];
record = &ctx_isp->dbg_monitors.event_record[i][index];
rc = cam_common_user_dump_helper(dump_args, cam_isp_ctx_user_dump_events,
record, sizeof(uint64_t), "ISP_EVT_%s:",
@@ -267,8 +266,8 @@ static int __cam_isp_ctx_dump_event_record(
rc);
return rc;
}
index = (index + 1) %
CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES;
index = (index + 1) % CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES;
}
}
return rc;
@@ -402,7 +401,7 @@ static int __cam_isp_ctx_minidump_cb(void *priv, void *args)
md->rxd_epoch = atomic_read(&ctx_isp->rxd_epoch);
for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++) {
memcpy(md->event_record[i], ctx_isp->event_record[i],
memcpy(md->event_record[i], ctx_isp->dbg_monitors.event_record[i],
sizeof(struct cam_isp_context_event_record) *
CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES);
}
@@ -465,17 +464,18 @@ static void __cam_isp_ctx_update_state_monitor_array(
int iterator;
struct cam_context *ctx = ctx_isp->base;
INC_HEAD(&ctx_isp->state_monitor_head,
INC_HEAD(&ctx_isp->dbg_monitors.state_monitor_head,
CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES, &iterator);
ctx_isp->cam_isp_ctx_state_monitor[iterator].curr_state =
ctx_isp->dbg_monitors.state_monitor[iterator].curr_state =
ctx_isp->substate_activated;
ctx_isp->cam_isp_ctx_state_monitor[iterator].frame_id =
ctx_isp->dbg_monitors.state_monitor[iterator].frame_id =
ctx_isp->frame_id;
ctx_isp->cam_isp_ctx_state_monitor[iterator].trigger =
ctx_isp->dbg_monitors.state_monitor[iterator].trigger =
trigger_type;
ctx_isp->cam_isp_ctx_state_monitor[iterator].req_id =
ctx_isp->dbg_monitors.state_monitor[iterator].req_id =
req_id;
if (trigger_type == CAM_ISP_STATE_CHANGE_TRIGGER_CDM_DONE)
ctx_isp->cam_isp_ctx_state_monitor[iterator].evt_time_stamp =
ctx->cdm_done_ts;
@@ -484,6 +484,79 @@ static void __cam_isp_ctx_update_state_monitor_array(
&ctx_isp->cam_isp_ctx_state_monitor[iterator].evt_time_stamp);
}
static int __cam_isp_ctx_update_frame_timing_record(
enum cam_isp_hw_event_type hw_evt, struct cam_isp_context *ctx_isp)
{
uint32_t index = 0;
/* Update event index on IFE SOF - primary event */
if (hw_evt == CAM_ISP_HW_EVENT_SOF)
INC_HEAD(&ctx_isp->dbg_monitors.frame_monitor_head,
CAM_ISP_CTX_MAX_FRAME_RECORDS, &index);
else
div_u64_rem(atomic64_read(&ctx_isp->dbg_monitors.frame_monitor_head),
CAM_ISP_CTX_MAX_FRAME_RECORDS, &index);
switch (hw_evt) {
case CAM_ISP_HW_EVENT_SOF:
CAM_GET_BOOT_TIMESTAMP(ctx_isp->dbg_monitors.frame_monitor[index].sof_ts);
break;
case CAM_ISP_HW_EVENT_EOF:
CAM_GET_BOOT_TIMESTAMP(ctx_isp->dbg_monitors.frame_monitor[index].eof_ts);
break;
case CAM_ISP_HW_EVENT_EPOCH:
CAM_GET_BOOT_TIMESTAMP(ctx_isp->dbg_monitors.frame_monitor[index].epoch_ts);
break;
case CAM_ISP_HW_SECONDARY_EVENT:
CAM_GET_BOOT_TIMESTAMP(ctx_isp->dbg_monitors.frame_monitor[index].secondary_sof_ts);
break;
default:
break;
}
return 0;
}
static void __cam_isp_ctx_dump_frame_timing_record(
struct cam_isp_context *ctx_isp)
{
int i = 0;
int64_t state_head = 0;
uint32_t index, num_entries, oldest_entry;
state_head = atomic64_read(&ctx_isp->dbg_monitors.frame_monitor_head);
if (state_head == -1)
return;
if (state_head < CAM_ISP_CTX_MAX_FRAME_RECORDS) {
num_entries = state_head + 1;
oldest_entry = 0;
} else {
num_entries = CAM_ISP_CTX_MAX_FRAME_RECORDS;
div_u64_rem(state_head + 1, CAM_ISP_CTX_MAX_FRAME_RECORDS,
&oldest_entry);
}
index = oldest_entry;
for (i = 0; i < num_entries; i++) {
CAM_INFO(CAM_ISP,
"Index: %u SOF_TS: %llu.%llu EPOCH_TS: %llu.%llu EOF_TS: %llu.%llu SEC_SOF: %llu.%llu",
index,
ctx_isp->dbg_monitors.frame_monitor[index].sof_ts.tv_sec,
ctx_isp->dbg_monitors.frame_monitor[index].sof_ts.tv_nsec / NSEC_PER_USEC,
ctx_isp->dbg_monitors.frame_monitor[index].epoch_ts.tv_sec,
ctx_isp->dbg_monitors.frame_monitor[index].epoch_ts.tv_nsec / NSEC_PER_USEC,
ctx_isp->dbg_monitors.frame_monitor[index].eof_ts.tv_sec,
ctx_isp->dbg_monitors.frame_monitor[index].eof_ts.tv_nsec / NSEC_PER_USEC,
ctx_isp->dbg_monitors.frame_monitor[index].secondary_sof_ts.tv_sec,
ctx_isp->dbg_monitors.frame_monitor[index].secondary_sof_ts.tv_nsec /
NSEC_PER_USEC);
index = (index + 1) % CAM_ISP_CTX_MAX_FRAME_RECORDS;
}
}
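On error, the new __cam_isp_ctx_dump_frame_timing_record walks the ring starting from its oldest valid entry. The index arithmetic is the same pattern the state and event monitors already use: a head counter that only grows, -1 meaning empty, the entry count capped at the ring size, and the oldest slot derived from head + 1 modulo the size. A small standalone sketch of just that arithmetic, with illustrative names and a plain % where the kernel code uses div_u64_rem:

#include <stdint.h>
#include <stdio.h>

#define FRAME_RECORDS_MAX 10	/* illustrative ring size */

/* Given the ever-growing head counter (-1 when empty), work out how many
 * entries are valid and which slot is the oldest, then walk the ring once
 * in chronological order. */
static void dump_frame_ring(int64_t head)
{
	uint32_t num_entries, oldest, index, i;

	if (head == -1)
		return;					/* nothing recorded yet */

	if (head < FRAME_RECORDS_MAX) {
		num_entries = (uint32_t)head + 1;	/* ring has not wrapped yet */
		oldest = 0;
	} else {
		num_entries = FRAME_RECORDS_MAX;	/* full ring */
		oldest = (uint32_t)((head + 1) % FRAME_RECORDS_MAX);
	}

	index = oldest;
	for (i = 0; i < num_entries; i++) {
		printf("slot %u\n", index);		/* the driver prints the per-event stamps here */
		index = (index + 1) % FRAME_RECORDS_MAX;
	}
}

int main(void)
{
	dump_frame_ring(3);	/* partially filled: prints slots 0..3 */
	dump_frame_ring(27);	/* wrapped: prints all 10 slots starting at slot 8 */
	return 0;
}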
static const char *__cam_isp_ctx_substate_val_to_type(
enum cam_isp_ctx_activated_substate type)
{
@@ -548,17 +621,18 @@ static void __cam_isp_ctx_dump_state_monitor_array(
uint32_t index, num_entries, oldest_entry;
struct tm ts;
state_head = atomic64_read(&ctx_isp->state_monitor_head);
state_head = atomic64_read(&ctx_isp->dbg_monitors.state_monitor_head);
if (state_head == -1) {
if (state_head == -1)
return;
} else if (state_head < CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES) {
num_entries = state_head;
if (state_head < CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES) {
num_entries = state_head + 1;
oldest_entry = 0;
} else {
num_entries = CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES;
div_u64_rem(state_head + 1,
CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES, &oldest_entry);
div_u64_rem(state_head + 1, CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES,
&oldest_entry);
}
CAM_ERR(CAM_ISP,
@@ -570,16 +644,15 @@ static void __cam_isp_ctx_dump_state_monitor_array(
time64_to_tm(ctx_isp->cam_isp_ctx_state_monitor[index].evt_time_stamp.tv_sec,
0, &ts);
CAM_ERR(CAM_ISP,
"Idx[%d] time[%d-%d %d:%d:%d.%lld]:Substate[%s] Frame[%lld] Req[%llu] evt[%s]",
index,
ts.tm_mon + 1, ts.tm_mday, ts.tm_hour, ts.tm_min, ts.tm_sec,
ctx_isp->cam_isp_ctx_state_monitor[index].evt_time_stamp.tv_nsec / 1000000,
__cam_isp_ctx_substate_val_to_type(
ctx_isp->cam_isp_ctx_state_monitor[index].curr_state),
ctx_isp->cam_isp_ctx_state_monitor[index].frame_id,
ctx_isp->cam_isp_ctx_state_monitor[index].req_id,
__cam_isp_hw_evt_val_to_type(
ctx_isp->cam_isp_ctx_state_monitor[index].trigger));
"Idx[%d] time[%d-%d %d:%d:%d.%lld]:Substate[%s] Frame[%lld] Req[%llu] evt[%s]",
index, ts.tm_mon + 1, ts.tm_mday, ts.tm_hour, ts.tm_min, ts.tm_sec,
ctx_isp->cam_isp_ctx_state_monitor[index].evt_time_stamp.tv_nsec / 1000000,
__cam_isp_ctx_substate_val_to_type(
ctx_isp->cam_isp_ctx_state_monitor[index].curr_state),
ctx_isp->cam_isp_ctx_state_monitor[index].frame_id,
ctx_isp->cam_isp_ctx_state_monitor[index].req_id,
__cam_isp_hw_evt_val_to_type(
ctx_isp->cam_isp_ctx_state_monitor[index].trigger));
index = (index + 1) % CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES;
}
@@ -610,7 +683,7 @@ static int __cam_isp_ctx_user_dump_state_monitor_array(
int index;
uint32_t oldest_entry;
uint32_t num_entries;
uint64_t state_head;
uint64_t state_head = 0;
if (!dump_args || !ctx_isp) {
CAM_ERR(CAM_ISP, "Invalid args %pK %pK",
@@ -618,7 +691,7 @@ static int __cam_isp_ctx_user_dump_state_monitor_array(
return -EINVAL;
}
state_head = atomic64_read(&ctx_isp->state_monitor_head);
state_head = atomic64_read(&ctx_isp->dbg_monitors.state_monitor_head);
if (state_head == -1) {
return 0;
@@ -638,12 +711,12 @@ static int __cam_isp_ctx_user_dump_state_monitor_array(
rc = cam_common_user_dump_helper(dump_args,
cam_isp_ctx_user_dump_state_monitor_array_info,
&ctx_isp->cam_isp_ctx_state_monitor[index],
&ctx_isp->dbg_monitors.state_monitor[index],
sizeof(uint64_t), "ISP_STATE_MONITOR.%s.%s:",
__cam_isp_ctx_substate_val_to_type(
ctx_isp->cam_isp_ctx_state_monitor[index].curr_state),
ctx_isp->dbg_monitors.state_monitor[index].curr_state),
__cam_isp_hw_evt_val_to_type(
ctx_isp->cam_isp_ctx_state_monitor[index].trigger));
ctx_isp->dbg_monitors.state_monitor[index].trigger));
if (rc) {
CAM_ERR(CAM_ISP, "CAM ISP CONTEXT: Event record dump failed, rc: %d", rc);
@@ -1520,7 +1593,11 @@ static void __cam_isp_ctx_handle_buf_done_fail_log(
}
}
if (!ctx_isp->sof_dbg_irq_en)
ctx_isp->congestion_cnt++;
/* Trigger SOF freeze debug dump on 3 or greater instances of congestion */
if ((ctx_isp->congestion_cnt >= CAM_ISP_CONTEXT_CONGESTION_CNT_MAX) &&
(!ctx_isp->sof_dbg_irq_en))
__cam_isp_ctx_handle_sof_freeze_evt(ctx);
}
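The congestion counter added in this hunk is a simple debounce: while the SOF debug IRQs are still off, every buf-done failure bumps the count, and the heavyweight SOF-freeze dump fires only once the count reaches the threshold (the patch comment puts it at three or more). A clean apply, flush or stop resets the count, as the later hunks show. A standalone sketch of that flow; the struct and function names are illustrative, and having the dump path set sof_dbg_irq_en so it does not re-trigger is an assumption made for the example.

#include <stdbool.h>
#include <stdio.h>

#define CONGESTION_CNT_MAX 3	/* assumed threshold, per the patch comment */

struct isp_ctx_dbg {
	unsigned int congestion_cnt;
	bool sof_dbg_irq_en;	/* true once the SOF-freeze debug path has armed its IRQs */
};

static void handle_sof_freeze_dump(struct isp_ctx_dbg *ctx)
{
	printf("dumping SOF freeze debug info\n");
	ctx->sof_dbg_irq_en = true;	/* assumed: stop re-triggering on later failures */
}

/* Called on a buf-done failure: count congested frames and only fire the
 * heavyweight dump once the threshold is crossed. */
static void on_buf_done_fail(struct isp_ctx_dbg *ctx)
{
	if (!ctx->sof_dbg_irq_en)
		ctx->congestion_cnt++;

	if (ctx->congestion_cnt >= CONGESTION_CNT_MAX && !ctx->sof_dbg_irq_en)
		handle_sof_freeze_dump(ctx);
}

/* Called when a request applies cleanly (or on flush/stop): the pipeline
 * recovered, so the count starts over. */
static void on_apply_success(struct isp_ctx_dbg *ctx)
{
	ctx->congestion_cnt = 0;
}

int main(void)
{
	struct isp_ctx_dbg ctx = { 0 };

	on_buf_done_fail(&ctx);
	on_buf_done_fail(&ctx);
	on_apply_success(&ctx);	/* recovery resets the count */
	on_buf_done_fail(&ctx);
	on_buf_done_fail(&ctx);
	on_buf_done_fail(&ctx);	/* third consecutive failure triggers the dump */
	return 0;
}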
@@ -1531,6 +1608,7 @@ static void __cam_isp_context_reset_internal_recovery_params(
atomic_set(&ctx_isp->process_bubble, 0);
ctx_isp->aeb_error_cnt = 0;
ctx_isp->bubble_frame_cnt = 0;
ctx_isp->congestion_cnt = 0;
ctx_isp->sof_dbg_irq_en = false;
}
@@ -3744,6 +3822,8 @@ static int __cam_isp_ctx_handle_error(struct cam_isp_context *ctx_isp,
if (!ctx_isp->offline_context)
__cam_isp_ctx_pause_crm_timer(ctx);
__cam_isp_ctx_dump_frame_timing_record(ctx_isp);
__cam_isp_ctx_trigger_reg_dump(CAM_HW_MGR_CMD_REG_DUMP_ON_ERROR, ctx);
__cam_isp_get_notification_evt_params(error_event_data->error_type,
@@ -4162,6 +4242,9 @@ static void __cam_isp_ctx_notify_aeb_error_for_sec_event(
/* Change state to HALT, stop further processing of HW events */
ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
/* Dump AEB debug info */
__cam_isp_ctx_dump_frame_timing_record(ctx_isp);
}
static int __cam_isp_ctx_trigger_internal_recovery(
@@ -4280,6 +4363,8 @@ static int __cam_isp_ctx_handle_secondary_events(
CAM_ISP_STATE_CHANGE_TRIGGER_SEC_EVT_SOF,
ctx_isp->last_applied_req_id);
__cam_isp_ctx_update_frame_timing_record(CAM_ISP_HW_SECONDARY_EVENT, ctx_isp);
/* Slave RDI's frame starting post IFE EPOCH - Fatal */
if ((ctx_isp->substate_activated ==
CAM_ISP_CTX_ACTIVATED_APPLIED) ||
@@ -4696,6 +4781,10 @@ static int __cam_isp_ctx_apply_req_in_activated_state(
rc = -EFAULT;
goto end;
}
/* Reset congestion counter */
ctx_isp->congestion_cnt = 0;
req_isp->bubble_report = apply->report_if_bubble;
/*
@@ -5481,6 +5570,7 @@ static int __cam_isp_ctx_flush_req_in_top_state(
end:
ctx_isp->bubble_frame_cnt = 0;
ctx_isp->congestion_cnt = 0;
ctx_isp->sof_dbg_irq_en = false;
atomic_set(&ctx_isp->process_bubble, 0);
atomic_set(&ctx_isp->rxd_epoch, 0);
@@ -6288,10 +6378,11 @@ static int __cam_isp_ctx_release_hw_in_top_state(struct cam_context *ctx,
ctx_isp->vfe_bus_comp_grp = NULL;
ctx_isp->sfe_bus_comp_grp = NULL;
atomic64_set(&ctx_isp->state_monitor_head, -1);
atomic64_set(&ctx_isp->dbg_monitors.state_monitor_head, -1);
atomic64_set(&ctx_isp->dbg_monitors.frame_monitor_head, -1);
for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
atomic64_set(&ctx_isp->event_record_head[i], -1);
atomic64_set(&ctx_isp->dbg_monitors.event_record_head[i], -1);
/*
* Ideally, we should never have any active request here.
* But we still add some sanity check code here to help the debug
@@ -6366,9 +6457,10 @@ static int __cam_isp_ctx_release_dev_in_top_state(struct cam_context *ctx,
ctx_isp->v4l2_event_sub_ids = 0;
ctx_isp->resume_hw_in_flushed = false;
atomic64_set(&ctx_isp->state_monitor_head, -1);
atomic64_set(&ctx_isp->dbg_monitors.state_monitor_head, -1);
atomic64_set(&ctx_isp->dbg_monitors.frame_monitor_head, -1);
for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
atomic64_set(&ctx_isp->event_record_head[i], -1);
atomic64_set(&ctx_isp->dbg_monitors.event_record_head[i], -1);
/*
* Ideally, we should never have any active request here.
* But we still add some sanity check code here to help the debug
@@ -6862,9 +6954,10 @@ static int __cam_isp_ctx_acquire_dev_in_available(struct cam_context *ctx,
ctx_isp->hw_acquired = true;
ctx_isp->split_acquire = false;
ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;
atomic64_set(&ctx_isp->state_monitor_head, -1);
atomic64_set(&ctx_isp->dbg_monitors.state_monitor_head, -1);
atomic64_set(&ctx_isp->dbg_monitors.frame_monitor_head, -1);
for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
atomic64_set(&ctx_isp->event_record_head[i], -1);
atomic64_set(&ctx_isp->dbg_monitors.event_record_head[i], -1);
CAM_INFO(CAM_ISP, "Ctx_type: %u, ctx_id: %u, hw_mgr_ctx: %u", isp_hw_cmd_args.u.ctx_type,
ctx->ctx_id, param.hw_mgr_ctx_id);
@@ -7052,10 +7145,11 @@ static int __cam_isp_ctx_acquire_hw_v1(struct cam_context *ctx,
ctx_isp->hw_acquired = true;
ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;
atomic64_set(&ctx_isp->state_monitor_head, -1);
atomic64_set(&ctx_isp->dbg_monitors.state_monitor_head, -1);
atomic64_set(&ctx_isp->dbg_monitors.frame_monitor_head, -1);
for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
atomic64_set(&ctx_isp->event_record_head[i], -1);
atomic64_set(&ctx_isp->dbg_monitors.event_record_head[i], -1);
trace_cam_context_state("ISP", ctx);
CAM_INFO(CAM_ISP,
@@ -7530,6 +7624,7 @@ static inline void __cam_isp_context_reset_ctx_params(
ctx_isp->reported_req_id = 0;
ctx_isp->reported_frame_id = 0;
ctx_isp->bubble_frame_cnt = 0;
ctx_isp->congestion_cnt = 0;
ctx_isp->recovery_req_id = 0;
ctx_isp->aeb_error_cnt = 0;
ctx_isp->out_of_sync_cnt = 0;
@@ -7598,10 +7693,11 @@ static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
(req_isp->num_fence_map_out) ? CAM_ISP_CTX_ACTIVATED_EPOCH :
CAM_ISP_CTX_ACTIVATED_SOF;
atomic64_set(&ctx_isp->state_monitor_head, -1);
atomic64_set(&ctx_isp->dbg_monitors.state_monitor_head, -1);
atomic64_set(&ctx_isp->dbg_monitors.frame_monitor_head, -1);
for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
atomic64_set(&ctx_isp->event_record_head[i], -1);
atomic64_set(&ctx_isp->dbg_monitors.event_record_head[i], -1);
/*
* In case of CSID TPG we might receive SOF and RUP IRQs
@@ -7777,14 +7873,16 @@ static int __cam_isp_ctx_stop_dev_in_activated_unlock(
ctx_isp->last_applied_req_id = 0;
ctx_isp->req_info.last_bufdone_req_id = 0;
ctx_isp->bubble_frame_cnt = 0;
ctx_isp->congestion_cnt = 0;
ctx_isp->sof_dbg_irq_en = false;
atomic_set(&ctx_isp->process_bubble, 0);
atomic_set(&ctx_isp->internal_recovery_set, 0);
atomic_set(&ctx_isp->rxd_epoch, 0);
atomic64_set(&ctx_isp->state_monitor_head, -1);
atomic64_set(&ctx_isp->dbg_monitors.state_monitor_head, -1);
atomic64_set(&ctx_isp->dbg_monitors.frame_monitor_head, -1);
for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
atomic64_set(&ctx_isp->event_record_head[i], -1);
atomic64_set(&ctx_isp->dbg_monitors.event_record_head[i], -1);
CAM_DBG(CAM_ISP, "Stop device success next state %d on ctx %u link: 0x%x",
ctx->state, ctx->ctx_id, ctx->link_hdl);
@@ -8279,7 +8377,12 @@ static int __cam_isp_ctx_handle_irq_in_activated(void *context,
__cam_isp_ctx_dump_state_monitor_array(ctx_isp);
}
CAM_DBG(CAM_ISP, "Exit: State %d Substate[%s], ctx:%u link: 0x%x",
if ((evt_id == CAM_ISP_HW_EVENT_SOF) ||
(evt_id == CAM_ISP_HW_EVENT_EOF) ||
(evt_id == CAM_ISP_HW_EVENT_EPOCH))
__cam_isp_ctx_update_frame_timing_record(evt_id, ctx_isp);
CAM_DBG(CAM_ISP, "Exit: State %d Substate[%s] on ctx: %d",
ctx->state, __cam_isp_ctx_substate_val_to_type(
ctx_isp->substate_activated), ctx->ctx_id, ctx->link_hdl);
@@ -8675,6 +8778,7 @@ int cam_isp_context_init(struct cam_isp_context *ctx,
ctx->active_req_cnt = 0;
ctx->reported_req_id = 0;
ctx->bubble_frame_cnt = 0;
ctx->congestion_cnt = 0;
ctx->req_info.last_bufdone_req_id = 0;
ctx->v4l2_event_sub_ids = 0;
@@ -8705,13 +8809,15 @@ int cam_isp_context_init(struct cam_isp_context *ctx,
/* initializing current state for error logging */
for (i = 0; i < CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES; i++) {
ctx->cam_isp_ctx_state_monitor[i].curr_state =
ctx->dbg_monitors.state_monitor[i].curr_state =
CAM_ISP_CTX_ACTIVATED_MAX;
}
atomic64_set(&ctx->state_monitor_head, -1);
atomic64_set(&ctx->dbg_monitors.state_monitor_head, -1);
for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
atomic64_set(&ctx->event_record_head[i], -1);
atomic64_set(&ctx->dbg_monitors.event_record_head[i], -1);
atomic64_set(&ctx->dbg_monitors.frame_monitor_head, -1);
if (!isp_ctx_debug.dentry)
cam_isp_context_debug_register();