Просмотр исходного кода

Merge "msm: camera: isp: Add recovery support for custom AEB use-case" into camera-kernel.lnx.5.0

Savita Patted 3 лет назад
Родитель
Commit
b03c35a297

+ 456 - 27
drivers/cam_isp/cam_isp_context.c

@@ -430,6 +430,12 @@ static const char *__cam_isp_hw_evt_val_to_type(
 		return "DONE";
 	case CAM_ISP_STATE_CHANGE_TRIGGER_FLUSH:
 		return "FLUSH";
+	case CAM_ISP_STATE_CHANGE_TRIGGER_SEC_EVT_SOF:
+		return "SEC_EVT_SOF";
+	case CAM_ISP_STATE_CHANGE_TRIGGER_SEC_EVT_EPOCH:
+		return "SEC_EVT_EPOCH";
+	case CAM_ISP_STATE_CHANGE_TRIGGER_FRAME_DROP:
+		return "OUT_OF_SYNC_FRAME_DROP";
 	default:
 		return "CAM_ISP_EVENT_INVALID";
 	}
@@ -999,6 +1005,7 @@ static uint64_t __cam_isp_ctx_get_event_ts(uint32_t evt_id, void *evt_data)
 			timestamp;
 		break;
 	case CAM_ISP_HW_EVENT_DONE:
+	case CAM_ISP_HW_SECONDARY_EVENT:
 		break;
 	default:
 		CAM_DBG(CAM_ISP, "Invalid Event Type %d", evt_id);
@@ -1173,6 +1180,92 @@ static void __cam_isp_ctx_handle_buf_done_fail_log(
 	}
 }
 
+/*
+ * Reset all internal-recovery bookkeeping on the ISP context:
+ * clears the recovery and bubble flags and forgets the request
+ * flagged for recovery, returning the context to normal flow.
+ */
+static void __cam_isp_context_reset_internal_recovery_params(
+	struct cam_isp_context    *ctx_isp)
+{
+	atomic_set(&ctx_isp->internal_recovery_set, 0);
+	atomic_set(&ctx_isp->process_bubble, 0);
+	ctx_isp->recovery_req_id = 0;
+}
+
+/*
+ * Attempt the deferred internal (bubble) recovery for the request flagged
+ * in ctx_isp->recovery_req_id. The request is searched for on the wait
+ * list first, then on the pending list; once found, a bubble is reported
+ * to CRM and the context moves to the BUBBLE sub-state. If reporting
+ * fails, or the request is on neither list, all recovery state is reset.
+ *
+ * Returns 0 on success or the error from the CRM bubble notification.
+ */
+static int __cam_isp_context_try_internal_recovery(
+	struct cam_isp_context    *ctx_isp)
+{
+	int rc = 0;
+	struct cam_context        *ctx = ctx_isp->base;
+	struct cam_ctx_request    *req;
+	struct cam_isp_ctx_req    *req_isp;
+
+	/*
+	 * Start with the wait list: if recovery is still set, the errored
+	 * request has not been moved to the pending list yet. Buf done for
+	 * the errored request has not occurred - recover from here.
+	 */
+	if (!list_empty(&ctx->wait_req_list)) {
+		req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request, list);
+		req_isp = (struct cam_isp_ctx_req *)req->req_priv;
+
+		if (req->request_id == ctx_isp->recovery_req_id) {
+			rc = __cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF,
+				CRM_KMD_ERR_BUBBLE, ctx_isp->recovery_req_id, ctx_isp);
+			if (rc) {
+				/* Unable to do bubble recovery reset back to normal */
+				CAM_WARN(CAM_ISP,
+					"Unable to perform internal recovery [bubble reporting failed] for req: %llu in ctx: %u on link: 0x%x",
+					req->request_id, ctx->ctx_id, ctx->link_hdl);
+				__cam_isp_context_reset_internal_recovery_params(ctx_isp);
+				req_isp->bubble_detected = false;
+				goto end;
+			}
+
+			/* Move the errored request back to pending for re-apply */
+			list_del_init(&req->list);
+			list_add(&req->list, &ctx->pending_req_list);
+			ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
+			CAM_INFO(CAM_ISP,
+				"Internal recovery for req: %llu in ctx: %u on link: 0x%x triggered",
+				ctx_isp->recovery_req_id, ctx->ctx_id, ctx->link_hdl);
+			goto end;
+		}
+	}
+
+	/*
+	 * If not in the wait list, the only other possibility is that the
+	 * request is in the pending list: on error detection, bubble detect
+	 * is set assuming a new frame comes in after detection, there is a
+	 * RUP, it is moved to the active list and finishes with its buf dones.
+	 */
+	if (!list_empty(&ctx->pending_req_list)) {
+		req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request, list);
+		req_isp = (struct cam_isp_ctx_req *)req->req_priv;
+
+		if (req->request_id == ctx_isp->recovery_req_id) {
+			rc = __cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF,
+				CRM_KMD_ERR_BUBBLE, ctx_isp->recovery_req_id, ctx_isp);
+			if (rc) {
+				/* Unable to do bubble recovery reset back to normal */
+				CAM_WARN(CAM_ISP,
+					"Unable to perform internal recovery [bubble reporting failed] for req: %llu in ctx: %u on link: 0x%x",
+					req->request_id, ctx->ctx_id, ctx->link_hdl);
+				__cam_isp_context_reset_internal_recovery_params(ctx_isp);
+				req_isp->bubble_detected = false;
+				goto end;
+			}
+			ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
+			CAM_INFO(CAM_ISP,
+				"Internal recovery for req: %llu in ctx: %u on link: 0x%x triggered",
+				ctx_isp->recovery_req_id, ctx->ctx_id, ctx->link_hdl);
+			goto end;
+		}
+	}
+
+	/* If request is not found in either of the lists skip recovery */
+	__cam_isp_context_reset_internal_recovery_params(ctx_isp);
+
+end:
+	return rc;
+}
+
 static int __cam_isp_ctx_handle_buf_done_for_req_list(
 	struct cam_isp_context *ctx_isp,
 	struct cam_ctx_request *req)
@@ -1248,6 +1341,9 @@ static int __cam_isp_ctx_handle_buf_done_for_req_list(
 		ctx_isp->last_bufdone_err_apply_req_id = 0;
 	}
 
+	if (atomic_read(&ctx_isp->internal_recovery_set) && !ctx_isp->active_req_cnt)
+		__cam_isp_context_try_internal_recovery(ctx_isp);
+
 	cam_cpas_notify_event("IFE BufDone", buf_done_req_id);
 
 	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
@@ -3112,6 +3208,208 @@ end:
 	return rc;
 }
 
+/*
+ * Handle a fatal AEB secondary-event condition: a slave RDI's SOF was
+ * seen after the next request was already applied, indicating the EPOCH
+ * height needs re-configuration. Pauses the CRM timer, triggers a
+ * register dump, reports a fatal error to CRM and userland, and halts
+ * the context so no further HW events are processed.
+ */
+static void __cam_isp_ctx_notify_aeb_error_for_sec_event(
+	struct cam_isp_context *ctx_isp)
+{
+	struct cam_context *ctx = ctx_isp->base;
+
+	CAM_ERR(CAM_ISP,
+		"AEB slave RDI's current request's SOF seen after next req is applied, EPOCH height need to be re-configured for ctx: %u on link: 0x%x",
+		ctx->ctx_id, ctx->link_hdl);
+
+	/* Pause CRM timer */
+	__cam_isp_ctx_pause_crm_timer(ctx);
+
+	/* Trigger reg dump */
+	__cam_isp_ctx_trigger_reg_dump(CAM_HW_MGR_CMD_REG_DUMP_ON_ERROR, ctx);
+
+	/* Notify CRM on fatal error */
+	__cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, CRM_KMD_ERR_FATAL,
+		ctx_isp->last_applied_req_id, ctx_isp);
+
+	/* Notify userland on error */
+	__cam_isp_ctx_notify_v4l2_error_event(CAM_REQ_MGR_ERROR_TYPE_RECOVERY,
+		CAM_REQ_MGR_CSID_ERR_ON_SENSOR_SWITCHING, ctx_isp->last_applied_req_id, ctx);
+
+	/* Change state to HALT, stop further processing of HW events */
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
+}
+
+/*
+ * Flag the appropriate request for internal (bubble) recovery after a
+ * frame drop.
+ * @sync_frame_drop: true for a sensor out-of-sync drop - the pending list
+ *                   may then be used if the wait list is empty; false for
+ *                   a programming-delay drop.
+ *
+ * Sets internal_recovery_set/process_bubble and records recovery_req_id,
+ * which blocks further apply calls from CRM. If requests are still
+ * active, recovery is deferred until all their buf dones arrive;
+ * otherwise a bubble is reported to CRM immediately and the request is
+ * moved back to the pending list.
+ */
+static int __cam_isp_ctx_trigger_internal_recovery(
+	bool sync_frame_drop, struct cam_isp_context *ctx_isp)
+{
+	int                                 rc = 0;
+	bool                                do_recovery = true;
+	struct cam_context                 *ctx = ctx_isp->base;
+	struct cam_ctx_request             *req = NULL;
+	struct cam_isp_ctx_req             *req_isp = NULL;
+
+	if (list_empty(&ctx->wait_req_list)) {
+		/*
+		 * If the wait list is empty and we encounter a "silent" frame
+		 * drop, the settings applied on the previous frame did not take
+		 * effect at the next frame boundary and are expected to latch a
+		 * frame later - no need to recover. If it is an out-of-sync
+		 * drop, use the pending request instead.
+		 */
+		if (sync_frame_drop && !list_empty(&ctx->pending_req_list))
+			req = list_first_entry(&ctx->pending_req_list,
+				struct cam_ctx_request, list);
+		else
+			do_recovery = false;
+	}
+
+	/* If both wait and pending list have no request to recover on */
+	if (!do_recovery) {
+		CAM_WARN(CAM_ISP,
+			"No request to perform recovery - ctx: %u on link: 0x%x last_applied: %lld last_buf_done: %lld",
+			ctx->ctx_id, ctx->link_hdl, ctx_isp->last_applied_req_id,
+			ctx_isp->req_info.last_bufdone_req_id);
+		goto end;
+	}
+
+	if (!req) {
+		/* Wait list is known non-empty here; recover on its head */
+		req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request, list);
+		if (req->request_id != ctx_isp->last_applied_req_id)
+			CAM_WARN(CAM_ISP,
+				"Top of wait list req: %llu does not match with last applied: %llu in ctx: %u on link: 0x%x",
+				req->request_id, ctx_isp->last_applied_req_id,
+				ctx->ctx_id, ctx->link_hdl);
+	}
+
+	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
+	/*
+	 * Treat this as bubble, after recovery re-start from appropriate sub-state
+	 * This will block servicing any further apply calls from CRM
+	 */
+	atomic_set(&ctx_isp->internal_recovery_set, 1);
+	atomic_set(&ctx_isp->process_bubble, 1);
+	ctx_isp->recovery_req_id = req->request_id;
+
+	/* Wait for active request's to finish before issuing recovery */
+	if (ctx_isp->active_req_cnt) {
+		req_isp->bubble_detected = true;
+		CAM_WARN(CAM_ISP,
+			"Active req cnt: %u wait for all buf dones before kicking in recovery on req: %lld ctx: %u on link: 0x%x",
+			ctx_isp->active_req_cnt, ctx_isp->recovery_req_id,
+			ctx->ctx_id, ctx->link_hdl);
+	} else {
+		rc = __cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, CRM_KMD_ERR_BUBBLE,
+				ctx_isp->recovery_req_id, ctx_isp);
+		if (rc) {
+			/* Unable to do bubble recovery reset back to normal */
+			CAM_WARN(CAM_ISP,
+				"Unable to perform internal recovery [bubble reporting failed] for req: %llu in ctx: %u on link: 0x%x",
+				ctx_isp->recovery_req_id, ctx->ctx_id, ctx->link_hdl);
+			__cam_isp_context_reset_internal_recovery_params(ctx_isp);
+			goto end;
+		}
+
+		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
+		list_del_init(&req->list);
+		list_add(&req->list, &ctx->pending_req_list);
+	}
+
+end:
+	return rc;
+}
+
+/*
+ * Dispatch CSID secondary events (SOF/EPOCH/out-of-sync frame drop) for
+ * the custom AEB use-case. Events are rejected for non-AEB contexts and
+ * while internal recovery is already in progress. Depending on the event
+ * type and the current sub-state this either flags a fatal AEB error
+ * (slave SOF after apply) or kicks off internal bubble recovery.
+ *
+ * Returns 0 on success, -EPERM for a frame drop at stream on, or the
+ * error from triggering recovery.
+ */
+static int __cam_isp_ctx_handle_secondary_events(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	int rc = 0;
+	bool recover = false, sync_frame_drop = false;
+	struct cam_context *ctx = ctx_isp->base;
+	struct cam_isp_hw_secondary_event_data *sec_evt_data =
+		(struct cam_isp_hw_secondary_event_data *)evt_data;
+
+	/* Current scheme to handle only for custom AEB */
+	if (!ctx_isp->aeb_enabled) {
+		CAM_WARN(CAM_ISP,
+			"Recovery not supported for non-AEB ctx: %u on link: 0x%x reject sec evt: %u",
+			ctx->ctx_id, ctx->link_hdl, sec_evt_data->evt_type);
+		goto end;
+	}
+
+	if (atomic_read(&ctx_isp->internal_recovery_set)) {
+		CAM_WARN(CAM_ISP,
+			"Internal recovery in progress in ctx: %u on link: 0x%x reject sec evt: %u",
+			ctx->ctx_id, ctx->link_hdl, sec_evt_data->evt_type);
+		goto end;
+	}
+
+	/*
+	 * In case of custom AEB ensure first exposure frame has
+	 * not moved forward with its settings without second/third
+	 * exposure frame coming in. Also track for bubble, in case of system
+	 * delays it's possible for the IFE settings to be not written to
+	 * HW on a given frame. If these scenarios occur flag as error,
+	 * and recover.
+	 */
+	switch (sec_evt_data->evt_type) {
+	case CAM_ISP_HW_SEC_EVENT_SOF:
+		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
+			CAM_ISP_STATE_CHANGE_TRIGGER_SEC_EVT_SOF,
+			ctx_isp->last_applied_req_id);
+
+		/* Slave RDI's frame starting post IFE EPOCH - Fatal */
+		if ((ctx_isp->substate_activated ==
+			CAM_ISP_CTX_ACTIVATED_APPLIED) ||
+			(ctx_isp->substate_activated ==
+			CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED))
+			__cam_isp_ctx_notify_aeb_error_for_sec_event(ctx_isp);
+		break;
+	case CAM_ISP_HW_SEC_EVENT_EPOCH:
+		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
+			CAM_ISP_STATE_CHANGE_TRIGGER_SEC_EVT_EPOCH,
+			ctx_isp->last_applied_req_id);
+
+		/*
+		 * Master RDI frame dropped in CSID, due to programming delay no RUP/AUP
+		 * On such occasions use CSID CAMIF EPOCH for bubble detection, flag
+		 * on detection and perform necessary bubble recovery
+		 */
+		if ((ctx_isp->substate_activated ==
+			CAM_ISP_CTX_ACTIVATED_APPLIED) ||
+			(ctx_isp->substate_activated ==
+			CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED)) {
+			recover = true;
+			CAM_WARN(CAM_ISP,
+				"Programming delay input frame dropped ctx: %u on link: 0x%x last_applied_req: %llu, kicking in internal recovery....",
+				ctx->ctx_id, ctx->link_hdl, ctx_isp->last_applied_req_id);
+		}
+		break;
+	case CAM_ISP_HW_SEC_EVENT_OUT_OF_SYNC_FRAME_DROP:
+		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
+			CAM_ISP_STATE_CHANGE_TRIGGER_FRAME_DROP,
+			ctx_isp->last_applied_req_id);
+
+		/* Avoid recovery loop if frame is dropped at stream on */
+		if (!ctx_isp->frame_id) {
+			CAM_ERR(CAM_ISP,
+				"Sensor sync [vc mismatch] frame dropped at stream on ctx: %u link: 0x%x frame_id: %u last_applied_req: %lld",
+				ctx->ctx_id, ctx->link_hdl,
+				ctx_isp->frame_id, ctx_isp->last_applied_req_id);
+			rc = -EPERM;
+			break;
+		}
+
+		recover = true;
+		sync_frame_drop = true;
+		CAM_WARN(CAM_ISP,
+			"Sensor sync [vc mismatch] frame dropped ctx: %u on link: 0x%x last_applied_req: %llu, kicking in internal recovery....",
+			ctx->ctx_id, ctx->link_hdl, ctx_isp->last_applied_req_id);
+		break;
+	default:
+		break;
+	}
+
+	/* Recovery is gated by the acquire-time debugfs-controlled flag */
+	if (recover && ctx_isp->do_internal_recovery)
+		rc = __cam_isp_ctx_trigger_internal_recovery(sync_frame_drop, ctx_isp);
+
+end:
+	return rc;
+}
+
 static struct cam_isp_ctx_irq_ops
 	cam_isp_ctx_activated_state_machine_irq[CAM_ISP_CTX_ACTIVATED_MAX] = {
 	/* SOF */
@@ -3123,6 +3421,7 @@ static struct cam_isp_ctx_irq_ops
 			__cam_isp_ctx_notify_sof_in_activated_state,
 			__cam_isp_ctx_notify_eof_in_activated_state,
 			NULL,
+			__cam_isp_ctx_handle_secondary_events,
 		},
 	},
 	/* APPLIED */
@@ -3134,6 +3433,7 @@ static struct cam_isp_ctx_irq_ops
 			__cam_isp_ctx_epoch_in_applied,
 			__cam_isp_ctx_notify_eof_in_activated_state,
 			__cam_isp_ctx_buf_done_in_applied,
+			__cam_isp_ctx_handle_secondary_events,
 		},
 	},
 	/* EPOCH */
@@ -3145,6 +3445,7 @@ static struct cam_isp_ctx_irq_ops
 			__cam_isp_ctx_notify_sof_in_activated_state,
 			__cam_isp_ctx_notify_eof_in_activated_state,
 			__cam_isp_ctx_buf_done_in_epoch,
+			__cam_isp_ctx_handle_secondary_events,
 		},
 	},
 	/* BUBBLE */
@@ -3156,6 +3457,7 @@ static struct cam_isp_ctx_irq_ops
 			__cam_isp_ctx_notify_sof_in_activated_state,
 			__cam_isp_ctx_notify_eof_in_activated_state,
 			__cam_isp_ctx_buf_done_in_bubble,
+			__cam_isp_ctx_handle_secondary_events,
 		},
 	},
 	/* Bubble Applied */
@@ -3167,6 +3469,7 @@ static struct cam_isp_ctx_irq_ops
 			__cam_isp_ctx_epoch_in_bubble_applied,
 			NULL,
 			__cam_isp_ctx_buf_done_in_bubble_applied,
+			__cam_isp_ctx_handle_secondary_events,
 		},
 	},
 	/* HW ERROR */
@@ -3882,6 +4185,7 @@ static int __cam_isp_ctx_flush_req_in_top_state(
 		stop_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
 		stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_IMMEDIATELY;
 		stop_isp.stop_only = true;
+		stop_isp.internal_trigger = false;
 		stop_args.args = (void *)&stop_isp;
 		rc = ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
 			&stop_args);
@@ -3924,6 +4228,7 @@ end:
 	ctx_isp->bubble_frame_cnt = 0;
 	atomic_set(&ctx_isp->process_bubble, 0);
 	atomic_set(&ctx_isp->rxd_epoch, 0);
+	atomic_set(&ctx_isp->internal_recovery_set, 0);
 	return rc;
 }
 
@@ -4669,6 +4974,8 @@ static int __cam_isp_ctx_release_hw_in_top_state(struct cam_context *ctx,
 	ctx_isp->hw_acquired = false;
 	ctx_isp->init_received = false;
 	ctx_isp->support_consumed_addr = false;
+	ctx_isp->aeb_enabled = false;
+	ctx_isp->do_internal_recovery = false;
 	ctx_isp->req_info.last_bufdone_req_id = 0;
 
 	atomic64_set(&ctx_isp->state_monitor_head, -1);
@@ -5449,6 +5756,9 @@ static int __cam_isp_ctx_acquire_hw_v2(struct cam_context *ctx,
 	ctx_isp->aeb_enabled =
 		(param.op_flags & CAM_IFE_CTX_AEB_EN);
 
+	if ((ctx_isp->aeb_enabled) && (!isp_ctx_debug.disable_internal_recovery))
+		ctx_isp->do_internal_recovery = true;
+
 	/* Query the context has rdi only resource */
 	hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map;
 	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
@@ -5709,12 +6019,14 @@ static inline void __cam_isp_context_reset_ctx_params(
 {
 	atomic_set(&ctx_isp->process_bubble, 0);
 	atomic_set(&ctx_isp->rxd_epoch, 0);
+	atomic_set(&ctx_isp->internal_recovery_set, 0);
 	ctx_isp->frame_id = 0;
 	ctx_isp->sof_timestamp_val = 0;
 	ctx_isp->boot_timestamp = 0;
 	ctx_isp->active_req_cnt = 0;
 	ctx_isp->reported_req_id = 0;
 	ctx_isp->bubble_frame_cnt = 0;
+	ctx_isp->recovery_req_id = 0;
 }
 
 static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
@@ -5871,6 +6183,7 @@ static int __cam_isp_ctx_stop_dev_in_activated_unlock(
 
 		stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_IMMEDIATELY;
 		stop_isp.stop_only = false;
+		stop_isp.internal_trigger = false;
 
 		stop.args = (void *) &stop_isp;
 		ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
@@ -5951,6 +6264,7 @@ static int __cam_isp_ctx_stop_dev_in_activated_unlock(
 	ctx_isp->req_info.last_bufdone_req_id = 0;
 	ctx_isp->bubble_frame_cnt = 0;
 	atomic_set(&ctx_isp->process_bubble, 0);
+	atomic_set(&ctx_isp->internal_recovery_set, 0);
 	atomic_set(&ctx_isp->rxd_epoch, 0);
 	atomic64_set(&ctx_isp->state_monitor_head, -1);
 
@@ -6065,6 +6379,138 @@ static int __cam_isp_ctx_handle_sof_freeze_evt(
 	return rc;
 }
 
+/*
+ * Perform the internal Halt/Reset/Resume recovery sequence: stop the HW
+ * (flagged as an internal trigger so clk/bw data is preserved), then
+ * resume it and re-start with the first pending request's configuration
+ * (IQ reapplied now, IO-only on the next trigger).
+ * @skip_resume: when true, only stream off and clear the recovery flag -
+ *               provision for fatal errors where resuming is not desired.
+ *
+ * Bails out (to be retried after buf done) if requests are still active,
+ * and errors out if the context is not ACTIVATED or has nothing pending.
+ */
+static int __cam_isp_ctx_reset_and_recover(
+	bool skip_resume, struct cam_context *ctx)
+{
+	int rc = 0;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *)ctx->ctx_priv;
+	struct cam_isp_stop_args              stop_isp;
+	struct cam_hw_stop_args               stop_args;
+	struct cam_isp_start_args             start_isp;
+	struct cam_hw_cmd_args                hw_cmd_args;
+	struct cam_isp_hw_cmd_args            isp_hw_cmd_args;
+	struct cam_ctx_request               *req;
+	struct cam_isp_ctx_req               *req_isp;
+
+	spin_lock(&ctx->lock);
+	if (ctx_isp->active_req_cnt) {
+		spin_unlock(&ctx->lock);
+		CAM_WARN(CAM_ISP,
+			"Active list not empty: %u in ctx: %u on link: 0x%x, retry recovery for req: %lld after buf_done",
+			ctx_isp->active_req_cnt, ctx->ctx_id,
+			ctx->link_hdl, ctx_isp->recovery_req_id);
+		goto end;
+	}
+
+	if (ctx->state != CAM_CTX_ACTIVATED) {
+		spin_unlock(&ctx->lock);
+		CAM_ERR(CAM_ISP,
+			"In wrong state %d, for recovery ctx: %u in link: 0x%x recovery req: %lld",
+			ctx->state, ctx->ctx_id,
+			ctx->link_hdl, ctx_isp->recovery_req_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (list_empty(&ctx->pending_req_list)) {
+		/* Cannot start with no request */
+		spin_unlock(&ctx->lock);
+		CAM_ERR(CAM_ISP,
+			"Failed to reset and recover last_applied_req: %llu in ctx: %u on link: 0x%x",
+			ctx_isp->last_applied_req_id, ctx->ctx_id, ctx->link_hdl);
+		rc = -EFAULT;
+		goto end;
+	}
+	spin_unlock(&ctx->lock);
+
+	if (!ctx_isp->hw_ctx) {
+		CAM_ERR(CAM_ISP,
+			"Invalid hw context pointer ctx: %u on link: 0x%x",
+			ctx->ctx_id, ctx->link_hdl);
+		rc = -EFAULT;
+		goto end;
+	}
+
+	/*
+	 * NOTE(review): ctx->lock was released above; pending_req_list is
+	 * read here without it - presumably safe because apply is blocked
+	 * while process_bubble/internal_recovery_set is set, but confirm.
+	 */
+	req = list_first_entry(&ctx->pending_req_list,
+		struct cam_ctx_request, list);
+	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+	req_isp->bubble_detected = false;
+
+	CAM_INFO(CAM_ISP,
+		"Trigger Halt, Reset & Resume for req: %llu ctx: %u in state: %d link: 0x%x",
+		req->request_id, ctx->ctx_id, ctx->state, ctx->link_hdl);
+
+	stop_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
+	stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_IMMEDIATELY;
+	stop_isp.stop_only = true;
+	stop_isp.internal_trigger = true;
+	stop_args.args = (void *)&stop_isp;
+	rc = ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
+		&stop_args);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Failed to stop HW rc: %d ctx: %u",
+			rc, ctx->ctx_id);
+		goto end;
+	}
+	CAM_DBG(CAM_ISP, "Stop HW success ctx: %u link: 0x%x",
+		ctx->ctx_id, ctx->link_hdl);
+
+	/* API provides provision to stream off and not resume as well in case of fatal errors */
+	if (skip_resume) {
+		atomic_set(&ctx_isp->internal_recovery_set, 0);
+		CAM_INFO(CAM_ISP,
+			"Halting streaming off IFE/SFE ctx: %u last_applied_req: %lld [recovery_req: %lld] on link: 0x%x",
+			ctx->ctx_id, ctx_isp->last_applied_req_id,
+			ctx_isp->recovery_req_id, ctx->link_hdl);
+		goto end;
+	}
+
+	hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
+	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
+	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_RESUME_HW;
+	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
+	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
+		&hw_cmd_args);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Failed to resume HW rc: %d ctx: %u", rc, ctx->ctx_id);
+		goto end;
+	}
+	CAM_DBG(CAM_ISP, "Resume call success ctx: %u on link: 0x%x",
+		ctx->ctx_id, ctx->link_hdl);
+
+	start_isp.hw_config.ctxt_to_hw_map = ctx_isp->hw_ctx;
+	start_isp.hw_config.request_id = req->request_id;
+	start_isp.hw_config.hw_update_entries = req_isp->cfg;
+	start_isp.hw_config.num_hw_update_entries = req_isp->num_cfg;
+	start_isp.hw_config.priv  = &req_isp->hw_update_data;
+	start_isp.hw_config.init_packet = 1;
+	start_isp.hw_config.reapply_type = CAM_CONFIG_REAPPLY_IQ;
+	start_isp.hw_config.cdm_reset_before_apply = false;
+	start_isp.start_only = true;
+
+	/* Clear recovery flags before re-start so new events are serviced */
+	__cam_isp_context_reset_internal_recovery_params(ctx_isp);
+
+	ctx_isp->substate_activated = ctx_isp->rdi_only_context ?
+		CAM_ISP_CTX_ACTIVATED_APPLIED : CAM_ISP_CTX_ACTIVATED_SOF;
+
+	rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv,
+		&start_isp);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Start HW failed");
+		ctx->state = CAM_CTX_READY;
+		goto end;
+	}
+
+	/* IQ applied for this request, on next trigger skip IQ cfg */
+	req_isp->reapply_type = CAM_CONFIG_REAPPLY_IO;
+	CAM_DBG(CAM_ISP, "Internal Start HW success ctx %u on link: 0x%x",
+		ctx->ctx_id, ctx->link_hdl);
+
+end:
+	return rc;
+}
+
 static int __cam_isp_ctx_process_evt(struct cam_context *ctx,
 	struct cam_req_mgr_link_evt_data *link_evt_data)
 {
@@ -6171,9 +6617,13 @@ static int __cam_isp_ctx_apply_default_settings(
 	if (apply->trigger_point != CAM_TRIGGER_POINT_SOF)
 		return 0;
 
+	if ((ctx_isp->aeb_enabled) && (atomic_read(&ctx_isp->internal_recovery_set)))
+		return __cam_isp_ctx_reset_and_recover(false, ctx);
+
 	CAM_DBG(CAM_ISP,
-		"Enter: apply req in Substate %d request _id:%lld",
-		 ctx_isp->substate_activated, apply->request_id);
+		"Enter: apply req in Substate:%d request _id:%lld ctx:%u on link:0x%x",
+		 ctx_isp->substate_activated, apply->request_id,
+		 ctx->ctx_id, ctx->link_hdl);
 
 	ctx_ops = &ctx_isp->substate_machine[
 		ctx_isp->substate_activated];
@@ -6193,7 +6643,6 @@ static int __cam_isp_ctx_apply_default_settings(
 	return rc;
 }
 
-
 static int __cam_isp_ctx_handle_irq_in_activated(void *context,
 	uint32_t evt_id, void *evt_data)
 {
@@ -6204,29 +6653,6 @@ static int __cam_isp_ctx_handle_irq_in_activated(void *context,
 		(struct cam_isp_context *)ctx->ctx_priv;
 
 	spin_lock(&ctx->lock);
-	/*
-	 * In case of custom AEB ensure first exposure frame has
-	 * not moved forward with its settings without second/third
-	 * expoure frame coming in. If this scenario occurs flag as error,
-	 * and recover
-	 */
-	if ((ctx_isp->aeb_enabled) && (evt_id == CAM_ISP_HW_EVENT_SOF)) {
-		bool is_secondary_evt =
-			((struct cam_isp_hw_sof_event_data *)evt_data)->is_secondary_evt;
-
-		if (is_secondary_evt) {
-			if ((ctx_isp->substate_activated ==
-				CAM_ISP_CTX_ACTIVATED_APPLIED) ||
-				(ctx_isp->substate_activated ==
-				CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED)) {
-				CAM_ERR(CAM_ISP,
-					"AEB settings mismatch between exposures - needs a reset");
-				rc = -EAGAIN;
-			}
-			goto end;
-		}
-	}
-
 	trace_cam_isp_activated_irq(ctx, ctx_isp->substate_activated, evt_id,
 		__cam_isp_ctx_get_event_ts(evt_id, evt_data));
 
@@ -6247,7 +6673,7 @@ static int __cam_isp_ctx_handle_irq_in_activated(void *context,
 	CAM_DBG(CAM_ISP, "Exit: State %d Substate[%s]",
 		ctx->state, __cam_isp_ctx_substate_val_to_type(
 		ctx_isp->substate_activated));
-end:
+
 	spin_unlock(&ctx->lock);
 	return rc;
 }
@@ -6508,6 +6934,9 @@ static int cam_isp_context_debug_register(void)
 		isp_ctx_debug.dentry, &isp_ctx_debug.enable_state_monitor_dump);
 	debugfs_create_u8("enable_cdm_cmd_buffer_dump", 0644,
 		isp_ctx_debug.dentry, &isp_ctx_debug.enable_cdm_cmd_buff_dump);
+	debugfs_create_bool("disable_internal_recovery", 0644,
+		isp_ctx_debug.dentry, &isp_ctx_debug.disable_internal_recovery);
+
 	if (IS_ERR(dbgfileptr)) {
 		if (PTR_ERR(dbgfileptr) == -ENODEV)
 			CAM_WARN(CAM_ISP, "DebugFS not enabled in kernel!");

+ 14 - 4
drivers/cam_isp/cam_isp_context.h

@@ -103,6 +103,9 @@ enum cam_isp_state_change_trigger {
 	CAM_ISP_STATE_CHANGE_TRIGGER_DONE,
 	CAM_ISP_STATE_CHANGE_TRIGGER_EOF,
 	CAM_ISP_STATE_CHANGE_TRIGGER_FLUSH,
+	CAM_ISP_STATE_CHANGE_TRIGGER_SEC_EVT_SOF,
+	CAM_ISP_STATE_CHANGE_TRIGGER_SEC_EVT_EPOCH,
+	CAM_ISP_STATE_CHANGE_TRIGGER_FRAME_DROP,
 	CAM_ISP_STATE_CHANGE_TRIGGER_MAX
 };
 
@@ -111,13 +114,15 @@ enum cam_isp_state_change_trigger {
  *
  * @dentry:                     Debugfs entry
  * @enable_state_monitor_dump:  Enable isp state monitor dump
- * @enable_cdm_cmd_buff_dump: Enable CDM Command buffer dump
+ * @enable_cdm_cmd_buff_dump:   Enable CDM Command buffer dump
+ * @disable_internal_recovery:  Disable internal kernel recovery
  *
  */
 struct cam_isp_ctx_debug {
 	struct dentry  *dentry;
 	uint32_t        enable_state_monitor_dump;
 	uint8_t         enable_cdm_cmd_buff_dump;
+	bool            disable_internal_recovery;
 };
 
 /**
@@ -248,6 +253,7 @@ struct cam_isp_context_event_record {
  * @subscribe_event:           The irq event mask that CRM subscribes to, IFE
  *                             will invoke CRM cb at those event.
  * @last_applied_req_id:       Last applied request id
+ * @recovery_req_id:           Req ID flagged for internal recovery
  * @last_sof_timestamp:        SOF timestamp of the last frame
  * @bubble_frame_cnt:          Count of the frame after bubble
  * @state_monitor_head:        Write index to the state monitoring array
@@ -264,7 +270,6 @@ struct cam_isp_context_event_record {
  * @custom_enabled:            Custom HW enabled for this ctx
  * @use_frame_header_ts:       Use frame header for qtimer ts
  * @support_consumed_addr:     Indicate whether HW has last consumed addr reg
- * @aeb_enabled:               Indicate if stream is for AEB
  * @apply_in_progress          Whether request apply is in progress
  * @use_default_apply:         Use default settings in case of frame skip
  * @init_timestamp:            Timestamp at which this context is initialized
@@ -275,12 +280,14 @@ struct cam_isp_context_event_record {
  * @trigger_id:                ID provided by CRM for each ctx on the link
  * @last_bufdone_err_apply_req_id:  last bufdone error apply request id
  * @v4l2_event_sub_ids         contains individual bits representing subscribed v4l2 ids
+ * @aeb_enabled:               Indicate if stream is for AEB
+ * @do_internal_recovery:      Enable KMD halt/reset/resume internal recovery
  *
  */
 struct cam_isp_context {
 	struct cam_context              *base;
 
-	int64_t                          frame_id;
+	uint64_t                         frame_id;
 	uint32_t                         frame_id_meta;
 	uint32_t                         substate_activated;
 	atomic_t                         process_bubble;
@@ -297,6 +304,7 @@ struct cam_isp_context {
 	int64_t                          reported_req_id;
 	uint32_t                         subscribe_event;
 	int64_t                          last_applied_req_id;
+	uint64_t                         recovery_req_id;
 	uint64_t                         last_sof_timestamp;
 	uint32_t                         bubble_frame_cnt;
 	atomic64_t                       state_monitor_head;
@@ -315,8 +323,8 @@ struct cam_isp_context {
 	bool                                  custom_enabled;
 	bool                                  use_frame_header_ts;
 	bool                                  support_consumed_addr;
-	bool                                  aeb_enabled;
 	atomic_t                              apply_in_progress;
+	atomic_t                              internal_recovery_set;
 	bool                                  use_default_apply;
 	unsigned int                          init_timestamp;
 	uint32_t                              isp_device_type;
@@ -325,6 +333,8 @@ struct cam_isp_context {
 	int32_t                               trigger_id;
 	int64_t                               last_bufdone_err_apply_req_id;
 	uint32_t                              v4l2_event_sub_ids;
+	bool                                  aeb_enabled;
+	bool                                  do_internal_recovery;
 };
 
 /**

+ 123 - 48
drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c

@@ -78,12 +78,14 @@ static int cam_ife_mgr_prog_default_settings(
 	bool need_rup_aup, struct cam_ife_hw_mgr_ctx *ctx);
 
 static int cam_ife_mgr_finish_clk_bw_update(
-	struct cam_ife_hw_mgr_ctx             *ctx, uint64_t request_id)
+	struct cam_ife_hw_mgr_ctx *ctx,
+	uint64_t request_id, bool skip_clk_data_rst)
 {
 	int i, rc = 0;
 	struct cam_isp_apply_clk_bw_args clk_bw_args;
 
 	clk_bw_args.request_id = request_id;
+	clk_bw_args.skip_clk_data_rst = skip_clk_data_rst;
 	for (i = 0; i < ctx->num_base; i++) {
 		clk_bw_args.hw_intf = NULL;
 		CAM_DBG(CAM_PERF,
@@ -3437,13 +3439,26 @@ static int cam_ife_hw_mgr_acquire_res_ife_csid_rdi(
 			 * if only being dumped will be considered as a
 			 * no merge resource
 			 */
-			if ((ife_ctx->flags.is_aeb_mode) &&
-				((out_port->res_type - CAM_ISP_SFE_OUT_RES_RDI_0) >=
-				ife_ctx->sfe_info.num_fetches)) {
-				csid_acquire.en_secondary_evt = true;
-				CAM_DBG(CAM_ISP,
-					"Secondary evt enabled for path: 0x%x",
-					out_port->res_type);
+			if (ife_ctx->flags.is_aeb_mode) {
+				if ((out_port->res_type - CAM_ISP_SFE_OUT_RES_RDI_0) >=
+					ife_ctx->sfe_info.num_fetches) {
+					csid_acquire.sec_evt_config.en_secondary_evt = true;
+					csid_acquire.sec_evt_config.evt_type = CAM_IFE_CSID_EVT_SOF;
+					CAM_DBG(CAM_ISP,
+						"Secondary SOF evt enabled for path: 0x%x",
+						out_port->res_type);
+				}
+
+				/* Enable EPOCH/SYNC frame drop for error monitoring on master */
+				if (out_port->res_type == CAM_ISP_SFE_OUT_RES_RDI_0) {
+					csid_acquire.sec_evt_config.en_secondary_evt = true;
+					csid_acquire.sec_evt_config.evt_type =
+						CAM_IFE_CSID_EVT_EPOCH |
+						CAM_IFE_CSID_EVT_SENSOR_SYNC_FRAME_DROP;
+					CAM_DBG(CAM_ISP,
+						"Secondary EPOCH & frame drop evt enabled for path: 0x%x",
+						out_port->res_type);
+				}
 			}
 		}
 
@@ -5847,7 +5862,7 @@ static int cam_ife_mgr_config_hw(void *hw_mgr_priv,
 	}
 
 	/* Apply the updated values in top layer to the HW*/
-	rc = cam_ife_mgr_finish_clk_bw_update(ctx, cfg->request_id);
+	rc = cam_ife_mgr_finish_clk_bw_update(ctx, cfg->request_id, false);
 	if (rc) {
 		CAM_ERR(CAM_ISP, "Failed in finishing clk/bw update rc: %d", rc);
 		cam_ife_mgr_print_blob_info(ctx, cfg->request_id, hw_update_data);
@@ -6172,6 +6187,12 @@ static int cam_ife_mgr_stop_hw(void *hw_mgr_priv, void *stop_hw_args)
 			ctx->base[i].idx, csid_halt_type);
 	}
 
+	/* Ensure HW layer does not reset any clk data since it's
+	 * internal stream off/resume
+	 */
+	if (stop_isp->internal_trigger)
+		cam_ife_mgr_finish_clk_bw_update(ctx, 0, true);
+
 	/* check to avoid iterating loop */
 	if (ctx->ctx_type == CAM_IFE_CTX_TYPE_SFE) {
 		CAM_DBG(CAM_ISP, "Going to stop SFE Out");
@@ -6207,12 +6228,14 @@ static int cam_ife_mgr_stop_hw(void *hw_mgr_priv, void *stop_hw_args)
 
 	cam_tasklet_stop(ctx->common.tasklet_info);
 
-	/* reset scratch buffer expect init again for SFE */
-	if (ctx->sfe_info.scratch_config)
-		memset(ctx->sfe_info.scratch_config, 0,
-			sizeof(struct cam_sfe_scratch_buf_cfg));
+	/* reset scratch buffer/mup expect INIT again for UMD triggered stop/flush */
+	if (!stop_isp->internal_trigger) {
+		ctx->current_mup = 0;
+		if (ctx->sfe_info.scratch_config)
+			memset(ctx->sfe_info.scratch_config, 0,
+				sizeof(struct cam_sfe_scratch_buf_cfg));
+	}
 	ctx->sfe_info.skip_scratch_cfg_streamon = false;
-	ctx->current_mup = 0;
 
 	cam_ife_mgr_pause_hw(ctx);
 
@@ -6224,6 +6247,16 @@ static int cam_ife_mgr_stop_hw(void *hw_mgr_priv, void *stop_hw_args)
 			"config done completion timeout for last applied req_id=%llu ctx_index %",
 			ctx->applied_req_id, ctx->ctx_index);
 
+	/* Reset CDM for KMD internal stop */
+	if (stop_isp->internal_trigger) {
+		rc = cam_cdm_reset_hw(ctx->cdm_handle);
+		if (rc) {
+			CAM_WARN(CAM_ISP, "CDM: %u reset failed rc: %d in ctx: %u",
+				ctx->cdm_id, rc, ctx->ctx_index);
+			rc = 0;
+		}
+	}
+
 	if (stop_isp->stop_only)
 		goto end;
 
@@ -11426,6 +11459,31 @@ end:
 	return 0;
 }
 
+/*
+ * Forward a CSID sensor-sync frame drop to the context layer as a
+ * secondary event. Non-secondary notifications are ignored here.
+ * Returns the callback's result, or 0 if nothing was forwarded.
+ */
+static int cam_ife_hw_mgr_handle_csid_frame_drop(
+	struct cam_isp_hw_event_info         *event_info,
+	struct cam_ife_hw_mgr_ctx            *ctx)
+{
+	int rc = 0;
+	cam_hw_event_cb_func ife_hw_irq_cb = ctx->common.event_cb;
+
+	/*
+	 * Support frame drop as secondary event
+	 */
+	if (event_info->is_secondary_evt) {
+		/* Zero-init: only evt_type is set explicitly, avoid passing
+		 * uninitialized stack data for any remaining fields
+		 */
+		struct cam_isp_hw_secondary_event_data sec_evt_data = {0};
+
+		CAM_DBG(CAM_ISP,
+			"Received CSID[%u] sensor sync frame drop res: %d as secondary evt",
+			event_info->hw_idx, event_info->res_id);
+
+		sec_evt_data.evt_type = CAM_ISP_HW_SEC_EVENT_OUT_OF_SYNC_FRAME_DROP;
+		rc = ife_hw_irq_cb(ctx->common.cb_priv,
+			CAM_ISP_HW_SECONDARY_EVENT, (void *)&sec_evt_data);
+	}
+
+	return rc;
+}
+
 static int cam_ife_hw_mgr_handle_csid_error(
 	struct cam_ife_hw_mgr_ctx      *ctx,
 	struct cam_isp_hw_event_info   *event_info)
@@ -11448,6 +11506,9 @@ static int cam_ife_hw_mgr_handle_csid_error(
 	CAM_DBG(CAM_ISP, "Entry CSID[%u] error %d", event_info->hw_idx, err_type);
 
 	spin_lock(&g_ife_hw_mgr.ctx_lock);
+	if (err_type & CAM_ISP_HW_ERROR_CSID_SENSOR_FRAME_DROP)
+		cam_ife_hw_mgr_handle_csid_frame_drop(event_info, ctx);
+
 	if ((err_type & CAM_ISP_HW_ERROR_CSID_FATAL) &&
 		g_ife_hw_mgr.debug_cfg.enable_csid_recovery) {
 
@@ -11526,6 +11587,50 @@ static int cam_ife_hw_mgr_handle_csid_rup(
 	return 0;
 }
 
+static int cam_ife_hw_mgr_handle_csid_camif_sof(
+	struct cam_ife_hw_mgr_ctx            *ctx,
+	struct cam_isp_hw_event_info         *event_info)
+{
+	int rc = 0;
+	cam_hw_event_cb_func ife_hw_irq_sof_cb = ctx->common.event_cb;
+
+	if (event_info->is_secondary_evt) {
+		struct cam_isp_hw_secondary_event_data sec_evt_data;
+
+		CAM_DBG(CAM_ISP,
+			"Received CSID[%u] CAMIF SOF res: %d as secondary evt",
+			event_info->hw_idx, event_info->res_id);
+
+		sec_evt_data.evt_type = CAM_ISP_HW_SEC_EVENT_SOF;
+		rc = ife_hw_irq_sof_cb(ctx->common.cb_priv,
+			CAM_ISP_HW_SECONDARY_EVENT, (void *)&sec_evt_data);
+	}
+
+	return rc;
+}
+
+static int cam_ife_hw_mgr_handle_csid_camif_epoch(
+	struct cam_ife_hw_mgr_ctx            *ctx,
+	struct cam_isp_hw_event_info         *event_info)
+{
+	int rc = 0;
+	cam_hw_event_cb_func ife_hw_irq_epoch_cb = ctx->common.event_cb;
+
+	if (event_info->is_secondary_evt) {
+		struct cam_isp_hw_secondary_event_data sec_evt_data;
+
+		CAM_DBG(CAM_ISP,
+			"Received CSID[%u] CAMIF EPOCH res: %d as secondary evt",
+			event_info->hw_idx, event_info->res_id);
+
+		sec_evt_data.evt_type = CAM_ISP_HW_SEC_EVENT_EPOCH;
+		rc = ife_hw_irq_epoch_cb(ctx->common.cb_priv,
+			CAM_ISP_HW_SECONDARY_EVENT, (void *)&sec_evt_data);
+	}
+
+	return rc;
+}
+
 static int cam_ife_hw_mgr_handle_hw_dump_info(
 	void                                 *ctx,
 	void                                 *evt_info)
@@ -11792,40 +11897,6 @@ static int cam_ife_hw_mgr_handle_hw_epoch(
 	return 0;
 }
 
-static int cam_ife_hw_mgr_handle_csid_camif_sof(
-	struct cam_ife_hw_mgr_ctx            *ctx,
-	struct cam_isp_hw_event_info         *event_info)
-{
-	struct cam_isp_hw_sof_event_data      sof_done_event_data;
-	cam_hw_event_cb_func                  ife_hw_irq_sof_cb;
-
-	/*
-	 * Currently SOF update is from IFE TOP - this CSID CAMIF SOF
-	 * is only to monitor second/third exposure frame for custom
-	 * AEB use-case hence the checks
-	 */
-	if (!(ctx->flags.is_aeb_mode && event_info->is_secondary_evt)) {
-		CAM_DBG(CAM_ISP,
-			"Received CSID CAMIF SOF aeb_mode: %d secondary_evt: %d - skip update",
-			ctx->flags.is_aeb_mode, event_info->is_secondary_evt);
-		return 0;
-	}
-
-	CAM_DBG(CAM_ISP,
-		"Received CSID CAMIF SOF res: %d as secondary evt",
-		event_info->res_id);
-
-	ife_hw_irq_sof_cb = ctx->common.event_cb;
-	sof_done_event_data.is_secondary_evt = true;
-	sof_done_event_data.boot_time = 0;
-	sof_done_event_data.timestamp = 0;
-
-	ife_hw_irq_sof_cb(ctx->common.cb_priv,
-		CAM_ISP_HW_EVENT_SOF, (void *)&sof_done_event_data);
-
-	return 0;
-}
-
 static int cam_ife_hw_mgr_handle_hw_sof(
 	struct cam_ife_hw_mgr_ctx            *ife_hw_mgr_ctx,
 	struct cam_isp_hw_event_info         *event_info)
@@ -12103,6 +12174,10 @@ static int cam_ife_hw_mgr_handle_csid_event(
 		rc = cam_ife_hw_mgr_handle_csid_camif_sof(ctx, event_info);
 		break;
 
+	case CAM_ISP_HW_EVENT_EPOCH:
+		rc = cam_ife_hw_mgr_handle_csid_camif_epoch(ctx, event_info);
+		break;
+
 	default:
 		CAM_ERR(CAM_ISP, "Event: %u not handled for CSID", evt_id);
 		rc = -EINVAL;

+ 24 - 3
drivers/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h

@@ -63,9 +63,19 @@ enum cam_isp_hw_event_type {
 	CAM_ISP_HW_EVENT_EPOCH,
 	CAM_ISP_HW_EVENT_EOF,
 	CAM_ISP_HW_EVENT_DONE,
+	CAM_ISP_HW_SECONDARY_EVENT,
 	CAM_ISP_HW_EVENT_MAX
 };
 
+/**
+ * enum cam_isp_hw_secondary_event_type - Collection of the ISP hardware secondary events
+ */
+enum cam_isp_hw_secondary_event_type {
+	CAM_ISP_HW_SEC_EVENT_SOF,
+	CAM_ISP_HW_SEC_EVENT_EPOCH,
+	CAM_ISP_HW_SEC_EVENT_OUT_OF_SYNC_FRAME_DROP,
+	CAM_ISP_HW_SEC_EVENT_EVENT_MAX,
+};
 
 /**
  * enum cam_isp_hw_err_type - Collection of the ISP error types for
@@ -81,6 +91,7 @@ enum cam_isp_hw_err_type {
 	CAM_ISP_HW_ERROR_CSID_FIFO_OVERFLOW = 0x0040,
 	CAM_ISP_HW_ERROR_RECOVERY_OVERFLOW = 0x0080,
 	CAM_ISP_HW_ERROR_CSID_FRAME_SIZE = 0x0100,
+	CAM_ISP_HW_ERROR_CSID_SENSOR_FRAME_DROP = 0x0200,
 };
 
 /**
@@ -96,12 +107,14 @@ enum cam_isp_hw_stop_cmd {
  * struct cam_isp_stop_args - hardware stop arguments
  *
  * @hw_stop_cmd:               Hardware stop command type information
- * @stop_only                  Send stop only to hw drivers. No Deinit to be
+ * @internal_trigger:          Stop triggered internally for reset & recovery
+ * @stop_only:                 Send stop only to hw drivers. No Deinit to be
  *                             done.
  *
  */
 struct cam_isp_stop_args {
 	enum cam_isp_hw_stop_cmd      hw_stop_cmd;
+	bool                          internal_trigger;
 	bool                          stop_only;
 };
 
@@ -216,13 +229,11 @@ struct cam_isp_prepare_hw_update_data {
 /**
  * struct cam_isp_hw_sof_event_data - Event payload for CAM_HW_EVENT_SOF
  *
- * @is_secondary_event: Event notified as secondary
  * @timestamp         : Time stamp for the sof event
  * @boot_time         : Boot time stamp for the sof event
  *
  */
 struct cam_isp_hw_sof_event_data {
-	bool           is_secondary_evt;
 	uint64_t       timestamp;
 	uint64_t       boot_time;
 };
@@ -295,6 +306,16 @@ struct cam_isp_hw_error_event_data {
 	bool                 enable_req_dump;
 };
 
+/**
+ * struct cam_isp_hw_secondary_event_data - Event payload for secondary events
+ *
+ * @evt_type     : Type of the secondary event being notified [SOF/EPOCH/frame drop]
+ *
+ */
+struct cam_isp_hw_secondary_event_data {
+	enum cam_isp_hw_secondary_event_type  evt_type;
+};
+
 /* enum cam_isp_hw_mgr_command - Hardware manager command type */
 enum cam_isp_hw_mgr_command {
 	CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT,

+ 1 - 1
drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid680.h

@@ -1141,7 +1141,7 @@ static struct cam_ife_csid_ver2_common_reg_info
 	.timestamp_strobe_val                    = 0x2,
 	.rst_location_shift_val                  = 4,
 	.rst_mode_shift_val                      = 0,
-	.epoch_div_factor                        = 4,
+	.epoch_div_factor                        = 2,
 	.global_reset                            = 1,
 	.rup_supported                           = 1,
 	.only_master_rup                         = 1,

+ 75 - 57
drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_hw_ver2.c

@@ -898,6 +898,7 @@ static int cam_ife_csid_ver2_handle_event_err(
 	struct cam_ife_csid_ver2_hw  *csid_hw,
 	uint32_t                      irq_status,
 	uint32_t                      err_type,
+	bool                          is_secondary,
 	struct cam_isp_resource_node *res)
 {
 	struct cam_isp_hw_error_event_info   err_evt_info;
@@ -913,22 +914,25 @@ static int cam_ife_csid_ver2_handle_event_err(
 	evt.hw_idx   = csid_hw->hw_intf->hw_idx;
 	evt.reg_val  = irq_status;
 	evt.hw_type  = CAM_ISP_HW_TYPE_CSID;
+	evt.is_secondary_evt = is_secondary;
 	err_evt_info.err_type = err_type;
 	evt.event_data = (void *)&err_evt_info;
 
-	if (res) {
-		cam_ife_csid_ver2_print_debug_reg_status(csid_hw, res);
-		path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv;
-		evt.res_id   = res->res_id;
-		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"csid[%u] Res:%s Err 0x%x status 0x%x time_stamp: %lld:%lld",
-			csid_hw->hw_intf->hw_idx, res->res_name, err_type,
-			irq_status, path_cfg->error_ts.tv_sec,
-			path_cfg->error_ts.tv_nsec);
-	} else {
-		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"csid[%u] Rx Err: 0x%x status 0x%x",
-			csid_hw->hw_intf->hw_idx, err_type, irq_status);
+	if (!is_secondary) {
+		if (res) {
+			cam_ife_csid_ver2_print_debug_reg_status(csid_hw, res);
+			path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv;
+			evt.res_id   = res->res_id;
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"csid[%u] Res:%s Err 0x%x status 0x%x time_stamp: %lld:%lld",
+				csid_hw->hw_intf->hw_idx, res->res_name, err_type,
+				irq_status, path_cfg->error_ts.tv_sec,
+				path_cfg->error_ts.tv_nsec);
+		} else {
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"csid[%u] Rx Err: 0x%x status 0x%x",
+				csid_hw->hw_intf->hw_idx, err_type, irq_status);
+		}
 	}
 
 	evt.in_core_idx =
@@ -1117,7 +1121,7 @@ static int cam_ife_csid_ver2_rx_err_bottom_half(
 
 	if (event_type)
 		cam_ife_csid_ver2_handle_event_err(csid_hw,
-			rx_irq_status, event_type, NULL);
+			rx_irq_status, event_type, false, NULL);
 unlock:
 	spin_unlock(&csid_hw->lock_state);
 end:
@@ -1354,7 +1358,7 @@ static int cam_ife_csid_ver2_top_err_irq_bottom_half(
 
 	if (event_type)
 		cam_ife_csid_ver2_handle_event_err(csid_hw,
-			irq_status, event_type, NULL);
+			irq_status, event_type, false, NULL);
 
 	cam_ife_csid_ver2_put_evt_payload(csid_hw, &payload,
 		&csid_hw->path_free_payload_list,
@@ -1507,6 +1511,7 @@ static int cam_ife_csid_ver2_ipp_bottom_half(
 		cam_ife_csid_ver2_handle_event_err(csid_hw,
 			irq_status_ipp,
 			err_type,
+			false,
 			res);
 unlock:
 	spin_unlock(&csid_hw->lock_state);
@@ -1604,6 +1609,7 @@ static int cam_ife_csid_ver2_ppp_bottom_half(
 		cam_ife_csid_ver2_handle_event_err(csid_hw,
 			irq_status_ppp,
 			err_type,
+			false,
 			res);
 unlock:
 	spin_unlock(&csid_hw->lock_state);
@@ -1631,7 +1637,7 @@ static int cam_ife_csid_ver2_rdi_bottom_half(
 	uint32_t                                      irq_status_rdi;
 	uint32_t                                      err_mask;
 	uint32_t                                      err_type = 0;
-	bool                                         skip_sof_notify = false;
+	bool                                          skip_evt_notify = false;
 	struct cam_isp_hw_event_info                  evt_info;
 
 	if (!handler_priv || !evt_payload_priv) {
@@ -1712,7 +1718,7 @@ static int cam_ife_csid_ver2_rdi_bottom_half(
 	if (err_type) {
 
 		cam_ife_csid_ver2_handle_event_err(csid_hw,
-			irq_status_rdi, err_type, res);
+			irq_status_rdi, err_type, false, res);
 		goto end;
 	}
 
@@ -1726,19 +1732,34 @@ static int cam_ife_csid_ver2_rdi_bottom_half(
 	evt_info.reg_val = irq_status_rdi;
 	evt_info.hw_type = CAM_ISP_HW_TYPE_CSID;
 
-	/* Check for secondary evt */
-	if ((path_cfg->en_secondary_evt) &&
-		(irq_status_rdi & IFE_CSID_VER2_PATH_CAMIF_SOF)) {
-		evt_info.is_secondary_evt = true;
-		CAM_DBG(CAM_ISP,
-			"CSID[%u] RDI:%u notify CAMIF SOF as secondary evt",
-			csid_hw->hw_intf->hw_idx, res->res_id);
+	/* Check for specific secondary events */
+	if (path_cfg->sec_evt_config.en_secondary_evt) {
+		if ((irq_status_rdi & IFE_CSID_VER2_PATH_CAMIF_SOF) &&
+			(path_cfg->sec_evt_config.evt_type & CAM_IFE_CSID_EVT_SOF)) {
+			evt_info.is_secondary_evt = true;
+			csid_hw->event_cb(csid_hw->token,
+				CAM_ISP_HW_EVENT_SOF, (void *)&evt_info);
+			skip_evt_notify = true;
+		}
 
-		csid_hw->event_cb(csid_hw->token,
-			CAM_ISP_HW_EVENT_SOF, (void *)&evt_info);
-		skip_sof_notify = true;
+		if ((irq_status_rdi & IFE_CSID_VER2_PATH_CAMIF_EPOCH0) &&
+			(path_cfg->sec_evt_config.evt_type & CAM_IFE_CSID_EVT_EPOCH)) {
+			evt_info.is_secondary_evt = true;
+			csid_hw->event_cb(csid_hw->token,
+				CAM_ISP_HW_EVENT_EPOCH, (void *)&evt_info);
+			skip_evt_notify = true;
+		}
+
+		if ((irq_status_rdi &
+			IFE_CSID_VER2_PATH_SENSOR_SWITCH_OUT_OF_SYNC_FRAME_DROP) &&
+			(path_cfg->sec_evt_config.evt_type &
+			CAM_IFE_CSID_EVT_SENSOR_SYNC_FRAME_DROP)) {
+			cam_ife_csid_ver2_handle_event_err(csid_hw,
+				irq_status_rdi, CAM_ISP_HW_ERROR_CSID_SENSOR_FRAME_DROP, true, res);
+		}
 	}
 
+	evt_info.is_secondary_evt = false;
 	if (!path_cfg->handle_camif_irq)
 		goto end;
 
@@ -1747,7 +1768,7 @@ static int cam_ife_csid_ver2_rdi_bottom_half(
 				CAM_ISP_HW_EVENT_EOF,
 				(void *)&evt_info);
 
-	if (!skip_sof_notify && (irq_status_rdi & IFE_CSID_VER2_PATH_CAMIF_SOF))
+	if (!skip_evt_notify && (irq_status_rdi & IFE_CSID_VER2_PATH_CAMIF_SOF))
 		csid_hw->event_cb(csid_hw->token,
 				CAM_ISP_HW_EVENT_SOF,
 				(void *)&evt_info);
@@ -1757,7 +1778,7 @@ static int cam_ife_csid_ver2_rdi_bottom_half(
 				CAM_ISP_HW_EVENT_REG_UPDATE,
 				(void *)&evt_info);
 
-	if (irq_status_rdi & IFE_CSID_VER2_PATH_CAMIF_EPOCH0)
+	if (!skip_evt_notify && (irq_status_rdi & IFE_CSID_VER2_PATH_CAMIF_EPOCH0))
 		csid_hw->event_cb(csid_hw->token,
 				CAM_ISP_HW_EVENT_EPOCH,
 				(void *)&evt_info);
@@ -2115,7 +2136,8 @@ static int cam_ife_csid_hw_ver2_config_path_data(
 	path_cfg->vertical_bin = reserve->in_port->vertical_bin;
 	path_cfg->qcfa_bin = reserve->in_port->qcfa_bin;
 	path_cfg->num_bytes_out = reserve->in_port->num_bytes_out;
-	path_cfg->en_secondary_evt = reserve->en_secondary_evt;
+	path_cfg->sec_evt_config.en_secondary_evt = reserve->sec_evt_config.en_secondary_evt;
+	path_cfg->sec_evt_config.evt_type = reserve->sec_evt_config.evt_type;
 
 	if (reserve->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
 		path_cfg->start_pixel = reserve->in_port->left_start;
@@ -2296,41 +2318,32 @@ static int cam_ife_csid_ver_config_camif(
 	struct cam_csid_hw_reserve_resource_args  *reserve,
 	struct cam_ife_csid_ver2_path_cfg *path_cfg)
 {
-	int rc = 0;
-	uint32_t epoch0 = 0;
 	struct cam_ife_csid_ver2_reg_info *csid_reg;
-	const struct cam_ife_csid_ver2_path_reg_info  *rdi_reg = NULL;
 
 	csid_reg = (struct cam_ife_csid_ver2_reg_info *)
-		    csid_hw->core_info->csid_reg;
+		csid_hw->core_info->csid_reg;
 
 	switch (reserve->res_id) {
-	case  CAM_IFE_PIX_PATH_RES_IPP:
-		epoch0 = (path_cfg->end_line - path_cfg->start_line)/
-			csid_reg->cmn_reg->epoch_div_factor;
-		break;
+	case CAM_IFE_PIX_PATH_RES_IPP:
 	case CAM_IFE_PIX_PATH_RES_RDI_0:
 	case CAM_IFE_PIX_PATH_RES_RDI_1:
 	case CAM_IFE_PIX_PATH_RES_RDI_2:
 	case CAM_IFE_PIX_PATH_RES_RDI_3:
 	case CAM_IFE_PIX_PATH_RES_RDI_4:
-		rdi_reg = csid_reg->path_reg[reserve->res_id];
-		if (!rdi_reg) {
-			rc = -EINVAL;
-			CAM_ERR(CAM_ISP, "CSID[%d] invalid res %d",
-				csid_hw->hw_intf->hw_idx, reserve->res_id);
-			goto end;
-		}
-		epoch0 = rdi_reg->epoch0_cfg_val;
+		path_cfg->camif_data.epoch0 =
+		(path_cfg->end_line - path_cfg->start_line) /
+		csid_reg->cmn_reg->epoch_div_factor;
+
+		CAM_DBG(CAM_ISP, "CSID[%d] res_id: %u epoch0: 0x%x",
+			csid_hw->hw_intf->hw_idx, reserve->res_id,
+			path_cfg->camif_data.epoch0);
+		break;
+	default:
+		CAM_DBG(CAM_ISP, "No CAMIF epoch update for res: %u", reserve->res_id);
 		break;
 	}
 
-	path_cfg->camif_data.epoch0 = epoch0;
-
-end:
-	CAM_DBG(CAM_ISP, "CSID[%d] epoch0: 0x%x",
-			csid_hw->hw_intf->hw_idx, epoch0);
-	return rc;
+	return 0;
 }
 
 int cam_ife_csid_hw_ver2_hw_cfg(
@@ -3081,12 +3094,17 @@ static int cam_ife_csid_ver2_program_rdi_path(
 		path_cfg->handle_camif_irq = true;
 	}
 
-	/* Currently CAMIF SOF is the secondary evt enabled for HW mgr */
-	if (path_cfg->en_secondary_evt) {
-		val |= IFE_CSID_VER2_PATH_CAMIF_SOF;
+	/* Enable secondary events dictated by HW mgr for RDI paths */
+	if (path_cfg->sec_evt_config.en_secondary_evt) {
+		if (path_cfg->sec_evt_config.evt_type & CAM_IFE_CSID_EVT_SOF)
+			val |= IFE_CSID_VER2_PATH_CAMIF_SOF;
+
+		if (path_cfg->sec_evt_config.evt_type & CAM_IFE_CSID_EVT_EPOCH)
+			val |= IFE_CSID_VER2_PATH_CAMIF_EPOCH0;
+
 		CAM_DBG(CAM_ISP,
-			"Enable camif SOF irq for res: %s",
-			res->res_name);
+			"Enable camif: %d evt irq for res: %s",
+			path_cfg->sec_evt_config.evt_type, res->res_name);
 	}
 
 	res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;

+ 36 - 37
drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_hw_ver2.h

@@ -134,6 +134,7 @@ struct cam_ife_csid_ver2_camif_data {
  * @cid:                    cid value for path
  * @path_format:            Array of Path format which contains format
  *                          info i.e Decode format, Packing format etc
+ * @sec_evt_config:         Secondary event config from HW mgr for a given path
  * @in_format:              Array of input format which contains format type
  * @out_format:             output format
  * @start_pixel:            start pixel for horizontal crop
@@ -165,47 +166,45 @@ struct cam_ife_csid_ver2_camif_data {
  *                          If we know the number of paths to avoid configuring discard
  *                          for before processing discard config we can skip it for
  *                          the corresponding paths
- * @en_secondary_evt:       Enable secondary evt for this path, to notify
- *                          hw manager
  * @sfe_inline_shdr:        flag to indicate if sfe is inline shdr
  *
  */
 struct cam_ife_csid_ver2_path_cfg {
-	struct cam_ife_csid_ver2_camif_data camif_data;
-	struct timespec64                   error_ts;
-	struct cam_ife_csid_path_format     path_format[CAM_ISP_VC_DT_CFG];
-	uint32_t                            cid;
-	uint32_t                            in_format[CAM_ISP_VC_DT_CFG];
-	uint32_t                            out_format;
-	uint32_t                            start_pixel;
-	uint32_t                            end_pixel;
-	uint32_t                            width;
-	uint32_t                            start_line;
-	uint32_t                            end_line;
-	uint32_t                            height;
-	uint32_t                            master_idx;
-	uint64_t                            clk_rate;
-	uint32_t                            horizontal_bin;
-	uint32_t                            vertical_bin;
-	uint32_t                            qcfa_bin;
-	uint32_t                            hor_ver_bin;
-	uint32_t                            num_bytes_out;
-	uint32_t                            irq_handle;
-	uint32_t                            err_irq_handle;
-	uint32_t                            discard_irq_handle;
-	uint32_t                            irq_reg_idx;
-	uint32_t                            sof_cnt;
-	uint32_t                            num_frames_discard;
-	enum cam_isp_hw_sync_mode           sync_mode;
-	bool                                vfr_en;
-	bool                                frame_id_dec_en;
-	bool                                crop_enable;
-	bool                                drop_enable;
-	bool                                handle_camif_irq;
-	bool                                discard_init_frames;
-	bool                                skip_discard_frame_cfg;
-	bool                                en_secondary_evt;
-	bool                                sfe_inline_shdr;
+	struct cam_ife_csid_ver2_camif_data  camif_data;
+	struct timespec64                    error_ts;
+	struct cam_ife_csid_path_format      path_format[CAM_ISP_VC_DT_CFG];
+	struct cam_csid_secondary_evt_config sec_evt_config;
+	uint32_t                             cid;
+	uint32_t                             in_format[CAM_ISP_VC_DT_CFG];
+	uint32_t                             out_format;
+	uint32_t                             start_pixel;
+	uint32_t                             end_pixel;
+	uint32_t                             width;
+	uint32_t                             start_line;
+	uint32_t                             end_line;
+	uint32_t                             height;
+	uint32_t                             master_idx;
+	uint64_t                             clk_rate;
+	uint32_t                             horizontal_bin;
+	uint32_t                             vertical_bin;
+	uint32_t                             qcfa_bin;
+	uint32_t                             hor_ver_bin;
+	uint32_t                             num_bytes_out;
+	uint32_t                             irq_handle;
+	uint32_t                             err_irq_handle;
+	uint32_t                             discard_irq_handle;
+	uint32_t                             irq_reg_idx;
+	uint32_t                             sof_cnt;
+	uint32_t                             num_frames_discard;
+	enum cam_isp_hw_sync_mode            sync_mode;
+	bool                                 vfr_en;
+	bool                                 frame_id_dec_en;
+	bool                                 crop_enable;
+	bool                                 drop_enable;
+	bool                                 handle_camif_irq;
+	bool                                 discard_init_frames;
+	bool                                 skip_discard_frame_cfg;
+	bool                                 sfe_inline_shdr;
 };
 
 struct cam_ife_csid_ver2_top_reg_info {

+ 25 - 3
drivers/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h

@@ -53,6 +53,17 @@ enum cam_ife_cid_res_id {
 	CAM_IFE_CSID_CID_MAX,
 };
 
+/**
+ * enum cam_ife_csid_secondary_evt_type - Specify the event type
+ */
+enum cam_ife_csid_secondary_evt_type {
+	CAM_IFE_CSID_EVT_SOF = 1,
+	CAM_IFE_CSID_EVT_EPOCH,
+	CAM_IFE_CSID_EVT_EOF,
+	CAM_IFE_CSID_EVT_SENSOR_SYNC_FRAME_DROP,
+	CAM_IFE_CSID_EVT_MAX,
+};
+
 /**
  * struct cam_ife_csid_hw_caps- get the CSID hw capability
  * @num_rdis:             number of rdis supported by CSID HW device
@@ -142,6 +153,17 @@ struct cam_isp_in_port_generic_info {
 	struct cam_isp_out_port_generic_info    *data;
 };
 
+/**
+ * struct cam_csid_secondary_evt_config - secondary event enablement
+ * @evt_type:           Type of secondary event enabled [SOF/EPOCH/EOF...]
+ * @en_secondary_evt:   Enable secondary event
+ *
+ */
+struct cam_csid_secondary_evt_config {
+	enum cam_ife_csid_secondary_evt_type evt_type;
+	bool                                 en_secondary_evt;
+};
+
 /**
  * struct cam_csid_hw_reserve_resource- hw reserve
  * @res_type :           Reource type CID or PATH
@@ -159,13 +181,13 @@ struct cam_isp_in_port_generic_info {
  * @dual_core_id:        In case of dual csid, core id of another hw
  *                       reserve
  * @node_res :           Reserved resource structure pointer
+ * @sec_evt_config:      Config to enable secondary events for the given resource
+ *                       depending on the use-case
  * @crop_enable :        Flag to indicate CSID crop enable
  * @drop_enable :        Flag to indicate CSID drop enable
  * @sfe_inline_shdr:     Flag to indicate if sfe is inline shdr
  * @is_offline :         Flag to indicate offline
  * @need_top_cfg:        Flag to indicate if top cfg is needed
- * @en_secondary_evt:    Flag to enable secondary event for the given resource
- *                       depending on the use-case
  * @tasklet:             Tasklet to schedule bottom halves
  * @buf_done_controller: IRQ controller for buf done for version 680 hw
  * @cdm_ops:             CDM Ops
@@ -185,12 +207,12 @@ struct cam_csid_hw_reserve_resource_args {
 	uint32_t                                  master_idx;
 	uint32_t                                  dual_core_id;
 	struct cam_isp_resource_node             *node_res;
+	struct cam_csid_secondary_evt_config      sec_evt_config;
 	bool                                      crop_enable;
 	bool                                      drop_enable;
 	bool                                      sfe_inline_shdr;
 	bool                                      is_offline;
 	bool                                      need_top_cfg;
-	bool                                      en_secondary_evt;
 	void                                     *tasklet;
 	void                                     *buf_done_controller;
 	void                                     *cdm_ops;

+ 2 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h

@@ -48,10 +48,12 @@ struct cam_isp_bw_control_args {
  *
  * @hw_intf:             Isp hw intf pointer
  * @request_id:          Request Id
+ * @skip_clk_data_rst:   Skip resetting any clk info
  */
 struct cam_isp_apply_clk_bw_args {
 	struct cam_hw_intf                *hw_intf;
 	uint64_t                           request_id;
+	bool                               skip_clk_data_rst;
 };
 
 /*

+ 14 - 3
drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_top/cam_sfe_top.c

@@ -62,6 +62,7 @@ struct cam_sfe_top_priv {
 	struct cam_sfe_top_hw_info     *hw_info;
 	uint32_t                        num_clc_module;
 	struct cam_sfe_top_debug_info  (*clc_dbg_mod_info)[CAM_SFE_TOP_DBG_REG_MAX][8];
+	bool                            skip_clk_data_rst;
 };
 
 struct cam_sfe_path_data {
@@ -587,7 +588,7 @@ int cam_sfe_top_calc_hw_clk_rate(
 			max_req_clk_rate = top_priv->req_clk_rate[i];
 	}
 
-	if (start_stop) {
+	if (start_stop && !top_priv->skip_clk_data_rst) {
 		/* need to vote current clk immediately */
 		*final_clk_rate = max_req_clk_rate;
 		/* Reset everything, we can start afresh */
@@ -816,6 +817,13 @@ static int cam_sfe_top_apply_clk_bw_update(struct cam_sfe_top_priv *top_priv,
 		goto end;
 	}
 
+	if (clk_bw_args->skip_clk_data_rst) {
+		top_priv->skip_clk_data_rst = true;
+		CAM_DBG(CAM_SFE, "SFE:%u requested to avoid clk data rst",
+			hw_intf->hw_idx);
+		return 0;
+	}
+
 	rc = cam_sfe_top_calc_hw_clk_rate(top_priv, false, &final_clk_rate, request_id);
 	if (rc) {
 		CAM_ERR(CAM_SFE,
@@ -951,6 +959,7 @@ static int cam_sfe_top_apply_clock_start_stop(struct cam_sfe_top_priv *top_priv)
 
 end:
 	top_priv->clk_state = CAM_CLK_BW_STATE_INIT;
+	top_priv->skip_clk_data_rst = false;
 	return rc;
 }
 
@@ -1667,7 +1676,8 @@ int cam_sfe_top_stop(
 	sfe_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
 	for (i = 0; i < CAM_SFE_TOP_IN_PORT_MAX; i++) {
 		if (top_priv->in_rsrc[i].res_id == sfe_res->res_id) {
-			top_priv->req_clk_rate[i] = 0;
+			if (!top_priv->skip_clk_data_rst)
+				top_priv->req_clk_rate[i] = 0;
 			memset(&top_priv->req_axi_vote[i], 0,
 				sizeof(struct cam_axi_vote));
 			top_priv->axi_vote_control[i] =
@@ -1715,7 +1725,8 @@ int cam_sfe_top_stop(
 	 * when all resources are streamed off
 	 */
 	if (!top_priv->start_stop_cnt) {
-		top_priv->applied_clk_rate = 0;
+		if (!top_priv->skip_clk_data_rst)
+			top_priv->applied_clk_rate = 0;
 
 		if (top_priv->error_irq_handle > 0) {
 			cam_irq_controller_unsubscribe_irq(

+ 8 - 1
drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_common.c

@@ -146,7 +146,7 @@ static int cam_vfe_top_calc_hw_clk_rate(
 			max_req_clk_rate = top_common->req_clk_rate[i];
 	}
 
-	if (start_stop) {
+	if (start_stop && !top_common->skip_clk_data_rst) {
 		/* need to vote current clk immediately */
 		*final_clk_rate = max_req_clk_rate;
 		/* Reset everything, we can start afresh */
@@ -597,6 +597,12 @@ int cam_vfe_top_apply_clk_bw_update(struct cam_vfe_top_priv_common *top_common,
 		goto end;
 	}
 
+	if (clk_bw_args->skip_clk_data_rst) {
+		top_common->skip_clk_data_rst = true;
+		CAM_DBG(CAM_ISP, "VFE:%u requested to avoid clk data rst", hw_intf->hw_idx);
+		return 0;
+	}
+
 	rc = cam_vfe_top_calc_hw_clk_rate(top_common, false, &final_clk_rate, request_id);
 	if (rc) {
 		CAM_ERR(CAM_ISP,
@@ -734,6 +740,7 @@ int cam_vfe_top_apply_clock_start_stop(struct cam_vfe_top_priv_common *top_commo
 
 end:
 	top_common->clk_state = CAM_CLK_BW_STATE_INIT;
+	top_common->skip_clk_data_rst = false;
 	return rc;
 }
 

+ 1 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_common.h

@@ -36,6 +36,7 @@ struct cam_vfe_top_priv_common {
 	struct cam_hw_soc_info         *soc_info;
 	unsigned long                   applied_clk_rate;
 	unsigned long                   req_clk_rate[CAM_VFE_TOP_MUX_MAX];
+	bool                            skip_clk_data_rst;
 
 };
 

+ 2 - 1
drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver4.c

@@ -788,7 +788,8 @@ int cam_vfe_top_ver4_stop(void *device_priv,
 		for (i = 0; i < top_priv->top_common.num_mux; i++) {
 			if (top_priv->top_common.mux_rsrc[i].res_id ==
 				mux_res->res_id) {
-				top_priv->top_common.req_clk_rate[i] = 0;
+				if (!top_priv->top_common.skip_clk_data_rst)
+					top_priv->top_common.req_clk_rate[i] = 0;
 				memset(&top_priv->top_common.req_axi_vote[i],
 					0, sizeof(struct cam_axi_vote));
 				top_priv->top_common.axi_vote_control[i] =