Merge 65c163e41083a7cfea63bd3a3c604fb5ddc8c8b0 on remote branch

Change-Id: I5437d3a0aeaf620754c37ed002df40944235a253
Linux Build Service Account committed 7 months ago
parent commit d0aff24e75

+ 6 - 4
drivers/cam_isp/isp_hw_mgr/cam_tfe_hw_mgr.c

@@ -2442,6 +2442,10 @@ static int cam_tfe_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
 	if (is_shdr_en && !is_shdr_master)
 		tfe_ctx->is_shdr_slave = true;
 
+	CAM_INFO(CAM_ISP, "ctx %d TFE index %d is_dual=%d is_shdr=%d shdr_master=%d",
+		tfe_ctx->ctx_index, tfe_ctx->base[0].idx, tfe_ctx->is_dual,
+		is_shdr_en, is_shdr_master);
+
 	for (i = 0; i < acquire_hw_info->num_inputs; i++) {
 		cam_tfe_hw_mgr_preprocess_port(tfe_ctx, &in_port[i], &num_pix_port_per_in,
 			&num_rdi_port_per_in, &num_pd_port_per_in, &pdaf_enable, &lcr_enable);
@@ -3501,8 +3505,6 @@ static int cam_tfe_mgr_stop_hw(void *hw_mgr_priv, void *stop_hw_args)
 		cam_tfe_mgr_csid_change_halt_mode(ctx,
 			CAM_TFE_CSID_HALT_MODE_INTERNAL);
 
-	CAM_DBG(CAM_ISP, "Stopping master CSID idx %d", master_base_idx);
-
 	/* Stop the master CSID path first */
 	cam_tfe_mgr_csid_stop_hw(ctx, &ctx->res_list_tfe_csid,
 		master_base_idx, csid_halt_type);
@@ -3648,7 +3650,7 @@ static int cam_tfe_mgr_restart_hw(void *start_hw_args)
 
 	CAM_DBG(CAM_ISP, "START CSID HW ... in ctx id:%d", ctx->ctx_index);
 	/* Start the TFE CSID HW devices */
-	list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_csid, list) {
+	list_for_each_entry_reverse(hw_mgr_res, &ctx->res_list_tfe_csid, list) {
 		rc = cam_tfe_hw_mgr_start_hw_res(hw_mgr_res, ctx);
 		if (rc) {
 			CAM_ERR(CAM_ISP, "Can not start TFE CSID (%d)",
@@ -3853,7 +3855,7 @@ start_only:
 	CAM_DBG(CAM_ISP, "START CSID HW ... in ctx id:%d",
 		ctx->ctx_index);
 	/* Start the TFE CSID HW devices */
-	list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_csid, list) {
+	list_for_each_entry_reverse(hw_mgr_res, &ctx->res_list_tfe_csid, list) {
 		rc = cam_tfe_hw_mgr_start_hw_res(hw_mgr_res, ctx);
 		if (rc) {
 			CAM_ERR(CAM_ISP, "Can not start TFE CSID (%d)",

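Note on the two hw-mgr hunks that switch list_for_each_entry() to list_for_each_entry_reverse(): the reverse walk flips the order in which the CSID paths are started, so the path added to res_list_tfe_csid first is now started last. Below is a minimal, hedged sketch of that effect only; the toy list type, node names and start_hw() helper are hypothetical stand-ins, not the kernel list_head API or cam_tfe_hw_mgr_start_hw_res().

/*
 * Sketch: effect of iterating a resource list forwards vs. in reverse.
 * For illustration, assume the master CSID was added to the list first.
 */
#include <stdio.h>

struct csid_res {
	const char *name;
	struct csid_res *next;   /* forward link, like list_for_each_entry()   */
	struct csid_res *prev;   /* backward link, used by the _reverse() walk */
};

static void start_hw(const struct csid_res *res)
{
	printf("starting %s\n", res->name);
}

int main(void)
{
	struct csid_res master = { "CSID master", NULL, NULL };
	struct csid_res slave  = { "CSID slave",  NULL, NULL };
	const struct csid_res *r;

	/* head -> master -> slave (master added first in this sketch) */
	master.next = &slave;
	slave.prev  = &master;

	/* forward walk (old code): master starts before the slave */
	for (r = &master; r; r = r->next)
		start_hw(r);

	/* reverse walk (new code): slave starts first, master last */
	for (r = &slave; r; r = r->prev)
		start_hw(r);

	return 0;
}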
+ 7 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid665.h

@@ -51,6 +51,7 @@ static struct cam_tfe_csid_pxl_reg_offset  cam_tfe_csid_665_ipp_reg_offset = {
 	.early_eof_en_shift_val              = 29,
 	.halt_master_sel_shift               = 4,
 	.halt_mode_shift                     = 2,
+	.halt_mode_mask                      = 3,
 	.halt_master_sel_master_val          = 1,
 	.halt_master_sel_slave_val           = 0,
 	.binning_supported                   = 3,
@@ -59,6 +60,7 @@ static struct cam_tfe_csid_pxl_reg_offset  cam_tfe_csid_665_ipp_reg_offset = {
 	.is_multi_vc_dt_supported            = true,
 	.format_measure_en_shift_val         = 0,
 	.measure_en_hbi_vbi_cnt_val          = 0xc,
+	.cgc_mode_en_shift_val               = 9,
 };
 
 static struct cam_tfe_csid_pxl_reg_offset  cam_tfe_csid_665_ppp_reg_offset = {
@@ -103,6 +105,7 @@ static struct cam_tfe_csid_pxl_reg_offset  cam_tfe_csid_665_ppp_reg_offset = {
 	.early_eof_en_shift_val              = 29,
 	.halt_master_sel_shift               = 4,
 	.halt_mode_shift                     = 2,
+	.halt_mode_mask                      = 3,
 	.halt_master_sel_master_val          = 3,
 	.halt_master_sel_slave_val           = 2,
 	.binning_supported                   = 0,
@@ -111,6 +114,7 @@ static struct cam_tfe_csid_pxl_reg_offset  cam_tfe_csid_665_ppp_reg_offset = {
 	.is_multi_vc_dt_supported            = true,
 	.format_measure_en_shift_val         = 0,
 	.measure_en_hbi_vbi_cnt_val          = 0xc,
+	.cgc_mode_en_shift_val               = 9,
 };
 
 static struct cam_tfe_csid_rdi_reg_offset cam_tfe_csid_665_rdi_0_reg_offset = {
@@ -156,6 +160,7 @@ static struct cam_tfe_csid_rdi_reg_offset cam_tfe_csid_665_rdi_0_reg_offset = {
 	.is_multi_vc_dt_supported                 = true,
 	.format_measure_en_shift_val              = 0,
 	.measure_en_hbi_vbi_cnt_val               = 0xc,
+	.cgc_mode_en_shift_val                    = 8,
 };
 
 static struct cam_tfe_csid_rdi_reg_offset cam_tfe_csid_665_rdi_1_reg_offset = {
@@ -201,6 +206,7 @@ static struct cam_tfe_csid_rdi_reg_offset cam_tfe_csid_665_rdi_1_reg_offset = {
 	.is_multi_vc_dt_supported                 = true,
 	.format_measure_en_shift_val              = 0,
 	.measure_en_hbi_vbi_cnt_val               = 0xc,
+	.cgc_mode_en_shift_val                    = 8,
 };
 
 static struct cam_tfe_csid_rdi_reg_offset cam_tfe_csid_665_rdi_2_reg_offset = {
@@ -246,6 +252,7 @@ static struct cam_tfe_csid_rdi_reg_offset cam_tfe_csid_665_rdi_2_reg_offset = {
 	.is_multi_vc_dt_supported                 = true,
 	.format_measure_en_shift_val              = 0,
 	.measure_en_hbi_vbi_cnt_val               = 0xc,
+	.cgc_mode_en_shift_val                    = 8,
 };
 
 static struct cam_tfe_csid_csi2_rx_reg_offset

+ 7 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid770.h

@@ -51,6 +51,7 @@ static struct cam_tfe_csid_pxl_reg_offset  cam_tfe_csid_770_ipp_reg_offset = {
 	.early_eof_en_shift_val              = 29,
 	.halt_master_sel_shift               = 4,
 	.halt_mode_shift                     = 2,
+	.halt_mode_mask                      = 3,
 	.halt_master_sel_master_val          = 1,
 	.halt_master_sel_slave_val           = 0,
 	.binning_supported                   = 3,
@@ -59,6 +60,7 @@ static struct cam_tfe_csid_pxl_reg_offset  cam_tfe_csid_770_ipp_reg_offset = {
 	.is_multi_vc_dt_supported            = true,
 	.format_measure_en_shift_val         = 0,
 	.measure_en_hbi_vbi_cnt_val          = 0xc,
+	.cgc_mode_en_shift_val               = 9,
 };
 
 static struct cam_tfe_csid_pxl_reg_offset  cam_tfe_csid_770_ppp_reg_offset = {
@@ -103,6 +105,7 @@ static struct cam_tfe_csid_pxl_reg_offset  cam_tfe_csid_770_ppp_reg_offset = {
 	.early_eof_en_shift_val              = 29,
 	.halt_master_sel_shift               = 4,
 	.halt_mode_shift                     = 2,
+	.halt_mode_mask                      = 3,
 	.halt_master_sel_master_val          = 3,
 	.halt_master_sel_slave_val           = 2,
 	.binning_supported                   = 0,
@@ -111,6 +114,7 @@ static struct cam_tfe_csid_pxl_reg_offset  cam_tfe_csid_770_ppp_reg_offset = {
 	.is_multi_vc_dt_supported            = true,
 	.format_measure_en_shift_val         = 0,
 	.measure_en_hbi_vbi_cnt_val          = 0xc,
+	.cgc_mode_en_shift_val               = 9,
 };
 
 static struct cam_tfe_csid_rdi_reg_offset cam_tfe_csid_770_rdi_0_reg_offset = {
@@ -156,6 +160,7 @@ static struct cam_tfe_csid_rdi_reg_offset cam_tfe_csid_770_rdi_0_reg_offset = {
 	.is_multi_vc_dt_supported                 = true,
 	.format_measure_en_shift_val              = 0,
 	.measure_en_hbi_vbi_cnt_val               = 0xc,
+	.cgc_mode_en_shift_val                    = 8,
 };
 
 static struct cam_tfe_csid_rdi_reg_offset cam_tfe_csid_770_rdi_1_reg_offset = {
@@ -201,6 +206,7 @@ static struct cam_tfe_csid_rdi_reg_offset cam_tfe_csid_770_rdi_1_reg_offset = {
 	.is_multi_vc_dt_supported                 = true,
 	.format_measure_en_shift_val              = 0,
 	.measure_en_hbi_vbi_cnt_val               = 0xc,
+	.cgc_mode_en_shift_val                    = 8,
 };
 
 static struct cam_tfe_csid_rdi_reg_offset cam_tfe_csid_770_rdi_2_reg_offset = {
@@ -246,6 +252,7 @@ static struct cam_tfe_csid_rdi_reg_offset cam_tfe_csid_770_rdi_2_reg_offset = {
 	.is_multi_vc_dt_supported                 = true,
 	.format_measure_en_shift_val              = 0,
 	.measure_en_hbi_vbi_cnt_val               = 0xc,
+	.cgc_mode_en_shift_val                    = 8,
 };
 
 static struct cam_tfe_csid_csi2_rx_reg_offset

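The csid665/csid770 tables gain a halt_mode_mask alongside halt_mode_shift, plus a per-path cgc_mode_en_shift_val (bit 9 for IPP/PPP, bit 8 for the RDI paths). A hedged sketch of how such {shift, mask} pairs are typically consumed follows; the register contents and the halt-mode value are made up for illustration and this is not driver code.

/*
 * Minimal sketch: read-modify-write of a masked field and a single CGC bit.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define HALT_MODE_SHIFT 2u
#define HALT_MODE_MASK  3u    /* the field width added for csid665/csid770 */
#define CGC_MODE_EN_BIT 9u    /* IPP/PPP cfg0 bit in this sketch */

/* replace only the halt-mode bits, leaving the rest of ctrl untouched */
static uint32_t set_halt_mode(uint32_t ctrl, uint32_t mode)
{
	ctrl &= ~(HALT_MODE_MASK << HALT_MODE_SHIFT);
	ctrl |= (mode & HALT_MODE_MASK) << HALT_MODE_SHIFT;
	return ctrl;
}

int main(void)
{
	uint32_t ctrl = 0x31;              /* pretend readback of csid_pxl_ctrl */
	uint32_t cfg0 = 0;

	ctrl = set_halt_mode(ctrl, 3u);    /* illustrative halt-mode value */
	assert(((ctrl >> HALT_MODE_SHIFT) & HALT_MODE_MASK) == 3u);

	cfg0 |= 1u << CGC_MODE_EN_BIT;     /* force CGC static around a halt */
	cfg0 &= ~(1u << CGC_MODE_EN_BIT);  /* restore dynamic CGC afterwards */

	printf("ctrl=0x%x cfg0=0x%x\n", (unsigned)ctrl, (unsigned)cfg0);
	return 0;
}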
+ 178 - 106
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_core.c

@@ -997,6 +997,8 @@ static int cam_tfe_csid_path_reserve(struct cam_tfe_csid_hw *csid_hw,
 	path_data->bayer_bin = reserve->in_port->bayer_bin;
 	path_data->qcfa_bin = reserve->in_port->qcfa_bin;
 	path_data->crop_enable = reserve->crop_enable;
+	path_data->is_shdr_master = reserve->in_port->is_shdr_master;
+	path_data->is_shdr = reserve->in_port->shdr_en;
 
 	csid_hw->event_cb = reserve->event_cb;
 	csid_hw->event_cb_priv = reserve->event_cb_prv;
@@ -1702,33 +1704,31 @@ static int cam_tfe_csid_enable_pxl_path(
 
 	CAM_DBG(CAM_ISP, "Enable IPP path");
 
-	/* Set master or slave path */
-	if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER)
-		/* Set halt mode as master */
-		val = (TFE_CSID_HALT_MODE_MASTER  <<
-			pxl_reg->halt_mode_shift) |
-			(pxl_reg->halt_master_sel_master_val <<
-			pxl_reg->halt_master_sel_shift);
-	else if (path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
-		/* Set halt mode as slave and set master idx */
-		val = (TFE_CSID_HALT_MODE_SLAVE << pxl_reg->halt_mode_shift);
+	if ((path_data->is_shdr && path_data->is_shdr_master) ||
+		(path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER))
+		/* Set halt mode for master */
+		val = (TFE_CSID_HALT_MODE_MASTER << pxl_reg->halt_mode_shift) |
+			(TFE_CSID_HALT_CMD_SOURCE_NONE << pxl_reg->halt_master_sel_shift) |
+			(CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY << pxl_reg->halt_cmd_shift);
+	else if ((path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE) ||
+			(path_data->is_shdr))
+		/* Set halt mode for slave */
+		val = (TFE_CSID_HALT_MODE_SLAVE << pxl_reg->halt_mode_shift) |
+			(TFE_CSID_HALT_CMD_SOURCE_EXTERNAL << pxl_reg->halt_master_sel_shift) |
+			(CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY << pxl_reg->halt_cmd_shift);
 	else
-		/* Default is internal halt mode */
-		val = 1 << pxl_reg->halt_master_sel_shift;
-
-	/*
-	 * Resume at frame boundary if Master or No Sync.
-	 * Slave will get resume command from Master.
-	 */
-	if ((path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER ||
-		path_data->sync_mode == CAM_ISP_HW_SYNC_NONE) && !path_data->init_frame_drop)
-		val |= CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY;
+		/* Set halt mode for default */
+		val = (TFE_CSID_HALT_MODE_INTERNAL << pxl_reg->halt_mode_shift) |
+			(TFE_CSID_HALT_CMD_SOURCE_NONE << pxl_reg->halt_master_sel_shift) |
+			(CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY << pxl_reg->halt_cmd_shift);
 
 	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
 		pxl_reg->csid_pxl_ctrl_addr);
 
-	CAM_DBG(CAM_ISP, "CSID:%d IPP Ctrl val: 0x%x",
-		csid_hw->hw_intf->hw_idx, val);
+	CAM_DBG(CAM_ISP, "CSID:%d sync_mode=%d IPP_Ctrl:0x%x is_shdr=%d shdr_master=%d",
+		csid_hw->hw_intf->hw_idx, path_data->sync_mode,
+		cam_io_r_mb(soc_info->reg_map[0].mem_base + pxl_reg->csid_pxl_ctrl_addr),
+		path_data->is_shdr, path_data->is_shdr_master);
 
 	/* Enable the required pxl path interrupts */
 	val = TFE_CSID_PATH_INFO_RST_DONE |
@@ -1807,7 +1807,6 @@ static int cam_tfe_csid_disable_pxl_path(
 	enum cam_tfe_csid_halt_cmd       stop_cmd)
 {
 	int rc = 0;
-	uint32_t val = 0;
 	const struct cam_tfe_csid_reg_offset       *csid_reg;
 	struct cam_hw_soc_info                     *soc_info;
 	struct cam_tfe_csid_path_cfg               *path_data;
@@ -1858,32 +1857,13 @@ static int cam_tfe_csid_disable_pxl_path(
 	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
 		pxl_reg->csid_pxl_irq_mask_addr);
 
-	if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER ||
-		path_data->sync_mode == CAM_ISP_HW_SYNC_NONE) {
-		/* configure Halt */
-		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
-		pxl_reg->csid_pxl_ctrl_addr);
-		val &= ~0x3;
-		val |= stop_cmd;
-		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
-			pxl_reg->csid_pxl_ctrl_addr);
-	}
-
-	if (path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE &&
-		stop_cmd == CAM_TFE_CSID_HALT_IMMEDIATELY) {
-		/* configure Halt for slave */
-		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
-			pxl_reg->csid_pxl_ctrl_addr);
-		val &= ~0xF;
-		val |= stop_cmd;
-		val |= (TFE_CSID_HALT_MODE_MASTER << 2);
-		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
-			pxl_reg->csid_pxl_ctrl_addr);
-	}
-
 	path_data->init_frame_drop = 0;
 	path_data->res_sof_cnt     = 0;
 
+	CAM_DBG(CAM_ISP, "halt CSID:%d sync_mode:%d res_id:%d IPP path pxl_ctrl=0x%x",
+		csid_hw->hw_intf->hw_idx, path_data->sync_mode, res->res_id,
+		cam_io_r_mb(soc_info->reg_map[0].mem_base + pxl_reg->csid_pxl_ctrl_addr));
+
 	return rc;
 }
 
@@ -1918,34 +1898,29 @@ static int cam_tfe_csid_enable_ppp_path(
 
 	CAM_DBG(CAM_ISP, "CSID:%d Enable PPP path", csid_hw->hw_intf->hw_idx);
 
-	/* Set master or slave path */
-	if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER)
-		/* Set halt mode as master */
-		val = (TFE_CSID_HALT_MODE_SLAVE  << ppp_reg->halt_mode_shift) |
-			(ppp_reg->halt_master_sel_master_val <<
-			ppp_reg->halt_master_sel_shift);
-	else if (path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
-		/* Set halt mode as slave and set master idx */
-		val = (TFE_CSID_HALT_MODE_SLAVE  << ppp_reg->halt_mode_shift) |
-			(ppp_reg->halt_master_sel_slave_val <<
-			ppp_reg->halt_master_sel_shift);
+	if ((path_data->is_shdr && path_data->is_shdr_master) ||
+		(path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER))
+		/* Set halt mode for master shdr/dual */
+		val = (TFE_CSID_HALT_MODE_SLAVE << ppp_reg->halt_mode_shift) |
+			(TFE_CSID_HALT_CMD_SOURCE_INTERNAL1 << ppp_reg->halt_master_sel_shift) |
+			(CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY << ppp_reg->halt_cmd_shift);
+	else if ((path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE) ||
+			(path_data->is_shdr))
+		/* Set halt mode for slave shdr/dual */
+		val = (TFE_CSID_HALT_MODE_SLAVE << ppp_reg->halt_mode_shift) |
+			(TFE_CSID_HALT_CMD_SOURCE_INTERNAL2 << ppp_reg->halt_master_sel_shift) |
+			(CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY << ppp_reg->halt_cmd_shift);
 	else
-		/* Default is internal halt mode */
-		val = (TFE_CSID_HALT_MODE_SLAVE  << ppp_reg->halt_mode_shift) |
-			(ppp_reg->halt_master_sel_master_val <<
-			ppp_reg->halt_master_sel_shift);
-
-	/*
-	 * Resume at frame boundary if Master or No Sync.
-	 * Slave will get resume command from Master.
-	 */
-	if ((path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER ||
-		path_data->sync_mode == CAM_ISP_HW_SYNC_NONE) && !path_data->init_frame_drop)
-		val |= CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY;
+		/* Set halt mode for default */
+		val = (TFE_CSID_HALT_MODE_SLAVE << ppp_reg->halt_mode_shift) |
+			(TFE_CSID_HALT_CMD_SOURCE_INTERNAL1 << ppp_reg->halt_master_sel_shift) |
+			(CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY << ppp_reg->halt_cmd_shift);
 
 	cam_io_w_mb(val, soc_info->reg_map[0].mem_base + ppp_reg->csid_pxl_ctrl_addr);
 
-	CAM_DBG(CAM_ISP, "CSID:%d PPP Ctrl val: 0x%x", csid_hw->hw_intf->hw_idx, val);
+	CAM_DBG(CAM_ISP, "CSID:%d sync_mode:%d PPP Ctrl val: 0x%x",
+		csid_hw->hw_intf->hw_idx, path_data->sync_mode,
+		cam_io_r_mb(soc_info->reg_map[0].mem_base + ppp_reg->csid_pxl_ctrl_addr));
 
 	/* Enable the required ppp path interrupts */
 	val = TFE_CSID_PATH_INFO_RST_DONE | TFE_CSID_PATH_ERROR_FIFO_OVERFLOW |
@@ -1974,7 +1949,6 @@ static int cam_tfe_csid_disable_ppp_path(
 	enum cam_tfe_csid_halt_cmd       stop_cmd)
 {
 	int rc = 0;
-	uint32_t val = 0;
 	const struct cam_tfe_csid_reg_offset       *csid_reg;
 	struct cam_hw_soc_info                     *soc_info;
 	struct cam_tfe_csid_path_cfg               *path_data;
@@ -2024,33 +1998,12 @@ static int cam_tfe_csid_disable_ppp_path(
 	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
 		ppp_reg->csid_pxl_irq_mask_addr);
 
-	if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER ||
-		path_data->sync_mode == CAM_ISP_HW_SYNC_NONE) {
-		/* configure Halt */
-		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
-		ppp_reg->csid_pxl_ctrl_addr);
-		val &= ~0x3F;
-		val |= (TFE_CSID_HALT_MODE_SLAVE << ppp_reg->halt_mode_shift);
-		val |= (ppp_reg->halt_master_sel_master_val <<
-			ppp_reg->halt_master_sel_shift);
-		val |= stop_cmd;
-		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
-			ppp_reg->csid_pxl_ctrl_addr);
-	}
+	CAM_DBG(CAM_ISP, "CSID:%d res_id:%d Skip programming halt mode for PPP path",
+		csid_hw->hw_intf->hw_idx, res->res_id);
 
-	if (path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE &&
-		stop_cmd == CAM_TFE_CSID_HALT_IMMEDIATELY) {
-		/* configure Halt for slave */
-		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
-			ppp_reg->csid_pxl_ctrl_addr);
-		val &= ~0x3F;
-		val |= (TFE_CSID_HALT_MODE_SLAVE << ppp_reg->halt_mode_shift);
-		val |= (ppp_reg->halt_master_sel_slave_val <<
-			ppp_reg->halt_master_sel_shift);
-		val |= stop_cmd;
-		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
-			ppp_reg->csid_pxl_ctrl_addr);
-	}
+	CAM_DBG(CAM_ISP, "CSID:%d sync_mode:%d res_id:%d PPP path halt_ctrl_reg=0x%x",
+		csid_hw->hw_intf->hw_idx, path_data->sync_mode, res->res_id,
+		cam_io_r_mb(soc_info->reg_map[0].mem_base + ppp_reg->csid_pxl_ctrl_addr));
 
 	path_data->init_frame_drop = 0;
 	path_data->res_sof_cnt     = 0;
@@ -2354,6 +2307,9 @@ static int cam_tfe_csid_poll_stop_status(
 	uint32_t csid_status_addr = 0, val = 0, res_id = 0;
 	const struct cam_tfe_csid_reg_offset       *csid_reg;
 	struct cam_hw_soc_info                     *soc_info;
+	uint32_t csid_ctrl_reg = 0;
+	uint32_t csid_cfg0_reg = 0;
+	uint32_t csid_cfg1_reg = 0;
 
 	csid_reg = csid_hw->csid_info->csid_reg;
 	soc_info = &csid_hw->hw_info->soc_info;
@@ -2367,6 +2323,10 @@ static int cam_tfe_csid_poll_stop_status(
 			csid_status_addr =
 			csid_reg->ipp_reg->csid_pxl_status_addr;
 
+			csid_ctrl_reg = csid_reg->ipp_reg->csid_pxl_ctrl_addr;
+			csid_cfg0_reg = csid_reg->ipp_reg->csid_pxl_cfg0_addr;
+			csid_cfg1_reg = csid_reg->ipp_reg->csid_pxl_cfg1_addr;
+
 			if (csid_hw->ipp_res.res_state !=
 				CAM_ISP_RESOURCE_STATE_STREAMING)
 				continue;
@@ -2374,6 +2334,9 @@ static int cam_tfe_csid_poll_stop_status(
 		} else if (res_id == CAM_TFE_CSID_PATH_RES_PPP) {
 			csid_status_addr =
 			csid_reg->ppp_reg->csid_pxl_status_addr;
+			csid_ctrl_reg = csid_reg->ppp_reg->csid_pxl_ctrl_addr;
+			csid_cfg0_reg = csid_reg->ppp_reg->csid_pxl_cfg0_addr;
+			csid_cfg1_reg = csid_reg->ppp_reg->csid_pxl_cfg1_addr;
 
 			if (csid_hw->ppp_res.res_state !=
 				CAM_ISP_RESOURCE_STATE_STREAMING)
@@ -2382,6 +2345,9 @@ static int cam_tfe_csid_poll_stop_status(
 		} else {
 			csid_status_addr =
 				csid_reg->rdi_reg[res_id]->csid_rdi_status_addr;
+			csid_ctrl_reg = csid_reg->rdi_reg[res_id]->csid_rdi_ctrl_addr;
+			csid_cfg0_reg = csid_reg->rdi_reg[res_id]->csid_rdi_cfg0_addr;
+			csid_cfg1_reg = csid_reg->rdi_reg[res_id]->csid_rdi_cfg1_addr;
 
 			if (csid_hw->rdi_res[res_id].res_state !=
 				CAM_ISP_RESOURCE_STATE_STREAMING)
@@ -2402,6 +2368,13 @@ static int cam_tfe_csid_poll_stop_status(
 		if (rc < 0) {
 			CAM_ERR(CAM_ISP, "CSID:%d res:%d halt failed rc %d",
 				csid_hw->hw_intf->hw_idx, res_id, rc);
+
+			CAM_ERR(CAM_ISP, "CSID:%d status:0x%x ctrl_reg:0x%x cfg0:0x%x cfg1:0x%x",
+				csid_hw->hw_intf->hw_idx,
+				cam_io_r_mb(soc_info->reg_map[0].mem_base + csid_status_addr),
+				cam_io_r_mb(soc_info->reg_map[0].mem_base + csid_ctrl_reg),
+				cam_io_r_mb(soc_info->reg_map[0].mem_base + csid_cfg0_reg),
+				cam_io_r_mb(soc_info->reg_map[0].mem_base + csid_cfg1_reg));
 			rc = -ETIMEDOUT;
 			break;
 		}
@@ -3089,12 +3062,16 @@ static int cam_tfe_csid_stop(void *hw_priv,
 	void *stop_args, uint32_t arg_size)
 {
 	int rc = 0;
-	struct cam_tfe_csid_hw               *csid_hw;
-	struct cam_hw_info                   *csid_hw_info;
-	struct cam_isp_resource_node         *res;
-	struct cam_tfe_csid_hw_stop_args     *csid_stop;
-	uint32_t  i;
+	struct cam_tfe_csid_hw                    *csid_hw;
+	struct cam_hw_info                        *csid_hw_info;
+	struct cam_isp_resource_node              *res;
+	struct cam_tfe_csid_hw_stop_args          *csid_stop;
+	struct cam_hw_soc_info                    *soc_info;
+	const struct cam_tfe_csid_reg_offset      *csid_reg;
+	const struct cam_tfe_csid_pxl_reg_offset  *pxl_reg;
+	uint32_t  i, val = 0;
 	uint32_t res_mask = 0;
+	void __iomem *mem_base;
 
 	if (!hw_priv || !stop_args ||
 		(arg_size != sizeof(struct cam_tfe_csid_hw_stop_args))) {
@@ -3110,9 +3087,74 @@ static int cam_tfe_csid_stop(void *hw_priv,
 
 	csid_hw_info = (struct cam_hw_info  *)hw_priv;
 	csid_hw = (struct cam_tfe_csid_hw   *)csid_hw_info->core_info;
-	CAM_DBG(CAM_ISP, "CSID:%d num_res %d",
-		csid_hw->hw_intf->hw_idx,
-		csid_stop->num_res);
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	mem_base = soc_info->reg_map[0].mem_base;
+
+	/* Disable cgc for all the paths */
+	for (i = 0; i < csid_stop->num_res; i++) {
+		res = csid_stop->node_res[i];
+		switch (res->res_type) {
+		case CAM_ISP_RESOURCE_PIX_PATH:
+			if (res->res_id == CAM_TFE_CSID_PATH_RES_IPP) {
+				pxl_reg = csid_reg->ipp_reg;
+				val = cam_io_r_mb(mem_base + pxl_reg->csid_pxl_cfg0_addr);
+				val = val | (1 << pxl_reg->cgc_mode_en_shift_val);
+				cam_io_w_mb(val, mem_base + pxl_reg->csid_pxl_cfg0_addr);
+			} else if (res->res_id == CAM_TFE_CSID_PATH_RES_PPP) {
+				pxl_reg = csid_reg->ppp_reg;
+				val = cam_io_r_mb(mem_base + pxl_reg->csid_pxl_cfg0_addr);
+				val = val | (1 << pxl_reg->cgc_mode_en_shift_val);
+				cam_io_w_mb(val, mem_base + pxl_reg->csid_pxl_cfg0_addr);
+			}
+			CAM_DBG(CAM_ISP, "CSID:%d cgc change res_type %d res_id %d val:0x%x",
+				csid_hw->hw_intf->hw_idx,
+				res->res_type, res->res_id, val);
+			break;
+		default:
+			CAM_DBG(CAM_ISP, "CSID:%d Invalid res type%d",
+				csid_hw->hw_intf->hw_idx, res->res_type);
+			break;
+		}
+	}
+
+	/* csid ctrl to resume at frame boundary */
+	cam_io_w_mb(CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY,
+		mem_base + csid_reg->cmn_reg->csid_ctrl_addr);
+
+	/* halt to global */
+	for (i = 0; i < csid_stop->num_res; i++) {
+		res = csid_stop->node_res[i];
+		switch (res->res_type) {
+		case CAM_ISP_RESOURCE_PIX_PATH:
+			if (res->res_id == CAM_TFE_CSID_PATH_RES_IPP) {
+				pxl_reg = csid_reg->ipp_reg;
+				val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+					pxl_reg->csid_pxl_ctrl_addr);
+				val &= ~(pxl_reg->halt_mode_mask << pxl_reg->halt_mode_shift);
+				val |= (TFE_CSID_HALT_MODE_GLOBAL << pxl_reg->halt_mode_shift);
+				cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+					pxl_reg->csid_pxl_ctrl_addr);
+			} else if (res->res_id == CAM_TFE_CSID_PATH_RES_PPP) {
+				pxl_reg = csid_reg->ppp_reg;
+				val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+					pxl_reg->csid_pxl_ctrl_addr);
+				val &= ~(pxl_reg->halt_mode_mask << pxl_reg->halt_mode_shift);
+				val |= (TFE_CSID_HALT_MODE_GLOBAL << pxl_reg->halt_mode_shift);
+				cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+					pxl_reg->csid_pxl_ctrl_addr);
+			}
+			CAM_DBG(CAM_ISP, "CSID:%d global change res_type %d res_id %d val:0x%x",
+				csid_hw->hw_intf->hw_idx,
+				res->res_type, res->res_id, val);
+			break;
+		default:
+			CAM_ERR(CAM_ISP, "CSID:%d Invalid res type%d",
+				csid_hw->hw_intf->hw_idx,
+				res->res_type);
+			break;
+		}
+	}
 
 	/* Stop the resource first */
 	for (i = 0; i < csid_stop->num_res; i++) {
@@ -3132,7 +3174,6 @@ static int cam_tfe_csid_stop(void *hw_priv,
 			else
 				rc = cam_tfe_csid_disable_rdi_path(csid_hw,
 					res, csid_stop->stop_cmd);
-
 			break;
 		default:
 			CAM_ERR(CAM_ISP, "CSID:%d Invalid res type%d",
@@ -3142,9 +3183,40 @@ static int cam_tfe_csid_stop(void *hw_priv,
 		}
 	}
 
+	/* issue global cmd */
+	cam_io_w_mb(CAM_TFE_CSID_HALT_IMMEDIATELY,
+		mem_base + csid_reg->cmn_reg->csid_ctrl_addr);
+
 	if (res_mask)
 		rc = cam_tfe_csid_poll_stop_status(csid_hw, res_mask);
 
+	for (i = 0; i < csid_stop->num_res; i++) {
+		res = csid_stop->node_res[i];
+		CAM_DBG(CAM_ISP, "CSID:%d cgc change to dynamic res_type %d res_id %d",
+			csid_hw->hw_intf->hw_idx,
+			res->res_type, res->res_id);
+		switch (res->res_type) {
+		case CAM_ISP_RESOURCE_PIX_PATH:
+			if (res->res_id == CAM_TFE_CSID_PATH_RES_IPP) {
+				pxl_reg = csid_reg->ipp_reg;
+				val = cam_io_r_mb(mem_base + pxl_reg->csid_pxl_cfg0_addr);
+				val &= ~(1 << pxl_reg->cgc_mode_en_shift_val);
+				cam_io_w_mb(val, mem_base + pxl_reg->csid_pxl_cfg0_addr);
+			} else if (res->res_id == CAM_TFE_CSID_PATH_RES_PPP) {
+				pxl_reg = csid_reg->ppp_reg;
+				val = cam_io_r_mb(mem_base + pxl_reg->csid_pxl_cfg0_addr);
+				val &= ~(1 << pxl_reg->cgc_mode_en_shift_val);
+				cam_io_w_mb(val, mem_base + pxl_reg->csid_pxl_cfg0_addr);
+			} else
+				continue;
+			break;
+		default:
+			CAM_DBG(CAM_ISP, "CSID:%d Invalid res type%d",
+				csid_hw->hw_intf->hw_idx,
+				res->res_type);
+			break;
+		}
+	}
+
 	for (i = 0; i < csid_stop->num_res; i++) {
 		res = csid_stop->node_res[i];
 		res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;

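For reference, the reworked cam_tfe_csid_stop() now brackets the per-path disable with CGC and halt-mode programming. The sketch below mirrors only the ordering visible in the patch; the offsets, bit positions and command values are placeholders, and rd()/wr() stand in for cam_io_r_mb()/cam_io_w_mb().

/*
 * Ordering sketch: CGC static -> resume-at-frame-boundary -> pixel paths to
 * global halt mode -> per-path disable -> global halt-immediately -> poll
 * for the halt ack -> restore dynamic CGC.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t regs[16];                       /* fake register file */

static uint32_t rd(unsigned int off)
{
	return regs[off];
}

static void wr(unsigned int off, uint32_t val)
{
	regs[off] = val;
	printf("reg[%u] <= 0x%x\n", off, (unsigned)val);
}

enum { PXL_CFG0, PXL_CTRL, CMN_CTRL };          /* placeholder offsets */

int main(void)
{
	const uint32_t cgc_bit = 1u << 9, halt_mask = 3u, halt_shift = 2u;
	const uint32_t halt_global = 3u;                  /* illustrative mode */
	const uint32_t resume_at_fb = 1u, halt_now = 0u;  /* illustrative cmds */
	uint32_t val;

	wr(PXL_CFG0, rd(PXL_CFG0) | cgc_bit);       /* 1. CGC forced static */
	wr(CMN_CTRL, resume_at_fb);                 /* 2. resume at frame boundary */
	val = rd(PXL_CTRL);
	val &= ~(halt_mask << halt_shift);
	val |= halt_global << halt_shift;
	wr(PXL_CTRL, val);                          /* 3. halt mode -> global */
	/* 4. per-path disable runs here (IRQ masks, frame-drop counters) */
	wr(CMN_CTRL, halt_now);                     /* 5. global halt immediately */
	/* 6. poll the path status register for the halt ack, with a timeout */
	wr(PXL_CFG0, rd(PXL_CFG0) & ~cgc_bit);      /* 7. CGC back to dynamic */
	return 0;
}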
+ 8 - 1
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_core.h

@@ -160,6 +160,8 @@ struct cam_tfe_csid_pxl_reg_offset {
 	uint32_t early_eof_en_shift_val;
 	uint32_t halt_master_sel_shift;
 	uint32_t halt_mode_shift;
+	uint32_t halt_mode_mask;
+	uint32_t halt_cmd_shift;
 	uint32_t halt_master_sel_master_val;
 	uint32_t halt_master_sel_slave_val;
 	uint32_t binning_supported;
@@ -168,6 +170,7 @@ struct cam_tfe_csid_pxl_reg_offset {
 	uint32_t format_measure_en_shift_val;
 	uint32_t measure_en_hbi_vbi_cnt_val;
 	bool     is_multi_vc_dt_supported;
+	uint32_t cgc_mode_en_shift_val;
 };
 
 struct cam_tfe_csid_rdi_reg_offset {
@@ -215,6 +218,7 @@ struct cam_tfe_csid_rdi_reg_offset {
 	uint32_t format_measure_en_shift_val;
 	uint32_t measure_en_hbi_vbi_cnt_val;
 	bool     is_multi_vc_dt_supported;
+	uint32_t cgc_mode_en_shift_val;
 };
 
 struct cam_tfe_csid_csi2_rx_reg_offset {
@@ -451,7 +455,8 @@ struct cam_tfe_csid_cid_data {
  *                      one more frame than pix.
  * @res_sof_cnt         path resource sof count value. it used for initial
  *                      frame drop
- *
+ * @is_shdr_master      flag to indicate path to be shdr master
+ * @is_shdr             flag to indicate if shdr mode is enabled
  */
 struct cam_tfe_csid_path_cfg {
 	struct vc_dt_data               vc_dt[CAM_ISP_TFE_VC_DT_CFG];
@@ -479,6 +484,8 @@ struct cam_tfe_csid_path_cfg {
 	uint32_t                        usage_type;
 	uint32_t                        init_frame_drop;
 	uint32_t                        res_sof_cnt;
+	bool                            is_shdr_master;
+	bool                            is_shdr;
 };
 
 /**

+ 4 - 3
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_core.c

@@ -2494,8 +2494,8 @@ static int cam_tfe_camif_resource_start(
 	cam_io_w_mb(val, rsrc_data->mem_base +
 		rsrc_data->common_reg->core_cfg_0);
 
-	CAM_DBG(CAM_ISP, "TFE:%d core_cfg 0 val:0x%x", core_info->core_index,
-		val);
+	CAM_DBG(CAM_ISP, "TFE:%d core_cfg_0 val:0x%x", core_info->core_index,
+		cam_io_r_mb(rsrc_data->mem_base + rsrc_data->common_reg->core_cfg_0));
 
 	if (cam_cpas_get_cpas_hw_version(&camera_hw_version))
 		CAM_ERR(CAM_ISP, "Failed to get HW version");
@@ -2620,7 +2620,8 @@ static int cam_tfe_ppp_resource_start(
 	ppp_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
 
 	CAM_DBG(CAM_ISP, "TFE: %d Start PPP Done, core_cfg 0 val:0x%x",
-		core_info->core_index, val);
+		core_info->core_index,
+		cam_io_r_mb(rsrc_data->mem_base + rsrc_data->common_reg->core_cfg_0));
 	return 0;
 }
 

+ 1 - 1
drivers/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c

@@ -202,7 +202,7 @@ static int cam_jpeg_add_command_buffers(struct cam_packet *packet,
 
 			cmd_buf_kaddr = (uint32_t *)kaddr;
 
-			if ((cmd_desc[i].offset / sizeof(uint32_t)) >= len) {
+			if (cmd_desc[i].offset >= len) {
 				CAM_ERR(CAM_JPEG, "Invalid offset: %u cmd buf len: %zu",
 					cmd_desc[i].offset, len);
 				cam_mem_put_cpu_buf(cmd_desc[i].mem_handle);

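The JPEG hunk tightens the command-descriptor bounds check. Assuming len is the mapped buffer size in bytes (as the error log suggests) and the descriptor offset is also in bytes, dividing the offset by sizeof(uint32_t) before the comparison let out-of-range offsets pass. A small stand-alone illustration with made-up values, not the driver structures:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static int offset_ok_old(uint32_t offset, size_t len)
{
	return (offset / sizeof(uint32_t)) < len;   /* too permissive */
}

static int offset_ok_new(uint32_t offset, size_t len)
{
	return offset < len;                        /* bytes compared to bytes */
}

int main(void)
{
	size_t len = 1024;        /* kernel-mapped command buffer, in bytes */
	uint32_t offset = 2048;   /* out-of-range offset from the packet */

	printf("old check accepts: %d\n", offset_ok_old(offset, len));  /* 1 */
	printf("new check accepts: %d\n", offset_ok_new(offset, len));  /* 0 */
	return 0;
}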
+ 1 - 1
drivers/cam_req_mgr/cam_req_mgr_core.c

@@ -5228,7 +5228,7 @@ int cam_req_mgr_schedule_request_v2(
 	}
 
 	if (sched_req->sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC) {
-		if ((sched_req->num_links <= 0) &&
+		if ((sched_req->num_links <= 0) ||
 			(sched_req->num_links > MAXIMUM_LINKS_PER_SESSION)) {
 			CAM_ERR(CAM_CRM, "link:0x%x req:%lld invalid num_links:%d",
 				link->link_hdl, sched_req->req_id, sched_req->num_links);

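The cam_req_mgr_core.c hunk fixes a validation that could never trigger: with &&, num_links had to be both non-positive and above the per-session limit at once, which is impossible. A sketch of the corrected range check; the limit value is illustrative, not the real MAXIMUM_LINKS_PER_SESSION.

#include <stdio.h>

#define MAXIMUM_LINKS_PER_SESSION 4   /* placeholder value */

static int num_links_valid(int num_links)
{
	if ((num_links <= 0) || (num_links > MAXIMUM_LINKS_PER_SESSION))
		return 0;   /* reject anything outside [1, limit] */
	return 1;
}

int main(void)
{
	int n;

	for (n = -1; n <= MAXIMUM_LINKS_PER_SESSION + 2; n++)
		printf("num_links=%d valid=%d\n", n, num_links_valid(n));
	return 0;
}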
+ 23 - 16
drivers/cam_sensor_module/cam_eeprom/cam_eeprom_core.c

@@ -425,6 +425,7 @@ static int32_t cam_eeprom_parse_memory_map(
 	int32_t                            rc = 0;
 	int32_t                            cnt = 0;
 	int32_t                            processed_size = 0;
+	int32_t                            payload_count;
 	uint8_t                            generic_op_code;
 	struct cam_eeprom_memory_map_t    *map = data->map;
 	struct common_header              *cmm_hdr =
@@ -454,24 +455,25 @@ static int32_t cam_eeprom_parse_memory_map(
 	switch (cmm_hdr->cmd_type) {
 	case CAMERA_SENSOR_CMD_TYPE_I2C_RNDM_WR:
 		i2c_random_wr = (struct cam_cmd_i2c_random_wr *)cmd_buf;
+		payload_count = i2c_random_wr->header.count;
 
-		if (i2c_random_wr->header.count == 0 ||
-		    i2c_random_wr->header.count >= MSM_EEPROM_MAX_MEM_MAP_CNT ||
+		if (payload_count == 0 ||
+		    payload_count >= MSM_EEPROM_MAX_MEM_MAP_CNT ||
 		    (size_t)*num_map >= ((MSM_EEPROM_MAX_MEM_MAP_CNT *
 				MSM_EEPROM_MEMORY_MAP_MAX_SIZE) -
-				i2c_random_wr->header.count)) {
+				payload_count)) {
 			CAM_ERR(CAM_EEPROM, "OOB Error");
 			return -EINVAL;
 		}
 		cmd_length_in_bytes   = sizeof(struct cam_cmd_i2c_random_wr) +
-			((i2c_random_wr->header.count - 1) *
+			((payload_count - 1) *
 			sizeof(struct i2c_random_wr_payload));
 
 		if (cmd_length_in_bytes > remain_buf_len) {
 			CAM_ERR(CAM_EEPROM, "Not enough buffer remaining");
 			return -EINVAL;
 		}
-		for (cnt = 0; cnt < (i2c_random_wr->header.count);
+		for (cnt = 0; cnt < (payload_count);
 			cnt++) {
 			map[*num_map + cnt].page.addr =
 				i2c_random_wr->random_wr_payload[cnt].reg_addr;
@@ -484,15 +486,16 @@ static int32_t cam_eeprom_parse_memory_map(
 			map[*num_map + cnt].page.valid_size = 1;
 		}
 
-		*num_map += (i2c_random_wr->header.count - 1);
+		*num_map += (payload_count - 1);
 		processed_size +=
 			cmd_length_in_bytes;
 		break;
 	case CAMERA_SENSOR_CMD_TYPE_I2C_CONT_RD:
 		i2c_cont_rd = (struct cam_cmd_i2c_continuous_rd *)cmd_buf;
 		cmd_length_in_bytes = sizeof(struct cam_cmd_i2c_continuous_rd);
+		payload_count = i2c_cont_rd->header.count;
 
-		if (i2c_cont_rd->header.count >= U32_MAX - data->num_data) {
+		if (payload_count >= U32_MAX - data->num_data) {
 			CAM_ERR(CAM_EEPROM,
 				"int overflow on eeprom memory block");
 			return -EINVAL;
@@ -501,7 +504,7 @@ static int32_t cam_eeprom_parse_memory_map(
 		map[*num_map].mem.addr_type = i2c_cont_rd->header.addr_type;
 		map[*num_map].mem.data_type = i2c_cont_rd->header.data_type;
 		map[*num_map].mem.valid_size =
-			i2c_cont_rd->header.count;
+			payload_count;
 		processed_size +=
 			cmd_length_in_bytes;
 		data->num_data += map[*num_map].mem.valid_size;
@@ -1087,6 +1090,8 @@ static int32_t cam_eeprom_get_cal_data(struct cam_eeprom_ctrl_t *e_ctrl,
 {
 	struct cam_buf_io_cfg *io_cfg;
 	uint32_t              i = 0;
+	size_t                plane_offset;
+	int32_t               mem_handle;
 	int                   rc = 0;
 	uintptr_t              buf_addr;
 	size_t                buf_size;
@@ -1096,6 +1101,8 @@ static int32_t cam_eeprom_get_cal_data(struct cam_eeprom_ctrl_t *e_ctrl,
 	io_cfg = (struct cam_buf_io_cfg *) ((uint8_t *)
 		&csl_packet->payload +
 		csl_packet->io_configs_offset);
+	plane_offset = io_cfg->offsets[0];
+	mem_handle   = io_cfg->mem_handle[0];
 
 	CAM_DBG(CAM_EEPROM, "number of IO configs: %d:",
 		csl_packet->num_io_configs);
@@ -1103,21 +1110,21 @@ static int32_t cam_eeprom_get_cal_data(struct cam_eeprom_ctrl_t *e_ctrl,
 	for (i = 0; i < csl_packet->num_io_configs; i++) {
 		CAM_DBG(CAM_EEPROM, "Direction: %d:", io_cfg->direction);
 		if (io_cfg->direction == CAM_BUF_OUTPUT) {
-			rc = cam_mem_get_cpu_buf(io_cfg->mem_handle[0],
+			rc = cam_mem_get_cpu_buf(mem_handle,
 				&buf_addr, &buf_size);
 			if (rc) {
 				CAM_ERR(CAM_EEPROM, "Fail in get buffer: %d",
 					rc);
 				return rc;
 			}
-			if (buf_size <= io_cfg->offsets[0]) {
+			if (buf_size <= plane_offset) {
 				CAM_ERR(CAM_EEPROM, "Not enough buffer");
-				cam_mem_put_cpu_buf(io_cfg->mem_handle[0]);
+				cam_mem_put_cpu_buf(mem_handle);
 				rc = -EINVAL;
 				return rc;
 			}
 
-			remain_len = buf_size - io_cfg->offsets[0];
+			remain_len = buf_size - plane_offset;
 			CAM_DBG(CAM_EEPROM, "buf_addr : %pK, buf_size : %zu\n",
 				(void *)buf_addr, buf_size);
 
@@ -1125,16 +1132,16 @@ static int32_t cam_eeprom_get_cal_data(struct cam_eeprom_ctrl_t *e_ctrl,
 			if (!read_buffer) {
 				CAM_ERR(CAM_EEPROM,
 					"invalid buffer to copy data");
-				cam_mem_put_cpu_buf(io_cfg->mem_handle[0]);
+				cam_mem_put_cpu_buf(mem_handle);
 				rc = -EINVAL;
 				return rc;
 			}
-			read_buffer += io_cfg->offsets[0];
+			read_buffer += plane_offset;
 
 			if (remain_len < e_ctrl->cal_data.num_data) {
 				CAM_ERR(CAM_EEPROM,
 					"failed to copy, Invalid size");
-				cam_mem_put_cpu_buf(io_cfg->mem_handle[0]);
+				cam_mem_put_cpu_buf(mem_handle);
 				rc = -EINVAL;
 				return rc;
 			}
@@ -1143,7 +1150,7 @@ static int32_t cam_eeprom_get_cal_data(struct cam_eeprom_ctrl_t *e_ctrl,
 				e_ctrl->cal_data.num_data);
 			memcpy(read_buffer, e_ctrl->cal_data.mapdata,
 					e_ctrl->cal_data.num_data);
-			cam_mem_put_cpu_buf(io_cfg->mem_handle[0]);
+			cam_mem_put_cpu_buf(mem_handle);
 		} else {
 			CAM_ERR(CAM_EEPROM, "Invalid direction");
 			rc = -EINVAL;
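The EEPROM hunks all follow one pattern: fields that live in buffers shared with userspace (the i2c header count, the io_cfg mem handle and plane offset) are snapshotted into locals, the bounds checks run on those locals, and all later uses read the same locals, so a concurrent rewrite of the shared buffer cannot change a value between the check and the use. A simplified stand-alone sketch of that pattern; the struct, field and limit names are placeholders.

#include <stdint.h>
#include <stdio.h>

#define MAX_MAP_CNT 64   /* stand-in for MSM_EEPROM_MAX_MEM_MAP_CNT */

struct shared_hdr {
	volatile uint32_t count;   /* may be rewritten by userspace at any time */
};

static int parse(struct shared_hdr *hdr)
{
	uint32_t payload_count = hdr->count;   /* single snapshot */
	uint32_t processed = 0;
	uint32_t i;

	if (payload_count == 0 || payload_count >= MAX_MAP_CNT)
		return -1;                     /* bounds check the local copy ... */

	for (i = 0; i < payload_count; i++)
		processed++;                   /* ... and iterate with the same copy */

	return (int)processed;
}

int main(void)
{
	struct shared_hdr hdr = { .count = 8 };

	printf("parsed %d entries\n", parse(&hdr));
	return 0;
}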