msm: camera: tfe: Drop first rdi frame in dual tfe scenario

In the dual TFE scenario, the TFE CSID drops the first frame in the
IPP pix path to sync master and slave, but the RDI frame still goes
to the TFE. This causes issues in the CAMIF and RDI subsampling
scenarios: when subsampling, both CAMIF and RDI must be subsampled
on the same frames. To achieve this, also drop the first RDI frame
from the CSID in dual TFE scenarios.

Change-Id: I73e98a6d3d973af93bbe6afef7585a95578f2a2b
CRs-Fixed: 2923045
Signed-off-by: Ravikishore Pampana <[email protected]>
Ravikishore Pampana, 4 years ago
parent commit 3dcb2cb0bd
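
For readers skimming the diff below: the change holds a CSID path back at
stream-on, counts SOF interrupts, and only resumes the path at a frame
boundary once the configured number of initial frames has elapsed. What
follows is a minimal, runnable sketch of that gating idea only, using
hypothetical simplified types (struct path_cfg, on_sof) in place of the
driver's cam_tfe_csid_path_cfg and its IRQ handler:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for cam_tfe_csid_path_cfg (hypothetical). */
struct path_cfg {
	uint32_t init_frame_drop; /* frames to drop before starting   */
	uint32_t res_sof_cnt;     /* SOFs seen while the path is held */
	bool     started;         /* resumed at frame boundary yet?   */
};

/* Called from the SOF IRQ while the path is held back. In the real
 * driver, this is the point where RESUME_AT_FRAME_BOUNDARY is written
 * to the path ctrl register and the SOF IRQ is masked again. */
static void on_sof(struct path_cfg *p)
{
	if (!p->init_frame_drop || p->started)
		return;

	p->res_sof_cnt++;
	if (p->res_sof_cnt >= p->init_frame_drop)
		p->started = true; /* start on the next frame boundary */
}

int main(void)
{
	/* Dual TFE: RDI drops one frame to match the IPP path. */
	struct path_cfg rdi = { .init_frame_drop = 1 };

	on_sof(&rdi); /* first SOF: frame dropped, path released */
	printf("started=%d after %u SOF(s)\n",
		rdi.started, (unsigned)rdi.res_sof_cnt);
	return 0;
}

The real driver additionally keeps the SOF IRQ masked in steady state
unless SOF debug logging is enabled, as the diff shows.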

+ 215 - 6
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_core.c

@@ -225,6 +225,153 @@ static int cam_tfe_match_vc_dt_pair(int32_t *vc, uint32_t *dt,
 	return 0;
 }
 
+static void cam_tfe_csid_enable_path_for_init_frame_drop(
+	struct cam_tfe_csid_hw *csid_hw,
+	int res_id)
+{
+	struct cam_tfe_csid_path_cfg             *path_data;
+	const struct cam_tfe_csid_pxl_reg_offset *pxl_reg = NULL;
+	const struct cam_tfe_csid_rdi_reg_offset *rdi_reg = NULL;
+	const struct cam_tfe_csid_reg_offset     *csid_reg;
+	struct cam_hw_soc_info                   *soc_info;
+	struct cam_isp_resource_node             *res;
+	uint32_t val;
+
+	if (!csid_hw) {
+		CAM_WARN(CAM_ISP, "csid_hw cannot be NULL");
+		return;
+	}
+
+	csid_reg  = csid_hw->csid_info->csid_reg;
+	soc_info  = &csid_hw->hw_info->soc_info;
+
+	if (res_id == CAM_TFE_CSID_PATH_RES_IPP) {
+		res = &csid_hw->ipp_res;
+		pxl_reg = csid_reg->ipp_reg;
+	} else if (res_id >= CAM_TFE_CSID_PATH_RES_RDI_0 &&
+			res_id <= CAM_TFE_CSID_PATH_RES_RDI_2) {
+		res = &csid_hw->rdi_res[res_id];
+		rdi_reg = csid_reg->rdi_reg[res_id];
+	} else {
+		CAM_ERR(CAM_ISP, "Invalid res_id");
+		return;
+	}
+
+	path_data = (struct cam_tfe_csid_path_cfg *)res->res_priv;
+
+	if (!path_data || !path_data->init_frame_drop)
+		return;
+	if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING)
+		return;
+
+	path_data->res_sof_cnt++;
+	if ((path_data->res_sof_cnt + 1) <
+			path_data->res_sof_cnt) {
+		CAM_WARN(CAM_ISP, "Res %d sof count overflow %d",
+			res_id, path_data->res_sof_cnt);
+		return;
+	}
+
+	CAM_DBG(CAM_ISP, "CSID:%d res_id %d SOF cnt:%d init_frame_drop:%d",
+		csid_hw->hw_intf->hw_idx, res_id, path_data->res_sof_cnt,
+		path_data->init_frame_drop);
+
+	if ((path_data->res_sof_cnt ==
+		path_data->init_frame_drop) &&
+		pxl_reg) {
+		CAM_DBG(CAM_ISP, "CSID:%d Enabling pixel IPP Path",
+			csid_hw->hw_intf->hw_idx);
+		if (path_data->sync_mode !=
+			CAM_ISP_HW_SYNC_SLAVE) {
+			val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+				pxl_reg->csid_pxl_ctrl_addr);
+			val |= CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY;
+			cam_io_w_mb(val,
+				soc_info->reg_map[0].mem_base +
+				pxl_reg->csid_pxl_ctrl_addr);
+		}
+
+		if (!(csid_hw->csid_debug &
+				TFE_CSID_DEBUG_ENABLE_SOF_IRQ)) {
+			val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+				pxl_reg->csid_pxl_irq_mask_addr);
+			val &= ~(TFE_CSID_PATH_INFO_INPUT_SOF);
+			cam_io_w_mb(val,
+				soc_info->reg_map[0].mem_base +
+				pxl_reg->csid_pxl_irq_mask_addr);
+		}
+	} else if ((path_data->res_sof_cnt ==
+		path_data->init_frame_drop) && rdi_reg) {
+		CAM_DBG(CAM_ISP, "Enabling RDI %d Path", res_id);
+		cam_io_w_mb(CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY,
+			soc_info->reg_map[0].mem_base +
+			rdi_reg->csid_rdi_ctrl_addr);
+		if (!(csid_hw->csid_debug &
+				TFE_CSID_DEBUG_ENABLE_SOF_IRQ)) {
+			val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+				rdi_reg->csid_rdi_irq_mask_addr);
+			val &= ~(TFE_CSID_PATH_INFO_INPUT_SOF);
+			cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+				rdi_reg->csid_rdi_irq_mask_addr);
+		}
+	}
+}
+
+static bool cam_tfe_csid_check_path_active(struct cam_tfe_csid_hw   *csid_hw)
+{
+	const struct cam_tfe_csid_reg_offset  *csid_reg;
+	struct cam_hw_soc_info                *soc_info;
+	uint32_t i;
+	uint32_t path_status = 1;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	/* check the IPP path status */
+	if (csid_reg->cmn_reg->num_pix) {
+		path_status = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+				csid_reg->ipp_reg->csid_pxl_status_addr);
+		CAM_DBG(CAM_ISP, "CSID:%d IPP path status:%d",
+			csid_hw->hw_intf->hw_idx, path_status);
+		/* if status is 0 then it is active */
+		if (!path_status)
+			goto end;
+	}
+
+	/* Check the RDI path status */
+	for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
+		path_status = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+				csid_reg->rdi_reg[i]->csid_rdi_status_addr);
+		CAM_DBG(CAM_ISP, "CSID:%d RDI:%d path status:%d",
+			csid_hw->hw_intf->hw_idx, i,  path_status);
+		/* if status is 0 then it is active */
+		if (!path_status)
+			goto end;
+	}
+
+end:
+	/* if status is 0 then path is active */
+	return path_status ? false : true;
+}
+
+static void cam_tfe_csid_reset_path_data(
+	struct cam_tfe_csid_hw       *csid_hw,
+	struct cam_isp_resource_node *res)
+{
+	struct cam_tfe_csid_path_cfg *path_data = NULL;
+
+	if (!csid_hw || !res) {
+		CAM_WARN(CAM_ISP, "csid_hw or res cannot be NULL");
+		return;
+	}
+	path_data = res->res_priv;
+
+	if (path_data) {
+		path_data->init_frame_drop = 0;
+		path_data->res_sof_cnt     = 0;
+	}
+}
+
 static int cam_tfe_csid_cid_get(struct cam_tfe_csid_hw *csid_hw,
 	int32_t *vc, uint32_t *dt, uint32_t num_valid_vc_dt,  uint32_t *cid)
 {
@@ -276,6 +423,7 @@ static int cam_tfe_csid_global_reset(struct cam_tfe_csid_hw *csid_hw)
 {
 	struct cam_hw_soc_info                *soc_info;
 	const struct cam_tfe_csid_reg_offset  *csid_reg;
+	struct cam_tfe_csid_path_cfg          *path_data = NULL;
 	int rc = 0;
 	uint32_t val = 0, i;
 	uint32_t status;
@@ -377,6 +525,18 @@ static int cam_tfe_csid_global_reset(struct cam_tfe_csid_hw *csid_hw)
 	csid_hw->error_irq_count = 0;
 	csid_hw->prev_boot_timestamp = 0;
 
+	if (csid_hw->pxl_pipe_enable) {
+		path_data = (struct cam_tfe_csid_path_cfg *)
+			csid_hw->ipp_res.res_priv;
+		path_data->res_sof_cnt = 0;
+	}
+
+	for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
+		path_data = (struct cam_tfe_csid_path_cfg  *)
+			csid_hw->rdi_res[i].res_priv;
+		path_data->res_sof_cnt = 0;
+	}
+
 	return rc;
 }
 
@@ -712,6 +872,7 @@ static int cam_tfe_csid_path_reserve(struct cam_tfe_csid_hw *csid_hw,
 	path_data->height  = reserve->in_port->height;
 	path_data->start_line = reserve->in_port->line_start;
 	path_data->end_line = reserve->in_port->line_end;
+	path_data->usage_type = reserve->in_port->usage_type;
 
 	path_data->bayer_bin = reserve->in_port->bayer_bin;
 	path_data->qcfa_bin = reserve->in_port->qcfa_bin;
@@ -913,7 +1074,8 @@ static int cam_tfe_csid_enable_hw(struct cam_tfe_csid_hw  *csid_hw)
 {
 	int rc = 0;
 	const struct cam_tfe_csid_reg_offset      *csid_reg;
-	struct cam_hw_soc_info              *soc_info;
+	struct cam_hw_soc_info                    *soc_info;
+	struct cam_tfe_csid_path_cfg              *path_data = NULL;
 	uint32_t i, val, clk_lvl;
 	unsigned long flags;
 
@@ -994,6 +1156,19 @@ static int cam_tfe_csid_enable_hw(struct cam_tfe_csid_hw  *csid_hw)
 	spin_unlock_irqrestore(&csid_hw->spin_lock, flags);
 	cam_tasklet_start(csid_hw->tasklet);
 
+	if (csid_hw->pxl_pipe_enable) {
+		path_data = (struct cam_tfe_csid_path_cfg  *)
+			csid_hw->ipp_res.res_priv;
+		path_data->res_sof_cnt = 0;
+	}
+
+	for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
+		path_data = (struct cam_tfe_csid_path_cfg *)
+			csid_hw->rdi_res[i].res_priv;
+		path_data->res_sof_cnt = 0;
+	}
+
 	return rc;
 
 disable_soc:
@@ -1651,11 +1826,14 @@ static int cam_tfe_csid_enable_rdi_path(
 {
 	const struct cam_tfe_csid_reg_offset      *csid_reg;
 	struct cam_hw_soc_info                    *soc_info;
+	struct cam_tfe_csid_path_cfg              *path_data;
 	uint32_t id, val;
+	bool path_active = false;
 
 	csid_reg = csid_hw->csid_info->csid_reg;
 	soc_info = &csid_hw->hw_info->soc_info;
 	id = res->res_id;
+	path_data = (struct cam_tfe_csid_path_cfg   *) res->res_priv;
 
 	if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW ||
 		res->res_id > CAM_TFE_CSID_PATH_RES_RDI_2 ||
@@ -1667,10 +1845,30 @@ static int cam_tfe_csid_enable_rdi_path(
 		return -EINVAL;
 	}
 
-	/* resume at frame boundary */
-	cam_io_w_mb(CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY,
-			soc_info->reg_map[0].mem_base +
-			csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
+	/* Drop one extra frame on RDI for the dual TFE use case */
+	if (path_data->usage_type == CAM_ISP_TFE_IN_RES_USAGE_DUAL)
+		path_data->init_frame_drop = 1;
+
+	/* resume at frame boundary */
+	if (!path_data->init_frame_drop) {
+		CAM_DBG(CAM_ISP, "Start RDI:%d path", id);
+		cam_io_w_mb(CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY,
+				  soc_info->reg_map[0].mem_base +
+				  csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
+	} else {
+		path_active = cam_tfe_csid_check_path_active(csid_hw);
+		if (path_active)
+			cam_io_w_mb(CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY,
+					  soc_info->reg_map[0].mem_base +
+					  csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
+
+		CAM_DBG(CAM_ISP,
+			"CSID:%d %s RDI:%d path frame drop %d",
+			csid_hw->hw_intf->hw_idx,
+			path_active ? "Starting" : "Not Starting", id,
+			path_data->init_frame_drop);
+	}
 
 	/* Enable the required RDI interrupts */
 	val = TFE_CSID_PATH_INFO_RST_DONE | TFE_CSID_PATH_ERROR_FIFO_OVERFLOW |
@@ -1682,7 +1880,8 @@ static int cam_tfe_csid_enable_rdi_path(
 			TFE_CSID_PATH_ERROR_LINE_COUNT;
 	}
 
-	if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_SOF_IRQ)
+	if ((csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_SOF_IRQ) ||
+		(path_data->init_frame_drop && !path_active))
 		val |= TFE_CSID_PATH_INFO_INPUT_SOF;
 	if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_EOF_IRQ)
 		val |= TFE_CSID_PATH_INFO_INPUT_EOF;
@@ -1704,10 +1903,12 @@ static int cam_tfe_csid_disable_rdi_path(
 	uint32_t id, val = 0;
 	const struct cam_tfe_csid_reg_offset       *csid_reg;
 	struct cam_hw_soc_info                     *soc_info;
+	struct cam_tfe_csid_path_cfg               *path_data;
 
 	csid_reg = csid_hw->csid_info->csid_reg;
 	soc_info = &csid_hw->hw_info->soc_info;
 	id = res->res_id;
+	path_data = (struct cam_tfe_csid_path_cfg   *) res->res_priv;
 
 	if ((res->res_id > CAM_TFE_CSID_PATH_RES_RDI_2) ||
 		(!csid_reg->rdi_reg[res->res_id])) {
@@ -1743,6 +1944,9 @@ static int cam_tfe_csid_disable_rdi_path(
 	CAM_DBG(CAM_ISP, "CSID:%d res_id:%d",
 		csid_hw->hw_intf->hw_idx, res->res_id);
 
+	path_data->init_frame_drop = 0;
+	path_data->res_sof_cnt     = 0;
+
 	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
 		csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
 
@@ -2111,6 +2315,8 @@ static int cam_tfe_csid_release(void *hw_priv,
 
 	res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
 
+	cam_tfe_csid_reset_path_data(csid_hw, res);
+
 end:
 	mutex_unlock(&csid_hw->hw_info->hw_mutex);
 	return rc;
@@ -3406,6 +3612,9 @@ handle_fatal_error:
 			complete(&csid_hw->csid_rdin_complete[i]);
 		}
 
+		if (irq_status[i] & TFE_CSID_PATH_INFO_INPUT_SOF)
+			cam_tfe_csid_enable_path_for_init_frame_drop(csid_hw, i);
+
 		if ((irq_status[i] & TFE_CSID_PATH_INFO_INPUT_SOF) &&
 			(csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_SOF_IRQ)) {
 			if (!csid_hw->sof_irq_triggered)

+ 8 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_core.h

@@ -385,6 +385,11 @@ struct cam_tfe_csid_cid_data {
  * @sensor_vbi:         Sensor vertical blanking interval
  * @bayer_bin:          Bayer binning
  * @qcfa_bin:           Quad-CFA binning
+ * @usage_type:         Dual or single TFE usage information
+ * @init_frame_drop:    Initial frame drop count. In the dual TFE case the
+ *                      RDI path needs to drop one more frame than the pix
+ *                      path.
+ * @res_sof_cnt:        Path resource SOF count, used to track the initial
+ *                      frame drop
  *
  */
 struct cam_tfe_csid_path_cfg {
@@ -410,6 +415,9 @@ struct cam_tfe_csid_path_cfg {
 	uint32_t                        sensor_vbi;
 	uint32_t                        bayer_bin;
 	uint32_t                        qcfa_bin;
+	uint32_t                        usage_type;
+	uint32_t                        init_frame_drop;
+	uint32_t                        res_sof_cnt;
 };
 
 /**
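
Taken together, the three new fields have a small lifecycle: usage_type is
latched from the in_port at reserve time, init_frame_drop is derived from it
when the RDI path is enabled, and res_sof_cnt is advanced in the SOF IRQ and
cleared again on disable, release, stream start, and global reset. A sketch
of the teardown half, continuing the hypothetical types above:

/* Mirrors cam_tfe_csid_disable_rdi_path() and
 * cam_tfe_csid_reset_path_data() in the diff above (hypothetical
 * wrapper, not the driver's API). */
static void reset_path_drop_state(struct path_cfg *p)
{
	p->init_frame_drop = 0;
	p->res_sof_cnt     = 0;
	p->started         = false;
}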