@@ -997,6 +997,8 @@ static int cam_tfe_csid_path_reserve(struct cam_tfe_csid_hw *csid_hw,
path_data->bayer_bin = reserve->in_port->bayer_bin;
path_data->qcfa_bin = reserve->in_port->qcfa_bin;
path_data->crop_enable = reserve->crop_enable;
+ path_data->is_shdr_master = reserve->in_port->is_shdr_master;
+ path_data->is_shdr = reserve->in_port->shdr_en;

csid_hw->event_cb = reserve->event_cb;
csid_hw->event_cb_priv = reserve->event_cb_prv;
@@ -1702,33 +1704,31 @@ static int cam_tfe_csid_enable_pxl_path(

CAM_DBG(CAM_ISP, "Enable IPP path");

- /* Set master or slave path */
- if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER)
- /* Set halt mode as master */
- val = (TFE_CSID_HALT_MODE_MASTER <<
- pxl_reg->halt_mode_shift) |
- (pxl_reg->halt_master_sel_master_val <<
- pxl_reg->halt_master_sel_shift);
- else if (path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
- /* Set halt mode as slave and set master idx */
- val = (TFE_CSID_HALT_MODE_SLAVE << pxl_reg->halt_mode_shift);
+ if ((path_data->is_shdr && path_data->is_shdr_master) ||
+ (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER))
+ /* Set halt mode for master */
+ val = (TFE_CSID_HALT_MODE_MASTER << pxl_reg->halt_mode_shift) |
+ (TFE_CSID_HALT_CMD_SOURCE_NONE << pxl_reg->halt_master_sel_shift) |
+ (CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY << pxl_reg->halt_cmd_shift);
+ else if ((path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE) ||
+ (path_data->is_shdr))
+ /* Set halt mode for slave */
+ val = (TFE_CSID_HALT_MODE_SLAVE << pxl_reg->halt_mode_shift) |
+ (TFE_CSID_HALT_CMD_SOURCE_EXTERNAL << pxl_reg->halt_master_sel_shift) |
+ (CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY << pxl_reg->halt_cmd_shift);
else
- /* Default is internal halt mode */
- val = 1 << pxl_reg->halt_master_sel_shift;
-
- /*
- * Resume at frame boundary if Master or No Sync.
- * Slave will get resume command from Master.
- */
- if ((path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER ||
- path_data->sync_mode == CAM_ISP_HW_SYNC_NONE) && !path_data->init_frame_drop)
- val |= CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY;
+ /* Set halt mode for default */
+ val = (TFE_CSID_HALT_MODE_INTERNAL << pxl_reg->halt_mode_shift) |
+ (TFE_CSID_HALT_CMD_SOURCE_NONE << pxl_reg->halt_master_sel_shift) |
+ (CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY << pxl_reg->halt_cmd_shift);

cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
pxl_reg->csid_pxl_ctrl_addr);

- CAM_DBG(CAM_ISP, "CSID:%d IPP Ctrl val: 0x%x",
- csid_hw->hw_intf->hw_idx, val);
+ CAM_DBG(CAM_ISP, "CSID:%d sync_mode=%d IPP_Ctrl:0x%x is_shdr=%d shdr_master=%d",
+ csid_hw->hw_intf->hw_idx, path_data->sync_mode,
+ cam_io_r_mb(soc_info->reg_map[0].mem_base + pxl_reg->csid_pxl_ctrl_addr),
+ path_data->is_shdr, path_data->is_shdr_master);

/* Enable the required pxl path interrupts */
val = TFE_CSID_PATH_INFO_RST_DONE |
@@ -1807,7 +1807,6 @@ static int cam_tfe_csid_disable_pxl_path(
enum cam_tfe_csid_halt_cmd stop_cmd)
{
int rc = 0;
- uint32_t val = 0;
const struct cam_tfe_csid_reg_offset *csid_reg;
struct cam_hw_soc_info *soc_info;
struct cam_tfe_csid_path_cfg *path_data;
@@ -1858,32 +1857,13 @@ static int cam_tfe_csid_disable_pxl_path(
cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
pxl_reg->csid_pxl_irq_mask_addr);

- if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER ||
- path_data->sync_mode == CAM_ISP_HW_SYNC_NONE) {
- /* configure Halt */
- val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
- pxl_reg->csid_pxl_ctrl_addr);
- val &= ~0x3;
- val |= stop_cmd;
- cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
- pxl_reg->csid_pxl_ctrl_addr);
- }
-
- if (path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE &&
- stop_cmd == CAM_TFE_CSID_HALT_IMMEDIATELY) {
- /* configure Halt for slave */
- val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
- pxl_reg->csid_pxl_ctrl_addr);
- val &= ~0xF;
- val |= stop_cmd;
- val |= (TFE_CSID_HALT_MODE_MASTER << 2);
- cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
- pxl_reg->csid_pxl_ctrl_addr);
- }
-
path_data->init_frame_drop = 0;
path_data->res_sof_cnt = 0;

+ CAM_DBG(CAM_ISP, "halt CSID:%d sync_mode:%d res_id:%d IPP path pxl_ctrl=0x%x",
+ csid_hw->hw_intf->hw_idx, path_data->sync_mode, res->res_id,
+ cam_io_r_mb(soc_info->reg_map[0].mem_base + pxl_reg->csid_pxl_ctrl_addr));
+
return rc;
}

@@ -1918,34 +1898,29 @@ static int cam_tfe_csid_enable_ppp_path(

CAM_DBG(CAM_ISP, "CSID:%d Enable PPP path", csid_hw->hw_intf->hw_idx);

- /* Set master or slave path */
- if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER)
- /* Set halt mode as master */
- val = (TFE_CSID_HALT_MODE_SLAVE << ppp_reg->halt_mode_shift) |
- (ppp_reg->halt_master_sel_master_val <<
- ppp_reg->halt_master_sel_shift);
- else if (path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
- /* Set halt mode as slave and set master idx */
- val = (TFE_CSID_HALT_MODE_SLAVE << ppp_reg->halt_mode_shift) |
- (ppp_reg->halt_master_sel_slave_val <<
- ppp_reg->halt_master_sel_shift);
+ if ((path_data->is_shdr && path_data->is_shdr_master) ||
+ (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER))
+ /* Set halt mode for master shdr/dual */
+ val = (TFE_CSID_HALT_MODE_SLAVE << ppp_reg->halt_mode_shift) |
+ (TFE_CSID_HALT_CMD_SOURCE_INTERNAL1 << ppp_reg->halt_master_sel_shift) |
+ (CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY << ppp_reg->halt_cmd_shift);
+ else if ((path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE) ||
+ (path_data->is_shdr))
+ /* Set halt mode for slave shdr/dual */
+ val = (TFE_CSID_HALT_MODE_SLAVE << ppp_reg->halt_mode_shift) |
+ (TFE_CSID_HALT_CMD_SOURCE_INTERNAL2 << ppp_reg->halt_master_sel_shift) |
+ (CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY << ppp_reg->halt_cmd_shift);
else
- /* Default is internal halt mode */
- val = (TFE_CSID_HALT_MODE_SLAVE << ppp_reg->halt_mode_shift) |
- (ppp_reg->halt_master_sel_master_val <<
- ppp_reg->halt_master_sel_shift);
-
- /*
- * Resume at frame boundary if Master or No Sync.
- * Slave will get resume command from Master.
- */
- if ((path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER ||
- path_data->sync_mode == CAM_ISP_HW_SYNC_NONE) && !path_data->init_frame_drop)
- val |= CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY;
+ /* Set halt mode for default */
+ val = (TFE_CSID_HALT_MODE_SLAVE << ppp_reg->halt_mode_shift) |
+ (TFE_CSID_HALT_CMD_SOURCE_INTERNAL1 << ppp_reg->halt_master_sel_shift) |
+ (CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY << ppp_reg->halt_cmd_shift);

cam_io_w_mb(val, soc_info->reg_map[0].mem_base + ppp_reg->csid_pxl_ctrl_addr);

- CAM_DBG(CAM_ISP, "CSID:%d PPP Ctrl val: 0x%x", csid_hw->hw_intf->hw_idx, val);
+ CAM_DBG(CAM_ISP, "CSID:%d sync_mode:%d PPP Ctrl val: 0x%x",
+ csid_hw->hw_intf->hw_idx, path_data->sync_mode,
+ cam_io_r_mb(soc_info->reg_map[0].mem_base + ppp_reg->csid_pxl_ctrl_addr));

/* Enable the required ppp path interrupts */
val = TFE_CSID_PATH_INFO_RST_DONE | TFE_CSID_PATH_ERROR_FIFO_OVERFLOW |
@@ -1974,7 +1949,6 @@ static int cam_tfe_csid_disable_ppp_path(
enum cam_tfe_csid_halt_cmd stop_cmd)
{
int rc = 0;
- uint32_t val = 0;
const struct cam_tfe_csid_reg_offset *csid_reg;
struct cam_hw_soc_info *soc_info;
struct cam_tfe_csid_path_cfg *path_data;
@@ -2024,33 +1998,12 @@ static int cam_tfe_csid_disable_ppp_path(
cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
ppp_reg->csid_pxl_irq_mask_addr);

- if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER ||
- path_data->sync_mode == CAM_ISP_HW_SYNC_NONE) {
- /* configure Halt */
- val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
- ppp_reg->csid_pxl_ctrl_addr);
- val &= ~0x3F;
- val |= (TFE_CSID_HALT_MODE_SLAVE << ppp_reg->halt_mode_shift);
- val |= (ppp_reg->halt_master_sel_master_val <<
- ppp_reg->halt_master_sel_shift);
- val |= stop_cmd;
- cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
- ppp_reg->csid_pxl_ctrl_addr);
- }
+ CAM_DBG(CAM_ISP, "CSID:%d res_id:%d Skip programming halt mode for PPP path",
+ csid_hw->hw_intf->hw_idx, res->res_id);

- if (path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE &&
- stop_cmd == CAM_TFE_CSID_HALT_IMMEDIATELY) {
- /* configure Halt for slave */
- val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
- ppp_reg->csid_pxl_ctrl_addr);
- val &= ~0x3F;
- val |= (TFE_CSID_HALT_MODE_SLAVE << ppp_reg->halt_mode_shift);
- val |= (ppp_reg->halt_master_sel_slave_val <<
- ppp_reg->halt_master_sel_shift);
- val |= stop_cmd;
- cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
- ppp_reg->csid_pxl_ctrl_addr);
- }
+ CAM_DBG(CAM_ISP, "CSID:%d sync_mode:%d res_id:%d PPP path halt_ctrl_reg=0x%x",
+ csid_hw->hw_intf->hw_idx, path_data->sync_mode, res->res_id,
+ cam_io_r_mb(soc_info->reg_map[0].mem_base + ppp_reg->csid_pxl_ctrl_addr));

path_data->init_frame_drop = 0;
path_data->res_sof_cnt = 0;
@@ -2354,6 +2307,9 @@ static int cam_tfe_csid_poll_stop_status(
uint32_t csid_status_addr = 0, val = 0, res_id = 0;
const struct cam_tfe_csid_reg_offset *csid_reg;
struct cam_hw_soc_info *soc_info;
+ uint32_t csid_ctrl_reg = 0;
+ uint32_t csid_cfg0_reg = 0;
+ uint32_t csid_cfg1_reg = 0;

csid_reg = csid_hw->csid_info->csid_reg;
soc_info = &csid_hw->hw_info->soc_info;
@@ -2367,6 +2323,10 @@ static int cam_tfe_csid_poll_stop_status(
csid_status_addr =
csid_reg->ipp_reg->csid_pxl_status_addr;

+ csid_ctrl_reg = csid_reg->ipp_reg->csid_pxl_ctrl_addr;
+ csid_cfg0_reg = csid_reg->ipp_reg->csid_pxl_cfg0_addr;
+ csid_cfg1_reg = csid_reg->ipp_reg->csid_pxl_cfg1_addr;
+
if (csid_hw->ipp_res.res_state !=
CAM_ISP_RESOURCE_STATE_STREAMING)
continue;
@@ -2374,6 +2334,9 @@ static int cam_tfe_csid_poll_stop_status(
} else if (res_id == CAM_TFE_CSID_PATH_RES_PPP) {
csid_status_addr =
csid_reg->ppp_reg->csid_pxl_status_addr;
+ csid_ctrl_reg = csid_reg->ppp_reg->csid_pxl_ctrl_addr;
+ csid_cfg0_reg = csid_reg->ppp_reg->csid_pxl_cfg0_addr;
+ csid_cfg1_reg = csid_reg->ppp_reg->csid_pxl_cfg1_addr;

if (csid_hw->ppp_res.res_state !=
CAM_ISP_RESOURCE_STATE_STREAMING)
@@ -2382,6 +2345,9 @@ static int cam_tfe_csid_poll_stop_status(
} else {
csid_status_addr =
csid_reg->rdi_reg[res_id]->csid_rdi_status_addr;
+ csid_ctrl_reg = csid_reg->rdi_reg[res_id]->csid_rdi_ctrl_addr;
+ csid_cfg0_reg = csid_reg->rdi_reg[res_id]->csid_rdi_cfg0_addr;
+ csid_cfg1_reg = csid_reg->rdi_reg[res_id]->csid_rdi_cfg1_addr;

if (csid_hw->rdi_res[res_id].res_state !=
CAM_ISP_RESOURCE_STATE_STREAMING)
@@ -2402,6 +2368,13 @@ static int cam_tfe_csid_poll_stop_status(
if (rc < 0) {
CAM_ERR(CAM_ISP, "CSID:%d res:%d halt failed rc %d",
csid_hw->hw_intf->hw_idx, res_id, rc);
+
+ CAM_ERR(CAM_ISP, "CSID:%d status:0x%x ctrl_reg:0x%x cfg0:0x%x cfg1:0x%x",
+ csid_hw->hw_intf->hw_idx,
+ cam_io_r_mb(soc_info->reg_map[0].mem_base + csid_status_addr),
+ cam_io_r_mb(soc_info->reg_map[0].mem_base + csid_ctrl_reg),
+ cam_io_r_mb(soc_info->reg_map[0].mem_base + csid_cfg0_reg),
+ cam_io_r_mb(soc_info->reg_map[0].mem_base + csid_cfg1_reg));
rc = -ETIMEDOUT;
break;
}
@@ -3089,12 +3062,16 @@ static int cam_tfe_csid_stop(void *hw_priv,
void *stop_args, uint32_t arg_size)
{
int rc = 0;
- struct cam_tfe_csid_hw *csid_hw;
- struct cam_hw_info *csid_hw_info;
- struct cam_isp_resource_node *res;
- struct cam_tfe_csid_hw_stop_args *csid_stop;
- uint32_t i;
+ struct cam_tfe_csid_hw *csid_hw;
+ struct cam_hw_info *csid_hw_info;
+ struct cam_isp_resource_node *res;
+ struct cam_tfe_csid_hw_stop_args *csid_stop;
+ struct cam_hw_soc_info *soc_info;
+ const struct cam_tfe_csid_reg_offset *csid_reg;
+ const struct cam_tfe_csid_pxl_reg_offset *pxl_reg;
+ uint32_t i, val = 0;
uint32_t res_mask = 0;
+ void __iomem *mem_base;

if (!hw_priv || !stop_args ||
(arg_size != sizeof(struct cam_tfe_csid_hw_stop_args))) {
@@ -3110,9 +3087,74 @@ static int cam_tfe_csid_stop(void *hw_priv,

csid_hw_info = (struct cam_hw_info *)hw_priv;
csid_hw = (struct cam_tfe_csid_hw *)csid_hw_info->core_info;
- CAM_DBG(CAM_ISP, "CSID:%d num_res %d",
- csid_hw->hw_intf->hw_idx,
- csid_stop->num_res);
+ csid_reg = csid_hw->csid_info->csid_reg;
+ soc_info = &csid_hw->hw_info->soc_info;
+ mem_base = soc_info->reg_map[0].mem_base;
+
+ /* Disable cgc for all the paths */
+ for (i = 0; i < csid_stop->num_res; i++) {
+ res = csid_stop->node_res[i];
+ switch (res->res_type) {
+ case CAM_ISP_RESOURCE_PIX_PATH:
+ if (res->res_id == CAM_TFE_CSID_PATH_RES_IPP) {
+ pxl_reg = csid_reg->ipp_reg;
+ val = cam_io_r_mb(mem_base + pxl_reg->csid_pxl_cfg0_addr);
+ val = val | (1 << pxl_reg->cgc_mode_en_shift_val);
+ cam_io_w_mb(val, mem_base + pxl_reg->csid_pxl_cfg0_addr);
+ } else if (res->res_id == CAM_TFE_CSID_PATH_RES_PPP) {
+ pxl_reg = csid_reg->ppp_reg;
+ val = cam_io_r_mb(mem_base + pxl_reg->csid_pxl_cfg0_addr);
+ val = val | (1 << pxl_reg->cgc_mode_en_shift_val);
+ cam_io_w_mb(val, mem_base + pxl_reg->csid_pxl_cfg0_addr);
+ }
+ CAM_DBG(CAM_ISP, "CSID:%d cgc change res_type %d res_id %d val:0x%x",
+ csid_hw->hw_intf->hw_idx,
+ res->res_type, res->res_id, val);
+ break;
+ default:
+ CAM_DBG(CAM_ISP, "CSID:%d Invalid res type%d",
+ csid_hw->hw_intf->hw_idx, res->res_type);
+ break;
+ }
+ }
+
+ /* csid ctrl to resume at frame boundary */
+ cam_io_w_mb(CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY,
+ mem_base + csid_reg->cmn_reg->csid_ctrl_addr);
+
+ /* halt to global */
+ for (i = 0; i < csid_stop->num_res; i++) {
+ res = csid_stop->node_res[i];
+ switch (res->res_type) {
+ case CAM_ISP_RESOURCE_PIX_PATH:
+ if (res->res_id == CAM_TFE_CSID_PATH_RES_IPP) {
+ pxl_reg = csid_reg->ipp_reg;
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ pxl_reg->csid_pxl_ctrl_addr);
+ val &= ~(pxl_reg->halt_mode_mask << pxl_reg->halt_mode_shift);
+ val |= (TFE_CSID_HALT_MODE_GLOBAL << pxl_reg->halt_mode_shift);
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ pxl_reg->csid_pxl_ctrl_addr);
+ } else if (res->res_id == CAM_TFE_CSID_PATH_RES_PPP) {
+ pxl_reg = csid_reg->ppp_reg;
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ pxl_reg->csid_pxl_ctrl_addr);
+ val &= ~(pxl_reg->halt_mode_mask << pxl_reg->halt_mode_shift);
+ val |= (TFE_CSID_HALT_MODE_GLOBAL << pxl_reg->halt_mode_shift);
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ pxl_reg->csid_pxl_ctrl_addr);
+ }
+ CAM_DBG(CAM_ISP, "CSID:%d global change res_type %d res_id %d val:0x%x",
+ csid_hw->hw_intf->hw_idx,
+ res->res_type, res->res_id, val);
+ break;
+ default:
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid res type%d",
+ csid_hw->hw_intf->hw_idx,
+ res->res_type);
+ break;
+ }
+ }

/* Stop the resource first */
for (i = 0; i < csid_stop->num_res; i++) {
@@ -3132,7 +3174,6 @@ static int cam_tfe_csid_stop(void *hw_priv,
else
rc = cam_tfe_csid_disable_rdi_path(csid_hw,
res, csid_stop->stop_cmd);
-
break;
default:
CAM_ERR(CAM_ISP, "CSID:%d Invalid res type%d",
@@ -3142,9 +3183,40 @@ static int cam_tfe_csid_stop(void *hw_priv,
}
}

+ /* issue global cmd */
+ cam_io_w_mb(CAM_TFE_CSID_HALT_IMMEDIATELY,
+ mem_base + csid_reg->cmn_reg->csid_ctrl_addr);
+
if (res_mask)
rc = cam_tfe_csid_poll_stop_status(csid_hw, res_mask);

+ for (i = 0; i < csid_stop->num_res; i++) {
+ res = csid_stop->node_res[i];
+ CAM_DBG(CAM_ISP, "CSID:%d cgc change to dynamic res_type %d res_id %d",
+ csid_hw->hw_intf->hw_idx,
+ res->res_type, res->res_id);
+ switch (res->res_type) {
+ case CAM_ISP_RESOURCE_PIX_PATH:
+ if (res->res_id == CAM_TFE_CSID_PATH_RES_IPP) {
+ pxl_reg = csid_reg->ipp_reg;
+ val = cam_io_r_mb(mem_base + pxl_reg->csid_pxl_cfg0_addr);
+ val &= ~(1 << pxl_reg->cgc_mode_en_shift_val);
+ cam_io_w_mb(val, mem_base + pxl_reg->csid_pxl_cfg0_addr);
+ } else if (res->res_id == CAM_TFE_CSID_PATH_RES_PPP) {
+ pxl_reg = csid_reg->ppp_reg;
+ val = cam_io_r_mb(mem_base + pxl_reg->csid_pxl_cfg0_addr);
+ val &= ~(1 << pxl_reg->cgc_mode_en_shift_val);
+ cam_io_w_mb(val, mem_base + pxl_reg->csid_pxl_cfg0_addr);
+ } else
+ continue;
+ default:
+ CAM_DBG(CAM_ISP, "CSID:%d Invalid res type%d",
+ csid_hw->hw_intf->hw_idx,
+ res->res_type);
+ break;
+ }
+ }
+
for (i = 0; i < csid_stop->num_res; i++) {
res = csid_stop->node_res[i];
res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;