@@ -47,7 +47,7 @@
 /* Max CSI Rx irq error count threshold value */
 #define CAM_TFE_CSID_MAX_IRQ_ERROR_COUNT 5
 
-static int cam_tfe_csid_is_ipp_format_supported(
+static int cam_tfe_csid_is_ipp_ppp_format_supported(
     uint32_t in_format)
 {
     int rc = -EINVAL;
@@ -170,7 +170,7 @@ static int cam_tfe_csid_get_format_rdi(
     return rc;
 }
 
-static int cam_tfe_csid_get_format_ipp(
+static int cam_tfe_csid_get_format_ipp_ppp(
     uint32_t in_format,
     uint32_t *decode_fmt, uint32_t *plain_fmt)
 {
@@ -341,6 +341,17 @@ static bool cam_tfe_csid_check_path_active(struct cam_tfe_csid_hw *csid_hw)
             goto end;
     }
 
+    /* check the PPP path status */
+    if (csid_reg->cmn_reg->num_ppp) {
+        path_status = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+            csid_reg->ppp_reg->csid_pxl_status_addr);
+        CAM_DBG(CAM_ISP, "CSID:%d PPP path status:%d",
+            csid_hw->hw_intf->hw_idx, path_status);
+        /* if status is 0 then it is active */
+        if (!path_status)
+            goto end;
+    }
+
     /* Check the RDI path status */
     for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
         path_status = cam_io_r_mb(soc_info->reg_map[0].mem_base +
@@ -461,6 +472,10 @@ static int cam_tfe_csid_global_reset(struct cam_tfe_csid_hw *csid_hw)
         cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
             csid_reg->ipp_reg->csid_pxl_irq_mask_addr);
 
+    if (csid_reg->cmn_reg->num_ppp)
+        cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+            csid_reg->ppp_reg->csid_pxl_irq_mask_addr);
+
     for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++)
         cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
             csid_reg->rdi_reg[i]->csid_rdi_irq_mask_addr);
@@ -478,6 +493,11 @@ static int cam_tfe_csid_global_reset(struct cam_tfe_csid_hw *csid_hw)
             soc_info->reg_map[0].mem_base +
             csid_reg->ipp_reg->csid_pxl_irq_clear_addr);
 
+    if (csid_reg->cmn_reg->num_ppp)
+        cam_io_w_mb(csid_reg->cmn_reg->ppp_irq_mask_all,
+            soc_info->reg_map[0].mem_base +
+            csid_reg->ppp_reg->csid_pxl_irq_clear_addr);
+
     for (i = 0 ; i < csid_reg->cmn_reg->num_rdis; i++)
         cam_io_w_mb(csid_reg->cmn_reg->rdi_irq_mask_all,
             soc_info->reg_map[0].mem_base +
@@ -541,6 +561,12 @@ static int cam_tfe_csid_global_reset(struct cam_tfe_csid_hw *csid_hw)
         path_data->res_sof_cnt = 0;
     }
 
+    if (csid_reg->cmn_reg->num_ppp) {
+        path_data = (struct cam_tfe_csid_path_cfg *)
+            csid_hw->ppp_res.res_priv;
+        path_data->res_sof_cnt = 0;
+    }
+
     for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
         path_data = (struct cam_tfe_csid_path_cfg *)
             csid_hw->rdi_res[i].res_priv;
@@ -599,6 +625,24 @@ static int cam_tfe_csid_path_reset(struct cam_tfe_csid_hw *csid_hw,
         val |= TFE_CSID_PATH_INFO_RST_DONE;
         cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
             csid_reg->ipp_reg->csid_pxl_irq_mask_addr);
+    } else if (res->res_id == CAM_TFE_CSID_PATH_RES_PPP) {
+        if (!csid_reg->ppp_reg) {
+            CAM_ERR(CAM_ISP, "CSID:%d PPP not supported :%d",
+                csid_hw->hw_intf->hw_idx,
+                res->res_id);
+            return -EINVAL;
+        }
+
+        reset_strb_addr = csid_reg->ppp_reg->csid_pxl_rst_strobes_addr;
+        complete = &csid_hw->csid_ppp_complete;
+        reset_strb_val = csid_reg->cmn_reg->ppp_path_rst_stb_all;
+
+        /* Enable path reset done interrupt */
+        val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+            csid_reg->ppp_reg->csid_pxl_irq_mask_addr);
+        val |= TFE_CSID_PATH_INFO_RST_DONE;
+        cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+            csid_reg->ppp_reg->csid_pxl_irq_mask_addr);
     } else {
         id = res->res_id;
         if (!csid_reg->rdi_reg[id]) {
@@ -820,7 +864,7 @@ static int cam_tfe_csid_path_reserve(struct cam_tfe_csid_hw *csid_hw,
             goto end;
         }
 
-        if (cam_tfe_csid_is_ipp_format_supported(
+        if (cam_tfe_csid_is_ipp_ppp_format_supported(
             reserve->in_port->format)) {
             CAM_ERR(CAM_ISP,
                 "CSID:%d res id:%d un support format %d",
@@ -841,6 +885,43 @@ static int cam_tfe_csid_path_reserve(struct cam_tfe_csid_hw *csid_hw,
 
         break;
 
+    case CAM_TFE_CSID_PATH_RES_PPP:
+        if (csid_hw->ppp_res.res_state !=
+            CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+            CAM_DBG(CAM_ISP,
+                "CSID:%d PPP resource not available %d",
+                csid_hw->hw_intf->hw_idx,
+                csid_hw->ppp_res.res_state);
+            rc = -EINVAL;
+            goto end;
+        }
+
+        if (cam_tfe_csid_is_ipp_ppp_format_supported(
+            reserve->in_port->format)) {
+            CAM_ERR(CAM_ISP,
+                "CSID:%d res id:%d un support format %d",
+                csid_hw->hw_intf->hw_idx, reserve->res_id,
+                reserve->in_port->format);
+            rc = -EINVAL;
+            goto end;
+        }
+
+        rc = cam_tfe_csid_cid_reserve(csid_hw, reserve, &cid_value);
+        if (rc) {
+            CAM_ERR(CAM_ISP,
+                "CSID:%d res id:%d invalid cid %d",
+                csid_hw->hw_intf->hw_idx, reserve->res_id, cid_value);
+            goto end;
+        }
+
+        /* assign the PPP resource */
+        res = &csid_hw->ppp_res;
+        CAM_DBG(CAM_ISP,
+            "CSID:%d PPP resource:%d acquired successfully",
+            csid_hw->hw_intf->hw_idx, res->res_id);
+
+        break;
+
     case CAM_TFE_CSID_PATH_RES_RDI_0:
     case CAM_TFE_CSID_PATH_RES_RDI_1:
     case CAM_TFE_CSID_PATH_RES_RDI_2:
@@ -899,6 +980,7 @@ static int cam_tfe_csid_path_reserve(struct cam_tfe_csid_hw *csid_hw,
 
     path_data->bayer_bin = reserve->in_port->bayer_bin;
     path_data->qcfa_bin = reserve->in_port->qcfa_bin;
+    path_data->crop_enable = reserve->crop_enable;
 
     csid_hw->event_cb = reserve->event_cb;
     csid_hw->event_cb_priv = reserve->event_cb_prv;
@@ -912,10 +994,6 @@ static int cam_tfe_csid_path_reserve(struct cam_tfe_csid_hw *csid_hw,
         }
     }
 
-    /* Enable crop only for ipp */
-    if (reserve->res_id == CAM_TFE_CSID_PATH_RES_IPP)
-        path_data->crop_enable = true;
-
     CAM_DBG(CAM_ISP,
         "Res id: %d height:%d line_start %d line_end %d crop_en %d",
         reserve->res_id, reserve->in_port->height,
@@ -1177,6 +1255,11 @@ static int cam_tfe_csid_enable_hw(struct cam_tfe_csid_hw *csid_hw)
             soc_info->reg_map[0].mem_base +
             csid_reg->ipp_reg->csid_pxl_irq_clear_addr);
 
+    if (csid_reg->cmn_reg->num_ppp)
+        cam_io_w_mb(csid_reg->cmn_reg->ipp_irq_mask_all,
+            soc_info->reg_map[0].mem_base +
+            csid_reg->ppp_reg->csid_pxl_irq_clear_addr);
+
     for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++)
         cam_io_w_mb(csid_reg->cmn_reg->rdi_irq_mask_all,
             soc_info->reg_map[0].mem_base +
@@ -1207,6 +1290,12 @@ static int cam_tfe_csid_enable_hw(struct cam_tfe_csid_hw *csid_hw)
         path_data->res_sof_cnt = 0;
     }
 
+    if (csid_reg->cmn_reg->num_ppp) {
+        path_data = (struct cam_tfe_csid_path_cfg *)
+            csid_hw->ppp_res.res_priv;
+        path_data->res_sof_cnt = 0;
+    }
+
     for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
         path_data = (struct cam_tfe_csid_path_cfg *)
             csid_hw->rdi_res[i].res_priv;
@@ -1294,15 +1383,22 @@ static int cam_tfe_csid_init_config_pxl_path(
     csid_reg = csid_hw->csid_info->csid_reg;
     soc_info = &csid_hw->hw_info->soc_info;
 
-    pxl_reg = csid_reg->ipp_reg;
+    if (res->res_id == CAM_TFE_CSID_PATH_RES_IPP)
+        pxl_reg = csid_reg->ipp_reg;
+    else
+        pxl_reg = csid_reg->ppp_reg;
+
     if (!pxl_reg) {
-        CAM_ERR(CAM_ISP, "CSID:%d IPP :%d is not supported on HW",
-            csid_hw->hw_intf->hw_idx, res->res_id);
+        CAM_ERR(CAM_ISP, "CSID:%d %s :%d is not supported on HW",
+            csid_hw->hw_intf->hw_idx,
+            (res->res_id == CAM_TFE_CSID_PATH_RES_IPP) ? "IPP" : "PPP",
+            res->res_id);
         return -EINVAL;
     }
 
-    CAM_DBG(CAM_ISP, "Config IPP Path");
-    rc = cam_tfe_csid_get_format_ipp(path_data->in_format,
+    CAM_DBG(CAM_ISP, "Config %s Path",
+        (res->res_id == CAM_TFE_CSID_PATH_RES_IPP) ? "IPP" : "PPP");
+    rc = cam_tfe_csid_get_format_ipp_ppp(path_data->in_format,
         &decode_format, &plain_format);
     if (rc)
         return rc;
@@ -1481,18 +1577,25 @@ static int cam_tfe_csid_deinit_pxl_path(
     csid_reg = csid_hw->csid_info->csid_reg;
     soc_info = &csid_hw->hw_info->soc_info;
 
-    pxl_reg = csid_reg->ipp_reg;
+    if (res->res_id == CAM_TFE_CSID_PATH_RES_IPP)
+        pxl_reg = csid_reg->ipp_reg;
+    else
+        pxl_reg = csid_reg->ppp_reg;
+
     if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW) {
         CAM_ERR(CAM_ISP,
-            "CSID:%d IPP Res type %d res_id:%d in wrong state %d",
+            "CSID:%d %s Res type %d res_id:%d in wrong state %d",
             csid_hw->hw_intf->hw_idx,
+            (res->res_id == CAM_TFE_CSID_PATH_RES_IPP) ? "IPP" : "PPP",
             res->res_type, res->res_id, res->res_state);
         rc = -EINVAL;
     }
 
     if (!pxl_reg) {
-        CAM_ERR(CAM_ISP, "CSID:%d IPP %d is not supported on HW",
-            csid_hw->hw_intf->hw_idx, res->res_id);
+        CAM_ERR(CAM_ISP, "CSID:%d %s %d is not supported on HW",
+            csid_hw->hw_intf->hw_idx,
+            (res->res_id == CAM_TFE_CSID_PATH_RES_IPP) ? "IPP" : "PPP",
+            res->res_id);
         rc = -EINVAL;
         goto end;
     }
@@ -1666,6 +1769,7 @@ static int cam_tfe_csid_disable_pxl_path(
     }
 
     pxl_reg = csid_reg->ipp_reg;
+
     if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
         CAM_DBG(CAM_ISP, "CSID:%d IPP path Res:%d Invalid state%d",
             csid_hw->hw_intf->hw_idx, res->res_id, res->res_state);
@@ -1718,6 +1822,164 @@ static int cam_tfe_csid_disable_pxl_path(
     return rc;
 }
 
+static int cam_tfe_csid_enable_ppp_path(
+    struct cam_tfe_csid_hw *csid_hw,
+    struct cam_isp_resource_node *res)
+{
+    const struct cam_tfe_csid_reg_offset *csid_reg;
+    struct cam_hw_soc_info *soc_info;
+    struct cam_tfe_csid_path_cfg *path_data;
+    const struct cam_tfe_csid_pxl_reg_offset *ppp_reg = NULL;
+    uint32_t val = 0;
+
+    path_data = (struct cam_tfe_csid_path_cfg *) res->res_priv;
+    csid_reg = csid_hw->csid_info->csid_reg;
+    soc_info = &csid_hw->hw_info->soc_info;
+    ppp_reg = csid_reg->ppp_reg;
+
+    if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW) {
+        CAM_ERR(CAM_ISP,
+            "CSID:%d PPP path res type:%d res_id:%d Invalid state%d",
+            csid_hw->hw_intf->hw_idx,
+            res->res_type, res->res_id, res->res_state);
+        return -EINVAL;
+    }
+
+    if (!ppp_reg) {
+        CAM_ERR(CAM_ISP, "CSID:%d PPP resid: %d not supported on HW",
+            csid_hw->hw_intf->hw_idx, res->res_id);
+        return -EINVAL;
+    }
+
+    CAM_DBG(CAM_ISP, "CSID:%d Enable PPP path", csid_hw->hw_intf->hw_idx);
+
+    /* Set master or slave path */
+    if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER)
+        /* Set halt mode as master */
+        val = (TFE_CSID_HALT_MODE_SLAVE << ppp_reg->halt_mode_shift) |
+            (ppp_reg->halt_master_sel_master_val <<
+            ppp_reg->halt_master_sel_shift);
+    else if (path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
+        /* Set halt mode as slave and set master idx */
+        val = (TFE_CSID_HALT_MODE_SLAVE << ppp_reg->halt_mode_shift) |
+            (ppp_reg->halt_master_sel_slave_val <<
+            ppp_reg->halt_master_sel_shift);
+    else
+        /* Default is internal halt mode */
+        val = 0;
+
+    cam_io_w_mb(val, soc_info->reg_map[0].mem_base + ppp_reg->csid_pxl_ctrl_addr);
+
+    CAM_DBG(CAM_ISP, "CSID:%d PPP Ctrl val: 0x%x", csid_hw->hw_intf->hw_idx, val);
+
+    /* Enable the required ppp path interrupts */
+    val = TFE_CSID_PATH_INFO_RST_DONE | TFE_CSID_PATH_ERROR_FIFO_OVERFLOW |
+        TFE_CSID_PATH_PPP_ERROR_CCIF_VIOLATION | TFE_CSID_PATH_PPP_OVERFLOW_IRQ;
+
+    if (csid_reg->cmn_reg->format_measure_support)
+        val |= TFE_CSID_PATH_ERROR_PIX_COUNT | TFE_CSID_PATH_ERROR_LINE_COUNT;
+
+    if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_SOF_IRQ)
+        val |= TFE_CSID_PATH_INFO_INPUT_SOF;
+    if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_EOF_IRQ)
+        val |= TFE_CSID_PATH_INFO_INPUT_EOF;
+
+    cam_io_w_mb(val, soc_info->reg_map[0].mem_base + ppp_reg->csid_pxl_irq_mask_addr);
+
+    CAM_DBG(CAM_ISP, "CSID:%d Enable PPP IRQ mask 0x%x", csid_hw->hw_intf->hw_idx, val);
+
+    res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
+    return 0;
+}
+
+static int cam_tfe_csid_disable_ppp_path(
+    struct cam_tfe_csid_hw *csid_hw,
+    struct cam_isp_resource_node *res,
+    enum cam_tfe_csid_halt_cmd stop_cmd)
+{
+    int rc = 0;
+    uint32_t val = 0;
+    const struct cam_tfe_csid_reg_offset *csid_reg;
+    struct cam_hw_soc_info *soc_info;
+    struct cam_tfe_csid_path_cfg *path_data;
+    const struct cam_tfe_csid_pxl_reg_offset *ppp_reg;
+
+    path_data = (struct cam_tfe_csid_path_cfg *) res->res_priv;
+    csid_reg = csid_hw->csid_info->csid_reg;
+    soc_info = &csid_hw->hw_info->soc_info;
+
+    if (res->res_id >= CAM_TFE_CSID_PATH_RES_MAX) {
+        CAM_DBG(CAM_ISP, "CSID:%d Invalid res id%d",
+            csid_hw->hw_intf->hw_idx, res->res_id);
+        return -EINVAL;
+    }
+
+    if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
+        res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
+        CAM_DBG(CAM_ISP, "CSID:%d Res:%d already in stopped state:%d",
+            csid_hw->hw_intf->hw_idx, res->res_id, res->res_state);
+        return rc;
+    }
+
+    ppp_reg = csid_reg->ppp_reg;
+    if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
+        CAM_DBG(CAM_ISP, "CSID:%d IPP path Res:%d Invalid state%d",
+            csid_hw->hw_intf->hw_idx, res->res_id, res->res_state);
+        return -EINVAL;
+    }
+
+    if (!ppp_reg) {
+        CAM_ERR(CAM_ISP, "CSID:%d PPP %d is not supported on HW",
+            csid_hw->hw_intf->hw_idx, res->res_id);
+        return -EINVAL;
+    }
+
+    if (stop_cmd != CAM_TFE_CSID_HALT_AT_FRAME_BOUNDARY &&
+        stop_cmd != CAM_TFE_CSID_HALT_IMMEDIATELY) {
+        CAM_ERR(CAM_ISP,
+            "CSID:%d PPP path un supported stop command:%d",
+            csid_hw->hw_intf->hw_idx, stop_cmd);
+        return -EINVAL;
+    }
+
+    CAM_DBG(CAM_ISP, "CSID:%d res_id:%d PPP path",
+        csid_hw->hw_intf->hw_idx, res->res_id);
+
+    cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+        ppp_reg->csid_pxl_irq_mask_addr);
+
+    if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER ||
+        path_data->sync_mode == CAM_ISP_HW_SYNC_NONE) {
+        /* configure Halt */
+        val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+            ppp_reg->csid_pxl_ctrl_addr);
+        val &= ~0x3F;
+        val |= (TFE_CSID_HALT_MODE_SLAVE << ppp_reg->halt_mode_shift);
+        val |= (ppp_reg->halt_master_sel_master_val <<
+            ppp_reg->halt_master_sel_shift);
+        val |= stop_cmd;
+        cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+            ppp_reg->csid_pxl_ctrl_addr);
+    }
+
+    if (path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE &&
+        stop_cmd == CAM_TFE_CSID_HALT_IMMEDIATELY) {
+        /* configure Halt for slave */
+        val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+            ppp_reg->csid_pxl_ctrl_addr);
+        val &= ~0x3F;
+        val |= (TFE_CSID_HALT_MODE_SLAVE << ppp_reg->halt_mode_shift);
+        val |= (ppp_reg->halt_master_sel_slave_val <<
+            ppp_reg->halt_master_sel_shift);
+        val |= stop_cmd;
+        cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+            ppp_reg->csid_pxl_ctrl_addr);
+    }
+
+    return rc;
+}
+
 static int cam_tfe_csid_init_config_rdi_path(
     struct cam_tfe_csid_hw *csid_hw,
     struct cam_isp_resource_node *res)
@@ -2032,6 +2294,14 @@ static int cam_tfe_csid_poll_stop_status(
                 CAM_ISP_RESOURCE_STATE_STREAMING)
                 continue;
 
+        } else if (res_id == CAM_TFE_CSID_PATH_RES_PPP) {
+            csid_status_addr =
+                csid_reg->ppp_reg->csid_pxl_status_addr;
+
+            if (csid_hw->ppp_res.res_state !=
+                CAM_ISP_RESOURCE_STATE_STREAMING)
+                continue;
+
         } else {
             csid_status_addr =
                 csid_reg->rdi_reg[res_id]->csid_rdi_status_addr;
@@ -2129,6 +2399,19 @@ static int cam_tfe_csid_get_time_stamp(
                 csid_reg->ipp_reg->csid_pxl_timestamp_perv0_sof_addr,
                 &time_stamp->prev_time_stamp_val);
         }
+    } else if (res->res_id == CAM_TFE_CSID_PATH_RES_PPP) {
+        torn = __cam_tfe_csid_read_timestamp(
+            soc_info->reg_map[0].mem_base,
+            csid_reg->ppp_reg->csid_pxl_timestamp_curr1_sof_addr,
+            csid_reg->ppp_reg->csid_pxl_timestamp_curr0_sof_addr,
+            &time_stamp->time_stamp_val);
+        if (time_stamp->get_prev_timestamp) {
+            prev_torn = __cam_tfe_csid_read_timestamp(
+                soc_info->reg_map[0].mem_base,
+                csid_reg->ppp_reg->csid_pxl_timestamp_perv1_sof_addr,
+                csid_reg->ppp_reg->csid_pxl_timestamp_perv0_sof_addr,
+                &time_stamp->prev_time_stamp_val);
+        }
     } else {
         id = res->res_id;
         rdi_reg = csid_reg->rdi_reg[id];
@@ -2229,6 +2512,11 @@ static int cam_tfe_csid_print_hbi_vbi(
             csid_reg->ipp_reg->csid_pxl_format_measure1_addr);
         vbi = cam_io_r_mb(soc_info->reg_map[0].mem_base +
             csid_reg->ipp_reg->csid_pxl_format_measure2_addr);
+    } else if (res->res_id == CAM_TFE_CSID_PATH_RES_PPP) {
+        hbi = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+            csid_reg->ppp_reg->csid_pxl_format_measure1_addr);
+        vbi = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+            csid_reg->ppp_reg->csid_pxl_format_measure2_addr);
     } else if ((res->res_id >= CAM_TFE_CSID_PATH_RES_RDI_0) &&
         (res->res_id <= CAM_TFE_CSID_PATH_RES_RDI_2)) {
         rdi_reg = csid_reg->rdi_reg[res->res_id];
@@ -2286,6 +2574,7 @@ static int cam_tfe_csid_get_hw_caps(void *hw_priv,
 
     hw_caps->num_rdis = csid_reg->cmn_reg->num_rdis;
     hw_caps->num_pix = csid_hw->pxl_pipe_enable;
+    hw_caps->num_ppp = csid_reg->cmn_reg->num_ppp;
     hw_caps->major_version = csid_reg->cmn_reg->major_version;
     hw_caps->minor_version = csid_reg->cmn_reg->minor_version;
     hw_caps->version_incr = csid_reg->cmn_reg->version_incr;
@@ -2551,7 +2840,8 @@ static int cam_tfe_csid_init_hw(void *hw_priv,
     if (rc)
         goto end;
 
-    if (res->res_id == CAM_TFE_CSID_PATH_RES_IPP)
+    if ((res->res_id == CAM_TFE_CSID_PATH_RES_IPP) ||
+        (res->res_id == CAM_TFE_CSID_PATH_RES_PPP))
         rc = cam_tfe_csid_init_config_pxl_path(csid_hw, res);
     else
         rc = cam_tfe_csid_init_config_rdi_path(csid_hw, res);
@@ -2607,7 +2897,8 @@ static int cam_tfe_csid_deinit_hw(void *hw_priv,
 
     CAM_DBG(CAM_ISP, "De-Init IPP Path: %d", res->res_id);
 
-    if (res->res_id == CAM_TFE_CSID_PATH_RES_IPP)
+    if ((res->res_id == CAM_TFE_CSID_PATH_RES_IPP) ||
+        (res->res_id == CAM_TFE_CSID_PATH_RES_PPP))
         rc = cam_tfe_csid_deinit_pxl_path(csid_hw, res);
     else
         rc = cam_tfe_csid_deinit_rdi_path(csid_hw, res);
@@ -2662,6 +2953,8 @@ static int cam_tfe_csid_start(void *hw_priv, void *start_args,
     case CAM_ISP_RESOURCE_PIX_PATH:
         if (res->res_id == CAM_TFE_CSID_PATH_RES_IPP)
             rc = cam_tfe_csid_enable_pxl_path(csid_hw, res);
+        else if (res->res_id == CAM_TFE_CSID_PATH_RES_PPP)
+            rc = cam_tfe_csid_enable_ppp_path(csid_hw, res);
         else
             rc = cam_tfe_csid_enable_rdi_path(csid_hw, res);
         break;
@@ -2756,6 +3049,9 @@ static int cam_tfe_csid_stop(void *hw_priv,
             if (res->res_id == CAM_TFE_CSID_PATH_RES_IPP)
                 rc = cam_tfe_csid_disable_pxl_path(csid_hw,
                     res, csid_stop->stop_cmd);
+            else if (res->res_id == CAM_TFE_CSID_PATH_RES_PPP)
+                rc = cam_tfe_csid_disable_ppp_path(csid_hw,
+                    res, csid_stop->stop_cmd);
             else
                 rc = cam_tfe_csid_disable_rdi_path(csid_hw,
                     res, csid_stop->stop_cmd);
@@ -2835,6 +3131,22 @@ static int cam_tfe_csid_sof_irq_debug(
         }
     }
 
+    if (csid_reg->ppp_reg) {
+        val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+            csid_reg->ppp_reg->csid_pxl_irq_mask_addr);
+
+        if (val) {
+            if (sof_irq_enable)
+                val |= TFE_CSID_PATH_INFO_INPUT_SOF;
+            else
+                val &= ~TFE_CSID_PATH_INFO_INPUT_SOF;
+
+            cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+                csid_reg->ppp_reg->csid_pxl_irq_mask_addr);
+            val = 0;
+        }
+    }
+
     for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
         val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
             csid_reg->rdi_reg[i]->csid_rdi_irq_mask_addr);
@@ -2984,6 +3296,29 @@ static int cam_tfe_csid_get_regdump(struct cam_tfe_csid_hw *csid_hw,
             csid_reg->ipp_reg->csid_pxl_vcrop_addr);
         CAM_INFO(CAM_ISP, "offset 0x%x=0x08%x",
             csid_reg->ipp_reg->csid_pxl_vcrop_addr, val);
+    } else if (res->res_id == CAM_TFE_CSID_PATH_RES_PPP) {
+        CAM_INFO(CAM_ISP, "Dumping CSID:%d PPP registers ",
+            csid_hw->hw_intf->hw_idx);
+        val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+            csid_reg->ppp_reg->csid_pxl_cfg0_addr);
+        CAM_INFO(CAM_ISP, "offset 0x%x=0x08%x",
+            csid_reg->ppp_reg->csid_pxl_cfg0_addr, val);
+        val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+            csid_reg->ppp_reg->csid_pxl_cfg1_addr);
+        CAM_INFO(CAM_ISP, "offset 0x%x=0x08%x",
+            csid_reg->ppp_reg->csid_pxl_cfg1_addr, val);
+        val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+            csid_reg->ppp_reg->csid_pxl_ctrl_addr);
+        CAM_INFO(CAM_ISP, "offset 0x%x=0x08%x",
+            csid_reg->ppp_reg->csid_pxl_ctrl_addr, val);
+        val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+            csid_reg->ppp_reg->csid_pxl_hcrop_addr);
+        CAM_INFO(CAM_ISP, "offset 0x%x=0x08%x",
+            csid_reg->ppp_reg->csid_pxl_hcrop_addr, val);
+        val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+            csid_reg->ppp_reg->csid_pxl_vcrop_addr);
+        CAM_INFO(CAM_ISP, "offset 0x%x=0x08%x",
+            csid_reg->ppp_reg->csid_pxl_vcrop_addr, val);
     } else {
         id = res->res_id;
         CAM_INFO(CAM_ISP, "Dumping CSID:%d RDI:%d registers ",
@@ -3321,6 +3656,13 @@ static int cam_tfe_csid_evt_bottom_half_handler(
             csid_hw->hw_intf->hw_idx);
     }
 
+    if (evt_payload->irq_status[TFE_CSID_IRQ_REG_PPP] &
+        TFE_CSID_PATH_INFO_INPUT_SOF) {
+        CAM_INFO_RATE_LIMIT(CAM_ISP,
+            "CSID:%d PPP SOF received",
+            csid_hw->hw_intf->hw_idx);
+    }
+
     for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
         if (evt_payload->irq_status[i] &
             TFE_CSID_PATH_INFO_INPUT_SOF)
@@ -3330,13 +3672,14 @@
         }
     } else {
         CAM_ERR_RATE_LIMIT(CAM_ISP,
-            "CSID %d err %d phy %d irq status TOP: 0x%x RX: 0x%x IPP: 0x%x RDI0: 0x%x RDI1: 0x%x RDI2: 0x%x",
+            "CSID %d err %d phy %d irq status TOP: 0x%x RX: 0x%x IPP: 0x%x PPP: 0x%x RDI0: 0x%x RDI1: 0x%x RDI2: 0x%x",
             csid_hw->hw_intf->hw_idx,
             evt_payload->evt_type,
             csid_hw->csi2_rx_cfg.phy_sel,
             evt_payload->irq_status[TFE_CSID_IRQ_REG_TOP],
             evt_payload->irq_status[TFE_CSID_IRQ_REG_RX],
             evt_payload->irq_status[TFE_CSID_IRQ_REG_IPP],
+            evt_payload->irq_status[TFE_CSID_IRQ_REG_PPP],
             evt_payload->irq_status[TFE_CSID_IRQ_REG_RDI0],
             evt_payload->irq_status[TFE_CSID_IRQ_REG_RDI1],
             evt_payload->irq_status[TFE_CSID_IRQ_REG_RDI2]);
@@ -3421,10 +3764,11 @@ irqreturn_t cam_tfe_csid_irq(int irq_num, void *data)
     struct cam_hw_soc_info *soc_info;
     const struct cam_tfe_csid_reg_offset *csid_reg;
    const struct cam_tfe_csid_pxl_reg_offset *ipp_reg;
+    const struct cam_tfe_csid_pxl_reg_offset *ppp_reg;
     const struct cam_tfe_csid_rdi_reg_offset *rdi_reg;
     const struct cam_tfe_csid_common_reg_offset *cmn_reg;
     const struct cam_tfe_csid_csi2_rx_reg_offset *csi2_reg;
-    uint32_t irq_status[TFE_CSID_IRQ_REG_MAX];
+    uint32_t irq_status[TFE_CSID_IRQ_REG_MAX] = {0};
     bool fatal_err_detected = false, is_error_irq = false;
     uint32_t sof_irq_debug_en = 0, log_en = 0;
     unsigned long flags;
@@ -3458,6 +3802,11 @@ irqreturn_t cam_tfe_csid_irq(int irq_num, void *data)
             cam_io_r_mb(soc_info->reg_map[0].mem_base +
             csid_reg->ipp_reg->csid_pxl_irq_status_addr);
 
+    if (csid_reg->cmn_reg->num_ppp)
+        irq_status[TFE_CSID_IRQ_REG_PPP] =
+            cam_io_r_mb(soc_info->reg_map[0].mem_base +
+            csid_reg->ppp_reg->csid_pxl_irq_status_addr);
+
     for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++)
         irq_status[i] =
             cam_io_r_mb(soc_info->reg_map[0].mem_base +
@@ -3477,6 +3826,11 @@ irqreturn_t cam_tfe_csid_irq(int irq_num, void *data)
             soc_info->reg_map[0].mem_base +
             csid_reg->ipp_reg->csid_pxl_irq_clear_addr);
 
+    if (csid_reg->cmn_reg->num_ppp)
+        cam_io_w_mb(irq_status[TFE_CSID_IRQ_REG_PPP],
+            soc_info->reg_map[0].mem_base +
+            csid_reg->ppp_reg->csid_pxl_irq_clear_addr);
+
     for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
         cam_io_w_mb(irq_status[i],
             soc_info->reg_map[0].mem_base +
@@ -3700,6 +4054,11 @@ handle_fatal_error:
             CAM_INFO_RATE_LIMIT(CAM_ISP,
                 "CSID IPP reset complete");
 
+        if (irq_status[TFE_CSID_IRQ_REG_PPP] &
+            BIT(csid_reg->cmn_reg->path_rst_done_shift_val))
+            CAM_INFO_RATE_LIMIT(CAM_ISP,
+                "CSID PPP reset complete");
+
         if (irq_status[TFE_CSID_IRQ_REG_TOP])
             CAM_INFO_RATE_LIMIT(CAM_ISP,
                 "CSID TOP reset complete");
@@ -3781,6 +4140,77 @@ handle_fatal_error:
 
     }
 
+    /* read the PPP errors */
+    if (csid_reg->cmn_reg->num_ppp) {
+        /* PPP reset done bit */
+        if (irq_status[TFE_CSID_IRQ_REG_PPP] &
+            BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
+            CAM_DBG(CAM_ISP, "CSID PPP reset complete");
+            complete(&csid_hw->csid_ppp_complete);
+        }
+
+        if ((irq_status[TFE_CSID_IRQ_REG_PPP] &
+            TFE_CSID_PATH_INFO_INPUT_SOF) &&
+            (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_SOF_IRQ)) {
+            if (!csid_hw->sof_irq_triggered)
+                CAM_INFO_RATE_LIMIT(CAM_ISP,
+                    "CSID:%d PPP SOF received",
+                    csid_hw->hw_intf->hw_idx);
+            else
+                log_en = 1;
+
+            if (csid_hw->sof_irq_triggered)
+                csid_hw->irq_debug_cnt++;
+        }
+
+        if ((irq_status[TFE_CSID_IRQ_REG_PPP] &
+            TFE_CSID_PATH_INFO_INPUT_EOF) &&
+            (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_EOF_IRQ)) {
+            CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d PPP EOF received",
+                csid_hw->hw_intf->hw_idx);
+        }
+
+        if (irq_status[TFE_CSID_IRQ_REG_PPP] &
+            TFE_CSID_PATH_ERROR_FIFO_OVERFLOW) {
+            /* Stop PPP path immediately */
+            cam_io_w_mb(CAM_TFE_CSID_HALT_IMMEDIATELY,
+                soc_info->reg_map[0].mem_base +
+                csid_reg->ppp_reg->csid_pxl_ctrl_addr);
+            is_error_irq = true;
+        }
+
+        if (irq_status[TFE_CSID_IRQ_REG_PPP] &
+            TFE_CSID_PATH_PPP_ERROR_CCIF_VIOLATION)
+            is_error_irq = true;
+
+        if ((irq_status[TFE_CSID_IRQ_REG_PPP] &
+            TFE_CSID_PATH_ERROR_PIX_COUNT) ||
+            (irq_status[TFE_CSID_IRQ_REG_PPP] &
+            TFE_CSID_PATH_ERROR_LINE_COUNT)) {
+            ppp_reg = csid_reg->ppp_reg;
+            cmn_reg = csid_reg->cmn_reg;
+            val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+                ppp_reg->csid_pxl_format_measure0_addr);
+            val1 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+                ppp_reg->csid_pxl_format_measure_cfg1_addr);
+
+            CAM_ERR(CAM_ISP,
+                "Pix/Line count error for CSID: %d PPP path, Expected:: height: %d, width: %d and Actual:: height: %d width %d",
+                csid_hw->hw_intf->hw_idx,
+                ((val1 >>
+                cmn_reg->format_measure_height_shift_val) &
+                cmn_reg->format_measure_height_mask_val),
+                val1 &
+                cmn_reg->format_measure_width_mask_val,
+                ((val >>
+                cmn_reg->format_measure_height_shift_val) &
+                cmn_reg->format_measure_height_mask_val),
+                val &
+                cmn_reg->format_measure_width_mask_val);
+        }
+
+    }
+
     for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
 
         if ((irq_status[i] &
@@ -3860,13 +4290,14 @@ handle_fatal_error:
     }
 
     if (is_error_irq || log_en) {
-        CAM_ERR_RATE_LIMIT(CAM_ISP,
-            "CSID %d irq status TOP: 0x%x RX: 0x%x IPP: 0x%x",
+        CAM_ERR(CAM_ISP,
+            "CSID %d irq status TOP: 0x%x RX: 0x%x IPP: 0x%x PPP: 0x%x",
             csid_hw->hw_intf->hw_idx,
             irq_status[TFE_CSID_IRQ_REG_TOP],
             irq_status[TFE_CSID_IRQ_REG_RX],
-            irq_status[TFE_CSID_IRQ_REG_IPP]);
-        CAM_ERR_RATE_LIMIT(CAM_ISP,
+            irq_status[TFE_CSID_IRQ_REG_IPP],
+            irq_status[TFE_CSID_IRQ_REG_PPP]);
+        CAM_ERR(CAM_ISP,
             "RDI0: 0x%x RDI1: 0x%x RDI2: 0x%x CSID clk:%d",
             irq_status[TFE_CSID_IRQ_REG_RDI0],
             irq_status[TFE_CSID_IRQ_REG_RDI1],
@@ -3922,6 +4353,7 @@ int cam_tfe_csid_hw_probe_init(struct cam_hw_intf *csid_hw_intf,
     init_completion(&tfe_csid_hw->csid_top_complete);
     init_completion(&tfe_csid_hw->csid_csi2_complete);
     init_completion(&tfe_csid_hw->csid_ipp_complete);
+    init_completion(&tfe_csid_hw->csid_ppp_complete);
     for (i = 0; i < CAM_TFE_CSID_RDI_MAX; i++)
         init_completion(&tfe_csid_hw->csid_rdin_complete[i]);
 
@@ -4010,6 +4442,24 @@ int cam_tfe_csid_hw_probe_init(struct cam_hw_intf *csid_hw_intf,
         tfe_csid_hw->pxl_pipe_enable = 1;
     }
 
+    /* Initialize the PPP resources */
+    if (tfe_csid_hw->csid_info->csid_reg->cmn_reg->num_ppp) {
+        CAM_DBG(CAM_ISP, "initializing the ppp path");
+
+        tfe_csid_hw->ppp_res.res_type = CAM_ISP_RESOURCE_PIX_PATH;
+        tfe_csid_hw->ppp_res.res_id = CAM_TFE_CSID_PATH_RES_PPP;
+        tfe_csid_hw->ppp_res.res_state =
+            CAM_ISP_RESOURCE_STATE_AVAILABLE;
+        tfe_csid_hw->ppp_res.hw_intf = tfe_csid_hw->hw_intf;
+        path_data = kzalloc(sizeof(*path_data),
+            GFP_KERNEL);
+        if (!path_data) {
+            rc = -ENOMEM;
+            goto err;
+        }
+        tfe_csid_hw->ppp_res.res_priv = path_data;
+    }
+
     /* Initialize the RDI resource */
     for (i = 0; i < tfe_csid_hw->csid_info->csid_reg->cmn_reg->num_rdis;
         i++) {
@@ -4077,6 +4527,8 @@ int cam_tfe_csid_hw_probe_init(struct cam_hw_intf *csid_hw_intf,
 err:
     if (rc) {
         kfree(tfe_csid_hw->ipp_res.res_priv);
+        if (tfe_csid_hw->csid_info->csid_reg->cmn_reg->num_ppp)
+            kfree(tfe_csid_hw->ppp_res.res_priv);
         for (i = 0; i <
             tfe_csid_hw->csid_info->csid_reg->cmn_reg->num_rdis;
             i++)
@@ -4100,6 +4552,9 @@ int cam_tfe_csid_hw_deinit(struct cam_tfe_csid_hw *tfe_csid_hw)
     /* release the privdate data memory from resources */
     kfree(tfe_csid_hw->ipp_res.res_priv);
 
+    if (tfe_csid_hw->csid_info->csid_reg->cmn_reg->num_ppp)
+        kfree(tfe_csid_hw->ppp_res.res_priv);
+
     for (i = 0; i <
         tfe_csid_hw->csid_info->csid_reg->cmn_reg->num_rdis;
         i++) {