@@ -1159,10 +1159,67 @@ end:
 	return 0;
 }
 
-static void cam_ife_csid_ver2_print_illegal_programming_irq_status(
-	struct cam_ife_csid_ver2_hw *csid_hw,
-	struct cam_isp_resource_node *res)
+void cam_ife_csid_hw_ver2_rdi_line_buffer_conflict_handler(
+	void *csid)
+{
+	struct cam_ife_csid_ver2_hw *csid_hw = csid;
+	struct cam_ife_csid_ver2_reg_info *csid_reg = csid_hw->core_info->csid_reg;
+	struct cam_hw_soc_info *soc_info = &csid_hw->hw_info->soc_info;
+	void __iomem *base =
+		soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base;
+	const struct cam_ife_csid_ver2_path_reg_info *path_reg;
+	uint32_t i = 0, rdi_cfg = 0;
+	uint8_t *log_buf = NULL;
+	size_t len = 0;
+
+	for (i = CAM_IFE_PIX_PATH_RES_RDI_0; i < CAM_IFE_PIX_PATH_RES_RDI_4;
+		i++) {
+		path_reg = csid_reg->path_reg[i - CAM_IFE_PIX_PATH_RES_RDI_0];
+
+		if (!(path_reg->capabilities &
+			CAM_IFE_CSID_CAP_LINE_SMOOTHING_IN_RDI))
+			continue;
+
+		rdi_cfg = cam_io_r_mb(base + path_reg->cfg1_addr);
+
+		if (rdi_cfg & path_reg->pix_store_en_shift_val)
+			CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len,
+				"LINE BUFFER ENABLED for RDI%d", (i - CAM_IFE_PIX_PATH_RES_RDI_0));
+	}
+
+	if (len)
+		CAM_ERR(CAM_ISP, "CSID[%d] %s", csid_hw->hw_intf->hw_idx, log_buf);
+
+}
+
+void cam_ife_csid_hw_ver2_mup_mismatch_handler(
+	void *csid, void *resource)
 {
+	struct cam_ife_csid_ver2_hw *csid_hw = csid;
+	struct cam_isp_resource_node *res = resource;
+	struct cam_ife_csid_ver2_path_cfg *path_cfg =
+		(struct cam_ife_csid_ver2_path_cfg *)res->res_priv;
+	struct cam_ife_csid_cid_data *cid_data = &csid_hw->cid_data[path_cfg->cid];
+
+	CAM_INFO(CAM_ISP, "CSID:%d Last MUP value 0x%x programmed for res [id: %d name: %s]",
+		csid_hw->hw_intf->hw_idx, csid_hw->rx_cfg.mup, res->res_id, res->res_name);
+
+	if (cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_1].valid) {
+		CAM_INFO(CAM_ISP, "vc0 %d vc1 %d",
+			cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_0].vc,
+			cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_1].vc);
+	} else {
+		CAM_ERR(CAM_ISP, "Multi-VCDT is not enabled, vc0 %d",
+			cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_0].vc);
+	}
+
+}
+
+void cam_ife_csid_ver2_print_illegal_programming_irq_status(
+	void *csid, void *resource)
+{
+	struct cam_ife_csid_ver2_hw *csid_hw = csid;
+	struct cam_isp_resource_node *res = resource;
 	struct cam_ife_csid_ver2_reg_info *csid_reg = csid_hw->core_info->csid_reg;
 	struct cam_ife_csid_ver2_path_cfg *path_cfg =
 		(struct cam_ife_csid_ver2_path_cfg *)res->res_priv;
@@ -1279,7 +1336,8 @@ static void cam_ife_csid_ver2_print_debug_reg_status(
 }
 
 static int cam_ife_csid_ver2_parse_path_irq_status(
-	struct cam_ife_csid_ver2_hw  *csid_hw,
+	struct cam_ife_csid_ver2_hw   *csid_hw,
+	struct cam_isp_resource_node  *res,
 	uint32_t                     index,
 	uint32_t                     err_mask,
 	uint32_t                     irq_status)
@@ -1287,7 +1345,7 @@ static int cam_ife_csid_ver2_parse_path_irq_status(
 	const uint8_t                          **irq_reg_tag;
 	const struct cam_ife_csid_ver2_reg_info *csid_reg;
 	uint32_t                                  bit_pos = 0;
-	uint32_t                                  status;
+	uint32_t                                  status, err_type = 0;
 	uint32_t                                  sof_irq_debug_en = 0;
 	size_t                                    len = 0;
 	uint8_t                                  *log_buf = NULL;
@@ -1302,9 +1360,14 @@ static int cam_ife_csid_ver2_parse_path_irq_status(
 
 	status = irq_status & err_mask;
 	while (status) {
-		if (status & 0x1 )
+		if (status & 0x1 ) {
 			CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, "%s",
 				csid_reg->path_irq_desc[bit_pos].desc);
+			if (csid_reg->path_irq_desc[bit_pos].err_type)
+				err_type |= csid_reg->path_irq_desc[bit_pos].err_type;
+			if (csid_reg->path_irq_desc[bit_pos].err_handler)
+				csid_reg->path_irq_desc[bit_pos].err_handler(csid_hw, res);
+		}
 		bit_pos++;
 		status >>= 1;
 	}
@@ -1340,7 +1403,7 @@ static int cam_ife_csid_ver2_parse_path_irq_status(
 		}
 	}
 
-	return 0;
+	return err_type;
 }
 
 static int cam_ife_csid_ver2_top_err_irq_bottom_half(
@@ -1379,6 +1442,8 @@ static int cam_ife_csid_ver2_top_err_irq_bottom_half(
 			CAM_ERR(CAM_ISP, "%s %s",
 				csid_reg->top_irq_desc[i].err_name,
 				csid_reg->top_irq_desc[i].desc);
+			if (csid_reg->top_irq_desc[i].err_handler)
+				csid_reg->top_irq_desc[i].err_handler(csid_hw);
 			event_type |= csid_reg->top_irq_desc[i].err_type;
 		}
 	}
@@ -1394,16 +1459,17 @@ static int cam_ife_csid_ver2_top_err_irq_bottom_half(
 	return 0;
 }
 
-static void cam_ife_csid_ver2_print_format_measure_info(
-	struct cam_ife_csid_ver2_hw *csid_hw,
-	struct cam_isp_resource_node *res)
+void cam_ife_csid_ver2_print_format_measure_info(
+	void *csid, void *resource)
 {
-	uint32_t expected_frame = 0, actual_frame = 0;
+	struct cam_ife_csid_ver2_hw *csid_hw = csid;
+	struct cam_isp_resource_node *res = resource;
 	struct cam_ife_csid_ver2_reg_info *csid_reg = csid_hw->core_info->csid_reg;
 	const struct cam_ife_csid_ver2_path_reg_info *path_reg =
 		csid_reg->path_reg[res->res_id];
 	struct cam_hw_soc_info *soc_info = &csid_hw->hw_info->soc_info;
 	void __iomem *base = soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base;
+	uint32_t expected_frame = 0, actual_frame = 0;
 
 	actual_frame = cam_io_r_mb(base + path_reg->format_measure0_addr);
 	expected_frame = cam_io_r_mb(base + path_reg->format_measure_cfg1_addr);
@@ -1504,36 +1570,11 @@ static int cam_ife_csid_ver2_ipp_bottom_half(
 		goto unlock;
 	}
 
-	cam_ife_csid_ver2_parse_path_irq_status(
-		csid_hw,
+	err_type = cam_ife_csid_ver2_parse_path_irq_status(
+		csid_hw, res,
 		CAM_IFE_CSID_IRQ_REG_IPP,
 		err_mask, irq_status_ipp);
 
-
-	if (irq_status_ipp & IFE_CSID_VER2_PATH_ERROR_ILLEGAL_PROGRAM) {
-		if (path_reg->fatal_err_mask &
-			IFE_CSID_VER2_PATH_ERROR_ILLEGAL_PROGRAM) {
-			CAM_ERR(CAM_ISP, "CSID[%u] Illegal Programming for IPP status:0x%x",
-				csid_hw->hw_intf->hw_idx,irq_status_ipp);
-			cam_ife_csid_ver2_print_illegal_programming_irq_status(
-				csid_hw, res);
-		} else {
-			CAM_ERR(CAM_ISP, "Illegal Programming IRQ is not supported");
-			CAM_INFO(CAM_ISP, "CSID[%u]: status:0x%x",
-				csid_hw->hw_intf->hw_idx, irq_status_ipp);
-		}
-		err_type |= CAM_ISP_HW_ERROR_CSID_FATAL;
-	}
-
-	if (irq_status_ipp & IFE_CSID_VER2_PATH_RECOVERY_OVERFLOW)
-		err_type |= CAM_ISP_HW_ERROR_RECOVERY_OVERFLOW;
-
-	if (irq_status_ipp & (IFE_CSID_VER2_PATH_ERROR_PIX_COUNT |
-		IFE_CSID_VER2_PATH_ERROR_LINE_COUNT)) {
-		cam_ife_csid_ver2_print_format_measure_info(csid_hw, res);
-		err_type |= CAM_ISP_HW_ERROR_CSID_FRAME_SIZE;
-	}
-
 	if (err_type)
 		cam_ife_csid_ver2_handle_event_err(csid_hw,
 			irq_status_ipp,
@@ -1604,34 +1645,10 @@ static int cam_ife_csid_ver2_ppp_bottom_half(
 			csid_hw->hw_intf->hw_idx);
 		goto unlock;
 	}
-	cam_ife_csid_ver2_parse_path_irq_status(
-		csid_hw, CAM_IFE_CSID_IRQ_REG_PPP,
+	err_type = cam_ife_csid_ver2_parse_path_irq_status(
+		csid_hw, res, CAM_IFE_CSID_IRQ_REG_PPP,
 		err_mask, irq_status_ppp);
 
-	if (irq_status_ppp & IFE_CSID_VER2_PATH_ERROR_ILLEGAL_PROGRAM) {
-		if (path_reg->fatal_err_mask &
-			IFE_CSID_VER2_PATH_ERROR_ILLEGAL_PROGRAM) {
-			CAM_ERR(CAM_ISP, "CSID[%u] Illegal Programming for PPP status:0x%x",
-				csid_hw->hw_intf->hw_idx,irq_status_ppp);
-			cam_ife_csid_ver2_print_illegal_programming_irq_status(
-				csid_hw, res);
-		} else {
-			CAM_ERR(CAM_ISP, "Illegal Programming IRQ is not supported");
-			CAM_INFO(CAM_ISP, "CSID[%u]: status:0x%x",
-				csid_hw->hw_intf->hw_idx, irq_status_ppp);
-		}
-		err_type |= CAM_ISP_HW_ERROR_CSID_FATAL;
-	}
-
-	if (irq_status_ppp & (IFE_CSID_VER2_PATH_ERROR_PIX_COUNT |
-		IFE_CSID_VER2_PATH_ERROR_LINE_COUNT)) {
-		cam_ife_csid_ver2_print_format_measure_info(csid_hw, res);
-		err_type |= CAM_ISP_HW_ERROR_CSID_FRAME_SIZE;
-	}
-
-	if (irq_status_ppp & IFE_CSID_VER2_PATH_RECOVERY_OVERFLOW)
-		err_type |= CAM_ISP_HW_ERROR_RECOVERY_OVERFLOW;
-
 	if (err_type)
 		cam_ife_csid_ver2_handle_event_err(csid_hw,
 			irq_status_ppp,
@@ -1711,36 +1728,10 @@ static int cam_ife_csid_ver2_rdi_bottom_half(
 		goto end;
 	}
 
-	cam_ife_csid_ver2_parse_path_irq_status(csid_hw,
+	err_type = cam_ife_csid_ver2_parse_path_irq_status(csid_hw, res,
 		path_cfg->irq_reg_idx,
 		err_mask, irq_status_rdi);
 
-	if (irq_status_rdi & IFE_CSID_VER2_PATH_ERROR_ILLEGAL_PROGRAM) {
-		if (rdi_reg->fatal_err_mask &
-			IFE_CSID_VER2_PATH_ERROR_ILLEGAL_PROGRAM) {
-			CAM_ERR(CAM_ISP, "CSID[%u]: Illegal Programming for RDI:%d status:0x%x",
-				csid_hw->hw_intf->hw_idx,
-				res->res_id, irq_status_rdi);
-			cam_ife_csid_ver2_print_illegal_programming_irq_status(
-				csid_hw, res);
-		} else {
-			CAM_ERR(CAM_ISP, "Illegal Programming IRQ is not supported");
-			CAM_INFO(CAM_ISP, "CSID[%u]: RDI:%d status:0x%x",
-				csid_hw->hw_intf->hw_idx,
-				res->res_id, irq_status_rdi);
-		}
-		err_type |= CAM_ISP_HW_ERROR_CSID_FATAL;
-	}
-
-	if (irq_status_rdi & IFE_CSID_VER2_PATH_RECOVERY_OVERFLOW)
-		err_type |= CAM_ISP_HW_ERROR_RECOVERY_OVERFLOW;
-
-	if (irq_status_rdi & (IFE_CSID_VER2_PATH_ERROR_PIX_COUNT |
-		IFE_CSID_VER2_PATH_ERROR_LINE_COUNT)) {
-		cam_ife_csid_ver2_print_format_measure_info(csid_hw, res);
-		err_type |= CAM_ISP_HW_ERROR_CSID_FRAME_SIZE;
-	}
-
 	spin_unlock(&csid_hw->lock_state);
 
 	if (err_type) {