@@ -55,6 +55,28 @@ static void cam_ife_csid_ver2_print_debug_reg_status(
 	struct cam_ife_csid_ver2_hw *csid_hw,
 	struct cam_isp_resource_node *res);
 
+static bool cam_ife_csid_ver2_disable_sof_retime(
+	struct cam_ife_csid_ver2_hw *csid_hw,
+	struct cam_isp_resource_node *res)
+{
+	struct cam_ife_csid_ver2_reg_info *csid_reg = (struct cam_ife_csid_ver2_reg_info *)
+			csid_hw->core_info->csid_reg;
+	struct cam_ife_csid_ver2_path_cfg *path_cfg = (struct cam_ife_csid_ver2_path_cfg *)
+			res->res_priv;
+	const struct cam_ife_csid_ver2_path_reg_info *path_reg = csid_reg->path_reg[res->res_id];
+
+	if (!(path_reg->capabilities & CAM_IFE_CSID_CAP_SOF_RETIME_DIS))
+		return false;
+
+	if (path_cfg->sfe_shdr || path_cfg->lcr_en)
+		return true;
+
+	if (csid_hw->flags.rdi_lcr_en && res->res_id == CAM_IFE_PIX_PATH_RES_PPP)
+		return true;
+
+	return false;
+}
+
 static int cam_ife_csid_ver2_set_debug(
 	struct cam_ife_csid_ver2_hw *csid_hw,
 	uint32_t debug_val)
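/*
 * A minimal standalone sketch of the SOF re-time decision introduced above,
 * assuming simplified stand-in types (sketch_*) and a stand-in capability
 * flag in place of the driver structures; illustrative only, not driver code.
 */
#include <stdbool.h>
#include <stdio.h>

#define SKETCH_CAP_SOF_RETIME_DIS (1u << 0)	/* stand-in for CAM_IFE_CSID_CAP_SOF_RETIME_DIS */

struct sketch_path_cfg {
	bool sfe_shdr;	/* inline sHDR reserved on this path */
	bool lcr_en;	/* LCR routed through this path */
};

static bool sketch_disable_sof_retime(unsigned int path_caps,
	const struct sketch_path_cfg *cfg, bool rdi_lcr_en, bool is_ppp_path)
{
	if (!(path_caps & SKETCH_CAP_SOF_RETIME_DIS))
		return false;			/* path cannot disable SOF re-timing */

	if (cfg->sfe_shdr || cfg->lcr_en)
		return true;			/* sHDR or LCR use case */

	if (rdi_lcr_en && is_ppp_path)
		return true;			/* PPP path while RDI-LCR is active */

	return false;
}

int main(void)
{
	struct sketch_path_cfg cfg = { .sfe_shdr = true, .lcr_en = false };

	printf("disable sof retime: %d\n",
		sketch_disable_sof_retime(SKETCH_CAP_SOF_RETIME_DIS, &cfg, false, false));
	return 0;
}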
@@ -134,6 +156,7 @@ static int cam_ife_csid_ver2_sof_irq_debug(
 	struct cam_ife_csid_ver2_path_cfg *path_cfg;
 	struct cam_isp_resource_node *res;
 	uint32_t irq_mask[CAM_IFE_CSID_IRQ_REG_MAX] = {0};
+	uint32_t data_idx;
 
 	if (*((uint32_t *)cmd_args) == 1)
 		sof_irq_enable = true;
@@ -147,6 +170,7 @@ static int cam_ife_csid_ver2_sof_irq_debug(
 		return 0;
 	}
 
+	data_idx = csid_hw->rx_cfg.phy_sel - 1;
 	csid_reg = (struct cam_ife_csid_ver2_reg_info *)
 			csid_hw->core_info->csid_reg;
 
@@ -185,8 +209,7 @@ static int cam_ife_csid_ver2_sof_irq_debug(
 		csid_hw->rx_cfg.phy_sel - 1);
 
 	cam_subdev_notify_message(CAM_CSIPHY_DEVICE_TYPE,
-		CAM_SUBDEV_MESSAGE_IRQ_ERR,
-		(csid_hw->rx_cfg.phy_sel - 1));
+		CAM_SUBDEV_MESSAGE_IRQ_ERR, (void *)&data_idx);
 
 	return 0;
 }
@@ -897,10 +920,12 @@ static int cam_ife_csid_ver2_handle_event_err(
 	struct cam_ife_csid_ver2_hw *csid_hw,
 	uint32_t irq_status,
 	uint32_t err_type,
+	bool is_secondary,
 	struct cam_isp_resource_node *res)
 {
-	struct cam_isp_hw_event_info evt = {0};
-	struct cam_ife_csid_ver2_path_cfg *path_cfg;
+	struct cam_isp_hw_error_event_info err_evt_info;
+	struct cam_isp_hw_event_info evt = {0};
+	struct cam_ife_csid_ver2_path_cfg *path_cfg;
 
 	if (!csid_hw->event_cb) {
 		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID[%u] event cb not registered",
@@ -911,24 +936,29 @@ static int cam_ife_csid_ver2_handle_event_err(
 	evt.hw_idx = csid_hw->hw_intf->hw_idx;
 	evt.reg_val = irq_status;
 	evt.hw_type = CAM_ISP_HW_TYPE_CSID;
-	evt.err_type = err_type;
+	evt.is_secondary_evt = is_secondary;
+	err_evt_info.err_type = err_type;
+	evt.event_data = (void *)&err_evt_info;
 
-	if (res) {
-		cam_ife_csid_ver2_print_debug_reg_status(csid_hw, res);
-		path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv;
-		evt.res_id = res->res_id;
-		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"csid[%u] Res:%s Err 0x%x status 0x%x time_stamp: %lld:%lld",
-			csid_hw->hw_intf->hw_idx, res->res_name, err_type,
-			irq_status, path_cfg->error_ts.tv_sec,
-			path_cfg->error_ts.tv_nsec);
-	} else {
-		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"csid[%u] Rx Err: 0x%x status 0x%x",
-			csid_hw->hw_intf->hw_idx, err_type, irq_status);
+	if (!is_secondary) {
+		if (res) {
+			cam_ife_csid_ver2_print_debug_reg_status(csid_hw, res);
+			path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv;
+			evt.res_id = res->res_id;
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"csid[%u] Res:%s Err 0x%x status 0x%x time_stamp: %lld:%lld",
+				csid_hw->hw_intf->hw_idx, res->res_name, err_type,
+				irq_status, path_cfg->error_ts.tv_sec,
+				path_cfg->error_ts.tv_nsec);
+		} else {
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"csid[%u] Rx Err: 0x%x status 0x%x",
+				csid_hw->hw_intf->hw_idx, err_type, irq_status);
+		}
 	}
 
-	evt.in_core_idx = cam_ife_csid_ver2_input_core_to_hw_idx(csid_hw->top_cfg.input_core_type);
+	evt.in_core_idx =
+		cam_ife_csid_ver2_input_core_to_hw_idx(csid_hw->top_cfg.input_core_type);
 
 	csid_hw->event_cb(csid_hw->token, CAM_ISP_HW_EVENT_ERROR, (void *)&evt);
 
@@ -952,6 +982,7 @@ static int cam_ife_csid_ver2_rx_err_bottom_half(
 	uint32_t event_type = 0;
 	uint32_t long_pkt_ftr_val;
 	uint32_t total_crc;
+	uint32_t data_idx;
 
 	if (!handler_priv || !evt_payload_priv) {
 		CAM_ERR(CAM_ISP, "Invalid params");
@@ -961,6 +992,7 @@ static int cam_ife_csid_ver2_rx_err_bottom_half(
 	payload = evt_payload_priv;
 	csid_hw = handler_priv;
 	soc_info = &csid_hw->hw_info->soc_info;
+	data_idx = csid_hw->rx_cfg.phy_sel -1;
 
 	log_buf = csid_hw->log_buf;
 	memset(log_buf, 0, sizeof(csid_hw->log_buf));
@@ -1103,16 +1135,20 @@ static int cam_ife_csid_ver2_rx_err_bottom_half(
 		csid_hw->hw_intf->hw_idx,
 		payload->irq_reg_val[CAM_IFE_CSID_IRQ_REG_RX]);
 
-	if (csid_hw->flags.fatal_err_detected) {
-		event_type |= CAM_ISP_HW_ERROR_CSID_FATAL;
-		cam_subdev_notify_message(CAM_CSIPHY_DEVICE_TYPE,
-			CAM_SUBDEV_MESSAGE_IRQ_ERR,
-			(csid_hw->rx_cfg.phy_sel -1));
-	}
-	if (event_type)
-		cam_ife_csid_ver2_handle_event_err(csid_hw,
-			rx_irq_status, event_type, NULL);
+	if (!csid_hw->flags.reset_awaited) {
+		if (csid_hw->flags.fatal_err_detected) {
+			event_type |= CAM_ISP_HW_ERROR_CSID_FATAL;
+			cam_subdev_notify_message(CAM_CSIPHY_DEVICE_TYPE,
+				CAM_SUBDEV_MESSAGE_IRQ_ERR, (void *)&data_idx);
+		}
+
+		if (event_type) {
+			cam_ife_csid_ver2_handle_event_err(csid_hw,
+				rx_irq_status, event_type, false, NULL);
+			csid_hw->flags.reset_awaited = true;
+		}
+	}
 
 unlock:
 	spin_unlock(&csid_hw->lock_state);
 end:
@@ -1349,7 +1385,7 @@ static int cam_ife_csid_ver2_top_err_irq_bottom_half(
 
 	if (event_type)
 		cam_ife_csid_ver2_handle_event_err(csid_hw,
-			irq_status, event_type, NULL);
+			irq_status, event_type, false, NULL);
 
 	cam_ife_csid_ver2_put_evt_payload(csid_hw, &payload,
 		&csid_hw->path_free_payload_list,
@@ -1429,10 +1465,11 @@ static int cam_ife_csid_ver2_ipp_bottom_half(
 		goto end;
 	}
 
+	evt_info.hw_type = CAM_ISP_HW_TYPE_CSID;
 	evt_info.hw_idx = csid_hw->hw_intf->hw_idx;
 	evt_info.res_id = CAM_IFE_PIX_PATH_RES_IPP;
 	evt_info.res_type = CAM_ISP_RESOURCE_PIX_PATH;
-	evt_info.reg_val = irq_status_ipp;
+	evt_info.reg_val  = irq_status_ipp;
 
 	if (irq_status_ipp & IFE_CSID_VER2_PATH_CAMIF_EOF) {
 		if (csid_hw->event_cb)
@@ -1501,6 +1538,7 @@ static int cam_ife_csid_ver2_ipp_bottom_half(
 		cam_ife_csid_ver2_handle_event_err(csid_hw,
 			irq_status_ipp,
 			err_type,
+			false,
 			res);
 unlock:
 	spin_unlock(&csid_hw->lock_state);
@@ -1598,6 +1636,7 @@ static int cam_ife_csid_ver2_ppp_bottom_half(
 		cam_ife_csid_ver2_handle_event_err(csid_hw,
 			irq_status_ppp,
 			err_type,
+			false,
 			res);
 unlock:
 	spin_unlock(&csid_hw->lock_state);
@@ -1625,7 +1664,7 @@ static int cam_ife_csid_ver2_rdi_bottom_half(
 	uint32_t irq_status_rdi;
 	uint32_t err_mask;
 	uint32_t err_type = 0;
-	bool skip_sof_notify = false;
+	bool skip_evt_notify = false;
 	struct cam_isp_hw_event_info evt_info;
 
 	if (!handler_priv || !evt_payload_priv) {
@@ -1706,7 +1745,7 @@ static int cam_ife_csid_ver2_rdi_bottom_half(
 
 	if (err_type) {
 		cam_ife_csid_ver2_handle_event_err(csid_hw,
-			irq_status_rdi, err_type, res);
+			irq_status_rdi, err_type, false, res);
 		goto end;
 	}
 
@@ -1720,19 +1759,34 @@ static int cam_ife_csid_ver2_rdi_bottom_half(
 	evt_info.reg_val = irq_status_rdi;
 	evt_info.hw_type = CAM_ISP_HW_TYPE_CSID;
 
-	/* Check for secondary evt */
-	if ((path_cfg->en_secondary_evt) &&
-		(irq_status_rdi & IFE_CSID_VER2_PATH_CAMIF_SOF)) {
-		evt_info.is_secondary_evt = true;
-		CAM_DBG(CAM_ISP,
-			"CSID[%u] RDI:%u notify CAMIF SOF as secondary evt",
-			csid_hw->hw_intf->hw_idx, res->res_id);
-
-		csid_hw->event_cb(csid_hw->token,
-			CAM_ISP_HW_EVENT_SOF, (void *)&evt_info);
-		skip_sof_notify = true;
+	/* Check for specific secondary events */
+	if (path_cfg->sec_evt_config.en_secondary_evt) {
+		if ((irq_status_rdi & IFE_CSID_VER2_PATH_CAMIF_SOF) &&
+			(path_cfg->sec_evt_config.evt_type & CAM_IFE_CSID_EVT_SOF)) {
+			evt_info.is_secondary_evt = true;
+			csid_hw->event_cb(csid_hw->token,
+				CAM_ISP_HW_EVENT_SOF, (void *)&evt_info);
+			skip_evt_notify = true;
+		}
+
+		if ((irq_status_rdi & IFE_CSID_VER2_PATH_CAMIF_EPOCH0) &&
+			(path_cfg->sec_evt_config.evt_type & CAM_IFE_CSID_EVT_EPOCH)) {
+			evt_info.is_secondary_evt = true;
+			csid_hw->event_cb(csid_hw->token,
+				CAM_ISP_HW_EVENT_EPOCH, (void *)&evt_info);
+			skip_evt_notify = true;
+		}
+
+		if ((irq_status_rdi &
+			IFE_CSID_VER2_PATH_SENSOR_SWITCH_OUT_OF_SYNC_FRAME_DROP) &&
+			(path_cfg->sec_evt_config.evt_type &
+			CAM_IFE_CSID_EVT_SENSOR_SYNC_FRAME_DROP)) {
+			cam_ife_csid_ver2_handle_event_err(csid_hw,
+				irq_status_rdi, CAM_ISP_HW_ERROR_CSID_SENSOR_FRAME_DROP, true, res);
+		}
 	}
 
+	evt_info.is_secondary_evt = false;
 	if (!path_cfg->handle_camif_irq)
 		goto end;
 
@@ -1741,7 +1795,7 @@ static int cam_ife_csid_ver2_rdi_bottom_half(
 			CAM_ISP_HW_EVENT_EOF,
 			(void *)&evt_info);
 
-	if (!skip_sof_notify && (irq_status_rdi & IFE_CSID_VER2_PATH_CAMIF_SOF))
+	if (!skip_evt_notify && (irq_status_rdi & IFE_CSID_VER2_PATH_CAMIF_SOF))
 		csid_hw->event_cb(csid_hw->token,
 			CAM_ISP_HW_EVENT_SOF,
 			(void *)&evt_info);
@@ -1751,7 +1805,7 @@ static int cam_ife_csid_ver2_rdi_bottom_half(
 			CAM_ISP_HW_EVENT_REG_UPDATE,
 			(void *)&evt_info);
 
-	if (irq_status_rdi & IFE_CSID_VER2_PATH_CAMIF_EPOCH0)
+	if (!skip_evt_notify && (irq_status_rdi & IFE_CSID_VER2_PATH_CAMIF_EPOCH0))
 		csid_hw->event_cb(csid_hw->token,
 			CAM_ISP_HW_EVENT_EPOCH,
 			(void *)&evt_info);
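/*
 * A minimal standalone sketch of how a consumer of the CSID event callback
 * could separate the secondary SOF/EPOCH notifications flagged above
 * (evt_info.is_secondary_evt) from regular per-frame events. The types and
 * handler below are simplified stand-ins assumed for illustration; they are
 * not the actual ISP hw-mgr code.
 */
#include <stdbool.h>
#include <stdio.h>

enum sketch_evt_id { SKETCH_EVT_SOF, SKETCH_EVT_EPOCH };

struct sketch_event_info {
	enum sketch_evt_id evt;
	unsigned int       res_id;
	bool               is_secondary_evt;
};

static void sketch_handle_event(const struct sketch_event_info *info)
{
	if (info->is_secondary_evt) {
		/* out-of-band notification, e.g. sensor-switch bookkeeping */
		printf("secondary evt %d on res %u\n", (int)info->evt, info->res_id);
		return;
	}

	/* regular per-frame handling (SOF/EPOCH state machine) */
	printf("primary evt %d on res %u\n", (int)info->evt, info->res_id);
}

int main(void)
{
	struct sketch_event_info info = {
		.evt = SKETCH_EVT_SOF, .res_id = 0, .is_secondary_evt = true };

	sketch_handle_event(&info);
	return 0;
}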
@@ -2081,6 +2135,45 @@ err:
 	return rc;
 }
 
+static bool cam_ife_csid_hw_ver2_need_unpack_mipi(
+	struct cam_ife_csid_ver2_hw *csid_hw,
+	struct cam_csid_hw_reserve_resource_args *reserve,
+	const struct cam_ife_csid_ver2_path_reg_info *path_reg,
+	uint32_t format)
+{
+	bool need_unpack = false;
+
+	switch(format) {
+	case CAM_FORMAT_MIPI_RAW_8:
+		need_unpack = (bool)(path_reg->capabilities & CAM_IFE_CSID_CAP_MIPI8_UNPACK);
+		break;
+	case CAM_FORMAT_MIPI_RAW_10:
+		need_unpack = (bool)(path_reg->capabilities & CAM_IFE_CSID_CAP_MIPI10_UNPACK);
+		break;
+	case CAM_FORMAT_MIPI_RAW_12:
+		need_unpack = (bool)(path_reg->capabilities & CAM_IFE_CSID_CAP_MIPI12_UNPACK);
+		break;
+	case CAM_FORMAT_MIPI_RAW_14:
+		need_unpack = (bool)(path_reg->capabilities & CAM_IFE_CSID_CAP_MIPI14_UNPACK);
+		break;
+	case CAM_FORMAT_MIPI_RAW_16:
+		need_unpack = (bool)(path_reg->capabilities & CAM_IFE_CSID_CAP_MIPI16_UNPACK);
+		break;
+	case CAM_FORMAT_MIPI_RAW_20:
+		need_unpack = (bool)(path_reg->capabilities & CAM_IFE_CSID_CAP_MIPI20_UNPACK);
+		break;
+	default:
+		need_unpack = false;
+		break;
+	}
+
+	CAM_DBG(CAM_ISP, "CSID[%u], RDI_%u format %u need_unpack %u sfe_shdr %u",
+		csid_hw->hw_intf->hw_idx, reserve->res_id, format, need_unpack,
+		reserve->sfe_inline_shdr);
+
+	return need_unpack;
+}
+
 static int cam_ife_csid_hw_ver2_config_path_data(
 	struct cam_ife_csid_ver2_hw *csid_hw,
 	struct cam_ife_csid_ver2_path_cfg *path_cfg,
@@ -2093,6 +2186,7 @@ static int cam_ife_csid_hw_ver2_config_path_data(
 		(struct cam_ife_csid_ver2_reg_info *)csid_hw->core_info->csid_reg;
 	struct cam_ife_csid_cid_data *cid_data = &csid_hw->cid_data[cid];
 	struct cam_isp_resource_node *res = &csid_hw->path_res[reserve->res_id];
+	const struct cam_ife_csid_ver2_path_reg_info *path_reg = NULL;
 
 	for(i = 0; i < reserve->in_port->num_valid_vc_dt; i++)
 		path_cfg->in_format[i] = reserve->in_port->format[i];
@@ -2109,7 +2203,9 @@ static int cam_ife_csid_hw_ver2_config_path_data(
 	path_cfg->vertical_bin = reserve->in_port->vertical_bin;
 	path_cfg->qcfa_bin = reserve->in_port->qcfa_bin;
 	path_cfg->num_bytes_out = reserve->in_port->num_bytes_out;
-	path_cfg->en_secondary_evt = reserve->en_secondary_evt;
+	path_cfg->sec_evt_config.en_secondary_evt = reserve->sec_evt_config.en_secondary_evt;
+	path_cfg->sec_evt_config.evt_type = reserve->sec_evt_config.evt_type;
+	path_reg = csid_reg->path_reg[res->res_id];
 
 	if (reserve->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
 		path_cfg->start_pixel = reserve->in_port->left_start;
@@ -2163,11 +2259,18 @@ static int cam_ife_csid_hw_ver2_config_path_data(
 	case CAM_IFE_PIX_PATH_RES_RDI_3:
 	case CAM_IFE_PIX_PATH_RES_RDI_4:
 		is_rpp = path_cfg->crop_enable || path_cfg->drop_enable;
+		/*
+		 * If CSID gives unpacked output, any packing that is still
+		 * needed is done at the WM; that decision is taken at the WM
+		 * based on the format.
+		 */
+		reserve->use_wm_pack = cam_ife_csid_hw_ver2_need_unpack_mipi(csid_hw,
+			reserve, path_reg, path_cfg->out_format);
 		rc = cam_ife_csid_get_format_rdi(
 			path_cfg->in_format[CAM_IFE_CSID_MULTI_VC_DT_GRP_0],
 			path_cfg->out_format,
 			&path_cfg->path_format[CAM_IFE_CSID_MULTI_VC_DT_GRP_0],
-			is_rpp);
+			is_rpp, reserve->use_wm_pack);
 		if (rc)
 			goto end;
 
@@ -2178,7 +2281,7 @@ static int cam_ife_csid_hw_ver2_config_path_data(
 			path_cfg->in_format[CAM_IFE_CSID_MULTI_VC_DT_GRP_1],
 			path_cfg->out_format,
 			&path_cfg->path_format[CAM_IFE_CSID_MULTI_VC_DT_GRP_1],
-			is_rpp);
+			is_rpp, reserve->use_wm_pack);
 		if (rc)
 			goto end;
 	}
@@ -2290,41 +2393,32 @@ static int cam_ife_csid_ver_config_camif(
 	struct cam_csid_hw_reserve_resource_args *reserve,
 	struct cam_ife_csid_ver2_path_cfg *path_cfg)
 {
-	int rc = 0;
-	uint32_t epoch0 = 0;
 	struct cam_ife_csid_ver2_reg_info *csid_reg;
-	const struct cam_ife_csid_ver2_path_reg_info *rdi_reg = NULL;
 
 	csid_reg = (struct cam_ife_csid_ver2_reg_info *)
-		csid_hw->core_info->csid_reg;
+			csid_hw->core_info->csid_reg;
 
 	switch (reserve->res_id) {
-	case CAM_IFE_PIX_PATH_RES_IPP:
-		epoch0 = (path_cfg->end_line - path_cfg->start_line)/
-			csid_reg->cmn_reg->epoch_div_factor;
-		break;
+	case CAM_IFE_PIX_PATH_RES_IPP:
 	case CAM_IFE_PIX_PATH_RES_RDI_0:
 	case CAM_IFE_PIX_PATH_RES_RDI_1:
 	case CAM_IFE_PIX_PATH_RES_RDI_2:
 	case CAM_IFE_PIX_PATH_RES_RDI_3:
 	case CAM_IFE_PIX_PATH_RES_RDI_4:
-		rdi_reg = csid_reg->path_reg[reserve->res_id];
-		if (!rdi_reg) {
-			rc = -EINVAL;
-			CAM_ERR(CAM_ISP, "CSID[%d] invalid res %d",
-				csid_hw->hw_intf->hw_idx, reserve->res_id);
-			goto end;
-		}
-		epoch0 = rdi_reg->epoch0_cfg_val;
+		path_cfg->camif_data.epoch0 =
+			(path_cfg->end_line - path_cfg->start_line) /
+			csid_reg->cmn_reg->epoch_div_factor;
+
+		CAM_DBG(CAM_ISP, "CSID[%d] res_id: %u epoch0: 0x%x",
+			csid_hw->hw_intf->hw_idx, reserve->res_id,
+			path_cfg->camif_data.epoch0);
+		break;
+	default:
+		CAM_DBG(CAM_ISP, "No CAMIF epoch update for res: %u", reserve->res_id);
 		break;
 	}
 
-	path_cfg->camif_data.epoch0 = epoch0;
-
-end:
-	CAM_DBG(CAM_ISP, "CSID[%d] epoch0: 0x%x",
-		csid_hw->hw_intf->hw_idx, epoch0);
-	return rc;
+	return 0;
 }
 
 int cam_ife_csid_hw_ver2_hw_cfg(
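/*
 * A minimal arithmetic sketch of the EPOCH0 value now programmed by
 * cam_ife_csid_ver_config_camif() above for both IPP and RDI paths: the
 * active line count divided by the common epoch divisor. The numbers in
 * main() are illustrative stand-ins only.
 */
#include <stdio.h>

static unsigned int sketch_epoch0(unsigned int start_line,
	unsigned int end_line, unsigned int epoch_div_factor)
{
	/* mirrors (end_line - start_line) / cmn_reg->epoch_div_factor */
	return (end_line - start_line) / epoch_div_factor;
}

int main(void)
{
	/* e.g. a 2160-line active frame with divisor 2 -> epoch at mid-frame */
	printf("epoch0 = 0x%x\n", sketch_epoch0(0, 2160, 2));
	return 0;
}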
@@ -2497,9 +2591,9 @@ int cam_ife_csid_ver2_reserve(void *hw_priv,
 	csid_hw->token = reserve->cb_priv;
 	reserve->buf_done_controller = csid_hw->buf_done_irq_controller;
 	res->cdm_ops = reserve->cdm_ops;
-	csid_hw->flags.sfe_inline_shdr = reserve->sfe_inline_shdr;
+	csid_hw->flags.sfe_en = reserve->sfe_en;
+	path_cfg->sfe_shdr = reserve->sfe_inline_shdr;
 	csid_hw->flags.offline_mode = reserve->is_offline;
-
 	reserve->need_top_cfg = csid_reg->need_top_cfg;
 
 	CAM_DBG(CAM_ISP, "CSID[%u] Resource[id: %d name:%s] state %d cid %d",
@@ -2595,7 +2689,7 @@ end:
 	return rc;
 }
 
-static int cam_ife_csid_ver2_shdr_cfg(
+static int cam_ife_csid_ver2_res_master_slave_cfg(
 	struct cam_ife_csid_ver2_hw *csid_hw,
 	uint32_t res_id)
 {
@@ -2622,6 +2716,9 @@ static int cam_ife_csid_ver2_shdr_cfg(
 	case CAM_IFE_PIX_PATH_RES_RDI_2:
 		val |= BIT(csid_reg->cmn_reg->shdr_slave_rdi2_shift);
 		break;
+	case CAM_IFE_PIX_PATH_RES_PPP:
+		val |= BIT(csid_reg->cmn_reg->shdr_slave_ppp_shift);
+		break;
 	default:
 		break;
 	}
@@ -2708,8 +2805,14 @@ static int cam_ife_csid_ver2_init_config_rdi_path(
 		(cmn_reg->timestamp_strobe_val <<
 			cmn_reg->timestamp_stb_sel_shift_val);
 
+	if (cam_ife_csid_ver2_disable_sof_retime(csid_hw, res))
+		cfg0 |= 1 << path_reg->sof_retiming_dis_shift;
+
 	cam_io_w_mb(cfg0, mem_base + path_reg->cfg0_addr);
 
+	CAM_DBG(CAM_ISP, "CSID[%d] %s cfg0_addr 0x%x",
+		csid_hw->hw_intf->hw_idx, res->res_name, cfg0);
+
 	/*Configure Multi VC DT combo */
 	if (cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_1].valid) {
 		val = (cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_1].vc <<
@@ -2757,6 +2860,12 @@ static int cam_ife_csid_ver2_init_config_rdi_path(
 		(cmn_reg->timestamp_strobe_val <<
 			cmn_reg->timestamp_stb_sel_shift_val);
 
+	/* We use line smoothing only on RDI_0 in all use cases */
+	if ((path_reg->capabilities &
+		CAM_IFE_CSID_CAP_LINE_SMOOTHING_IN_RDI) &&
+		(res->res_id == CAM_IFE_PIX_PATH_RES_RDI_0))
+		cfg1 |= 1 << path_reg->pix_store_en_shift_val;
+
 	cam_io_w_mb(cfg1, mem_base + path_reg->cfg1_addr);
 
 	/* set frame drop pattern to 0 and period to 1 */
@@ -2784,8 +2893,10 @@ static int cam_ife_csid_ver2_init_config_rdi_path(
 		path_reg->err_recovery_cfg0_addr);
 	}
 
-	if (csid_hw->flags.sfe_inline_shdr)
-		cam_ife_csid_ver2_shdr_cfg(csid_hw, res->res_id);
+	if (path_cfg->sfe_shdr ||
+		(csid_hw->flags.rdi_lcr_en &&
+		res->res_id == CAM_IFE_PIX_PATH_RES_RDI_0))
+		cam_ife_csid_ver2_res_master_slave_cfg(csid_hw, res->res_id);
 
 	if (csid_hw->debug_info.debug_val &
 		CAM_IFE_CSID_DEBUG_ENABLE_HBI_VBI_INFO) {
@@ -2860,6 +2971,9 @@ static int cam_ife_csid_ver2_init_config_pxl_path(
 		(cmn_reg->timestamp_strobe_val <<
 			cmn_reg->timestamp_stb_sel_shift_val);
 
+	if (cam_ife_csid_ver2_disable_sof_retime(csid_hw, res))
+		cfg0 |= 1 << path_reg->sof_retiming_dis_shift;
+
 	CAM_DBG(CAM_ISP, "CSID[%d] res:%d cfg0_addr 0x%x",
 		csid_hw->hw_intf->hw_idx, res->res_id, cfg0);
 
@@ -2971,6 +3085,9 @@ static int cam_ife_csid_ver2_init_config_pxl_path(
 		mem_base + path_reg->format_measure_cfg0_addr);
 	}
 
+	if (csid_hw->flags.rdi_lcr_en && res->res_id == CAM_IFE_PIX_PATH_RES_PPP)
+		cam_ife_csid_ver2_res_master_slave_cfg(csid_hw, res->res_id);
+
 	res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
 	return rc;
 }
@@ -3069,18 +3186,23 @@ static int cam_ife_csid_ver2_program_rdi_path(
 	}
 
 	if ((csid_hw->flags.offline_mode ||
-		csid_hw->flags.sfe_inline_shdr) &&
+		path_cfg->sfe_shdr) &&
 		(res->res_id == CAM_IFE_PIX_PATH_RES_RDI_0)) {
 		val |= path_reg->camif_irq_mask;
 		path_cfg->handle_camif_irq = true;
 	}
 
-	/* Currently CAMIF SOF is the secondary evt enabled for HW mgr */
-	if (path_cfg->en_secondary_evt) {
-		val |= IFE_CSID_VER2_PATH_CAMIF_SOF;
+	/* Enable secondary events dictated by HW mgr for RDI paths */
+	if (path_cfg->sec_evt_config.en_secondary_evt) {
+		if (path_cfg->sec_evt_config.evt_type & CAM_IFE_CSID_EVT_SOF)
+			val |= IFE_CSID_VER2_PATH_CAMIF_SOF;
+
+		if (path_cfg->sec_evt_config.evt_type & CAM_IFE_CSID_EVT_EPOCH)
+			val |= IFE_CSID_VER2_PATH_CAMIF_EPOCH0;
+
 		CAM_DBG(CAM_ISP,
-			"Enable camif SOF irq for res: %s",
-			res->res_name);
+			"Enable camif: %d evt irq for res: %s",
+			path_cfg->sec_evt_config.evt_type, res->res_name);
 	}
 
 	res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
@@ -3669,7 +3791,6 @@ err:
 	return rc;
 }
 
-
 static int cam_ife_csid_ver2_program_top(
 	struct cam_ife_csid_ver2_hw *csid_hw)
 {
@@ -3711,6 +3832,8 @@ static int cam_ife_csid_ver2_program_top(
 	val |= csid_hw->top_cfg.out_ife_en <<
 		top_reg->out_ife_en_shift_val;
 
+	val |= csid_hw->top_cfg.rdi_lcr;
+
 	cam_io_w_mb(val,
 		soc_info->reg_map[CAM_IFE_CSID_TOP_MEM_BASE_ID].mem_base +
 		top_reg->io_path_cfg0_addr[csid_hw->hw_intf->hw_idx]);
@@ -4034,6 +4157,7 @@ static int cam_ife_csid_ver2_disable_core(
 	spin_lock_bh(&csid_hw->lock_state);
 	csid_hw->flags.device_enabled = false;
 	csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
+	csid_hw->flags.rdi_lcr_en = false;
 	spin_unlock_bh(&csid_hw->lock_state);
 	rc = cam_ife_csid_disable_soc_resources(soc_info);
 	if (rc)
@@ -4041,7 +4165,6 @@ static int cam_ife_csid_ver2_disable_core(
 		csid_hw->hw_intf->hw_idx);
 
 	csid_hw->counters.error_irq_count = 0;
-
 	return rc;
 }
@@ -4196,13 +4319,20 @@ int cam_ife_csid_ver2_start(void *hw_priv, void *args,
 		}
 	}
 
-	/* Configure RUP/AUP/MUP @ streamon for all paths together */
+	/*
+	 * Configure RUP/AUP/MUP @ streamon for all enabled paths.
+	 * For internal recovery skip this; the CDM packet corresponding
+	 * to the request being recovered will apply the appropriate RUP/AUP/MUP.
+	 */
 	rup_aup_mask |= (csid_hw->rx_cfg.mup << csid_reg->cmn_reg->mup_shift_val);
-	cam_io_w_mb(rup_aup_mask,
-		soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base +
-		csid_reg->cmn_reg->rup_aup_cmd_addr);
-	CAM_DBG(CAM_ISP, "CSID:%u RUP_AUP_MUP: 0x%x at start",
-		csid_hw->hw_intf->hw_idx, rup_aup_mask);
+
+	if (!start_args->is_internal_start)
+		cam_io_w_mb(rup_aup_mask,
+			soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base +
+			csid_reg->cmn_reg->rup_aup_cmd_addr);
+
+	CAM_DBG(CAM_ISP, "CSID:%u RUP_AUP_MUP: 0x%x at start updated: %s",
+		csid_hw->hw_intf->hw_idx, rup_aup_mask,
+		CAM_BOOL_TO_YESNO(!start_args->is_internal_start));
 
 	cam_ife_csid_ver2_enable_csi2(csid_hw);
 
@@ -4214,6 +4344,7 @@ int cam_ife_csid_ver2_start(void *hw_priv, void *args,
 		cam_ife_csid_ver2_enable_path(csid_hw, res);
 	}
 
+	csid_hw->flags.reset_awaited = false;
 end:
 	mutex_unlock(&csid_hw->hw_info->hw_mutex);
 	return rc;
@@ -4256,6 +4387,7 @@ int cam_ife_csid_ver2_stop(void *hw_priv,
 		csid_stop->num_res);
 
 	csid_hw->flags.device_enabled = false;
+	csid_hw->flags.rdi_lcr_en = false;
 
 	reset.reset_type = (csid_hw->flags.fatal_err_detected) ? CAM_IFE_CSID_RESET_GLOBAL :
 		CAM_IFE_CSID_RESET_PATH;
@@ -4487,12 +4619,18 @@ static int cam_ife_csid_ver2_reg_update(
 	reg_val_pair[0] = csid_reg->cmn_reg->rup_aup_cmd_addr;
 	reg_val_pair[1] = rup_aup_mask;
 
-	reg_val_pair[1] |= csid_hw->rx_cfg.mup <<
-		csid_reg->cmn_reg->mup_shift_val;
+	/* If not an actual request, configure last applied MUP */
+	if (rup_args->reg_write)
+		reg_val_pair[1] |= (rup_args->last_applied_mup <<
+			csid_reg->cmn_reg->mup_shift_val);
+	else
+		reg_val_pair[1] |= (csid_hw->rx_cfg.mup <<
+			csid_reg->cmn_reg->mup_shift_val);
 
-	CAM_DBG(CAM_ISP, "CSID:%d reg_update_cmd 0x%X offset 0x%X",
+	CAM_DBG(CAM_ISP, "CSID:%d configure rup_aup_mup: 0x%x offset: 0x%x via %s",
 		csid_hw->hw_intf->hw_idx,
-		reg_val_pair[1], reg_val_pair[0]);
+		reg_val_pair[1], reg_val_pair[0],
+		(rup_args->reg_write ? "AHB" : "CDM"));
 
 	if (rup_args->reg_write) {
 		soc_info = &csid_hw->hw_info->soc_info;
@@ -4876,6 +5014,59 @@ end:
 	return 0;
 }
 
+static int cam_ife_csid_ver2_rdi_lcr_cfg(
+	struct cam_ife_csid_ver2_hw *csid_hw, void *cmd_args)
+{
+	const struct cam_ife_csid_ver2_path_reg_info *path_reg;
+	struct cam_ife_csid_ver2_reg_info *csid_reg;
+	struct cam_ife_csid_ver2_path_cfg *path_cfg = NULL;
+	struct cam_isp_resource_node *res = cmd_args;
+
+	if (!csid_hw || !cmd_args) {
+		CAM_ERR(CAM_ISP, "Invalid params");
+		return -EINVAL;
+	}
+
+	csid_reg = (struct cam_ife_csid_ver2_reg_info *)
+			csid_hw->core_info->csid_reg;
+
+	path_reg = csid_reg->path_reg[res->res_id];
+	path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv;
+	if (!path_cfg || !path_reg || !path_reg->capabilities ||
+		!(path_reg->capabilities & CAM_IFE_CSID_CAP_INPUT_LCR)) {
+		CAM_ERR(CAM_ISP, "Invalid res %s", res->res_name);
+		return -EINVAL;
+	}
+
+	if (!path_cfg->sfe_shdr && (res->res_id != CAM_IFE_PIX_PATH_RES_RDI_0)) {
+		CAM_ERR(CAM_ISP, "Invalid res: %s, capabilities 0x%x sfe_shdr: %u",
+			res->res_name, path_reg->capabilities, path_cfg->sfe_shdr);
+		return -EINVAL;
+	}
+
+	/*
+	 * LCR should not be on for a resource if CSID is giving packed data;
+	 * this case would come for formats which are not supported.
+	 */
+	if (path_cfg->path_format[CAM_IFE_CSID_MULTI_VC_DT_GRP_0].packing_fmt) {
+		CAM_ERR(CAM_ISP, "LCR enabled for %s, csid out packed not supported",
+			res->res_name);
+		return -EINVAL;
+	}
+
+	if (csid_hw->flags.sfe_en)
+		csid_hw->top_cfg.rdi_lcr |= BIT(res->res_id) <<
+			csid_reg->top_reg->rdi_lcr_shift_val;
+
+	csid_hw->flags.rdi_lcr_en = true;
+	path_cfg->lcr_en = true;
+
+	CAM_DBG(CAM_ISP, "CSID[%u] %s top_cfg %u",
+		csid_hw->hw_intf->hw_idx, res->res_name, csid_hw->top_cfg.rdi_lcr);
+
+	return 0;
+}
+
 static int cam_ife_csid_ver2_process_cmd(void *hw_priv,
 	uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
 {
@@ -4953,6 +5144,9 @@ static int cam_ife_csid_ver2_process_cmd(void *hw_priv,
 	case CAM_ISP_HW_CMD_CSID_DISCARD_INIT_FRAMES:
 		rc = cam_ife_csid_ver2_set_discard_frame_cfg(csid_hw, cmd_args);
 		break;
+	case CAM_ISP_HW_CMD_RDI_LCR_CFG:
+		rc = cam_ife_csid_ver2_rdi_lcr_cfg(csid_hw, cmd_args);
+		break;
 	default:
 		CAM_ERR(CAM_ISP, "CSID:%d unsupported cmd:%d",
 			csid_hw->hw_intf->hw_idx, cmd_type);