@@ -225,6 +225,153 @@ static int cam_tfe_match_vc_dt_pair(int32_t *vc, uint32_t *dt,
 	return 0;
 }
 
+static void cam_tfe_csid_enable_path_for_init_frame_drop(
+	struct cam_tfe_csid_hw *csid_hw,
+	int res_id)
+{
+	struct cam_tfe_csid_path_cfg *path_data;
+	const struct cam_tfe_csid_pxl_reg_offset *pxl_reg = NULL;
+	const struct cam_tfe_csid_rdi_reg_offset *rdi_reg = NULL;
+	const struct cam_tfe_csid_reg_offset *csid_reg;
+	struct cam_hw_soc_info *soc_info;
+	struct cam_isp_resource_node *res;
+	uint32_t val;
+
+	if (!csid_hw) {
+		CAM_WARN(CAM_ISP, "csid_hw cannot be NULL");
+		return;
+	}
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	if (res_id == CAM_TFE_CSID_PATH_RES_IPP) {
+		res = &csid_hw->ipp_res;
+		pxl_reg = csid_reg->ipp_reg;
+	} else if (res_id >= CAM_TFE_CSID_PATH_RES_RDI_0 &&
+		res_id <= CAM_TFE_CSID_PATH_RES_RDI_2) {
+		res = &csid_hw->rdi_res[res_id];
+		rdi_reg = csid_reg->rdi_reg[res_id];
+	} else {
+		CAM_ERR(CAM_ISP, "Invalid res_id");
+		return;
+	}
+
+	path_data = (struct cam_tfe_csid_path_cfg *)res->res_priv;
+
+	if (!path_data || !path_data->init_frame_drop)
+		return;
+	if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING)
+		return;
+
+	path_data->res_sof_cnt++;
+	if ((path_data->res_sof_cnt + 1) <
+		path_data->res_sof_cnt) {
+		CAM_WARN(CAM_ISP, "Res %d sof count overflow %d",
+			res_id, path_data->res_sof_cnt);
+		return;
+	}
+
+	CAM_DBG(CAM_ISP, "CSID:%d res_id %d SOF cnt:%d init_frame_drop:%d",
+		csid_hw->hw_intf->hw_idx, res_id, path_data->res_sof_cnt,
+		path_data->init_frame_drop);
+
+	if ((path_data->res_sof_cnt ==
+		path_data->init_frame_drop) &&
+		pxl_reg) {
+		CAM_DBG(CAM_ISP, "CSID:%d Enabling pixel IPP Path",
+			csid_hw->hw_intf->hw_idx);
+		if (path_data->sync_mode !=
+			CAM_ISP_HW_SYNC_SLAVE) {
+			val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+				pxl_reg->csid_pxl_ctrl_addr);
+			val |= CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY;
+			cam_io_w_mb(val,
+				soc_info->reg_map[0].mem_base +
+				pxl_reg->csid_pxl_ctrl_addr);
+		}
+
+		if (!(csid_hw->csid_debug &
+			TFE_CSID_DEBUG_ENABLE_SOF_IRQ)) {
+			val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+				pxl_reg->csid_pxl_irq_mask_addr);
+			val &= ~(TFE_CSID_PATH_INFO_INPUT_SOF);
+			cam_io_w_mb(val,
+				soc_info->reg_map[0].mem_base +
+				pxl_reg->csid_pxl_irq_mask_addr);
+		}
+	} else if ((path_data->res_sof_cnt ==
+		path_data->init_frame_drop) && rdi_reg) {
+		CAM_DBG(CAM_ISP, "Enabling RDI %d Path", res_id);
+		cam_io_w_mb(CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY,
+			soc_info->reg_map[0].mem_base +
+			rdi_reg->csid_rdi_ctrl_addr);
+		if (!(csid_hw->csid_debug &
+			TFE_CSID_DEBUG_ENABLE_SOF_IRQ)) {
+			val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+				rdi_reg->csid_rdi_irq_mask_addr);
+			val &= ~(TFE_CSID_PATH_INFO_INPUT_SOF);
+			cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+				rdi_reg->csid_rdi_irq_mask_addr);
+		}
+	}
+}
+
+static bool cam_tfe_csid_check_path_active(struct cam_tfe_csid_hw *csid_hw)
+{
+	const struct cam_tfe_csid_reg_offset *csid_reg;
+	struct cam_hw_soc_info *soc_info;
+	uint32_t i;
+	uint32_t path_status = 1;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	/* check the IPP path status */
+	if (csid_reg->cmn_reg->num_pix) {
+		path_status = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_pxl_status_addr);
+		CAM_DBG(CAM_ISP, "CSID:%d IPP path status:%d",
+			csid_hw->hw_intf->hw_idx, path_status);
+		/* if status is 0 then it is active */
+		if (!path_status)
+			goto end;
+	}
+
+	/* Check the RDI path status */
+	for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
+		path_status = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[i]->csid_rdi_status_addr);
+		CAM_DBG(CAM_ISP, "CSID:%d RDI:%d path status:%d",
+			csid_hw->hw_intf->hw_idx, i, path_status);
+		/* if status is 0 then it is active */
+		if (!path_status)
+			goto end;
+	}
+
+end:
+	/* if status is 0 then path is active */
+	return path_status ? false : true;
+}
+
+static void cam_tfe_csid_reset_path_data(
+	struct cam_tfe_csid_hw *csid_hw,
+	struct cam_isp_resource_node *res)
+{
+	struct cam_tfe_csid_path_cfg *path_data = NULL;
+
+	if (!csid_hw || !res) {
+		CAM_WARN(CAM_ISP, "csid_hw or res cannot be NULL");
+		return;
+	}
+	path_data = res->res_priv;
+
+	if (path_data) {
+		path_data->init_frame_drop = 0;
+		path_data->res_sof_cnt = 0;
+	}
+}
+
 static int cam_tfe_csid_cid_get(struct cam_tfe_csid_hw *csid_hw,
 	int32_t *vc, uint32_t *dt, uint32_t num_valid_vc_dt, uint32_t *cid)
 {
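
The three helpers above implement a small per-path state machine: while a
path is gated, every SOF interrupt increments res_sof_cnt, and once the
count reaches init_frame_drop the path is resumed at the next frame
boundary and the SOF interrupt is masked again (unless SOF debug logging
keeps it on). A minimal sketch of that counting logic, using simplified
hypothetical types rather than the driver's structs:

    /* Sketch only: stand-ins for cam_tfe_csid_path_cfg; the real code
     * programs ctrl/irq-mask registers instead of setting flags.
     */
    #include <stdbool.h>
    #include <stdint.h>

    struct path_state {
    	uint32_t init_frame_drop; /* frames to skip before starting */
    	uint32_t res_sof_cnt;     /* SOFs seen since stream start */
    	bool started;             /* resumed at frame boundary */
    	bool sof_irq_enabled;     /* SOF IRQ kept only to count */
    };

    /* Called once per SOF interrupt while the path is gated. */
    static void on_sof(struct path_state *p, bool sof_debug)
    {
    	if (!p->init_frame_drop || p->started)
    		return;

    	p->res_sof_cnt++;
    	if (p->res_sof_cnt == p->init_frame_drop) {
    		/* Mirrors writing RESUME_AT_FRAME_BOUNDARY and clearing
    		 * TFE_CSID_PATH_INFO_INPUT_SOF from the IRQ mask.
    		 */
    		p->started = true;
    		p->sof_irq_enabled = sof_debug;
    	}
    }
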
@@ -276,6 +423,7 @@ static int cam_tfe_csid_global_reset(struct cam_tfe_csid_hw *csid_hw)
 {
 	struct cam_hw_soc_info *soc_info;
 	const struct cam_tfe_csid_reg_offset *csid_reg;
+	struct cam_tfe_csid_path_cfg *path_data = NULL;
 	int rc = 0;
 	uint32_t val = 0, i;
 	uint32_t status;
@@ -377,6 +525,18 @@ static int cam_tfe_csid_global_reset(struct cam_tfe_csid_hw *csid_hw)
 	csid_hw->error_irq_count = 0;
 	csid_hw->prev_boot_timestamp = 0;
 
+	if (csid_hw->pxl_pipe_enable) {
+		path_data = (struct cam_tfe_csid_path_cfg *)
+			csid_hw->ipp_res.res_priv;
+		path_data->res_sof_cnt = 0;
+	}
+
+	for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
+		path_data = (struct cam_tfe_csid_path_cfg *)
+			csid_hw->rdi_res[i].res_priv;
+		path_data->res_sof_cnt = 0;
+	}
+
 	return rc;
 }
 
@@ -712,6 +872,7 @@ static int cam_tfe_csid_path_reserve(struct cam_tfe_csid_hw *csid_hw,
 	path_data->height = reserve->in_port->height;
 	path_data->start_line = reserve->in_port->line_start;
 	path_data->end_line = reserve->in_port->line_end;
+	path_data->usage_type = reserve->in_port->usage_type;
 	path_data->bayer_bin = reserve->in_port->bayer_bin;
 	path_data->qcfa_bin = reserve->in_port->qcfa_bin;
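
The usage_type copied here at reserve time is what later lets the RDI
enable path recognize a dual-TFE acquire. A hedged sketch of that link,
with simplified stand-in types (only usage_type and init_frame_drop come
from this patch; the enum values are illustrative):

    #include <stdint.h>

    enum usage { USAGE_SINGLE = 0, USAGE_DUAL = 1 }; /* stand-ins */

    struct path_cfg {
    	enum usage usage_type;    /* set in cam_tfe_csid_path_reserve() */
    	uint32_t init_frame_drop; /* consumed in enable_rdi_path() */
    };

    /* Mirrors the branch added to cam_tfe_csid_enable_rdi_path(). */
    static void apply_dual_tfe_frame_drop(struct path_cfg *p)
    {
    	if (p->usage_type == USAGE_DUAL)
    		p->init_frame_drop = 1; /* drop one extra frame on RDI */
    }
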
@@ -913,7 +1074,8 @@ static int cam_tfe_csid_enable_hw(struct cam_tfe_csid_hw *csid_hw)
 {
 	int rc = 0;
 	const struct cam_tfe_csid_reg_offset *csid_reg;
-	struct cam_hw_soc_info *soc_info;
+	struct cam_hw_soc_info *soc_info;
+	struct cam_tfe_csid_path_cfg *path_data = NULL;
 	uint32_t i, val, clk_lvl;
 	unsigned long flags;
@@ -994,6 +1156,19 @@ static int cam_tfe_csid_enable_hw(struct cam_tfe_csid_hw *csid_hw)
 	spin_unlock_irqrestore(&csid_hw->spin_lock, flags);
 	cam_tasklet_start(csid_hw->tasklet);
 
+	if (csid_hw->pxl_pipe_enable) {
+		path_data = (struct cam_tfe_csid_path_cfg *)
+			csid_hw->ipp_res.res_priv;
+		path_data->res_sof_cnt = 0;
+	}
+
+	for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
+		path_data = (struct cam_tfe_csid_path_cfg *)
+			csid_hw->rdi_res[i].res_priv;
+		path_data->res_sof_cnt = 0;
+	}
+
+
 	return rc;
 
 disable_soc:
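
The counter-reset block above is identical to the one added to
cam_tfe_csid_global_reset() earlier in this patch. A hypothetical helper
(not part of the patch) could fold the two copies, assuming the csid_hw
layout used throughout this file:

    /* Hypothetical consolidation of the duplicated reset loops in
     * cam_tfe_csid_global_reset() and cam_tfe_csid_enable_hw().
     */
    static void cam_tfe_csid_reset_sof_counters(
    	struct cam_tfe_csid_hw *csid_hw)
    {
    	const struct cam_tfe_csid_reg_offset *csid_reg =
    		csid_hw->csid_info->csid_reg;
    	struct cam_tfe_csid_path_cfg *path_data;
    	uint32_t i;

    	if (csid_hw->pxl_pipe_enable) {
    		path_data = csid_hw->ipp_res.res_priv;
    		path_data->res_sof_cnt = 0;
    	}

    	for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
    		path_data = csid_hw->rdi_res[i].res_priv;
    		path_data->res_sof_cnt = 0;
    	}
    }
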
@@ -1651,11 +1826,14 @@ static int cam_tfe_csid_enable_rdi_path(
 {
 	const struct cam_tfe_csid_reg_offset *csid_reg;
 	struct cam_hw_soc_info *soc_info;
+	struct cam_tfe_csid_path_cfg *path_data;
 	uint32_t id, val;
+	bool path_active = false;
 
 	csid_reg = csid_hw->csid_info->csid_reg;
 	soc_info = &csid_hw->hw_info->soc_info;
 	id = res->res_id;
+	path_data = (struct cam_tfe_csid_path_cfg *)res->res_priv;
 
 	if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW ||
 		res->res_id > CAM_TFE_CSID_PATH_RES_RDI_2 ||
@@ -1667,10 +1845,30 @@ static int cam_tfe_csid_enable_rdi_path(
 		return -EINVAL;
 	}
 
-	/* resume at frame boundary */
-	cam_io_w_mb(CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY,
-		soc_info->reg_map[0].mem_base +
-		csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
+	/* Drop one frame extra on RDI for dual TFE use case */
+	if (path_data->usage_type == CAM_ISP_TFE_IN_RES_USAGE_DUAL)
+		path_data->init_frame_drop = 1;
+
+	/* Resume at frame boundary */
+	if (!path_data->init_frame_drop) {
+		CAM_DBG(CAM_ISP, "Start RDI:%d path", id);
+		/* resume at frame boundary */
+		cam_io_w_mb(CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY,
+			soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
+	} else {
+		path_active = cam_tfe_csid_check_path_active(csid_hw);
+		if (path_active)
+			cam_io_w_mb(CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY,
+				soc_info->reg_map[0].mem_base +
+				csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
+
+		CAM_DBG(CAM_ISP,
+			"CSID:%d %s RDI:%d path frame drop %d",
+			csid_hw->hw_intf->hw_idx,
+			path_active ? "Starting" : "Not Starting", id,
+			path_data->init_frame_drop);
+	}
 
 	/* Enable the required RDI interrupts */
 	val = TFE_CSID_PATH_INFO_RST_DONE | TFE_CSID_PATH_ERROR_FIFO_OVERFLOW |
@@ -1682,7 +1880,8 @@ static int cam_tfe_csid_enable_rdi_path(
 			TFE_CSID_PATH_ERROR_LINE_COUNT;
 	}
 
-	if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_SOF_IRQ)
+	if ((csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_SOF_IRQ) ||
+		(path_data->init_frame_drop && !path_active))
 		val |= TFE_CSID_PATH_INFO_INPUT_SOF;
 	if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_EOF_IRQ)
 		val |= TFE_CSID_PATH_INFO_INPUT_EOF;
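
Taken together, the two hunks above make the RDI start conditional. With
no init_frame_drop the path resumes at the frame boundary immediately.
With init_frame_drop set (the dual-TFE case), the path still resumes
immediately if some other CSID path is already streaming, so the two
TFEs stay frame-aligned; otherwise the start is deferred to the SOF
handler, and the SOF IRQ is force-enabled so res_sof_cnt can advance. A
condensed sketch of that decision (booleans stand in for the register
writes and IRQ-mask updates):

    #include <stdbool.h>
    #include <stdint.h>

    struct rdi_start {
    	bool resume_now;     /* write RESUME_AT_FRAME_BOUNDARY now */
    	bool enable_sof_irq; /* count SOFs for a deferred start */
    };

    static struct rdi_start decide_rdi_start(uint32_t init_frame_drop,
    	bool other_path_active, bool sof_debug)
    {
    	struct rdi_start s;

    	if (!init_frame_drop) {
    		s.resume_now = true;          /* normal start */
    		s.enable_sof_irq = sof_debug; /* debug only */
    	} else if (other_path_active) {
    		s.resume_now = true;          /* stay frame-aligned */
    		s.enable_sof_irq = sof_debug;
    	} else {
    		s.resume_now = false;         /* defer to SOF hook */
    		s.enable_sof_irq = true;      /* drives res_sof_cnt */
    	}
    	return s;
    }
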
@@ -1704,10 +1903,12 @@ static int cam_tfe_csid_disable_rdi_path(
 	uint32_t id, val = 0;
 	const struct cam_tfe_csid_reg_offset *csid_reg;
 	struct cam_hw_soc_info *soc_info;
+	struct cam_tfe_csid_path_cfg *path_data;
 
 	csid_reg = csid_hw->csid_info->csid_reg;
 	soc_info = &csid_hw->hw_info->soc_info;
 	id = res->res_id;
+	path_data = (struct cam_tfe_csid_path_cfg *)res->res_priv;
 
 	if ((res->res_id > CAM_TFE_CSID_PATH_RES_RDI_2) ||
 		(!csid_reg->rdi_reg[res->res_id])) {
@@ -1743,6 +1944,9 @@ static int cam_tfe_csid_disable_rdi_path(
 	CAM_DBG(CAM_ISP, "CSID:%d res_id:%d",
 		csid_hw->hw_intf->hw_idx, res->res_id);
 
+	path_data->init_frame_drop = 0;
+	path_data->res_sof_cnt = 0;
+
 	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
 		csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
@@ -2111,6 +2315,8 @@ static int cam_tfe_csid_release(void *hw_priv,
 
 	res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
 
+	cam_tfe_csid_reset_path_data(csid_hw, res);
+
 end:
 	mutex_unlock(&csid_hw->hw_info->hw_mutex);
 	return rc;
@@ -3406,6 +3612,9 @@ handle_fatal_error:
 			complete(&csid_hw->csid_rdin_complete[i]);
 		}
 
+		if (irq_status[i] & TFE_CSID_PATH_INFO_INPUT_SOF)
+			cam_tfe_csid_enable_path_for_init_frame_drop(csid_hw, i);
+
 		if ((irq_status[i] & TFE_CSID_PATH_INFO_INPUT_SOF) &&
 			(csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_SOF_IRQ)) {
 			if (!csid_hw->sof_irq_triggered)
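
The hook runs inside the per-RDI section of the IRQ loop, so the loop
index doubles as the res_id handed to the helper. That relies on
CAM_TFE_CSID_PATH_RES_RDI_0 being 0 so that RDI indices and res_ids
coincide, which the helper's RDI_0..RDI_2 range check reflects; this
diff alone does not confirm the enum values, so treat the sketch below
as an inferred reading of the dispatch:

    /* Inferred ISR-side dispatch added by this hunk; the irq_status
     * layout and the RDI0 == 0 assumption are not confirmed by the
     * diff itself.
     */
    static void sketch_rdi_sof_dispatch(struct cam_tfe_csid_hw *csid_hw,
    	const uint32_t *irq_status, uint32_t num_rdis)
    {
    	uint32_t i;

    	for (i = 0; i < num_rdis; i++)
    		if (irq_status[i] & TFE_CSID_PATH_INFO_INPUT_SOF)
    			cam_tfe_csid_enable_path_for_init_frame_drop(
    				csid_hw, i);
    }
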