Merge "msm: camera: isp: Added CSID recovery mechanism" into camera-kernel.lnx.4.0

This commit is contained in:
Camera Software Integration
2020-07-22 15:35:37 -07:00
committed by Gerrit - the friendly Code Review server
8 changed files with 505 additions and 99 deletions

View File

@@ -2036,8 +2036,14 @@ end:
if (notify.error == CRM_KMD_ERR_FATAL) {
req_msg.session_hdl = ctx_isp->base->session_hdl;
req_msg.u.err_msg.device_hdl = ctx_isp->base->dev_hdl;
req_msg.u.err_msg.error_type =
CAM_REQ_MGR_ERROR_TYPE_RECOVERY;
if (error_type == CAM_ISP_HW_ERROR_CSID_FATAL)
req_msg.u.err_msg.error_type =
CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY;
else
req_msg.u.err_msg.error_type =
CAM_REQ_MGR_ERROR_TYPE_RECOVERY;
req_msg.u.err_msg.link_hdl = ctx_isp->base->link_hdl;
req_msg.u.err_msg.request_id = error_request_id;
req_msg.u.err_msg.resource_size = 0x0;

View File

@@ -1909,6 +1909,8 @@ static int cam_ife_hw_mgr_acquire_res_ife_csid_pxl(
csid_acquire.in_port = in_port;
csid_acquire.out_port = in_port->data;
csid_acquire.node_res = NULL;
csid_acquire.event_cb = cam_ife_hw_mgr_event_handler;
csid_acquire.priv = ife_ctx;
csid_acquire.crop_enable = crop_enable;
csid_acquire.drop_enable = false;
@@ -2048,6 +2050,8 @@ static int cam_ife_hw_mgr_acquire_res_ife_csid_rdi(
csid_acquire.in_port = in_port;
csid_acquire.out_port = out_port;
csid_acquire.node_res = NULL;
csid_acquire.event_cb = cam_ife_hw_mgr_event_handler;
csid_acquire.priv = ife_ctx;
/*
* Enable RDI pixel drop by default. CSID will enable only for
@@ -7327,6 +7331,12 @@ static int cam_ife_hw_mgr_find_affected_ctx(
affected_core, CAM_IFE_HW_NUM_MAX))
continue;
if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending)) {
CAM_INFO(CAM_ISP, "CTX:%d already error reported",
ife_hwr_mgr_ctx->ctx_index);
continue;
}
atomic_set(&ife_hwr_mgr_ctx->overflow_pending, 1);
notify_err_cb = ife_hwr_mgr_ctx->common.event_cb[event_type];
@@ -7342,8 +7352,13 @@ static int cam_ife_hw_mgr_find_affected_ctx(
* In the call back function corresponding ISP context
* will update CRM about fatal Error
*/
notify_err_cb(ife_hwr_mgr_ctx->common.cb_priv,
CAM_ISP_HW_EVENT_ERROR, error_event_data);
if (notify_err_cb)
notify_err_cb(ife_hwr_mgr_ctx->common.cb_priv,
CAM_ISP_HW_EVENT_ERROR, error_event_data);
else {
CAM_WARN(CAM_ISP, "Error call back is not set");
goto end;
}
}
/* fill the affected_core in recovery data */
@@ -7352,7 +7367,7 @@ static int cam_ife_hw_mgr_find_affected_ctx(
CAM_DBG(CAM_ISP, "Vfe core %d is affected (%d)",
i, recovery_data->affected_core[i]);
}
end:
return 0;
}
@@ -7414,6 +7429,33 @@ static int cam_ife_hw_mgr_handle_hw_dump_info(
return rc;
}
static int cam_ife_hw_mgr_handle_csid_event(
struct cam_isp_hw_event_info *event_info)
{
struct cam_isp_hw_error_event_data error_event_data = {0};
struct cam_ife_hw_event_recovery_data recovery_data = {0};
/* this can be extended based on the types of error
* received from CSID
*/
switch (event_info->err_type) {
case CAM_ISP_HW_ERROR_CSID_FATAL: {
if (!g_ife_hw_mgr.debug_cfg.enable_csid_recovery)
break;
error_event_data.error_type = event_info->err_type;
cam_ife_hw_mgr_find_affected_ctx(&error_event_data,
event_info->hw_idx,
&recovery_data);
break;
}
default:
break;
}
return 0;
}
static int cam_ife_hw_mgr_handle_hw_err(
void *ctx,
void *evt_info)
@@ -7444,6 +7486,13 @@ static int cam_ife_hw_mgr_handle_hw_err(
else if (event_info->res_type == CAM_ISP_RESOURCE_VFE_OUT)
error_event_data.error_type = CAM_ISP_HW_ERROR_BUSIF_OVERFLOW;
spin_lock(&g_ife_hw_mgr.ctx_lock);
if (event_info->err_type == CAM_ISP_HW_ERROR_CSID_FATAL) {
rc = cam_ife_hw_mgr_handle_csid_event(event_info);
spin_unlock(&g_ife_hw_mgr.ctx_lock);
return rc;
}
core_idx = event_info->hw_idx;
if (g_ife_hw_mgr.debug_cfg.enable_recovery)
@@ -7454,6 +7503,8 @@ static int cam_ife_hw_mgr_handle_hw_err(
rc = cam_ife_hw_mgr_find_affected_ctx(&error_event_data,
core_idx, &recovery_data);
if ((rc != 0) || !(recovery_data.no_of_context))
goto end;
if (event_info->err_type == CAM_VFE_IRQ_STATUS_VIOLATION)
recovery_data.error_type = CAM_ISP_HW_ERROR_VIOLATION;
@@ -7461,7 +7512,8 @@ static int cam_ife_hw_mgr_handle_hw_err(
recovery_data.error_type = CAM_ISP_HW_ERROR_OVERFLOW;
cam_ife_hw_mgr_do_error_recovery(&recovery_data);
end:
spin_unlock(&g_ife_hw_mgr.ctx_lock);
return rc;
}
@@ -7849,6 +7901,9 @@ static int cam_ife_hw_mgr_debug_register(void)
dbgfileptr = debugfs_create_bool("enable_req_dump", 0644,
g_ife_hw_mgr.debug_cfg.dentry,
&g_ife_hw_mgr.debug_cfg.enable_req_dump);
dbgfileptr = debugfs_create_u32("enable_csid_recovery", 0644,
g_ife_hw_mgr.debug_cfg.dentry,
&g_ife_hw_mgr.debug_cfg.enable_csid_recovery);
dbgfileptr = debugfs_create_file("ife_camif_debug", 0644,
g_ife_hw_mgr.debug_cfg.dentry, NULL, &cam_ife_camif_debug);
dbgfileptr = debugfs_create_bool("per_req_reg_dump", 0644,
@@ -7881,6 +7936,7 @@ int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, int *iommu_hdl)
memset(&g_ife_hw_mgr, 0, sizeof(g_ife_hw_mgr));
mutex_init(&g_ife_hw_mgr.ctx_mutex);
spin_lock_init(&g_ife_hw_mgr.ctx_lock);
if (CAM_IFE_HW_NUM_MAX != CAM_IFE_CSID_HW_NUM_MAX) {
CAM_ERR(CAM_ISP, "CSID num is different then IFE num");

View File

@@ -38,6 +38,7 @@ enum cam_ife_res_master_slave {
* @dentry: Debugfs entry
* @csid_debug: csid debug information
* @enable_recovery: enable recovery
* @enable_csid_recovery: enable csid recovery
* @enable_diag_sensor_status: enable sensor diagnosis status
* @enable_req_dump: Enable request dump on HW errors
* @per_req_reg_dump: Enable per request reg dump
@@ -47,6 +48,7 @@ struct cam_ife_hw_mgr_debug {
struct dentry *dentry;
uint64_t csid_debug;
uint32_t enable_recovery;
uint32_t enable_csid_recovery;
uint32_t camif_debug;
bool enable_req_dump;
bool per_req_reg_dump;
@@ -199,6 +201,7 @@ struct cam_ife_hw_mgr {
struct cam_vfe_hw_get_hw_cap ife_dev_caps[CAM_IFE_HW_NUM_MAX];
struct cam_req_mgr_core_workq *workq;
struct cam_ife_hw_mgr_debug debug_cfg;
spinlock_t ctx_lock;
};
/**

View File

@@ -53,6 +53,7 @@ enum cam_isp_hw_err_type {
CAM_ISP_HW_ERROR_P2I_ERROR,
CAM_ISP_HW_ERROR_VIOLATION,
CAM_ISP_HW_ERROR_BUSIF_OVERFLOW,
CAM_ISP_HW_ERROR_CSID_FATAL,
CAM_ISP_HW_ERROR_MAX,
};

View File

@@ -12,6 +12,7 @@
#include <media/cam_req_mgr.h>
#include <dt-bindings/msm/msm-camera.h>
#include "cam_isp_hw_mgr_intf.h"
#include "cam_ife_csid_core.h"
#include "cam_isp_hw.h"
#include "cam_soc_util.h"
@@ -19,6 +20,7 @@
#include "cam_debug_util.h"
#include "cam_cpas_api.h"
#include "cam_subdev.h"
#include "cam_tasklet_util.h"
/* Timeout value in msec */
#define IFE_CSID_TIMEOUT 1000
@@ -1227,6 +1229,8 @@ int cam_ife_csid_path_reserve(struct cam_ife_csid_hw *csid_hw,
csid_hw->hw_intf->hw_idx, reserve->res_id,
path_data->width, path_data->height);
reserve->node_res = res;
csid_hw->event_cb = reserve->event_cb;
csid_hw->priv = reserve->priv;
end:
return rc;
@@ -1327,8 +1331,10 @@ static int cam_ife_csid_enable_hw(struct cam_ife_csid_hw *csid_hw)
csid_hw->csid_info->hw_reg_version);
spin_lock_irqsave(&csid_hw->lock_state, flags);
csid_hw->fatal_err_detected = false;
csid_hw->device_enabled = 1;
spin_unlock_irqrestore(&csid_hw->lock_state, flags);
cam_tasklet_start(csid_hw->tasklet);
return 0;
@@ -1381,6 +1387,7 @@ static int cam_ife_csid_disable_hw(struct cam_ife_csid_hw *csid_hw)
CAM_ERR(CAM_ISP, "CSID:%d Disable CSID SOC failed",
csid_hw->hw_intf->hw_idx);
cam_tasklet_stop(csid_hw->tasklet);
spin_lock_irqsave(&csid_hw->lock_state, flags);
csid_hw->device_enabled = 0;
spin_unlock_irqrestore(&csid_hw->lock_state, flags);
@@ -3353,6 +3360,8 @@ int cam_ife_csid_release(void *hw_priv,
res = (struct cam_isp_resource_node *)release_args;
mutex_lock(&csid_hw->hw_info->hw_mutex);
csid_hw->event_cb = NULL;
csid_hw->priv = NULL;
if ((res->res_type == CAM_ISP_RESOURCE_CID &&
res->res_id >= CAM_IFE_CSID_CID_MAX) ||
(res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
@@ -4165,16 +4174,208 @@ static int cam_ife_csid_process_cmd(void *hw_priv,
}
/*
 * cam_csid_get_evt_payload() - reserve one event payload from the
 * per-CSID free list.
 *
 * On success *evt_payload points to the reserved entry (unlinked from
 * free_payload_list); on failure *evt_payload is NULL and -ENOMEM is
 * returned. The caller must hand the entry back through
 * cam_csid_put_evt_payload() when done.
 *
 * Fix: take lock_state with spin_lock_irqsave() instead of plain
 * spin_lock(). The paired cam_csid_put_evt_payload() and the CSID IRQ
 * handler both take this lock with the irq-save variant; using the
 * plain variant here is only safe while this helper is called
 * exclusively from hard-irq context, and would deadlock against the
 * IRQ handler if it were ever called from process/softirq context.
 * Keep both sides of the pool irq-safe and consistent.
 */
static int cam_csid_get_evt_payload(
	struct cam_ife_csid_hw *csid_hw,
	struct cam_csid_evt_payload **evt_payload)
{
	unsigned long flags;

	spin_lock_irqsave(&csid_hw->lock_state, flags);

	if (list_empty(&csid_hw->free_payload_list)) {
		*evt_payload = NULL;
		spin_unlock_irqrestore(&csid_hw->lock_state, flags);
		CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload core %d",
			csid_hw->hw_intf->hw_idx);
		return -ENOMEM;
	}

	*evt_payload = list_first_entry(&csid_hw->free_payload_list,
		struct cam_csid_evt_payload, list);
	list_del_init(&(*evt_payload)->list);
	spin_unlock_irqrestore(&csid_hw->lock_state, flags);

	return 0;
}
/*
 * cam_csid_put_evt_payload() - return a previously reserved event
 * payload to the per-CSID free list and clear the caller's pointer.
 *
 * Returns -EINVAL when the caller passes a NULL payload, 0 otherwise.
 * Irq-save locking is used because the free list is shared with
 * hard-irq context.
 */
static int cam_csid_put_evt_payload(
	struct cam_ife_csid_hw *csid_hw,
	struct cam_csid_evt_payload **evt_payload)
{
	unsigned long flags;

	/* Guard against double-put / never-acquired payloads */
	if (!(*evt_payload)) {
		CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid payload core %d",
			csid_hw->hw_intf->hw_idx);
		return -EINVAL;
	}

	spin_lock_irqsave(&csid_hw->lock_state, flags);
	list_add_tail(&(*evt_payload)->list, &csid_hw->free_payload_list);
	*evt_payload = NULL;
	spin_unlock_irqrestore(&csid_hw->lock_state, flags);

	return 0;
}
/*
 * cam_csid_status_to_str() - map an IRQ status register index
 * (CAM_IFE_CSID_IRQ_REG_*) to a printable name for error logs.
 *
 * Returns "Invalid IRQ" for any index without a known name.
 */
static char *cam_csid_status_to_str(uint32_t status)
{
	/* Indexed directly by the CAM_IFE_CSID_IRQ_REG_* enum values */
	static char *const reg_names[] = {
		[CAM_IFE_CSID_IRQ_REG_TOP]   = "TOP",
		[CAM_IFE_CSID_IRQ_REG_RX]    = "RX",
		[CAM_IFE_CSID_IRQ_REG_IPP]   = "IPP",
		[CAM_IFE_CSID_IRQ_REG_PPP]   = "PPP",
		[CAM_IFE_CSID_IRQ_REG_RDI_0] = "RDI0",
		[CAM_IFE_CSID_IRQ_REG_RDI_1] = "RDI1",
		[CAM_IFE_CSID_IRQ_REG_RDI_2] = "RDI2",
		[CAM_IFE_CSID_IRQ_REG_RDI_3] = "RDI3",
		[CAM_IFE_CSID_IRQ_REG_UDI_0] = "UDI0",
		[CAM_IFE_CSID_IRQ_REG_UDI_1] = "UDI1",
		[CAM_IFE_CSID_IRQ_REG_UDI_2] = "UDI2",
	};

	if (status < (sizeof(reg_names) / sizeof(reg_names[0])) &&
		reg_names[status])
		return reg_names[status];

	return "Invalid IRQ";
}
static int cam_csid_evt_bottom_half_handler(
void *handler_priv,
void *evt_payload_priv)
{
struct cam_ife_csid_hw *csid_hw;
struct cam_csid_evt_payload *evt_payload;
int i;
int rc = 0;
struct cam_isp_hw_event_info event_info;
if (!handler_priv || !evt_payload_priv) {
CAM_ERR(CAM_ISP,
"Invalid Param handler_priv %pK evt_payload_priv %pK",
handler_priv, evt_payload_priv);
return 0;
}
csid_hw = (struct cam_ife_csid_hw *)handler_priv;
evt_payload = (struct cam_csid_evt_payload *)evt_payload_priv;
if (!csid_hw->event_cb || !csid_hw->priv) {
CAM_ERR_RATE_LIMIT(CAM_ISP,
"hw_idx %d Invalid args %pK %pK",
csid_hw->hw_intf->hw_idx,
csid_hw->event_cb,
csid_hw->priv);
goto end;
}
if (csid_hw->priv != evt_payload->priv) {
CAM_ERR_RATE_LIMIT(CAM_ISP,
"hw_idx %d priv mismatch %pK, %pK",
csid_hw->hw_intf->hw_idx,
csid_hw->priv,
evt_payload->priv);
goto end;
}
CAM_ERR_RATE_LIMIT(CAM_ISP, "idx %d err %d phy %d cnt %d",
csid_hw->hw_intf->hw_idx,
evt_payload->evt_type,
csid_hw->csi2_rx_cfg.phy_sel,
csid_hw->csi2_cfg_cnt);
for (i = 0; i < CAM_IFE_CSID_IRQ_REG_MAX; i++)
CAM_ERR_RATE_LIMIT(CAM_ISP, "status %s: %x",
cam_csid_status_to_str(i),
evt_payload->irq_status[i]);
/* this hunk can be extended to handle more cases
* which we want to offload to bottom half from
* irq handlers
*/
event_info.err_type = evt_payload->evt_type;
event_info.hw_idx = evt_payload->hw_idx;
switch (evt_payload->evt_type) {
case CAM_ISP_HW_ERROR_CSID_FATAL:
if (csid_hw->fatal_err_detected)
break;
csid_hw->fatal_err_detected = true;
rc = csid_hw->event_cb(NULL,
CAM_ISP_HW_EVENT_ERROR, (void *)&event_info);
break;
default:
CAM_DBG(CAM_ISP, "CSID[%d] invalid error type %d",
csid_hw->hw_intf->hw_idx,
evt_payload->evt_type);
break;
}
end:
cam_csid_put_evt_payload(csid_hw, &evt_payload);
return 0;
}
/*
 * cam_csid_handle_hw_err_irq() - hard-irq half of CSID error handling.
 *
 * Reserves an event payload, snapshots the captured per-register IRQ
 * status into it, and enqueues cam_csid_evt_bottom_half_handler() on
 * the CSID tasklet for deferred reporting.
 *
 * Fix: if the tasklet bottom-half command cannot be obtained after a
 * payload was already reserved, return the payload to the free list
 * before bailing out. The original leaked it on this path, slowly
 * starving the (fixed-size) payload pool until no further CSID errors
 * could be reported.
 */
static int cam_csid_handle_hw_err_irq(
	struct cam_ife_csid_hw *csid_hw,
	int evt_type,
	uint32_t *irq_status)
{
	int rc = 0;
	int i;
	void *bh_cmd = NULL;
	struct cam_csid_evt_payload *evt_payload;

	CAM_DBG(CAM_ISP, "CSID[%d] error %d",
		csid_hw->hw_intf->hw_idx, evt_type);

	rc = cam_csid_get_evt_payload(csid_hw, &evt_payload);
	if (rc) {
		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"No free payload core %d",
			csid_hw->hw_intf->hw_idx);
		return rc;
	}

	rc = tasklet_bh_api.get_bh_payload_func(csid_hw->tasklet, &bh_cmd);
	if (rc || !bh_cmd) {
		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"CSID[%d] Can not get cmd for tasklet, evt_type %d",
			csid_hw->hw_intf->hw_idx,
			evt_type);
		/* Don't leak the reserved payload on this failure path */
		cam_csid_put_evt_payload(csid_hw, &evt_payload);
		return rc;
	}

	evt_payload->evt_type = evt_type;
	evt_payload->priv = csid_hw->priv;
	evt_payload->hw_idx = csid_hw->hw_intf->hw_idx;

	/* Snapshot the IRQ status so the bottom half can log it later */
	for (i = 0; i < CAM_IFE_CSID_IRQ_REG_MAX; i++)
		evt_payload->irq_status[i] = irq_status[i];

	tasklet_bh_api.bottom_half_enqueue_func(csid_hw->tasklet,
		bh_cmd,
		csid_hw,
		evt_payload,
		cam_csid_evt_bottom_half_handler);

	return rc;
}
irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
{
struct cam_ife_csid_hw *csid_hw;
struct cam_hw_soc_info *soc_info;
const struct cam_ife_csid_reg_offset *csid_reg;
const struct cam_ife_csid_csi2_rx_reg_offset *csi2_reg;
uint32_t i, irq_status_top, irq_status_rx, irq_status_ipp = 0;
uint32_t irq_status_rdi[CAM_IFE_CSID_RDI_MAX] = {0, 0, 0, 0};
uint32_t irq_status_udi[CAM_IFE_CSID_UDI_MAX] = {0, 0, 0};
uint32_t val, val2, irq_status_ppp = 0;
uint32_t irq_status[CAM_IFE_CSID_IRQ_REG_MAX] = {0};
uint32_t i, val, val2;
bool fatal_err_detected = false;
uint32_t sof_irq_debug_en = 0;
unsigned long flags;
@@ -4193,23 +4394,27 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
csi2_reg = csid_reg->csi2_reg;
/* read */
irq_status_top = cam_io_r_mb(soc_info->reg_map[0].mem_base +
irq_status[CAM_IFE_CSID_IRQ_REG_TOP] =
cam_io_r_mb(soc_info->reg_map[0].mem_base +
csid_reg->cmn_reg->csid_top_irq_status_addr);
irq_status_rx = cam_io_r_mb(soc_info->reg_map[0].mem_base +
irq_status[CAM_IFE_CSID_IRQ_REG_RX] =
cam_io_r_mb(soc_info->reg_map[0].mem_base +
csid_reg->csi2_reg->csid_csi2_rx_irq_status_addr);
if (csid_reg->cmn_reg->num_pix)
irq_status_ipp = cam_io_r_mb(soc_info->reg_map[0].mem_base +
irq_status[CAM_IFE_CSID_IRQ_REG_IPP] =
cam_io_r_mb(soc_info->reg_map[0].mem_base +
csid_reg->ipp_reg->csid_pxl_irq_status_addr);
if (csid_reg->cmn_reg->num_ppp)
irq_status_ppp = cam_io_r_mb(soc_info->reg_map[0].mem_base +
irq_status[CAM_IFE_CSID_IRQ_REG_PPP] =
cam_io_r_mb(soc_info->reg_map[0].mem_base +
csid_reg->ppp_reg->csid_pxl_irq_status_addr);
if (csid_reg->cmn_reg->num_rdis <= CAM_IFE_CSID_RDI_MAX) {
for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
irq_status_rdi[i] =
irq_status[i] =
cam_io_r_mb(soc_info->reg_map[0].mem_base +
csid_reg->rdi_reg[i]->csid_rdi_irq_status_addr);
}
@@ -4217,7 +4422,7 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
if (csid_reg->cmn_reg->num_udis <= CAM_IFE_CSID_UDI_MAX) {
for (i = 0; i < csid_reg->cmn_reg->num_udis; i++) {
irq_status_udi[i] =
irq_status[CAM_IFE_CSID_IRQ_REG_UDI_0 + i] =
cam_io_r_mb(soc_info->reg_map[0].mem_base +
csid_reg->udi_reg[i]->csid_udi_irq_status_addr);
}
@@ -4225,22 +4430,26 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
spin_lock_irqsave(&csid_hw->hw_info->hw_lock, flags);
/* clear */
cam_io_w_mb(irq_status_top, soc_info->reg_map[0].mem_base +
cam_io_w_mb(irq_status[CAM_IFE_CSID_IRQ_REG_TOP],
soc_info->reg_map[0].mem_base +
csid_reg->cmn_reg->csid_top_irq_clear_addr);
cam_io_w_mb(irq_status_rx, soc_info->reg_map[0].mem_base +
cam_io_w_mb(irq_status[CAM_IFE_CSID_IRQ_REG_RX],
soc_info->reg_map[0].mem_base +
csid_reg->csi2_reg->csid_csi2_rx_irq_clear_addr);
if (csid_reg->cmn_reg->num_pix)
cam_io_w_mb(irq_status_ipp, soc_info->reg_map[0].mem_base +
cam_io_w_mb(irq_status[CAM_IFE_CSID_IRQ_REG_IPP],
soc_info->reg_map[0].mem_base +
csid_reg->ipp_reg->csid_pxl_irq_clear_addr);
if (csid_reg->cmn_reg->num_ppp)
cam_io_w_mb(irq_status_ppp, soc_info->reg_map[0].mem_base +
cam_io_w_mb(irq_status[CAM_IFE_CSID_IRQ_REG_PPP],
soc_info->reg_map[0].mem_base +
csid_reg->ppp_reg->csid_pxl_irq_clear_addr);
if (csid_reg->cmn_reg->num_rdis <= CAM_IFE_CSID_RDI_MAX) {
for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
cam_io_w_mb(irq_status_rdi[i],
cam_io_w_mb(irq_status[i],
soc_info->reg_map[0].mem_base +
csid_reg->rdi_reg[i]->csid_rdi_irq_clear_addr);
}
@@ -4248,7 +4457,7 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
if (csid_reg->cmn_reg->num_udis <= CAM_IFE_CSID_UDI_MAX) {
for (i = 0; i < csid_reg->cmn_reg->num_udis; i++) {
cam_io_w_mb(irq_status_udi[i],
cam_io_w_mb(irq_status[CAM_IFE_CSID_IRQ_REG_UDI_0 + i],
soc_info->reg_map[0].mem_base +
csid_reg->udi_reg[i]->csid_udi_irq_clear_addr);
}
@@ -4259,18 +4468,24 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
spin_unlock_irqrestore(&csid_hw->hw_info->hw_lock, flags);
CAM_DBG(CAM_ISP, "irq_status_top = 0x%x", irq_status_top);
CAM_DBG(CAM_ISP, "irq_status_rx = 0x%x", irq_status_rx);
CAM_DBG(CAM_ISP, "irq_status_ipp = 0x%x", irq_status_ipp);
CAM_DBG(CAM_ISP, "irq_status_ppp = 0x%x", irq_status_ppp);
CAM_DBG(CAM_ISP, "irq_status_top = 0x%x",
irq_status[CAM_IFE_CSID_IRQ_REG_TOP]);
CAM_DBG(CAM_ISP, "irq_status_rx = 0x%x",
irq_status[CAM_IFE_CSID_IRQ_REG_RX]);
CAM_DBG(CAM_ISP, "irq_status_ipp = 0x%x",
irq_status[CAM_IFE_CSID_IRQ_REG_IPP]);
CAM_DBG(CAM_ISP, "irq_status_ppp = 0x%x",
irq_status[CAM_IFE_CSID_IRQ_REG_PPP]);
CAM_DBG(CAM_ISP,
"irq_status_rdi0= 0x%x irq_status_rdi1= 0x%x irq_status_rdi2= 0x%x",
irq_status_rdi[0], irq_status_rdi[1], irq_status_rdi[2]);
irq_status[0], irq_status[1], irq_status[2]);
CAM_DBG(CAM_ISP,
"irq_status_udi0= 0x%x irq_status_udi1= 0x%x irq_status_udi2= 0x%x",
irq_status_udi[0], irq_status_udi[1], irq_status_udi[2]);
irq_status[CAM_IFE_CSID_IRQ_REG_UDI_0],
irq_status[CAM_IFE_CSID_IRQ_REG_UDI_1],
irq_status[CAM_IFE_CSID_IRQ_REG_UDI_2]);
if (irq_status_top & CSID_TOP_IRQ_DONE) {
if (irq_status[CAM_IFE_CSID_IRQ_REG_TOP] & CSID_TOP_IRQ_DONE) {
CAM_DBG(CAM_ISP, "csid top reset complete");
complete(&csid_hw->csid_top_complete);
csid_hw->is_resetting = false;
@@ -4283,78 +4498,99 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
return IRQ_HANDLED;
}
if (irq_status_rx & BIT(csid_reg->csi2_reg->csi2_rst_done_shift_val)) {
if (irq_status[CAM_IFE_CSID_IRQ_REG_RX] &
BIT(csid_reg->csi2_reg->csi2_rst_done_shift_val)) {
CAM_DBG(CAM_ISP, "csi rx reset complete");
complete(&csid_hw->csid_csi2_complete);
}
spin_lock_irqsave(&csid_hw->lock_state, flags);
if (csid_hw->device_enabled == 1) {
if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW) {
if (irq_status[CAM_IFE_CSID_IRQ_REG_RX] &
CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW) {
CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 0 over flow",
csid_hw->hw_intf->hw_idx);
fatal_err_detected = true;
goto handle_fatal_error;
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW) {
if (irq_status[CAM_IFE_CSID_IRQ_REG_RX] &
CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW) {
CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 1 over flow",
csid_hw->hw_intf->hw_idx);
fatal_err_detected = true;
goto handle_fatal_error;
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW) {
if (irq_status[CAM_IFE_CSID_IRQ_REG_RX] &
CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW) {
CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 2 over flow",
csid_hw->hw_intf->hw_idx);
csid_hw->hw_intf->hw_idx);
fatal_err_detected = true;
goto handle_fatal_error;
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW) {
if (irq_status[CAM_IFE_CSID_IRQ_REG_RX] &
CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW) {
CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 3 over flow",
csid_hw->hw_intf->hw_idx);
csid_hw->hw_intf->hw_idx);
fatal_err_detected = true;
goto handle_fatal_error;
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_TG_FIFO_OVERFLOW) {
if (irq_status[CAM_IFE_CSID_IRQ_REG_RX] &
CSID_CSI2_RX_ERROR_TG_FIFO_OVERFLOW) {
CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d TG OVER FLOW",
csid_hw->hw_intf->hw_idx);
csid_hw->hw_intf->hw_idx);
fatal_err_detected = true;
goto handle_fatal_error;
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION) {
if (irq_status[CAM_IFE_CSID_IRQ_REG_RX] &
CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION) {
CAM_ERR_RATE_LIMIT(CAM_ISP,
"CSID:%d CPHY_EOT_RECEPTION",
csid_hw->hw_intf->hw_idx);
csid_hw->hw_intf->hw_idx);
csid_hw->error_irq_count++;
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_SOT_RECEPTION) {
if (irq_status[CAM_IFE_CSID_IRQ_REG_RX] &
CSID_CSI2_RX_ERROR_CPHY_SOT_RECEPTION) {
CAM_ERR_RATE_LIMIT(CAM_ISP,
"CSID:%d CPHY_SOT_RECEPTION",
csid_hw->hw_intf->hw_idx);
csid_hw->hw_intf->hw_idx);
csid_hw->error_irq_count++;
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_PH_CRC) {
if (irq_status[CAM_IFE_CSID_IRQ_REG_RX] &
CSID_CSI2_RX_ERROR_CPHY_PH_CRC) {
CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d CPHY_PH_CRC",
csid_hw->hw_intf->hw_idx);
csid_hw->hw_intf->hw_idx);
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_CRC) {
if (irq_status[CAM_IFE_CSID_IRQ_REG_RX] &
CSID_CSI2_RX_ERROR_CRC) {
CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d ERROR_CRC",
csid_hw->hw_intf->hw_idx);
csid_hw->hw_intf->hw_idx);
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_ECC) {
if (irq_status[CAM_IFE_CSID_IRQ_REG_RX] &
CSID_CSI2_RX_ERROR_ECC) {
CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d ERROR_ECC",
csid_hw->hw_intf->hw_idx);
csid_hw->hw_intf->hw_idx);
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_MMAPPED_VC_DT) {
if (irq_status[CAM_IFE_CSID_IRQ_REG_RX] &
CSID_CSI2_RX_ERROR_MMAPPED_VC_DT) {
CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d MMAPPED_VC_DT",
csid_hw->hw_intf->hw_idx);
csid_hw->hw_intf->hw_idx);
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_STREAM_UNDERFLOW) {
if (irq_status[CAM_IFE_CSID_IRQ_REG_RX] &
CSID_CSI2_RX_ERROR_STREAM_UNDERFLOW) {
CAM_ERR_RATE_LIMIT(CAM_ISP,
"CSID:%d ERROR_STREAM_UNDERFLOW",
csid_hw->hw_intf->hw_idx);
csid_hw->hw_intf->hw_idx);
csid_hw->error_irq_count++;
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_UNBOUNDED_FRAME) {
if (irq_status[CAM_IFE_CSID_IRQ_REG_RX] &
CSID_CSI2_RX_ERROR_UNBOUNDED_FRAME) {
CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d UNBOUNDED_FRAME",
csid_hw->hw_intf->hw_idx);
csid_hw->hw_intf->hw_idx);
csid_hw->error_irq_count++;
}
}
handle_fatal_error:
spin_unlock_irqrestore(&csid_hw->lock_state, flags);
if (csid_hw->error_irq_count >
@@ -4363,26 +4599,33 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
csid_hw->error_irq_count = 0;
}
if (fatal_err_detected)
if (fatal_err_detected) {
cam_ife_csid_halt_csi2(csid_hw);
cam_csid_handle_hw_err_irq(csid_hw,
CAM_ISP_HW_ERROR_CSID_FATAL, irq_status);
}
if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOT_IRQ) {
if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL0_EOT_CAPTURED) {
if (irq_status[CAM_IFE_CSID_IRQ_REG_RX] &
CSID_CSI2_RX_INFO_PHY_DL0_EOT_CAPTURED) {
CAM_INFO_RATE_LIMIT(CAM_ISP,
"CSID:%d PHY_DL0_EOT_CAPTURED",
csid_hw->hw_intf->hw_idx);
}
if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL1_EOT_CAPTURED) {
if (irq_status[CAM_IFE_CSID_IRQ_REG_RX] &
CSID_CSI2_RX_INFO_PHY_DL1_EOT_CAPTURED) {
CAM_INFO_RATE_LIMIT(CAM_ISP,
"CSID:%d PHY_DL1_EOT_CAPTURED",
csid_hw->hw_intf->hw_idx);
}
if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL2_EOT_CAPTURED) {
if (irq_status[CAM_IFE_CSID_IRQ_REG_RX] &
CSID_CSI2_RX_INFO_PHY_DL2_EOT_CAPTURED) {
CAM_INFO_RATE_LIMIT(CAM_ISP,
"CSID:%d PHY_DL2_EOT_CAPTURED",
csid_hw->hw_intf->hw_idx);
}
if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL3_EOT_CAPTURED) {
if (irq_status[CAM_IFE_CSID_IRQ_REG_RX] &
CSID_CSI2_RX_INFO_PHY_DL3_EOT_CAPTURED) {
CAM_INFO_RATE_LIMIT(CAM_ISP,
"CSID:%d PHY_DL3_EOT_CAPTURED",
csid_hw->hw_intf->hw_idx);
@@ -4390,22 +4633,26 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
}
if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOT_IRQ) {
if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL0_SOT_CAPTURED) {
if (irq_status[CAM_IFE_CSID_IRQ_REG_RX] &
CSID_CSI2_RX_INFO_PHY_DL0_SOT_CAPTURED) {
CAM_INFO_RATE_LIMIT(CAM_ISP,
"CSID:%d PHY_DL0_SOT_CAPTURED",
csid_hw->hw_intf->hw_idx);
}
if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL1_SOT_CAPTURED) {
if (irq_status[CAM_IFE_CSID_IRQ_REG_RX] &
CSID_CSI2_RX_INFO_PHY_DL1_SOT_CAPTURED) {
CAM_INFO_RATE_LIMIT(CAM_ISP,
"CSID:%d PHY_DL1_SOT_CAPTURED",
csid_hw->hw_intf->hw_idx);
}
if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL2_SOT_CAPTURED) {
if (irq_status[CAM_IFE_CSID_IRQ_REG_RX] &
CSID_CSI2_RX_INFO_PHY_DL2_SOT_CAPTURED) {
CAM_INFO_RATE_LIMIT(CAM_ISP,
"CSID:%d PHY_DL2_SOT_CAPTURED",
csid_hw->hw_intf->hw_idx);
}
if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL3_SOT_CAPTURED) {
if (irq_status[CAM_IFE_CSID_IRQ_REG_RX] &
CSID_CSI2_RX_INFO_PHY_DL3_SOT_CAPTURED) {
CAM_INFO_RATE_LIMIT(CAM_ISP,
"CSID:%d PHY_DL3_SOT_CAPTURED",
csid_hw->hw_intf->hw_idx);
@@ -4413,7 +4660,9 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
}
if ((csid_hw->csid_debug & CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE) &&
(irq_status_rx & CSID_CSI2_RX_INFO_LONG_PKT_CAPTURED)) {
(irq_status[CAM_IFE_CSID_IRQ_REG_RX] &
CSID_CSI2_RX_INFO_LONG_PKT_CAPTURED)) {
CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d LONG_PKT_CAPTURED",
csid_hw->hw_intf->hw_idx);
val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
@@ -4433,7 +4682,9 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
csid_hw->hw_intf->hw_idx, (val >> 16), (val & 0xFFFF));
}
if ((csid_hw->csid_debug & CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE) &&
(irq_status_rx & CSID_CSI2_RX_INFO_SHORT_PKT_CAPTURED)) {
(irq_status[CAM_IFE_CSID_IRQ_REG_RX] &
CSID_CSI2_RX_INFO_SHORT_PKT_CAPTURED)) {
CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d SHORT_PKT_CAPTURED",
csid_hw->hw_intf->hw_idx);
val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
@@ -4449,7 +4700,8 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
}
if ((csid_hw->csid_debug & CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE) &&
(irq_status_rx & CSID_CSI2_RX_INFO_CPHY_PKT_HDR_CAPTURED)) {
(irq_status[CAM_IFE_CSID_IRQ_REG_RX] &
CSID_CSI2_RX_INFO_CPHY_PKT_HDR_CAPTURED)) {
CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d CPHY_PKT_HDR_CAPTURED",
csid_hw->hw_intf->hw_idx);
val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
@@ -4463,14 +4715,15 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
/*read the IPP errors */
if (csid_reg->cmn_reg->num_pix) {
/* IPP reset done bit */
if (irq_status_ipp &
if (irq_status[CAM_IFE_CSID_IRQ_REG_IPP] &
BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
CAM_DBG(CAM_ISP, "CSID:%d IPP reset complete",
csid_hw->hw_intf->hw_idx);
complete(&csid_hw->csid_ipp_complete);
}
if ((irq_status_ipp & CSID_PATH_INFO_INPUT_SOF) &&
if ((irq_status[CAM_IFE_CSID_IRQ_REG_IPP] &
CSID_PATH_INFO_INPUT_SOF) &&
(csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ)) {
CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d IPP SOF received",
csid_hw->hw_intf->hw_idx);
@@ -4478,22 +4731,26 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
csid_hw->irq_debug_cnt++;
}
if ((irq_status_ipp & CSID_PATH_INFO_INPUT_EOF) &&
if ((irq_status[CAM_IFE_CSID_IRQ_REG_IPP] &
CSID_PATH_INFO_INPUT_EOF) &&
(csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOF_IRQ))
CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d IPP EOF received",
csid_hw->hw_intf->hw_idx);
if ((irq_status_ipp & CSID_PATH_ERROR_CCIF_VIOLATION))
if ((irq_status[CAM_IFE_CSID_IRQ_REG_IPP] &
CSID_PATH_ERROR_CCIF_VIOLATION))
CAM_INFO_RATE_LIMIT(CAM_ISP,
"CSID:%d IPP CCIF violation",
csid_hw->hw_intf->hw_idx);
if ((irq_status_ipp & CSID_PATH_OVERFLOW_RECOVERY))
if ((irq_status[CAM_IFE_CSID_IRQ_REG_IPP] &
CSID_PATH_OVERFLOW_RECOVERY))
CAM_INFO_RATE_LIMIT(CAM_ISP,
"CSID:%d IPP Overflow due to back pressure",
csid_hw->hw_intf->hw_idx);
if (irq_status_ipp & CSID_PATH_ERROR_FIFO_OVERFLOW) {
if (irq_status[CAM_IFE_CSID_IRQ_REG_IPP] &
CSID_PATH_ERROR_FIFO_OVERFLOW) {
CAM_ERR_RATE_LIMIT(CAM_ISP,
"CSID:%d IPP fifo over flow",
csid_hw->hw_intf->hw_idx);
@@ -4503,14 +4760,17 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
csid_reg->ipp_reg->csid_pxl_ctrl_addr);
}
if ((irq_status_ipp & CSID_PATH_ERROR_PIX_COUNT) ||
(irq_status_ipp & CSID_PATH_ERROR_LINE_COUNT)) {
if ((irq_status[CAM_IFE_CSID_IRQ_REG_IPP] &
CSID_PATH_ERROR_PIX_COUNT) ||
(irq_status[CAM_IFE_CSID_IRQ_REG_IPP] &
CSID_PATH_ERROR_LINE_COUNT)) {
val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
csid_reg->ipp_reg->csid_pxl_format_measure0_addr);
CAM_ERR(CAM_ISP,
"CSID:%d irq_status_ipp:0x%x",
csid_hw->hw_intf->hw_idx, irq_status_ipp);
csid_hw->hw_intf->hw_idx,
irq_status[CAM_IFE_CSID_IRQ_REG_IPP]);
CAM_ERR(CAM_ISP,
"Expected:: h: 0x%x w: 0x%x actual:: h: 0x%x w: 0x%x",
csid_hw->ipp_path_config.height,
@@ -4526,14 +4786,15 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
/*read PPP errors */
if (csid_reg->cmn_reg->num_ppp) {
/* PPP reset done bit */
if (irq_status_ppp &
if (irq_status[CAM_IFE_CSID_IRQ_REG_PPP] &
BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
CAM_DBG(CAM_ISP, "CSID:%d PPP reset complete",
csid_hw->hw_intf->hw_idx);
complete(&csid_hw->csid_ppp_complete);
}
if ((irq_status_ppp & CSID_PATH_INFO_INPUT_SOF) &&
if ((irq_status[CAM_IFE_CSID_IRQ_REG_PPP] &
CSID_PATH_INFO_INPUT_SOF) &&
(csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ)) {
CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d PPP SOF received",
csid_hw->hw_intf->hw_idx);
@@ -4541,22 +4802,26 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
csid_hw->irq_debug_cnt++;
}
if ((irq_status_ppp & CSID_PATH_INFO_INPUT_EOF) &&
if ((irq_status[CAM_IFE_CSID_IRQ_REG_PPP] &
CSID_PATH_INFO_INPUT_EOF) &&
(csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOF_IRQ))
CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d PPP EOF received",
csid_hw->hw_intf->hw_idx);
if ((irq_status_ppp & CSID_PATH_ERROR_CCIF_VIOLATION))
if ((irq_status[CAM_IFE_CSID_IRQ_REG_PPP] &
CSID_PATH_ERROR_CCIF_VIOLATION))
CAM_INFO_RATE_LIMIT(CAM_ISP,
"CSID:%d PPP CCIF violation",
csid_hw->hw_intf->hw_idx);
if ((irq_status_ppp & CSID_PATH_OVERFLOW_RECOVERY))
if ((irq_status[CAM_IFE_CSID_IRQ_REG_PPP] &
CSID_PATH_OVERFLOW_RECOVERY))
CAM_INFO_RATE_LIMIT(CAM_ISP,
"CSID:%d IPP Overflow due to back pressure",
csid_hw->hw_intf->hw_idx);
if (irq_status_ppp & CSID_PATH_ERROR_FIFO_OVERFLOW) {
if (irq_status[CAM_IFE_CSID_IRQ_REG_PPP] &
CSID_PATH_ERROR_FIFO_OVERFLOW) {
CAM_ERR_RATE_LIMIT(CAM_ISP,
"CSID:%d PPP fifo over flow",
csid_hw->hw_intf->hw_idx);
@@ -4566,14 +4831,17 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
csid_reg->ppp_reg->csid_pxl_ctrl_addr);
}
if ((irq_status_ppp & CSID_PATH_ERROR_PIX_COUNT) ||
(irq_status_ppp & CSID_PATH_ERROR_LINE_COUNT)) {
if ((irq_status[CAM_IFE_CSID_IRQ_REG_PPP] &
CSID_PATH_ERROR_PIX_COUNT) ||
(irq_status[CAM_IFE_CSID_IRQ_REG_PPP] &
CSID_PATH_ERROR_LINE_COUNT)) {
val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
csid_reg->ppp_reg->csid_pxl_format_measure0_addr);
CAM_ERR(CAM_ISP,
"CSID:%d irq_status_ppp:0x%x",
csid_hw->hw_intf->hw_idx, irq_status_ppp);
csid_hw->hw_intf->hw_idx,
irq_status[CAM_IFE_CSID_IRQ_REG_PPP]);
CAM_ERR(CAM_ISP,
"Expected:: h: 0x%x w: 0x%x actual:: h: 0x%x w: 0x%x",
csid_hw->ppp_path_config.height,
@@ -4587,14 +4855,14 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
}
for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
if (irq_status_rdi[i] &
if (irq_status[i] &
BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
CAM_DBG(CAM_ISP, "CSID:%d RDI%d reset complete",
csid_hw->hw_intf->hw_idx, i);
complete(&csid_hw->csid_rdin_complete[i]);
}
if ((irq_status_rdi[i] & CSID_PATH_INFO_INPUT_SOF) &&
if ((irq_status[i] & CSID_PATH_INFO_INPUT_SOF) &&
(csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ)) {
CAM_INFO_RATE_LIMIT(CAM_ISP,
"CSID:%d RDI:%d SOF received",
@@ -4603,23 +4871,23 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
csid_hw->irq_debug_cnt++;
}
if ((irq_status_rdi[i] & CSID_PATH_INFO_INPUT_EOF) &&
if ((irq_status[i] & CSID_PATH_INFO_INPUT_EOF) &&
(csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOF_IRQ))
CAM_INFO_RATE_LIMIT(CAM_ISP,
"CSID:%d RDI:%d EOF received",
csid_hw->hw_intf->hw_idx, i);
if ((irq_status_rdi[i] & CSID_PATH_ERROR_CCIF_VIOLATION))
if ((irq_status[i] & CSID_PATH_ERROR_CCIF_VIOLATION))
CAM_INFO_RATE_LIMIT(CAM_ISP,
"CSID:%d RDI :%d CCIF violation",
csid_hw->hw_intf->hw_idx, i);
if ((irq_status_rdi[i] & CSID_PATH_OVERFLOW_RECOVERY))
if ((irq_status[i] & CSID_PATH_OVERFLOW_RECOVERY))
CAM_INFO_RATE_LIMIT(CAM_ISP,
"CSID:%d RDI :%d Overflow due to back pressure",
csid_hw->hw_intf->hw_idx, i);
if (irq_status_rdi[i] & CSID_PATH_ERROR_FIFO_OVERFLOW) {
if (irq_status[i] & CSID_PATH_ERROR_FIFO_OVERFLOW) {
CAM_ERR_RATE_LIMIT(CAM_ISP,
"CSID:%d RDI fifo over flow",
csid_hw->hw_intf->hw_idx);
@@ -4629,8 +4897,8 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
csid_reg->rdi_reg[i]->csid_rdi_ctrl_addr);
}
if ((irq_status_rdi[i] & CSID_PATH_ERROR_PIX_COUNT) ||
(irq_status_rdi[i] & CSID_PATH_ERROR_LINE_COUNT)) {
if ((irq_status[i] & CSID_PATH_ERROR_PIX_COUNT) ||
(irq_status[i] & CSID_PATH_ERROR_LINE_COUNT)) {
val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
csid_reg->rdi_reg[i]->csid_rdi_format_measure0_addr);
val2 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
@@ -4638,7 +4906,7 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
);
CAM_ERR(CAM_ISP,
"CSID:%d irq_status_rdi[%d]:0x%x",
csid_hw->hw_intf->hw_idx, i, irq_status_rdi[i]);
csid_hw->hw_intf->hw_idx, i, irq_status[i]);
CAM_ERR(CAM_ISP,
"Expected:: h: 0x%x w: 0x%x actual:: h: 0x%x w: 0x%x",
((val2 >>
@@ -4655,14 +4923,15 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
}
for (i = 0; i < csid_reg->cmn_reg->num_udis; i++) {
if (irq_status_udi[i] &
if (irq_status[CAM_IFE_CSID_IRQ_REG_UDI_0 + i] &
BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
CAM_DBG(CAM_ISP, "CSID:%d UDI%d reset complete",
csid_hw->hw_intf->hw_idx, i);
complete(&csid_hw->csid_udin_complete[i]);
}
if ((irq_status_udi[i] & CSID_PATH_INFO_INPUT_SOF) &&
if ((irq_status[CAM_IFE_CSID_IRQ_REG_UDI_0 + i] &
CSID_PATH_INFO_INPUT_SOF) &&
(csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ)) {
CAM_INFO_RATE_LIMIT(CAM_ISP,
"CSID:%d UDI:%d SOF received",
@@ -4671,23 +4940,27 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
csid_hw->irq_debug_cnt++;
}
if ((irq_status_udi[i] & CSID_PATH_INFO_INPUT_EOF) &&
if ((irq_status[CAM_IFE_CSID_IRQ_REG_UDI_0 + i] &
CSID_PATH_INFO_INPUT_EOF) &&
(csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOF_IRQ))
CAM_INFO_RATE_LIMIT(CAM_ISP,
"CSID:%d UDI:%d EOF received",
csid_hw->hw_intf->hw_idx, i);
if ((irq_status_udi[i] & CSID_PATH_ERROR_CCIF_VIOLATION))
if ((irq_status[CAM_IFE_CSID_IRQ_REG_UDI_0 + i] &
CSID_PATH_ERROR_CCIF_VIOLATION))
CAM_WARN_RATE_LIMIT(CAM_ISP,
"CSID:%d UDI :%d CCIF violation",
csid_hw->hw_intf->hw_idx, i);
if ((irq_status_udi[i] & CSID_PATH_OVERFLOW_RECOVERY))
if ((irq_status[CAM_IFE_CSID_IRQ_REG_UDI_0 + i] &
CSID_PATH_OVERFLOW_RECOVERY))
CAM_WARN_RATE_LIMIT(CAM_ISP,
"CSID:%d UDI :%d Overflow due to back pressure",
csid_hw->hw_intf->hw_idx, i);
if (irq_status_udi[i] & CSID_PATH_ERROR_FIFO_OVERFLOW) {
if (irq_status[CAM_IFE_CSID_IRQ_REG_UDI_0 + i] &
CSID_PATH_ERROR_FIFO_OVERFLOW) {
CAM_ERR_RATE_LIMIT(CAM_ISP,
"CSID:%d UDI fifo over flow",
csid_hw->hw_intf->hw_idx);
@@ -4867,6 +5140,19 @@ int cam_ife_csid_hw_probe_init(struct cam_hw_intf *csid_hw_intf,
ife_csid_hw->udi_res[i].res_priv = path_data;
}
rc = cam_tasklet_init(&ife_csid_hw->tasklet, ife_csid_hw, csid_idx);
if (rc) {
CAM_ERR(CAM_ISP, "Unable to create CSID tasklet rc %d", rc);
goto err;
}
INIT_LIST_HEAD(&ife_csid_hw->free_payload_list);
for (i = 0; i < CAM_CSID_EVT_PAYLOAD_MAX; i++) {
INIT_LIST_HEAD(&ife_csid_hw->evt_payload[i].list);
list_add_tail(&ife_csid_hw->evt_payload[i].list,
&ife_csid_hw->free_payload_list);
}
ife_csid_hw->csid_debug = 0;
ife_csid_hw->error_irq_count = 0;
ife_csid_hw->ipp_path_config.measure_enabled = 0;

View File

@@ -72,6 +72,8 @@
#define CSID_DEBUG_ENABLE_HBI_VBI_INFO BIT(7)
#define CSID_DEBUG_DISABLE_EARLY_EOF BIT(8)
#define CAM_CSID_EVT_PAYLOAD_MAX 10
/* enum cam_csid_path_halt_mode select the path halt mode control */
enum cam_csid_path_halt_mode {
CSID_HALT_MODE_INTERNAL,
@@ -91,6 +93,24 @@ enum cam_csid_path_timestamp_stb_sel {
CSID_TIMESTAMP_STB_MAX,
};
/**
 * enum cam_ife_csid_irq_reg - Index of each CSID IRQ status register in a
 *                             per-event status array (see the irq_status
 *                             array in struct cam_csid_evt_payload), one
 *                             slot per RDI/TOP/RX/IPP/PPP/UDI status source
 */
enum cam_ife_csid_irq_reg {
	CAM_IFE_CSID_IRQ_REG_RDI_0,
	CAM_IFE_CSID_IRQ_REG_RDI_1,
	CAM_IFE_CSID_IRQ_REG_RDI_2,
	CAM_IFE_CSID_IRQ_REG_RDI_3,
	CAM_IFE_CSID_IRQ_REG_TOP,
	CAM_IFE_CSID_IRQ_REG_RX,
	CAM_IFE_CSID_IRQ_REG_IPP,
	CAM_IFE_CSID_IRQ_REG_PPP,
	CAM_IFE_CSID_IRQ_REG_UDI_0,
	CAM_IFE_CSID_IRQ_REG_UDI_1,
	CAM_IFE_CSID_IRQ_REG_UDI_2,
	/* Number of status registers; sizes per-event snapshot arrays */
	CAM_IFE_CSID_IRQ_REG_MAX,
};
struct cam_ife_csid_pxl_reg_offset {
/* Pxl path register offsets*/
uint32_t csid_pxl_irq_status_addr;
@@ -525,12 +545,32 @@ struct cam_ife_csid_path_cfg {
uint32_t num_bytes_out;
};
/**
 * struct cam_csid_evt_payload - payload for a csid hw event
 * @list       : list head; payloads are recycled between the hw's
 *               free_payload_list and the tasklet that consumes them
 * @evt_type   : Event type from CSID
 * @irq_status : Snapshot of the IRQ status registers at event time,
 *               indexed by enum cam_ife_csid_irq_reg
 * @hw_idx     : Hw index
 * @priv       : Private data of payload
 */
struct cam_csid_evt_payload {
	struct list_head list;
	uint32_t evt_type;
	uint32_t irq_status[CAM_IFE_CSID_IRQ_REG_MAX];
	uint32_t hw_idx;
	void *priv;
};
/**
* struct cam_ife_csid_hw- csid hw device resources data
*
* @hw_intf: contain the csid hw interface information
* @hw_info: csid hw device information
* @csid_info: csid hw specific information
* @tasklet: tasklet to handle csid errors
* @priv: private data to be sent with callback
* @free_payload_list: list head for payload
* @evt_payload: Event payload to be passed to tasklet
* @res_type: CSID in resource type
* @csi2_rx_cfg: Csi2 rx decoder configuration for csid
* @tpg_cfg: TPG configuration
@@ -562,11 +602,17 @@ struct cam_ife_csid_path_cfg {
* @first_sof_ts first bootime stamp at the start
* @prev_qtimer_ts stores csid timestamp
* @epd_supported Flag is set if sensor supports EPD
* @fatal_err_detected flag to indicate fatal error is reported
* @event_cb Callback to hw manager if CSID event reported
*/
struct cam_ife_csid_hw {
struct cam_hw_intf *hw_intf;
struct cam_hw_info *hw_info;
struct cam_ife_csid_hw_info *csid_info;
void *tasklet;
void *priv;
struct list_head free_payload_list;
struct cam_csid_evt_payload evt_payload[CAM_CSID_EVT_PAYLOAD_MAX];
uint32_t res_type;
struct cam_ife_csid_csi2_rx_cfg csi2_rx_cfg;
struct cam_ife_csid_tpg_cfg tpg_cfg;
@@ -602,6 +648,8 @@ struct cam_ife_csid_hw {
uint64_t prev_boot_timestamp;
uint64_t prev_qtimer_ts;
uint32_t epd_supported;
bool fatal_err_detected;
cam_hw_mgr_event_cb_func event_cb;
};
int cam_ife_csid_hw_probe_init(struct cam_hw_intf *csid_hw_intf,

View File

@@ -125,6 +125,8 @@ struct cam_isp_in_port_generic_info {
* @node_res : Reserved resource structure pointer
* @crop_enable : Flag to indicate CSID crop enable
* @drop_enable : Flag to indicate CSID drop enable
* @priv: private data to be sent in callback
* @event_cb: CSID event callback to hw manager
*
*/
struct cam_csid_hw_reserve_resource_args {
@@ -138,6 +140,8 @@ struct cam_csid_hw_reserve_resource_args {
struct cam_isp_resource_node *node_res;
bool crop_enable;
bool drop_enable;
void *priv;
cam_hw_mgr_event_cb_func event_cb;
};
/**

View File

@@ -430,6 +430,8 @@ struct cam_mem_cache_ops_cmd {
#define CAM_REQ_MGR_ERROR_TYPE_BUFFER 2
#define CAM_REQ_MGR_ERROR_TYPE_RECOVERY 3
#define CAM_REQ_MGR_ERROR_TYPE_SOF_FREEZE 4
#define CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY 5
/**
* struct cam_req_mgr_error_msg