msm: camera: isp: Add dynamic enabling of IRQ in controller

CSID SOF interrupts are needed to run a health check of the sensors
once a sensor freeze is observed. Since the 680 CSID driver uses a
controller-based IRQ mechanism, the IRQ needs to be enabled on the
controller dynamically.
This commit adds support for dynamically enabling/disabling IRQs on
the controller.
It also includes minor cleanups to remove unused code.

CRs-Fixed: 2830502
Change-Id: Ic4985555a5726264606a8e60c725eb8d1ce81364
Signed-off-by: Gaurav Jindal <gjindal@codeaurora.org>
Gaurav Jindal
2021-02-05 05:42:37 +05:30
parent 623fd1e471
commit 5013b415d1
3 changed files with 118 additions and 52 deletions
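
For orientation before the per-file diffs, here is a minimal, hedged usage sketch of the new controller API. The wrapper name example_toggle_sof_irq and its parameters are hypothetical; the actual caller added by this change is cam_ife_csid_ver2_sof_irq_debug() in cam_ife_csid_hw_ver2.c below.

/* Hedged sketch, not part of the patch: toggle the SOF info IRQ for one
 * already-subscribed path handle on a CSID IRQ controller.
 */
static int example_toggle_sof_irq(void *csid_irq_controller,
    uint32_t path_irq_handle, uint32_t irq_idx, bool enable)
{
    /* One mask word per IRQ register group handled by the controller */
    uint32_t irq_mask[CAM_IFE_CSID_IRQ_REG_MAX] = {0};

    /* Only the SOF info bit of the selected path register is touched */
    irq_mask[irq_idx] = IFE_CSID_VER2_PATH_INFO_INPUT_SOF;

    /* Returns 0 if the handle is registered on this controller, negative
     * otherwise. The controller takes its own lock when called outside
     * IRQ context (need_lock = !in_irq()), so this can be used from both
     * process and IRQ context.
     */
    return cam_irq_controller_update_irq(csid_irq_controller,
        path_irq_handle, enable, irq_mask);
}

Passing the per-register mask array lets a caller adjust only the bits it owns, while the controller updates both its top-half enable mask and the hardware mask register.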

cam_irq_controller.c

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/slab.h>
@@ -759,3 +759,71 @@ irqreturn_t cam_irq_controller_handle_irq(int irq_num, void *priv)
     return IRQ_HANDLED;
 }
+
+int cam_irq_controller_update_irq(void *irq_controller, uint32_t handle,
+    bool enable, uint32_t *irq_mask)
+{
+    struct cam_irq_controller   *controller = irq_controller;
+    struct cam_irq_evt_handler  *evt_handler = NULL;
+    struct cam_irq_evt_handler  *evt_handler_temp;
+    struct cam_irq_register_obj *irq_register = NULL;
+    enum cam_irq_priority_level priority;
+    unsigned long flags = 0;
+    unsigned int i;
+    uint32_t found = 0;
+    int rc = -EINVAL;
+    bool need_lock;
+
+    if (!controller)
+        return rc;
+
+    need_lock = !in_irq();
+    if (need_lock)
+        spin_lock_irqsave(&controller->lock, flags);
+
+    list_for_each_entry_safe(evt_handler, evt_handler_temp,
+        &controller->evt_handler_list_head, list_node) {
+        if (evt_handler->index == handle) {
+            CAM_DBG(CAM_IRQ_CTRL, "enable item %d", handle);
+            found = 1;
+            rc = 0;
+            break;
+        }
+    }
+
+    if (!found) {
+        if (need_lock)
+            spin_unlock_irqrestore(&controller->lock, flags);
+        return rc;
+    }
+
+    priority = evt_handler->priority;
+    for (i = 0; i < controller->num_registers; i++) {
+        irq_register = &controller->irq_register_arr[i];
+
+        if (enable) {
+            irq_register->top_half_enable_mask[priority] |=
+                irq_mask[i];
+            evt_handler->evt_bit_mask_arr[i] |= irq_mask[i];
+        } else {
+            irq_register->top_half_enable_mask[priority] &=
+                ~irq_mask[i];
+            evt_handler->evt_bit_mask_arr[i] &= ~irq_mask[i];
+
+            if (controller->global_clear_offset)
+                cam_io_w_mb(controller->global_clear_bitmask,
+                    controller->mem_base +
+                    controller->global_clear_offset);
+        }
+
+        cam_io_w_mb(evt_handler->evt_bit_mask_arr[i],
+            controller->mem_base +
+            controller->irq_register_arr[i].mask_reg_offset);
+    }
+
+    if (need_lock)
+        spin_unlock_irqrestore(&controller->lock, flags);
+
+    return rc;
+}

cam_irq_controller.h

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_IRQ_CONTROLLER_H_
@@ -271,4 +271,25 @@ int cam_irq_controller_enable_irq(void *irq_controller, uint32_t handle);
  * @return:  IRQ_HANDLED/IRQ_NONE
  */
 irqreturn_t cam_irq_controller_clear_and_mask(int irq_num, void *priv);
+
+/*
+ * cam_irq_controller_update_irq()
+ *
+ * @brief:           Enable/disable the interrupts on a given controller.
+ *                   Any IRQ can be enabled or disabled dynamically.
+ *
+ * @irq_controller:  Pointer to the IRQ controller that manages the events
+ *                   registered to it.
+ * @handle:          Handle returned on successful subscribe, used to
+ *                   identify the handler object.
+ *
+ * @enable:          Flag indicating whether to enable or disable the IRQ.
+ *
+ * @irq_mask:        IRQ mask to be enabled or disabled.
+ *
+ * @return:          0: events found and updated
+ *                   Negative: events not registered on this controller
+ */
+int cam_irq_controller_update_irq(void *irq_controller, uint32_t handle,
+    bool enable, uint32_t *irq_mask);
 #endif /* _CAM_IRQ_CONTROLLER_H_ */

cam_ife_csid_hw_ver2.c

@@ -291,10 +291,10 @@ static int cam_ife_csid_ver2_sof_irq_debug(
     void *cmd_args)
 {
     int i = 0;
-    uint32_t val = 0;
+    uint32_t irq_idx = 0;
     bool sof_irq_enable = false;
-    struct cam_hw_soc_info *soc_info;
     struct cam_ife_csid_ver2_reg_info *csid_reg;
+    uint32_t irq_mask[CAM_IFE_CSID_IRQ_REG_MAX] = {0};
 
     if (*((uint32_t *)cmd_args) == 1)
         sof_irq_enable = true;
@@ -307,53 +307,32 @@ static int cam_ife_csid_ver2_sof_irq_debug(
         return 0;
     }
 
-    soc_info = &csid_hw->hw_info->soc_info;
     csid_reg = (struct cam_ife_csid_ver2_reg_info *)
             csid_hw->core_info->csid_reg;
 
-    for (i = 0; i < csid_reg->cmn_reg->num_pix; i++) {
-
-        val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
-            csid_reg->ipp_reg->irq_mask_addr);
-
-        if (!val)
-            continue;
-
-        if (sof_irq_enable)
-            val |= IFE_CSID_VER2_PATH_INFO_INPUT_SOF;
-        else
-            val &= ~IFE_CSID_VER2_PATH_INFO_INPUT_SOF;
-
-        cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
-            csid_reg->ipp_reg->irq_mask_addr);
-    }
-
-    for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
-
-        val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
-            csid_reg->rdi_reg[i]->irq_mask_addr);
-
-        if (!val)
-            continue;
-
-        if (sof_irq_enable)
-            val |= IFE_CSID_VER2_PATH_INFO_INPUT_SOF;
-        else
-            val &= ~IFE_CSID_VER2_PATH_INFO_INPUT_SOF;
-
-        cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
-            csid_reg->rdi_reg[i]->irq_mask_addr);
+    for (i = CAM_IFE_CSID_IRQ_REG_RDI_0; i <= CAM_IFE_CSID_IRQ_REG_IPP;
+        i++) {
+
+        irq_idx = cam_ife_csid_get_rt_irq_idx(i,
+                csid_reg->cmn_reg->num_pix,
+                csid_reg->cmn_reg->num_ppp,
+                csid_reg->cmn_reg->num_rdis);
+
+        if (csid_hw->irq_handle[irq_idx]) {
+            irq_mask[irq_idx] = IFE_CSID_VER2_PATH_INFO_INPUT_SOF;
+            cam_irq_controller_update_irq(
+                csid_hw->csid_irq_controller,
+                csid_hw->irq_handle[i],
+                sof_irq_enable, irq_mask);
+        }
     }
 
     if (sof_irq_enable) {
         csid_hw->debug_info.path_mask |=
             IFE_CSID_VER2_PATH_INFO_INPUT_SOF;
-        csid_hw->debug_info.debug_val |=
-            CAM_IFE_CSID_DEBUG_ENABLE_SOF_IRQ;
         csid_hw->flags.sof_irq_triggered = true;
     } else {
         csid_hw->debug_info.path_mask &=
             ~IFE_CSID_VER2_PATH_INFO_INPUT_SOF;
-        csid_hw->debug_info.debug_val &=
-            ~CAM_IFE_CSID_DEBUG_ENABLE_SOF_IRQ;
         csid_hw->flags.sof_irq_triggered = false;
     }
@@ -794,11 +773,6 @@ static int cam_ife_csid_ver2_parse_path_irq_status(
     irq_reg_tag = cam_ife_csid_get_irq_reg_tag_ptr();
     temp_status = irq_status & err_mask;
 
-    if (csid_hw->flags.sof_irq_triggered &&
-        (irq_status & IFE_CSID_VER2_PATH_INFO_INPUT_SOF)) {
-        csid_hw->counters.irq_debug_cnt++;
-    }
-
     while (temp_status) {
         if (temp_status & 0x1)
@@ -826,9 +800,17 @@ static int cam_ife_csid_ver2_parse_path_irq_status(
         temp_status >>= 1;
     }
 
-    if (csid_hw->counters.irq_debug_cnt >= CAM_CSID_IRQ_SOF_DEBUG_CNT_MAX) {
-        cam_ife_csid_ver2_sof_irq_debug(csid_hw, &sof_irq_debug_en);
-        csid_hw->counters.irq_debug_cnt = 0;
+    if (csid_hw->flags.sof_irq_triggered) {
+
+        if (irq_status & IFE_CSID_VER2_PATH_INFO_INPUT_SOF)
+            csid_hw->counters.irq_debug_cnt++;
+
+        if (csid_hw->counters.irq_debug_cnt >=
+            CAM_CSID_IRQ_SOF_DEBUG_CNT_MAX) {
+            cam_ife_csid_ver2_sof_irq_debug(csid_hw,
+                &sof_irq_debug_en);
+            csid_hw->counters.irq_debug_cnt = 0;
+        }
     }
 
     return 0;
@@ -2146,7 +2128,7 @@ static int cam_ife_csid_ver2_start_rdi_path(
     void __iomem *mem_base;
     uint32_t val = 0;
     uint32_t irq_mask[CAM_IFE_CSID_IRQ_REG_MAX] = {0};
-    uint32_t top_irq_mask = 0, irq_idx = 0, rt_irq_idx;
+    uint32_t top_irq_mask = 0, irq_idx = 0;
     struct cam_ife_csid_ver2_path_cfg *path_cfg;
 
     rc = cam_ife_csid_ver2_init_config_rdi_path(
@@ -2220,11 +2202,6 @@ static int cam_ife_csid_ver2_start_rdi_path(
     res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
 
     irq_idx = cam_ife_csid_convert_res_to_irq_reg(res->res_id);
 
-    rt_irq_idx = cam_ife_csid_get_rt_irq_idx(
-        irq_idx,
-        csid_reg->cmn_reg->num_pix,
-        csid_reg->cmn_reg->num_ppp,
-        csid_reg->cmn_reg->num_rdis);
     switch (res->res_id) {
     case CAM_IFE_PIX_PATH_RES_RDI_0: