msm: camera: req_mgr: Correct slot status on mode update

The CRM pd tbl dev mask needs to be updated on mode switch in Auto SHDR,
so that the mode-switch request and subsequent requests/slots move to the
ready state once the request has been submitted by all expected devices
in a link. Also enabled the error bits related to SHDR mode switch in TFE.
Query dev info from CRM to ISP again on the activate-link call to determine
whether the usecase is SHDR and to identify the master and slave contexts.

CRs-Fixed: 3555900
Change-Id: Ie3ce093a453cda4bb5a9d8ddd31b4c8d2b2a69ed
Signed-off-by: Ayush Kumar <quic_ayushkr@quicinc.com>
This commit is contained in:
Ayush Kumar
2023-07-05 14:12:25 +05:30
committed by Alok Chauhan
parent 06afbd12b3
commit ee10bad9a9
9 changed files with 124 additions and 101 deletions

View File

@@ -6853,6 +6853,7 @@ static int __cam_isp_ctx_config_dev_in_top_state(
struct cam_packet *packet; struct cam_packet *packet;
size_t remain_len = 0; size_t remain_len = 0;
struct cam_hw_prepare_update_args cfg = {0}; struct cam_hw_prepare_update_args cfg = {0};
struct cam_isp_prepare_hw_update_data *hw_update_data;
struct cam_req_mgr_add_request add_req; struct cam_req_mgr_add_request add_req;
struct cam_isp_context *ctx_isp = struct cam_isp_context *ctx_isp =
(struct cam_isp_context *) ctx->ctx_priv; (struct cam_isp_context *) ctx->ctx_priv;
@@ -6947,6 +6948,7 @@ static int __cam_isp_ctx_config_dev_in_top_state(
goto free_req_and_buf_tracker_list; goto free_req_and_buf_tracker_list;
} }
hw_update_data = cfg.priv;
req_isp->num_cfg = cfg.num_hw_update_entries; req_isp->num_cfg = cfg.num_hw_update_entries;
req_isp->num_fence_map_out = cfg.num_out_map_entries; req_isp->num_fence_map_out = cfg.num_out_map_entries;
req_isp->num_fence_map_in = cfg.num_in_map_entries; req_isp->num_fence_map_in = cfg.num_in_map_entries;
@@ -6955,6 +6957,8 @@ static int __cam_isp_ctx_config_dev_in_top_state(
req_isp->bubble_detected = false; req_isp->bubble_detected = false;
req_isp->cdm_reset_before_apply = false; req_isp->cdm_reset_before_apply = false;
req_isp->hw_update_data.packet = packet; req_isp->hw_update_data.packet = packet;
req_isp->hw_update_data.num_exp = hw_update_data->num_exp;
req_isp->hw_update_data.mup_en = hw_update_data->mup_en;
req->pf_data.packet_handle = cmd->packet_handle; req->pf_data.packet_handle = cmd->packet_handle;
req->pf_data.packet_offset = cmd->offset; req->pf_data.packet_offset = cmd->offset;
req->pf_data.req = req; req->pf_data.req = req;
@@ -6969,7 +6973,7 @@ static int __cam_isp_ctx_config_dev_in_top_state(
} }
CAM_DBG(CAM_ISP, CAM_DBG(CAM_ISP,
"packet req-id:%lld, opcode:%d, num_entry:%d, num_fence_out: %d, num_fence_in: %d, ctx_idx: %u, link: 0x%x", "packet req-id:%lld, opcode:%d, num_entry:%d, num_fence_out: %d, num_fence_in: %d, ctx: %u, link: 0x%x",
packet->header.request_id, req_isp->hw_update_data.packet_opcode_type, packet->header.request_id, req_isp->hw_update_data.packet_opcode_type,
req_isp->num_cfg, req_isp->num_fence_map_out, req_isp->num_fence_map_in, req_isp->num_cfg, req_isp->num_fence_map_out, req_isp->num_fence_map_in,
ctx->ctx_id, ctx->link_hdl); ctx->ctx_id, ctx->link_hdl);
@@ -7011,9 +7015,12 @@ static int __cam_isp_ctx_config_dev_in_top_state(
add_req.link_hdl = ctx->link_hdl; add_req.link_hdl = ctx->link_hdl;
add_req.dev_hdl = ctx->dev_hdl; add_req.dev_hdl = ctx->dev_hdl;
add_req.req_id = req->request_id; add_req.req_id = req->request_id;
add_req.num_exp = ctx_isp->last_num_exp;
if (ctx_isp->is_shdr_master && req_isp->hw_update_data.mup_en) if (req_isp->hw_update_data.mup_en) {
add_req.num_exp = req_isp->hw_update_data.num_exp; add_req.num_exp = req_isp->hw_update_data.num_exp;
ctx_isp->last_num_exp = add_req.num_exp;
}
rc = ctx->ctx_crm_intf->add_req(&add_req); rc = ctx->ctx_crm_intf->add_req(&add_req);
if (rc) { if (rc) {
if (rc == -EBADR) if (rc == -EBADR)
@@ -7455,6 +7462,7 @@ static int __cam_isp_ctx_acquire_hw_v1(struct cam_context *ctx,
goto free_res; goto free_res;
} }
ctx_isp->last_num_exp = 0;
ctx_isp->support_consumed_addr = ctx_isp->support_consumed_addr =
(param.op_flags & CAM_IFE_CTX_CONSUME_ADDR_EN); (param.op_flags & CAM_IFE_CTX_CONSUME_ADDR_EN);
ctx_isp->is_tfe_shdr = (param.op_flags & CAM_IFE_CTX_SHDR_EN); ctx_isp->is_tfe_shdr = (param.op_flags & CAM_IFE_CTX_SHDR_EN);
@@ -7520,9 +7528,9 @@ static int __cam_isp_ctx_acquire_hw_v1(struct cam_context *ctx,
trace_cam_context_state("ISP", ctx); trace_cam_context_state("ISP", ctx);
CAM_INFO(CAM_ISP, CAM_INFO(CAM_ISP,
"Acquire success:session_hdl 0x%xs ctx_type %d ctx %u link: 0x%x hw_mgr_ctx: %u", "Acquire success:session_hdl 0x%xs ctx_type %d ctx %u link: 0x%x hw_mgr_ctx: %u is_shdr %d is_shdr_master %d",
ctx->session_hdl, isp_hw_cmd_args.u.ctx_type, ctx->ctx_id, ctx->link_hdl, ctx->session_hdl, isp_hw_cmd_args.u.ctx_type, ctx->ctx_id, ctx->link_hdl,
param.hw_mgr_ctx_id); param.hw_mgr_ctx_id, ctx_isp->is_tfe_shdr, ctx_isp->is_shdr_master);
kfree(acquire_hw_info); kfree(acquire_hw_info);
return rc; return rc;
@@ -7628,6 +7636,7 @@ static int __cam_isp_ctx_acquire_hw_v2(struct cam_context *ctx,
* Set feature flag if applicable * Set feature flag if applicable
* custom hw is supported only on v2 * custom hw is supported only on v2
*/ */
ctx_isp->last_num_exp = 0;
ctx_isp->custom_enabled = ctx_isp->custom_enabled =
(param.op_flags & CAM_IFE_CTX_CUSTOM_EN); (param.op_flags & CAM_IFE_CTX_CUSTOM_EN);
ctx_isp->use_frame_header_ts = ctx_isp->use_frame_header_ts =
@@ -7771,9 +7780,9 @@ static int __cam_isp_ctx_acquire_hw_v2(struct cam_context *ctx,
trace_cam_context_state("ISP", ctx); trace_cam_context_state("ISP", ctx);
CAM_INFO(CAM_ISP, CAM_INFO(CAM_ISP,
"Acquire success: session_hdl 0x%xs ctx_type %d ctx %u link: 0x%x hw_mgr_ctx: %u", "Acquire success: session_hdl 0x%xs ctx_type %d ctx %u link 0x%x hw_mgr_ctx %u is_shdr %d is_shdr_master %d",
ctx->session_hdl, isp_hw_cmd_args.u.ctx_type, ctx->ctx_id, ctx->link_hdl, ctx->session_hdl, isp_hw_cmd_args.u.ctx_type, ctx->ctx_id, ctx->link_hdl,
param.hw_mgr_ctx_id); param.hw_mgr_ctx_id, ctx_isp->is_tfe_shdr, ctx_isp->is_shdr_master);
kfree(acquire_hw_info); kfree(acquire_hw_info);
return rc; return rc;
@@ -7975,7 +7984,7 @@ static int __cam_isp_ctx_unlink_in_acquired(struct cam_context *ctx,
return rc; return rc;
} }
static int __cam_isp_ctx_get_dev_info_in_acquired(struct cam_context *ctx, static int __cam_isp_ctx_get_dev_info(struct cam_context *ctx,
struct cam_req_mgr_device_info *dev_info) struct cam_req_mgr_device_info *dev_info)
{ {
int rc = 0; int rc = 0;
@@ -8988,7 +8997,7 @@ static struct cam_ctx_ops
.crm_ops = { .crm_ops = {
.link = __cam_isp_ctx_link_in_acquired, .link = __cam_isp_ctx_link_in_acquired,
.unlink = __cam_isp_ctx_unlink_in_acquired, .unlink = __cam_isp_ctx_unlink_in_acquired,
.get_dev_info = __cam_isp_ctx_get_dev_info_in_acquired, .get_dev_info = __cam_isp_ctx_get_dev_info,
.process_evt = __cam_isp_ctx_process_evt, .process_evt = __cam_isp_ctx_process_evt,
.flush_req = __cam_isp_ctx_flush_req_in_top_state, .flush_req = __cam_isp_ctx_flush_req_in_top_state,
.dump_req = __cam_isp_ctx_dump_in_top_state, .dump_req = __cam_isp_ctx_dump_in_top_state,
@@ -9009,6 +9018,7 @@ static struct cam_ctx_ops
}, },
.crm_ops = { .crm_ops = {
.unlink = __cam_isp_ctx_unlink_in_ready, .unlink = __cam_isp_ctx_unlink_in_ready,
.get_dev_info = __cam_isp_ctx_get_dev_info,
.flush_req = __cam_isp_ctx_flush_req_in_ready, .flush_req = __cam_isp_ctx_flush_req_in_ready,
.dump_req = __cam_isp_ctx_dump_in_top_state, .dump_req = __cam_isp_ctx_dump_in_top_state,
}, },

View File

@@ -418,6 +418,7 @@ struct cam_isp_fcg_prediction_tracker {
* frames and indicates which prediction should be used * frames and indicates which prediction should be used
* @is_shdr: true, if usecase is shdr * @is_shdr: true, if usecase is shdr
* @is_shdr_master: Flag to indicate master context in shdr usecase * @is_shdr_master: Flag to indicate master context in shdr usecase
* @last_num_exp: Last num of exposure
* *
*/ */
struct cam_isp_context { struct cam_isp_context {
@@ -484,6 +485,7 @@ struct cam_isp_context {
struct cam_isp_fcg_prediction_tracker fcg_tracker; struct cam_isp_fcg_prediction_tracker fcg_tracker;
bool is_tfe_shdr; bool is_tfe_shdr;
bool is_shdr_master; bool is_shdr_master;
uint32_t last_num_exp;
}; };
/** /**

View File

@@ -2102,8 +2102,7 @@ static int cam_tfe_mgr_acquire_get_unified_structure_v2(
in_port->lane_cfg = in->lane_cfg; in_port->lane_cfg = in->lane_cfg;
in_port->num_valid_vc_dt = in->num_valid_vc_dt; in_port->num_valid_vc_dt = in->num_valid_vc_dt;
if (in_port->num_valid_vc_dt == 0 || if (in_port->num_valid_vc_dt == 0) {
in_port->num_valid_vc_dt >= CAM_ISP_TFE_VC_DT_CFG) {
CAM_ERR(CAM_ISP, "Invalid i/p arg invalid vc-dt: %d", CAM_ERR(CAM_ISP, "Invalid i/p arg invalid vc-dt: %d",
in->num_valid_vc_dt); in->num_valid_vc_dt);
rc = -EINVAL; rc = -EINVAL;

View File

@@ -278,6 +278,7 @@ static struct cam_tfe_csid_csi2_rx_reg_offset
.csi2_rst_done_shift_val = 27, .csi2_rst_done_shift_val = 27,
.csi2_irq_mask_all = 0xFFFFFFF, .csi2_irq_mask_all = 0xFFFFFFF,
.csi2_misr_enable_shift_val = 6, .csi2_misr_enable_shift_val = 6,
.csi2_vc_mode_shift_val = 2,
.csi2_capture_long_pkt_en_shift = 0, .csi2_capture_long_pkt_en_shift = 0,
.csi2_capture_short_pkt_en_shift = 1, .csi2_capture_short_pkt_en_shift = 1,
.csi2_capture_cphy_pkt_en_shift = 2, .csi2_capture_cphy_pkt_en_shift = 2,

View File

@@ -740,7 +740,7 @@ static int cam_tfe_csid_cid_reserve(struct cam_tfe_csid_hw *csid_hw,
/* CSID CSI2 v1.1 supports 4 vc */ /* CSID CSI2 v1.1 supports 4 vc */
for (i = 0; i < cid_reserv->in_port->num_valid_vc_dt; i++) { for (i = 0; i < cid_reserv->in_port->num_valid_vc_dt; i++) {
if (cid_reserv->in_port->dt[i] > 0x3f || if (cid_reserv->in_port->dt[i] > 0x3f ||
cid_reserv->in_port->vc[i] > 0x3) { cid_reserv->in_port->vc[i] > 0x1f) {
CAM_ERR(CAM_ISP, "CSID:%d Invalid vc:%d dt %d", CAM_ERR(CAM_ISP, "CSID:%d Invalid vc:%d dt %d",
csid_hw->hw_intf->hw_idx, csid_hw->hw_intf->hw_idx,
cid_reserv->in_port->vc[i], cid_reserv->in_port->vc[i],
@@ -842,7 +842,7 @@ static int cam_tfe_csid_path_reserve(struct cam_tfe_csid_hw *csid_hw,
/* CSID CSI2 v2.0 supports 4 vc */ /* CSID CSI2 v2.0 supports 4 vc */
for (i = 0; i < reserve->in_port->num_valid_vc_dt; i++) { for (i = 0; i < reserve->in_port->num_valid_vc_dt; i++) {
if (reserve->in_port->dt[i] > 0x3f || if (reserve->in_port->dt[i] > 0x3f ||
reserve->in_port->vc[i] > 0x3 || reserve->in_port->vc[i] > 0x1f ||
(reserve->sync_mode >= CAM_ISP_HW_SYNC_MAX)) { (reserve->sync_mode >= CAM_ISP_HW_SYNC_MAX)) {
CAM_ERR(CAM_ISP, "CSID:%d Invalid vc:%d dt %d mode:%d", CAM_ERR(CAM_ISP, "CSID:%d Invalid vc:%d dt %d mode:%d",
csid_hw->hw_intf->hw_idx, csid_hw->hw_intf->hw_idx,
@@ -1085,6 +1085,9 @@ static int cam_tfe_csid_enable_csi2(
/* rx cfg1 */ /* rx cfg1 */
val = (1 << csid_reg->csi2_reg->csi2_misr_enable_shift_val); val = (1 << csid_reg->csi2_reg->csi2_misr_enable_shift_val);
/* enable vc mode to configure vc with value more than 3 */
val |= (1 << csid_reg->csi2_reg->csi2_vc_mode_shift_val);
/* enable packet ecc correction */ /* enable packet ecc correction */
val |= 1; val |= 1;
cam_io_w_mb(val, soc_info->reg_map[0].mem_base + cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
@@ -1668,7 +1671,7 @@ static int cam_tfe_csid_enable_pxl_path(
pxl_reg->csid_pxl_ctrl_addr); pxl_reg->csid_pxl_ctrl_addr);
CAM_DBG(CAM_ISP, "CSID:%d IPP Ctrl val: 0x%x", CAM_DBG(CAM_ISP, "CSID:%d IPP Ctrl val: 0x%x",
csid_hw->hw_intf->hw_idx, val); csid_hw->hw_intf->hw_idx, val);
/* Enable the required pxl path interrupts */ /* Enable the required pxl path interrupts */
val = TFE_CSID_PATH_INFO_RST_DONE | val = TFE_CSID_PATH_INFO_RST_DONE |
@@ -3855,7 +3858,6 @@ irqreturn_t cam_tfe_csid_irq(int irq_num, void *data)
cam_io_w_mb(1, soc_info->reg_map[0].mem_base + cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
csid_reg->cmn_reg->csid_irq_cmd_addr); csid_reg->cmn_reg->csid_irq_cmd_addr);
/* Software register reset complete*/ /* Software register reset complete*/
if (irq_status[TFE_CSID_IRQ_REG_TOP]) if (irq_status[TFE_CSID_IRQ_REG_TOP])
complete(&csid_hw->csid_top_complete); complete(&csid_hw->csid_top_complete);

View File

@@ -137,8 +137,8 @@ static struct cam_tfe_camif_reg_data tfe770_camif_reg_data = {
.epoch1_irq_mask = 0x00000008, .epoch1_irq_mask = 0x00000008,
.eof_irq_mask = 0x00000002, .eof_irq_mask = 0x00000002,
.reg_update_irq_mask = 0x00000001, .reg_update_irq_mask = 0x00000001,
.error_irq_mask0 = 0x00010100, .error_irq_mask0 = 0x00210100,
.error_irq_mask2 = 0x00000023, .error_irq_mask2 = 0x00000223,
.subscribe_irq_mask = { .subscribe_irq_mask = {
0x00000000, 0x00000000,
0x00000007, 0x00000007,
@@ -1427,9 +1427,9 @@ struct cam_tfe_hw_info cam_tfe770 = {
0x00000000, 0x00000000,
}, },
.error_irq_mask = { .error_irq_mask = {
0x001F1F00, 0x003F1F00,
0x00000000, 0x00000000,
0x000000FF, 0x000002FF,
}, },
.bus_reg_irq_mask = { .bus_reg_irq_mask = {
0x00000002, 0x00000002,

View File

@@ -2231,6 +2231,7 @@ int cam_tfe_top_reserve(void *device_priv,
top_priv = (struct cam_tfe_top_priv *)device_priv; top_priv = (struct cam_tfe_top_priv *)device_priv;
args = (struct cam_tfe_acquire_args *)reserve_args; args = (struct cam_tfe_acquire_args *)reserve_args;
acquire_args = &args->tfe_in; acquire_args = &args->tfe_in;
top_priv->last_mup_val = 0;
for (i = 0; i < CAM_TFE_TOP_IN_PORT_MAX; i++) { for (i = 0; i < CAM_TFE_TOP_IN_PORT_MAX; i++) {
CAM_DBG(CAM_ISP, "i :%d res_id:%d state:%d", i, CAM_DBG(CAM_ISP, "i :%d res_id:%d state:%d", i,

View File

@@ -1197,7 +1197,6 @@ static void cam_req_mgr_reconfigure_link(struct cam_req_mgr_core_link *link,
if (dev->dev_info.trigger_on && !dev->dev_info.is_shdr_master) { if (dev->dev_info.trigger_on && !dev->dev_info.is_shdr_master) {
dev->is_active = is_active; dev->is_active = is_active;
tbl = dev->pd_tbl; tbl = dev->pd_tbl;
if (is_active) { if (is_active) {
tbl->dev_mask |= (1 << dev->dev_bit); tbl->dev_mask |= (1 << dev->dev_bit);
} else { } else {
@@ -1379,7 +1378,7 @@ static int __cam_req_mgr_send_req(struct cam_req_mgr_core_link *link,
/* For regular send requests */ /* For regular send requests */
for (i = 0; i < link->num_devs; i++) { for (i = 0; i < link->num_devs; i++) {
dev = &link->l_dev[i]; dev = &link->l_dev[i];
if (dev && dev->is_active) { if (dev) {
pd = dev->dev_info.p_delay; pd = dev->dev_info.p_delay;
if (pd >= CAM_PIPELINE_DELAY_MAX) { if (pd >= CAM_PIPELINE_DELAY_MAX) {
CAM_WARN(CAM_CRM, "pd %d greater than max", CAM_WARN(CAM_CRM, "pd %d greater than max",
@@ -1390,10 +1389,15 @@ static int __cam_req_mgr_send_req(struct cam_req_mgr_core_link *link,
if (!(dev->dev_info.trigger & trigger)) if (!(dev->dev_info.trigger & trigger))
continue; continue;
idx = apply_data[pd].idx;
slot = &dev->pd_tbl->slot[idx];
if (dev->dev_info.trigger_on && !dev->dev_info.is_shdr_master && if (dev->dev_info.trigger_on && !dev->dev_info.is_shdr_master &&
dev->dev_info.mode_switch_req == apply_data[pd].req_id && slot->ops.skip_isp_apply) {
link->wait_for_dual_trigger) CAM_DBG(CAM_CRM, "Skip slave switch req %d apply %lld",
dev->dev_info.mode_switch_req, apply_data[pd].req_id);
continue; continue;
}
if (apply_data[pd].skip_idx || if (apply_data[pd].skip_idx ||
(apply_data[pd].req_id < 0)) { (apply_data[pd].req_id < 0)) {
@@ -1419,8 +1423,6 @@ static int __cam_req_mgr_send_req(struct cam_req_mgr_core_link *link,
apply_req.dev_hdl = dev->dev_hdl; apply_req.dev_hdl = dev->dev_hdl;
apply_req.request_id = apply_req.request_id =
apply_data[pd].req_id; apply_data[pd].req_id;
idx = apply_data[pd].idx;
slot = &dev->pd_tbl->slot[idx];
apply_req.report_if_bubble = apply_req.report_if_bubble =
in_q->slot[idx].recover; in_q->slot[idx].recover;
@@ -1488,6 +1490,7 @@ static int __cam_req_mgr_send_req(struct cam_req_mgr_core_link *link,
else if (apply_req.dual_trigger_status == else if (apply_req.dual_trigger_status ==
CAM_REQ_DUAL_TRIGGER_ONE_EXPOSURE) CAM_REQ_DUAL_TRIGGER_ONE_EXPOSURE)
link->wait_for_dual_trigger = false; link->wait_for_dual_trigger = false;
if (prev_dual_trigger_status != link->wait_for_dual_trigger) if (prev_dual_trigger_status != link->wait_for_dual_trigger)
cam_req_mgr_reconfigure_link( cam_req_mgr_reconfigure_link(
link, dev, link->wait_for_dual_trigger); link, dev, link->wait_for_dual_trigger);
@@ -3360,38 +3363,6 @@ end:
return rc; return rc;
} }
static void cam_req_mgr_handle_exposure_change(
struct cam_req_mgr_core_link *link,
struct cam_req_mgr_add_request *add_req)
{
int i = 0;
struct cam_req_mgr_connected_device *device = NULL;
if (add_req->num_exp == CAM_REQ_DUAL_TRIGGER_TWO_EXPOSURE &&
!link->wait_for_dual_trigger) {
for (i = 0; i < link->num_devs; i++) {
device = &link->l_dev[i];
if (!device->dev_info.trigger_on || device->dev_info.is_shdr_master)
continue;
device->is_active = true;
device->dev_info.mode_switch_req = 0;
}
} else if (add_req->num_exp == CAM_REQ_DUAL_TRIGGER_ONE_EXPOSURE &&
link->wait_for_dual_trigger) {
for (i = 0; i < link->num_devs; i++) {
device = &link->l_dev[i];
if (!device->dev_info.trigger_on || device->dev_info.is_shdr_master)
continue;
device->dev_info.mode_switch_req = add_req->req_id;
}
}
}
/** /**
* cam_req_mgr_process_add_req() * cam_req_mgr_process_add_req()
* *
@@ -3409,6 +3380,7 @@ int cam_req_mgr_process_add_req(void *priv, void *data)
struct cam_req_mgr_add_request *add_req = NULL; struct cam_req_mgr_add_request *add_req = NULL;
struct cam_req_mgr_core_link *link = NULL; struct cam_req_mgr_core_link *link = NULL;
struct cam_req_mgr_connected_device *device = NULL; struct cam_req_mgr_connected_device *device = NULL;
struct cam_req_mgr_connected_device *dev_l = NULL;
struct cam_req_mgr_req_tbl *tbl = NULL; struct cam_req_mgr_req_tbl *tbl = NULL;
struct cam_req_mgr_tbl_slot *slot = NULL; struct cam_req_mgr_tbl_slot *slot = NULL;
struct crm_task_payload *task_data = NULL; struct crm_task_payload *task_data = NULL;
@@ -3461,6 +3433,7 @@ int cam_req_mgr_process_add_req(void *priv, void *data)
link_slot = &link->req.in_q->slot[idx]; link_slot = &link->req.in_q->slot[idx];
slot = &tbl->slot[idx]; slot = &tbl->slot[idx];
slot->ops.skip_isp_apply = false;
if ((add_req->skip_at_sof & 0xFF) > slot->inject_delay_at_sof) { if ((add_req->skip_at_sof & 0xFF) > slot->inject_delay_at_sof) {
slot->inject_delay_at_sof = (add_req->skip_at_sof & 0xFF); slot->inject_delay_at_sof = (add_req->skip_at_sof & 0xFF);
@@ -3565,9 +3538,9 @@ int cam_req_mgr_process_add_req(void *priv, void *data)
slot->state = CRM_REQ_STATE_PENDING; slot->state = CRM_REQ_STATE_PENDING;
slot->req_ready_map |= BIT(device->dev_bit); slot->req_ready_map |= BIT(device->dev_bit);
CAM_DBG(CAM_CRM, "idx %d dev_hdl %x req_id %lld pd %d ready_map %x", CAM_DBG(CAM_CRM, "idx %d dev_hdl %x req_id %lld pd %d ready_map %x tbl mask %x",
idx, add_req->dev_hdl, add_req->req_id, tbl->pd, idx, add_req->dev_hdl, add_req->req_id, tbl->pd,
slot->req_ready_map); slot->req_ready_map, tbl->dev_mask);
trace_cam_req_mgr_add_req(link, idx, add_req, tbl, device); trace_cam_req_mgr_add_req(link, idx, add_req, tbl, device);
@@ -3589,22 +3562,35 @@ int cam_req_mgr_process_add_req(void *priv, void *data)
return rc; return rc;
} }
if (!device->is_active && !device->dev_info.is_shdr_master && if (device->dev_info.trigger_on &&
!link->wait_for_dual_trigger) { add_req->num_exp == CAM_REQ_DUAL_TRIGGER_ONE_EXPOSURE) {
device->is_active = true; for (i = 0; i < link->num_devs; i++) {
if (slot->req_ready_map == tbl->dev_mask) { dev_l = &link->l_dev[i];
CAM_DBG(CAM_REQ, if (dev_l->dev_info.trigger_on && dev_l->dev_hdl != add_req->dev_hdl) {
"link 0x%x idx %d req_id %lld pd %d SLOT READY", slot->ops.skip_isp_apply = true;
link->link_hdl, idx, add_req->req_id, tbl->pd); slot->req_ready_map |= (1 << dev_l->dev_bit);
slot->state = CRM_REQ_STATE_READY; if (slot->req_ready_map == tbl->dev_mask) {
slot->state = CRM_REQ_STATE_READY;
CAM_DBG(CAM_REQ,
"SHDR link %x idx %d req_id %lld pd %d SLOT READY",
link->link_hdl, idx, add_req->req_id, tbl->pd);
}
break;
}
} }
mutex_unlock(&link->req.lock);
goto end;
} }
if (device->dev_info.is_shdr_master) if (device->dev_info.trigger_on &&
cam_req_mgr_handle_exposure_change(link, add_req); add_req->num_exp == CAM_REQ_DUAL_TRIGGER_TWO_EXPOSURE &&
!device->dev_info.is_shdr_master) {
tbl->dev_mask |= (1 << device->dev_bit);
if (slot->req_ready_map == tbl->dev_mask) {
slot->state = CRM_REQ_STATE_READY;
CAM_DBG(CAM_REQ,
"SHDR link 0x%x idx %d req_id %lld pd %d SLOT READY",
link->link_hdl, idx, add_req->req_id, tbl->pd);
}
}
mutex_unlock(&link->req.lock); mutex_unlock(&link->req.lock);
end: end:
@@ -4070,6 +4056,8 @@ static int cam_req_mgr_cb_add_req(struct cam_req_mgr_add_request *add_req)
dev_req->trigger_eof = add_req->trigger_eof; dev_req->trigger_eof = add_req->trigger_eof;
dev_req->skip_at_sof = add_req->skip_at_sof; dev_req->skip_at_sof = add_req->skip_at_sof;
dev_req->skip_at_eof = add_req->skip_at_eof; dev_req->skip_at_eof = add_req->skip_at_eof;
dev_req->num_exp = add_req->num_exp;
if (dev_req->trigger_eof) { if (dev_req->trigger_eof) {
atomic_inc(&link->eof_event_cnt); atomic_inc(&link->eof_event_cnt);
CAM_DBG(CAM_REQ, "Req_id: %llu, eof_event_cnt: %d", CAM_DBG(CAM_REQ, "Req_id: %llu, eof_event_cnt: %d",
@@ -4469,12 +4457,11 @@ static int __cam_req_mgr_setup_link_info(struct cam_req_mgr_core_link *link,
{ {
int rc = 0, i = 0, num_devices = 0; int rc = 0, i = 0, num_devices = 0;
struct cam_req_mgr_core_dev_link_setup link_data; struct cam_req_mgr_core_dev_link_setup link_data;
struct cam_req_mgr_connected_device *dev, *master_dev = NULL, *tmp_dev; struct cam_req_mgr_connected_device *dev;
struct cam_req_mgr_req_tbl *pd_tbl; struct cam_req_mgr_req_tbl *pd_tbl;
enum cam_pipeline_delay max_delay; enum cam_pipeline_delay max_delay;
enum cam_modeswitch_delay max_modeswitch; enum cam_modeswitch_delay max_modeswitch;
uint32_t num_trigger_devices = 0; uint32_t num_trigger_devices = 0;
int32_t master_dev_idx = -1;
if (link_info->version == VERSION_1) { if (link_info->version == VERSION_1) {
if (link_info->u.link_info_v1.num_devices > if (link_info->u.link_info_v1.num_devices >
@@ -4526,11 +4513,6 @@ static int __cam_req_mgr_setup_link_info(struct cam_req_mgr_core_link *link,
dev->dev_info.dev_hdl = dev->dev_hdl; dev->dev_info.dev_hdl = dev->dev_hdl;
rc = dev->ops->get_dev_info(&dev->dev_info); rc = dev->ops->get_dev_info(&dev->dev_info);
if (dev->dev_info.is_shdr_master) {
master_dev = dev;
master_dev_idx = i;
}
trace_cam_req_mgr_connect_device(link, &dev->dev_info); trace_cam_req_mgr_connect_device(link, &dev->dev_info);
if (link_info->version == VERSION_1) if (link_info->version == VERSION_1)
CAM_DBG(CAM_CRM, CAM_DBG(CAM_CRM,
@@ -4579,11 +4561,8 @@ static int __cam_req_mgr_setup_link_info(struct cam_req_mgr_core_link *link,
max_modeswitch = dev->dev_info.m_delay; max_modeswitch = dev->dev_info.m_delay;
} }
if (dev->dev_info.trigger_on) { if (dev->dev_info.trigger_on)
num_trigger_devices++; num_trigger_devices++;
if (dev->dev_info.is_shdr)
link->is_shdr = true;
}
dev->is_active = true; dev->is_active = true;
} }
@@ -4601,26 +4580,8 @@ static int __cam_req_mgr_setup_link_info(struct cam_req_mgr_core_link *link,
link_data.crm_cb = &cam_req_mgr_ops; link_data.crm_cb = &cam_req_mgr_ops;
link_data.max_delay = max_delay; link_data.max_delay = max_delay;
link_data.mode_switch_max_delay = max_modeswitch; link_data.mode_switch_max_delay = max_modeswitch;
if (num_trigger_devices == CAM_REQ_MGR_MAX_TRIGGERS) { if (num_trigger_devices == CAM_REQ_MGR_MAX_TRIGGERS)
link->dual_trigger = true; link->dual_trigger = true;
link->wait_for_dual_trigger = true;
}
if (link->dual_trigger && master_dev) {
for (i = 0; i < num_devices; i++) {
dev = &link->l_dev[i];
if (dev->dev_info.trigger_on) {
if (dev->dev_hdl == master_dev->dev_hdl)
continue;
if (master_dev_idx < i) {
tmp_dev = master_dev;
master_dev = dev;
dev = tmp_dev;
}
}
}
}
num_trigger_devices = 0; num_trigger_devices = 0;
for (i = 0; i < num_devices; i++) { for (i = 0; i < num_devices; i++) {
@@ -5495,6 +5456,45 @@ end:
return rc; return rc;
} }
int cam_req_mgr_rearrange_devs(
struct cam_req_mgr_core_link *link)
{
int i, rc = 0;
uint32_t master_dev_idx = 0, slave_dev_idx = 0;
struct cam_req_mgr_connected_device *dev, tmp_dev;
for (i = 0; i < link->num_devs; i++) {
dev = &link->l_dev[i];
if (!dev->dev_info.trigger_on)
continue;
rc = dev->ops->get_dev_info(&dev->dev_info);
if (rc) {
CAM_ERR(CAM_CRM, "Get dev info failed link %x dev %x",
link->link_hdl, dev->dev_hdl);
continue;
}
if (dev->dev_info.is_shdr_master)
master_dev_idx = i;
else
slave_dev_idx = i;
}
if (master_dev_idx < slave_dev_idx) {
tmp_dev = link->l_dev[master_dev_idx];
link->l_dev[master_dev_idx] = link->l_dev[slave_dev_idx];
link->l_dev[slave_dev_idx] = tmp_dev;
}
link->is_shdr = true;
link->wait_for_dual_trigger = true;
CAM_DBG(CAM_CRM, "link hdl %x wait for dual triger %d",
link->link_hdl, link->wait_for_dual_trigger);
return rc;
}
int cam_req_mgr_link_control(struct cam_req_mgr_link_control *control) int cam_req_mgr_link_control(struct cam_req_mgr_link_control *control)
{ {
int rc = 0, i; int rc = 0, i;
@@ -5547,6 +5547,9 @@ int cam_req_mgr_link_control(struct cam_req_mgr_link_control *control)
link->link_hdl); link->link_hdl);
rc = -EFAULT; rc = -EFAULT;
} }
if (link->dual_trigger)
rc = cam_req_mgr_rearrange_devs(link);
/* Wait for the streaming of sync link */ /* Wait for the streaming of sync link */
link->initial_skip = true; link->initial_skip = true;
/* Pause the timer before sensor stream on */ /* Pause the timer before sensor stream on */

View File

@@ -244,11 +244,16 @@ struct cam_req_mgr_apply {
* @num_dev : Number of devices need to be applied at this trigger point * @num_dev : Number of devices need to be applied at this trigger point
* @dev_hdl : Device handle who requested for special ops * @dev_hdl : Device handle who requested for special ops
* @apply_at_eof : Boolean Identifier for request to be applied at EOF * @apply_at_eof : Boolean Identifier for request to be applied at EOF
* @is_applied : Flag to identify if request is already applied to device
* in previous frame
* @skip_isp_apply : Flag to indicate skip apply req for ISP
*/ */
struct crm_tbl_slot_special_ops { struct crm_tbl_slot_special_ops {
uint32_t num_dev; uint32_t num_dev;
int32_t dev_hdl[MAX_DEV_FOR_SPECIAL_OPS]; int32_t dev_hdl[MAX_DEV_FOR_SPECIAL_OPS];
bool apply_at_eof; bool apply_at_eof;
bool is_applied;
bool skip_isp_apply;
}; };
/** /**