msm: camera: tfe: Add support for auto SHDR

This change adds support for activating and deactivating the ISP
device in the link when dual trigger mode is set on the link.

CRs-Fixed: 3374385
Change-Id: Ib6d25ab295d613fa5cd3edf1780362476920d74d
Signed-off-by: Ayush Kumar <quic_ayushkr@quicinc.com>

Author:       Ayush Kumar
Date:         2022-12-28 17:01:03 +05:30
Committed by: Alok Chauhan
Parent:       a90b42d899
Commit:       6e2c1b9671

12 changed files with 269 additions and 37 deletions
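
For orientation, a rough outline of the flow this change introduces: the master ISP context reports at apply time how many exposures the request programs, and the request manager (CRM) then activates or deactivates the slave trigger device on the link and decides whether to wait for SOF from both TFE contexts. Below is a minimal sketch of that decision logic in C; the shdr_* names are illustrative stand-ins, not the actual cam_req_mgr/cam_isp structures.

enum shdr_trigger_status {
	SHDR_TRIGGER_NONE,
	SHDR_TRIGGER_ONE_EXPOSURE,
	SHDR_TRIGGER_TWO_EXPOSURE,
};

struct shdr_link_state {
	int wait_for_dual_trigger;	/* wait for SOF from both TFE contexts */
	int slave_active;		/* slave ISP device takes part in apply */
};

/* Master ISP context: report the exposure count of the applied request */
static enum shdr_trigger_status shdr_report_exposures(int mup_en, int num_exp)
{
	if (!mup_en)
		return SHDR_TRIGGER_NONE;
	return (num_exp == 2) ? SHDR_TRIGGER_TWO_EXPOSURE :
				SHDR_TRIGGER_ONE_EXPOSURE;
}

/* CRM: reconfigure the link whenever the reported status changes */
static void shdr_update_link(struct shdr_link_state *link,
	enum shdr_trigger_status status)
{
	if (status == SHDR_TRIGGER_TWO_EXPOSURE) {
		link->wait_for_dual_trigger = 1;
		link->slave_active = 1;		/* apply to both contexts */
	} else if (status == SHDR_TRIGGER_ONE_EXPOSURE) {
		link->wait_for_dual_trigger = 0;
		link->slave_active = 0;		/* only the master is applied */
	}
}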

View File

@@ -5129,6 +5129,16 @@ static int __cam_isp_ctx_apply_req_in_activated_state(
ctx_isp->substate_activated = next_state;
ctx_isp->last_applied_req_id = apply->request_id;
ctx_isp->last_applied_jiffies = jiffies;
if (ctx_isp->is_tfe_shdr) {
if (ctx_isp->is_shdr_master && req_isp->hw_update_data.mup_en)
apply->dual_trigger_status = req_isp->hw_update_data.num_exp;
else
apply->dual_trigger_status = CAM_REQ_DUAL_TRIGGER_NONE;
} else {
apply->dual_trigger_status = CAM_REQ_DUAL_TRIGGER_NONE;
}
list_del_init(&req->list);
if (atomic_read(&ctx_isp->internal_recovery_set))
__cam_isp_ctx_enqueue_request_in_order(ctx, req, false);
@@ -6996,6 +7006,9 @@ static int __cam_isp_ctx_config_dev_in_top_state(
add_req.link_hdl = ctx->link_hdl;
add_req.dev_hdl = ctx->dev_hdl;
add_req.req_id = req->request_id;
if (ctx_isp->is_shdr_master && req_isp->hw_update_data.mup_en)
add_req.num_exp = req_isp->hw_update_data.num_exp;
rc = ctx->ctx_crm_intf->add_req(&add_req);
if (rc) {
if (rc == -EBADR)
@@ -7618,6 +7631,8 @@ static int __cam_isp_ctx_acquire_hw_v2(struct cam_context *ctx,
(param.op_flags & CAM_IFE_CTX_AEB_EN);
ctx_isp->mode_switch_en =
(param.op_flags & CAM_IFE_CTX_DYNAMIC_SWITCH_EN);
ctx_isp->is_tfe_shdr = (param.op_flags & CAM_IFE_CTX_SHDR_EN);
ctx_isp->is_shdr_master = (param.op_flags & CAM_IFE_CTX_SHDR_IS_MASTER);
/* Query the context bus comp group information */
ctx_isp->vfe_bus_comp_grp = kcalloc(CAM_IFE_BUS_COMP_NUM_MAX,
@@ -7955,6 +7970,8 @@ static int __cam_isp_ctx_get_dev_info_in_acquired(struct cam_context *ctx,
struct cam_req_mgr_device_info *dev_info)
{
int rc = 0;
struct cam_isp_context *ctx_isp =
(struct cam_isp_context *) ctx->ctx_priv;
dev_info->dev_hdl = ctx->dev_hdl;
strlcpy(dev_info->name, CAM_ISP_DEV_NAME, sizeof(dev_info->name));
@@ -7963,6 +7980,8 @@ static int __cam_isp_ctx_get_dev_info_in_acquired(struct cam_context *ctx,
dev_info->m_delay = CAM_MODESWITCH_DELAY_1;
dev_info->trigger = CAM_TRIGGER_POINT_SOF;
dev_info->trigger_on = true;
dev_info->is_shdr = ctx_isp->is_tfe_shdr;
dev_info->is_shdr_master = ctx_isp->is_shdr_master;
return rc;
}

View File

@@ -416,6 +416,8 @@ struct cam_isp_fcg_prediction_tracker {
* @hw_idx: Hardware ID
* @fcg_tracker: FCG prediction tracker containing number of previously skipped
* frames and indicates which prediction should be used
* @is_shdr: true, if usecase is shdr
* @is_shdr_master: Flag to indicate master context in shdr usecase
*
*/
struct cam_isp_context {
@@ -480,6 +482,8 @@ struct cam_isp_context {
bool mode_switch_en;
uint32_t hw_idx;
struct cam_isp_fcg_prediction_tracker fcg_tracker;
bool is_tfe_shdr;
bool is_shdr_master;
};
/**

View File

@@ -1395,7 +1395,7 @@ static int cam_tfe_hw_mgr_acquire_res_tfe_csid_pxl(
goto acquire_successful;
/* Acquire Left if not already acquired */
if (in_port->usage_type) {
if (in_port->usage_type || in_port->is_shdr_master) {
for (i = 0; i < CAM_TFE_CSID_HW_NUM_MAX; i++) {
if (!tfe_hw_mgr->csid_devices[i])
continue;
@@ -1650,7 +1650,7 @@ static int cam_tfe_hw_mgr_acquire_res_tfe_csid_rdi(
}
/* Acquire if not already acquired */
if (tfe_ctx->is_dual) {
if (tfe_ctx->is_dual || in_port->is_shdr_master) {
for (i = 0; i < CAM_TFE_CSID_HW_NUM_MAX; i++) {
if (!tfe_hw_mgr->csid_devices[i])
continue;
@@ -2134,6 +2134,13 @@ static int cam_tfe_mgr_acquire_get_unified_structure_v2(
CAM_ISP_TFE_FLAG_BAYER_BIN;
in_port->qcfa_bin = in->feature_flag &
CAM_ISP_TFE_FLAG_QCFA_BIN;
in_port->shdr_en = in->feature_flag &
CAM_ISP_TFE_FLAG_SHDR_MASTER_EN;
in_port->shdr_en |= in->feature_flag &
CAM_ISP_TFE_FLAG_SHDR_SLAVE_EN;
in_port->is_shdr_master = in->feature_flag &
CAM_ISP_TFE_FLAG_SHDR_MASTER_EN;
if (in_port->bayer_bin && in_port->qcfa_bin) {
CAM_ERR(CAM_ISP,
@@ -2274,6 +2281,8 @@ static int cam_tfe_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
struct cam_isp_tfe_acquire_hw_info *acquire_hw_info = NULL;
uint32_t input_size = 0;
bool lcr_enable = false;
bool is_shdr_en = false;
bool is_shdr_master = false;
CAM_DBG(CAM_ISP, "Enter...");
@@ -2359,9 +2368,17 @@ static int cam_tfe_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
/* Check any inport has dual tfe usage */
tfe_ctx->is_dual = false;
for (i = 0; i < acquire_hw_info->num_inputs; i++)
for (i = 0; i < acquire_hw_info->num_inputs; i++) {
if (in_port[i].usage_type)
tfe_ctx->is_dual = true;
if (in_port[i].shdr_en)
is_shdr_en = true;
if (in_port[i].is_shdr_master)
is_shdr_master = true;
}
if (is_shdr_en && !is_shdr_master)
tfe_ctx->is_shdr_slave = true;
for (i = 0; i < acquire_hw_info->num_inputs; i++) {
cam_tfe_hw_mgr_preprocess_port(tfe_ctx, &in_port[i], &num_pix_port_per_in,
@@ -2427,6 +2444,12 @@ static int cam_tfe_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
acquire_args->op_flags |=
CAM_IFE_CTX_CONSUME_ADDR_EN;
if (is_shdr_en) {
acquire_args->op_flags |= CAM_IFE_CTX_SHDR_EN;
if (is_shdr_master)
acquire_args->op_flags |= CAM_IFE_CTX_SHDR_IS_MASTER;
}
cam_tfe_hw_mgr_put_ctx(&tfe_hw_mgr->used_ctx_list, &tfe_ctx);
CAM_DBG(CAM_ISP, "Exit...(success)");
@@ -5115,6 +5138,9 @@ static int cam_tfe_mgr_prepare_hw_update(void *hw_mgr_priv,
mup_config.num_expoures = prepare_hw_data->num_exp;
mup_config.mup_en = prepare_hw_data->mup_en;
if (ctx->is_shdr_slave)
continue;
/*Add reg update */
rc = cam_isp_add_reg_update(prepare, &ctx->res_list_tfe_in,
ctx->base[i].idx, &prepare_hw_data->kmd_cmd_buff_info, false, &mup_config);
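
As a hedged illustration of the acquire path above: the TFE hw mgr folds the per-in-port SHDR flags into context-level op_flags, which __cam_isp_ctx_acquire_hw_v2 later reads. In the sketch below only the bit positions are taken from this change; the sketch_* names are stand-ins.

#define SKETCH_IFE_CTX_SHDR_EN		(1u << 7)	/* CAM_IFE_CTX_SHDR_EN */
#define SKETCH_IFE_CTX_SHDR_IS_MASTER	(1u << 8)	/* CAM_IFE_CTX_SHDR_IS_MASTER */

struct sketch_in_port {
	int shdr_en;		/* SHDR master or slave flag set by the UMD */
	int is_shdr_master;	/* SHDR master flag set by the UMD */
};

static unsigned int sketch_derive_op_flags(const struct sketch_in_port *in,
	int num_inputs)
{
	unsigned int op_flags = 0;
	int shdr_en = 0, shdr_master = 0, i;

	for (i = 0; i < num_inputs; i++) {
		if (in[i].shdr_en)
			shdr_en = 1;
		if (in[i].is_shdr_master)
			shdr_master = 1;
	}

	if (shdr_en) {
		op_flags |= SKETCH_IFE_CTX_SHDR_EN;
		if (shdr_master)
			op_flags |= SKETCH_IFE_CTX_SHDR_IS_MASTER;
	}
	return op_flags;
}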

View File

@@ -129,6 +129,7 @@ struct cam_tfe_cdm_user_data {
* @try_recovery_cnt Retry count for overflow recovery
* @current_mup Current MUP val
* @recovery_req_id The request id on which overflow recovery happens
* @is_shdr_slave Indicates whether the context is the slave in an shdr usecase
*/
struct cam_tfe_hw_mgr_ctx {
struct list_head list;

View File

@@ -49,6 +49,8 @@
#define CAM_IFE_CTX_SFE_EN BIT(4)
#define CAM_IFE_CTX_AEB_EN BIT(5)
#define CAM_IFE_CTX_DYNAMIC_SWITCH_EN BIT(6)
#define CAM_IFE_CTX_SHDR_EN BIT(7)
#define CAM_IFE_CTX_SHDR_IS_MASTER BIT(8)
/*
* Maximum configuration entry size - This is based on the

View File

@@ -91,6 +91,8 @@ struct cam_isp_tfe_in_port_generic_info {
uint32_t ipp_count;
uint32_t rdi_count;
uint32_t secure_mode;
bool shdr_en;
bool is_shdr_master;
struct cam_isp_tfe_out_port_generic_info *data;
};

View File

@@ -93,6 +93,8 @@ struct cam_tfe_camif_data {
uint32_t qcfa_bin;
uint32_t bayer_bin;
uint32_t core_cfg;
bool shdr_en;
bool is_shdr_master;
};
struct cam_tfe_rdi_data {
@@ -109,6 +111,8 @@ struct cam_tfe_rdi_data {
uint32_t left_last_pixel;
uint32_t first_line;
uint32_t last_line;
bool shdr_en;
bool is_shdr_master;
};
struct cam_tfe_ppp_data {
@@ -2206,6 +2210,8 @@ int cam_tfe_top_reserve(void *device_priv,
acquire_args->in_port->bayer_bin;
camif_data->core_cfg =
acquire_args->in_port->core_cfg;
camif_data->shdr_en = acquire_args->in_port->shdr_en;
camif_data->is_shdr_master = acquire_args->in_port->is_shdr_master;
CAM_DBG(CAM_ISP,
"TFE:%d pix_pattern:%d dsp_mode=%d",
@@ -2247,6 +2253,8 @@ int cam_tfe_top_reserve(void *device_priv,
acquire_args->in_port->line_start;
rdi_data->last_line =
acquire_args->in_port->line_end;
rdi_data->shdr_en = acquire_args->in_port->shdr_en;
rdi_data->is_shdr_master = acquire_args->in_port->is_shdr_master;
}
top_priv->in_rsrc[i].cdm_ops = acquire_args->cdm_ops;
@@ -2367,6 +2375,13 @@ static int cam_tfe_camif_resource_start(
(1 << rsrc_data->reg_data->ds4_c_srl_en_shift);
}
if (rsrc_data->shdr_en) {
val |= rsrc_data->core_cfg &
(1 << rsrc_data->reg_data->shdr_mode_shift);
if (!rsrc_data->is_shdr_master)
val |= rsrc_data->core_cfg &
(1 << rsrc_data->reg_data->extern_mup_shift);
}
cam_io_w_mb(val, rsrc_data->mem_base +
rsrc_data->common_reg->core_cfg_0);
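
The CAMIF start path above ORs two extra bits of the in-port core_cfg into core_cfg_0: one selects SHDR mode, and on the slave TFE a second one latches the MUP driven externally by the master. A minimal sketch follows; the shift values are placeholders, since the real ones come from the per-target camif reg data (shdr_mode_shift, extern_mup_shift).

#define SKETCH_SHDR_MODE_SHIFT	16	/* placeholder for shdr_mode_shift */
#define SKETCH_EXTERN_MUP_SHIFT	17	/* placeholder for extern_mup_shift */

static unsigned int sketch_shdr_core_cfg(unsigned int core_cfg, int is_master)
{
	unsigned int val = 0;

	/* enable SHDR mode when requested through core_cfg */
	val |= core_cfg & (1u << SKETCH_SHDR_MODE_SHIFT);

	/* slave TFE takes its MUP from the master (external MUP) */
	if (!is_master)
		val |= core_cfg & (1u << SKETCH_EXTERN_MUP_SHIFT);

	return val;
}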

View File

@@ -202,6 +202,9 @@ struct cam_tfe_camif_reg_data {
uint32_t ai_c_srl_en_shift;
uint32_t ds16_c_srl_en_shift;
uint32_t ds4_c_srl_en_shift;
uint32_t shdr_mode_shift;
uint32_t extern_mup_shift;
};
struct cam_tfe_camif_hw_info {

View File

@@ -93,6 +93,8 @@ void cam_req_mgr_core_link_reset(struct cam_req_mgr_core_link *link)
link->is_sending_req = false;
atomic_set(&link->eof_event_cnt, 0);
link->cont_empty_slots = 0;
link->is_shdr = false;
link->wait_for_dual_trigger = false;
__cam_req_mgr_reset_apply_data(link);
__cam_req_mgr_reset_state_monitor_array(link);
@@ -1182,6 +1184,31 @@ static int __cam_req_mgr_move_to_next_req_slot(
return rc;
}
static void cam_req_mgr_reconfigure_link(struct cam_req_mgr_core_link *link,
struct cam_req_mgr_connected_device *device, bool is_active)
{
int i = 0;
struct cam_req_mgr_connected_device *dev = NULL;
struct cam_req_mgr_req_tbl *tbl = NULL;
for (i = 0; i < link->num_devs; i++) {
dev = &link->l_dev[i];
if (dev->dev_info.trigger_on && !dev->dev_info.is_shdr_master) {
dev->is_active = is_active;
tbl = dev->pd_tbl;
if (is_active) {
tbl->dev_mask |= (1 << dev->dev_bit);
} else {
tbl->dev_mask &= ~(1 << dev->dev_bit);
dev->dev_info.mode_switch_req = 0;
}
}
}
}
/**
* __cam_req_mgr_send_req()
*
@@ -1206,6 +1233,7 @@ static int __cam_req_mgr_send_req(struct cam_req_mgr_core_link *link,
struct cam_req_mgr_tbl_slot *slot = NULL;
struct cam_req_mgr_apply *apply_data = NULL;
struct cam_req_mgr_state_monitor state;
bool prev_dual_trigger_status = false;
apply_req.link_hdl = link->link_hdl;
apply_req.report_if_bubble = 0;
@@ -1252,6 +1280,12 @@ static int __cam_req_mgr_send_req(struct cam_req_mgr_core_link *link,
continue;
}
if (!dev->is_active) {
CAM_DBG(CAM_CRM, "Device %x linked with link %x is not active",
dev->dev_hdl, link->link_hdl);
continue;
}
for (j = 0; j < slot->ops.num_dev; j++) {
if (dev->dev_hdl == slot->ops.dev_hdl[j]) {
found = true;
@@ -1345,7 +1379,7 @@ static int __cam_req_mgr_send_req(struct cam_req_mgr_core_link *link,
/* For regular send requests */
for (i = 0; i < link->num_devs; i++) {
dev = &link->l_dev[i];
if (dev) {
if (dev && dev->is_active) {
pd = dev->dev_info.p_delay;
if (pd >= CAM_PIPELINE_DELAY_MAX) {
CAM_WARN(CAM_CRM, "pd %d greater than max",
@@ -1356,6 +1390,11 @@ static int __cam_req_mgr_send_req(struct cam_req_mgr_core_link *link,
if (!(dev->dev_info.trigger & trigger))
continue;
if (dev->dev_info.trigger_on && !dev->dev_info.is_shdr_master &&
dev->dev_info.mode_switch_req == apply_data[pd].req_id &&
link->wait_for_dual_trigger)
continue;
if (apply_data[pd].skip_idx ||
(apply_data[pd].req_id < 0)) {
CAM_DBG(CAM_CRM,
@@ -1421,6 +1460,7 @@ static int __cam_req_mgr_send_req(struct cam_req_mgr_core_link *link,
continue;
apply_req.trigger_point = trigger;
apply_req.dual_trigger_status = CAM_REQ_DUAL_TRIGGER_NONE;
CAM_DBG(CAM_REQ,
"SEND: link_hdl %x dev %s pd %d req_id %lld",
link->link_hdl, dev->dev_info.name,
@@ -1440,6 +1480,19 @@ static int __cam_req_mgr_send_req(struct cam_req_mgr_core_link *link,
state.frame_id = -1;
__cam_req_mgr_update_state_monitor_array(link, &state);
if (link->is_shdr && dev->dev_info.is_shdr_master) {
prev_dual_trigger_status = link->wait_for_dual_trigger;
if (apply_req.dual_trigger_status ==
CAM_REQ_DUAL_TRIGGER_TWO_EXPOSURE)
link->wait_for_dual_trigger = true;
else if (apply_req.dual_trigger_status ==
CAM_REQ_DUAL_TRIGGER_ONE_EXPOSURE)
link->wait_for_dual_trigger = false;
if (prev_dual_trigger_status != link->wait_for_dual_trigger)
cam_req_mgr_reconfigure_link(
link, dev, link->wait_for_dual_trigger);
}
if (pd == link->min_delay)
req_applied_to_min_pd = apply_req.request_id;
@@ -3307,6 +3360,38 @@ end:
return rc;
}
static void cam_req_mgr_handle_exposure_change(
struct cam_req_mgr_core_link *link,
struct cam_req_mgr_add_request *add_req)
{
int i = 0;
struct cam_req_mgr_connected_device *device = NULL;
if (add_req->num_exp == CAM_REQ_DUAL_TRIGGER_TWO_EXPOSURE &&
!link->wait_for_dual_trigger) {
for (i = 0; i < link->num_devs; i++) {
device = &link->l_dev[i];
if (!device->dev_info.trigger_on || device->dev_info.is_shdr_master)
continue;
device->is_active = true;
device->dev_info.mode_switch_req = 0;
}
} else if (add_req->num_exp == CAM_REQ_DUAL_TRIGGER_ONE_EXPOSURE &&
link->wait_for_dual_trigger) {
for (i = 0; i < link->num_devs; i++) {
device = &link->l_dev[i];
if (!device->dev_info.trigger_on || device->dev_info.is_shdr_master)
continue;
device->dev_info.mode_switch_req = add_req->req_id;
}
}
}
/**
* cam_req_mgr_process_add_req()
*
@@ -3498,8 +3583,30 @@ int cam_req_mgr_process_add_req(void *priv, void *data)
state.frame_id = -1;
__cam_req_mgr_update_state_monitor_array(link, &state);
}
mutex_unlock(&link->req.lock);
if (!link->is_shdr && !device->dev_info.trigger_on) {
mutex_unlock(&link->req.lock);
return rc;
}
if (!device->is_active && !device->dev_info.is_shdr_master &&
!link->wait_for_dual_trigger) {
device->is_active = true;
if (slot->req_ready_map == tbl->dev_mask) {
CAM_DBG(CAM_REQ,
"link 0x%x idx %d req_id %lld pd %d SLOT READY",
link->link_hdl, idx, add_req->req_id, tbl->pd);
slot->state = CRM_REQ_STATE_READY;
}
mutex_unlock(&link->req.lock);
goto end;
}
if (device->dev_info.is_shdr_master)
cam_req_mgr_handle_exposure_change(link, add_req);
mutex_unlock(&link->req.lock);
end:
return rc;
}
@@ -4206,8 +4313,6 @@ end:
return rc;
}
/**
* cam_req_mgr_cb_notify_trigger()
*
@@ -4290,7 +4395,7 @@ static int cam_req_mgr_cb_notify_trigger(
(trigger == CAM_TRIGGER_POINT_SOF))
link->watchdog->pause_timer = false;
if (link->dual_trigger) {
if (link->dual_trigger && link->wait_for_dual_trigger) {
if ((trigger_id >= 0) && (trigger_id <
CAM_REQ_MGR_MAX_TRIGGERS)) {
link->trigger_cnt[trigger_id][trigger]++;
@@ -4364,11 +4469,13 @@ static int __cam_req_mgr_setup_link_info(struct cam_req_mgr_core_link *link,
{
int rc = 0, i = 0, num_devices = 0;
struct cam_req_mgr_core_dev_link_setup link_data;
struct cam_req_mgr_connected_device *dev;
struct cam_req_mgr_connected_device *dev, *master_dev = NULL, *tmp_dev;
struct cam_req_mgr_req_tbl *pd_tbl;
enum cam_pipeline_delay max_delay;
enum cam_modeswitch_delay max_modeswitch;
uint32_t num_trigger_devices = 0;
int32_t master_dev_idx = -1;
if (link_info->version == VERSION_1) {
if (link_info->u.link_info_v1.num_devices >
CAM_REQ_MGR_MAX_HANDLES)
@@ -4419,6 +4526,11 @@ static int __cam_req_mgr_setup_link_info(struct cam_req_mgr_core_link *link,
dev->dev_info.dev_hdl = dev->dev_hdl;
rc = dev->ops->get_dev_info(&dev->dev_info);
if (dev->dev_info.is_shdr_master) {
master_dev = dev;
master_dev_idx = i;
}
trace_cam_req_mgr_connect_device(link, &dev->dev_info);
if (link_info->version == VERSION_1)
CAM_DBG(CAM_CRM,
@@ -4467,8 +4579,13 @@ static int __cam_req_mgr_setup_link_info(struct cam_req_mgr_core_link *link,
max_modeswitch = dev->dev_info.m_delay;
}
if (dev->dev_info.trigger_on)
if (dev->dev_info.trigger_on) {
num_trigger_devices++;
if (dev->dev_info.is_shdr)
link->is_shdr = true;
}
dev->is_active = true;
}
if (num_trigger_devices > CAM_REQ_MGR_MAX_TRIGGERS) {
@@ -4484,8 +4601,26 @@ static int __cam_req_mgr_setup_link_info(struct cam_req_mgr_core_link *link,
link_data.crm_cb = &cam_req_mgr_ops;
link_data.max_delay = max_delay;
link_data.mode_switch_max_delay = max_modeswitch;
if (num_trigger_devices == CAM_REQ_MGR_MAX_TRIGGERS)
if (num_trigger_devices == CAM_REQ_MGR_MAX_TRIGGERS) {
link->dual_trigger = true;
link->wait_for_dual_trigger = true;
}
if (link->dual_trigger && master_dev) {
for (i = 0; i < num_devices; i++) {
dev = &link->l_dev[i];
if (dev->dev_info.trigger_on) {
if (dev->dev_hdl == master_dev->dev_hdl)
continue;
if (master_dev_idx < i) {
tmp_dev = master_dev;
master_dev = dev;
dev = tmp_dev;
}
}
}
}
num_trigger_devices = 0;
for (i = 0; i < num_devices; i++) {
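
Tying the CRM changes together: when the master's reported dual_trigger_status changes, cam_req_mgr_reconfigure_link moves the slave trigger device in or out of its request table's dev_mask, so slots can turn READY without the slave and __cam_req_mgr_send_req skips devices that are not active. A simplified sketch of that toggling (sketch_* types are stand-ins):

struct sketch_crm_dev {
	int trigger_on;
	int is_shdr_master;
	int is_active;
	unsigned int dev_bit;
	unsigned int *pd_dev_mask;	/* dev_mask of the device's req table */
};

static void sketch_reconfigure_link(struct sketch_crm_dev *devs, int num_devs,
	int activate)
{
	int i;

	for (i = 0; i < num_devs; i++) {
		struct sketch_crm_dev *dev = &devs[i];

		/* only slave trigger devices are toggled; the master stays on */
		if (!dev->trigger_on || dev->is_shdr_master)
			continue;

		dev->is_active = activate;
		if (activate)
			*dev->pd_dev_mask |= (1u << dev->dev_bit);
		else
			*dev->pd_dev_mask &= ~(1u << dev->dev_bit);
	}
}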

View File

@@ -1,8 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _CAM_REQ_MGR_CORE_H_
#define _CAM_REQ_MGR_CORE_H_
@@ -405,6 +406,7 @@ struct cam_req_mgr_req_data {
* @dev_info : holds dev characteristics such as pipeline delay, dev name
* @ops : holds func pointer to call methods on this device
* @parent : pvt data - like link which this dev hdl belongs to
* @is_active : Indicates whether the device is active in an auto shdr usecase
*/
struct cam_req_mgr_connected_device {
int32_t dev_hdl;
@@ -413,6 +415,7 @@ struct cam_req_mgr_connected_device {
struct cam_req_mgr_device_info dev_info;
struct cam_req_mgr_kmd_ops *ops;
void *parent;
bool is_active;
};
/**
@@ -473,6 +476,8 @@ struct cam_req_mgr_connected_device {
* @try_for_internal_recovery : If the link stalls try for RT internal recovery
* @properties_mask : Indicates if current link enables some special properties
* @cont_empty_slots : Continuous empty slots
* @is_shdr : flag to indicate auto shdr usecase without SFE
* @wait_for_dual_trigger: Flag to indicate whether to wait for second epoch in dual trigger
*/
struct cam_req_mgr_core_link {
int32_t link_hdl;
@@ -517,6 +522,8 @@ struct cam_req_mgr_core_link {
bool is_sending_req;
uint32_t properties_mask;
uint32_t cont_empty_slots;
bool is_shdr;
bool wait_for_dual_trigger;
};
/**

View File

@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _CAM_REQ_MGR_INTERFACE_H
@@ -141,6 +141,14 @@ enum cam_modeswitch_delay {
#define CAM_TRIGGER_POINT_EOF (1 << 1)
#define CAM_TRIGGER_MAX_POINTS 2
enum cam_req_mgr_dual_trigger {
CAM_REQ_DUAL_TRIGGER_NONE,
CAM_REQ_DUAL_TRIGGER_ONE_EXPOSURE,
CAM_REQ_DUAL_TRIGGER_TWO_EXPOSURE,
CAM_REQ_DUAL_TRIGGER_MAX,
};
/**
* enum cam_req_status
* @brief : enumerator for request status
@@ -304,6 +312,7 @@ struct cam_req_mgr_error_notify {
* by not sending request to devices. ex: IFE and Flash
* @trigger_eof : to identify that one of the device at this slot needs
* to be apply at EOF
* @num_exp : Number of exposures associated with the request
*/
struct cam_req_mgr_add_request {
int32_t link_hdl;
@@ -311,6 +320,7 @@ struct cam_req_mgr_add_request {
uint64_t req_id;
uint32_t skip_at_sof;
uint32_t skip_at_eof;
uint32_t num_exp;
bool trigger_eof;
};
@@ -333,7 +343,10 @@ struct cam_req_mgr_notify_stop {
* @p_delay : delay between time settings applied and take effect
* @m_delay : delay between time modeswitch settings applied and take effect
* @trigger : Trigger point for the client
* @mode_switch_req : Request id on which a sensor mode switch was observed on the device
* @trigger_on : This device provides trigger
* @is_shdr : Flag to indicate auto shdr usecase without SFE
* @is_shdr_master : Flag to indicate master dev in auto shdr usecase without SFE
*/
struct cam_req_mgr_device_info {
int32_t dev_hdl;
@@ -342,7 +355,10 @@ struct cam_req_mgr_device_info {
enum cam_pipeline_delay p_delay;
enum cam_modeswitch_delay m_delay;
uint32_t trigger;
uint64_t mode_switch_req;
bool trigger_on;
bool is_shdr;
bool is_shdr_master;
};
/**
@@ -370,13 +386,12 @@ struct cam_req_mgr_core_dev_link_setup {
* @link_hdl : link identifier
* @dev_hdl : device handle for cross check
* @request_id : request id settings to apply
* @last_applied_max_pd_req : Last applied request on highest pd device
* -1 is considered invalid
* @last_applied_max_pd_req : Last applied req on highest pd dev -1 is considered invalid
* @report_if_bubble : report to crm if failure in applying
* @trigger_point : the trigger point of this apply
* @re_apply : to skip re_apply for buf_done request
* @recovery : Indicate if it is recovery req
*
* @dual_trigger_status : Enum to indicate status of dual trigger
*/
struct cam_req_mgr_apply_request {
int32_t link_hdl;
@@ -387,6 +402,7 @@ struct cam_req_mgr_apply_request {
uint32_t trigger_point;
bool re_apply;
bool recovery;
enum cam_req_mgr_dual_trigger dual_trigger_status;
};
/**
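
In terms of the interface above, a trigger device advertises is_shdr/is_shdr_master through get_dev_info, forwards num_exp with add_req, and reports dual_trigger_status from its apply callback. The sketch below shows a master ISP device filling these fields; the sketch_* structs only mirror the relevant members and are not part of this header.

struct sketch_dev_info {
	int trigger_on;
	int is_shdr;
	int is_shdr_master;
};

struct sketch_apply {
	unsigned long long request_id;
	int dual_trigger_status;	/* enum cam_req_mgr_dual_trigger */
};

/* get_dev_info(): master ISP context in an auto SHDR link */
static void sketch_get_dev_info(struct sketch_dev_info *info)
{
	info->trigger_on = 1;
	info->is_shdr = 1;
	info->is_shdr_master = 1;
}

/* apply_req(): report how many exposures this request programs */
static void sketch_apply_req(struct sketch_apply *apply, int mup_en, int num_exp)
{
	apply->dual_trigger_status = 0;		/* CAM_REQ_DUAL_TRIGGER_NONE */
	if (mup_en)
		apply->dual_trigger_status = num_exp;	/* ONE or TWO exposure */
}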

View File

@@ -96,6 +96,8 @@
/* Feature Flag indicators */
#define CAM_ISP_TFE_FLAG_QCFA_BIN BIT(0)
#define CAM_ISP_TFE_FLAG_BAYER_BIN BIT(1)
#define CAM_ISP_TFE_FLAG_SHDR_MASTER_EN BIT(2)
#define CAM_ISP_TFE_FLAG_SHDR_SLAVE_EN BIT(3)
/* Query devices */
/**
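
For completeness, the userspace side: when acquiring the two TFE pipelines for auto SHDR, one in_port sets CAM_ISP_TFE_FLAG_SHDR_MASTER_EN and the other CAM_ISP_TFE_FLAG_SHDR_SLAVE_EN in its feature_flag. A minimal illustration follows; the surrounding struct and helper are stand-ins, only the flag values come from this header.

#define SKETCH_TFE_FLAG_SHDR_MASTER_EN	(1u << 2) /* CAM_ISP_TFE_FLAG_SHDR_MASTER_EN */
#define SKETCH_TFE_FLAG_SHDR_SLAVE_EN	(1u << 3) /* CAM_ISP_TFE_FLAG_SHDR_SLAVE_EN */

struct sketch_tfe_in_port {
	unsigned int feature_flag;
};

static void sketch_mark_shdr_ports(struct sketch_tfe_in_port *master_port,
	struct sketch_tfe_in_port *slave_port)
{
	/* one context drives MUP and CRM triggers: the SHDR master */
	master_port->feature_flag |= SKETCH_TFE_FLAG_SHDR_MASTER_EN;
	/* the other follows the master's MUP externally: the SHDR slave */
	slave_port->feature_flag |= SKETCH_TFE_FLAG_SHDR_SLAVE_EN;
}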