msm: camera: isp: Refactor packet parser APIs

This commit refactors the ISP packet parser APIs to remove
the loops based on resource and split ids, which saves the
redundant iterations.
It also breaks some big functions into smaller util functions,
making the code more modular and reusable from multiple places.
It removes a lot of duplicate code as well.

CRs-Fixed: 3321317
Change-Id: Ifdfd13387c6e70cae40b0ef4a675bca8121c548d
Signed-off-by: Gaurav Jindal <quic_gjindal@quicinc.com>
Authored by Gaurav Jindal on 2022-10-21 20:36:58 +05:30
committed by Camera Software Integration
parent e39cae73bd
commit 59e58d589b
4 changed files with 578 additions and 550 deletions
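The core of the change is visible in the cam_isp_add_cmd_buf_update callers: instead of handing the whole hw_mgr_res group plus a base index to the helper and letting it loop over every split to find the matching resource, the caller now resolves the per-split resource node (and its hw_intf) once via split_id and passes only that down. A minimal, self-contained C sketch of that pattern is below; the struct and function names mirror the diff, but the types are simplified stand-ins, not the real camera-driver definitions.

```c
/* Simplified sketch of the "drop the split loop" pattern from this commit.
 * Types are stand-ins for the kernel's cam_isp_hw_mgr_res /
 * cam_isp_resource_node structures.
 */
#include <stdio.h>
#include <stddef.h>

#define HW_SPLIT_MAX 2

struct resource_node {
	unsigned int hw_idx;
	unsigned int res_id;
};

struct hw_mgr_res {
	struct resource_node *hw_res[HW_SPLIT_MAX]; /* one node per split */
};

/* Old style: helper receives the whole group and scans every split for
 * the node whose hw index matches base_idx.
 */
static int add_cmd_buf_update_old(struct hw_mgr_res *grp, unsigned int base_idx)
{
	for (unsigned int i = 0; i < HW_SPLIT_MAX; i++) {
		if (!grp->hw_res[i] || grp->hw_res[i]->hw_idx != base_idx)
			continue;
		printf("old: programming res 0x%x on hw %u\n",
		       grp->hw_res[i]->res_id, base_idx);
	}
	return 0;
}

/* New style: the caller resolves the node once via split_id, so the helper
 * touches a single resource and the per-split scan disappears.
 */
static int add_cmd_buf_update_new(struct resource_node *res)
{
	printf("new: programming res 0x%x on hw %u\n", res->res_id, res->hw_idx);
	return 0;
}

int main(void)
{
	struct resource_node left = { .hw_idx = 0, .res_id = 0x10 };
	struct hw_mgr_res grp = { .hw_res = { &left, NULL } };
	unsigned int split_id = 0;       /* known from blob_info->base_info */

	add_cmd_buf_update_old(&grp, 0); /* iterates over both splits */

	if (grp.hw_res[split_id])        /* direct index, no loop */
		add_cmd_buf_update_new(grp.hw_res[split_id]);
	return 0;
}
```

The same idea drives the cam_isp_add_io_buffers change further down: the long parameter list is bundled into struct cam_isp_io_buf_info and the per-split/per-plane work is split into small util functions.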

View File

@@ -98,6 +98,19 @@ static int cam_ife_mgr_prog_default_settings(
static int cam_ife_mgr_cmd_get_sof_timestamp(struct cam_ife_hw_mgr_ctx *ife_ctx,
uint64_t *time_stamp, uint64_t *boot_time_stamp, uint64_t *prev_time_stamp);
static void *cam_ife_hw_mgr_get_hw_intf(
struct cam_isp_ctx_base_info *base)
{
if (base->hw_type == CAM_ISP_HW_TYPE_CSID)
return g_ife_hw_mgr.csid_devices[base->idx];
else if (base->hw_type == CAM_ISP_HW_TYPE_SFE)
return g_ife_hw_mgr.sfe_devices[base->idx]->hw_intf;
else if (base->hw_type == CAM_ISP_HW_TYPE_VFE)
return g_ife_hw_mgr.ife_devices[base->idx]->hw_intf;
else
return NULL;
}
static int cam_ife_mgr_update_core_info_to_cpas(struct cam_ife_hw_mgr_ctx *ctx,
bool set_port)
{
@@ -7647,6 +7660,7 @@ static int cam_isp_blob_ubwc_update(
struct cam_kmd_buf_info *kmd_buf_info;
struct cam_ife_hw_mgr_ctx *ctx = NULL;
struct cam_isp_hw_mgr_res *hw_mgr_res;
struct cam_hw_intf *hw_intf = NULL;
uint32_t res_id_out, i;
uint32_t total_used_bytes = 0;
uint32_t kmd_buf_remain_size;
@@ -7714,10 +7728,21 @@ static int cam_isp_blob_ubwc_update(
goto end;
}
hw_intf = cam_ife_hw_mgr_get_hw_intf(blob_info->base_info);
if (!hw_intf || blob_info->base_info->split_id >= CAM_ISP_HW_SPLIT_MAX) {
CAM_ERR(CAM_ISP,
"Invalid base %u type %u", blob_info->base_info->idx,
blob_info->base_info->hw_type);
return rc;
}
if (!hw_mgr_res->hw_res[blob_info->base_info->split_id])
break;
rc = cam_isp_add_cmd_buf_update(
hw_mgr_res, blob_type,
hw_mgr_res->hw_res[blob_info->base_info->split_id], hw_intf,
blob_type,
blob_type_hw_cmd_map[blob_type],
blob_info->base_info->idx,
(void *)cmd_buf_addr,
kmd_buf_remain_size,
(void *)ubwc_plane_cfg,
@@ -7806,6 +7831,7 @@ static int cam_isp_blob_ubwc_update_v2(
struct cam_kmd_buf_info *kmd_buf_info;
struct cam_ife_hw_mgr_ctx *ctx = NULL;
struct cam_isp_hw_mgr_res *hw_mgr_res;
struct cam_hw_intf *hw_intf = NULL;
uint32_t res_id_out, i;
uint32_t total_used_bytes = 0;
uint32_t kmd_buf_remain_size;
@@ -7847,6 +7873,26 @@ static int cam_isp_blob_ubwc_update_v2(
goto end;
}
hw_mgr_res = &ctx->res_list_ife_out[res_id_out];
if (!hw_mgr_res) {
CAM_ERR(CAM_ISP, "Invalid hw_mgr_res");
rc = -EINVAL;
goto end;
}
hw_intf = cam_ife_hw_mgr_get_hw_intf(blob_info->base_info);
if (!hw_intf || blob_info->base_info->split_id >= CAM_ISP_HW_SPLIT_MAX) {
CAM_ERR(CAM_ISP,
"Invalid base %u type %u", blob_info->base_info->idx,
blob_info->base_info->hw_type);
return rc;
}
if (!hw_mgr_res->hw_res[blob_info->base_info->split_id])
goto end;
if ((kmd_buf_info->used_bytes
+ total_used_bytes) < kmd_buf_info->size) {
kmd_buf_remain_size = kmd_buf_info->size -
@@ -7864,21 +7910,15 @@ static int cam_isp_blob_ubwc_update_v2(
cmd_buf_addr = kmd_buf_info->cpu_addr +
kmd_buf_info->used_bytes/4 +
total_used_bytes/4;
hw_mgr_res = &ctx->res_list_ife_out[res_id_out];
if (!hw_mgr_res) {
CAM_ERR(CAM_ISP, "Invalid hw_mgr_res");
rc = -EINVAL;
goto end;
}
rc = cam_isp_get_generic_ubwc_data_v2(ubwc_plane_cfg,
ubwc_config->api_version, &generic_ubwc_cfg);
rc = cam_isp_add_cmd_buf_update(
hw_mgr_res, blob_type,
hw_mgr_res->hw_res[blob_info->base_info->split_id], hw_intf,
blob_type,
blob_type_hw_cmd_map[blob_type],
blob_info->base_info->idx,
(void *)cmd_buf_addr,
kmd_buf_remain_size,
(void *)&generic_ubwc_cfg,
@@ -8336,6 +8376,7 @@ static int cam_isp_blob_sfe_update_fetch_core_cfg(
struct cam_isp_hw_mgr_res *hw_mgr_res;
struct cam_kmd_buf_info *kmd_buf_info;
struct cam_ife_hw_mgr_ctx *ctx = NULL;
struct cam_hw_intf *hw_intf = NULL;
ctx = prepare->ctxt_to_hw_map;
if (prepare->num_hw_update_entries + 1 >=
@@ -8360,6 +8401,18 @@ static int cam_isp_blob_sfe_update_fetch_core_cfg(
return rc;
}
hw_intf = cam_ife_hw_mgr_get_hw_intf(blob_info->base_info);
if (!hw_intf || blob_info->base_info->split_id >= CAM_ISP_HW_SPLIT_MAX) {
CAM_ERR(CAM_ISP,
"Invalid base %u type %u", blob_info->base_info->idx,
blob_info->base_info->hw_type);
return -EINVAL;
}
if (!hw_mgr_res->hw_res[blob_info->base_info->split_id])
return 0;
cpu_addr = kmd_buf_info->cpu_addr +
(kmd_buf_info->used_bytes / 4) +
(total_used_bytes / 4);
@@ -8386,11 +8439,12 @@ static int cam_isp_blob_sfe_update_fetch_core_cfg(
ctx->scratch_buf_info.sfe_scratch_config->updated_num_exp);
rc = cam_isp_add_cmd_buf_update(
hw_mgr_res, blob_type,
hw_mgr_res->hw_res[blob_info->base_info->split_id], hw_intf,
blob_type,
CAM_ISP_HW_CMD_RM_ENABLE_DISABLE,
blob_info->base_info->idx,
(void *)cpu_addr, remain_size,
(void *)&enable, &used_bytes);
if (rc < 0) {
CAM_ERR(CAM_ISP,
"Failed to dynamically %s SFE: %u RM: %u bytes_used: %u rc: %d",
@@ -8422,6 +8476,7 @@ static int cam_isp_blob_hfr_update(
struct cam_kmd_buf_info *kmd_buf_info;
struct cam_ife_hw_mgr_ctx *ctx = NULL;
struct cam_isp_hw_mgr_res *hw_mgr_res;
struct cam_hw_intf *hw_intf = NULL;
uint32_t res_id_out, i;
uint32_t total_used_bytes = 0;
uint32_t kmd_buf_remain_size;
@@ -8456,6 +8511,22 @@ static int cam_isp_blob_hfr_update(
return -EINVAL;
}
if (hw_type == CAM_ISP_HW_TYPE_SFE)
hw_mgr_res = &ctx->res_list_sfe_out[res_id_out];
else
hw_mgr_res = &ctx->res_list_ife_out[res_id_out];
hw_intf = cam_ife_hw_mgr_get_hw_intf(blob_info->base_info);
if (!hw_intf || blob_info->base_info->split_id >= CAM_ISP_HW_SPLIT_MAX) {
CAM_ERR(CAM_ISP,
"Invalid base %u type %u", blob_info->base_info->idx,
blob_info->base_info->hw_type);
return rc;
}
if (!hw_mgr_res->hw_res[blob_info->base_info->split_id])
return 0;
if ((kmd_buf_info->used_bytes
+ total_used_bytes) < kmd_buf_info->size) {
kmd_buf_remain_size = kmd_buf_info->size -
@@ -8472,15 +8543,11 @@ static int cam_isp_blob_hfr_update(
cmd_buf_addr = kmd_buf_info->cpu_addr +
kmd_buf_info->used_bytes/4 +
total_used_bytes/4;
if (hw_type == CAM_ISP_HW_TYPE_SFE)
hw_mgr_res = &ctx->res_list_sfe_out[res_id_out];
else
hw_mgr_res = &ctx->res_list_ife_out[res_id_out];
rc = cam_isp_add_cmd_buf_update(
hw_mgr_res, blob_type,
hw_mgr_res->hw_res[blob_info->base_info->split_id], hw_intf,
blob_type,
CAM_ISP_HW_CMD_GET_HFR_UPDATE,
blob_info->base_info->idx,
(void *)cmd_buf_addr,
kmd_buf_remain_size,
(void *)port_hfr_config,
@@ -8983,6 +9050,7 @@ static int cam_isp_blob_sfe_rd_update(
uint32_t bytes_used = 0;
bool found = false;
struct cam_isp_hw_mgr_res *sfe_rd_res;
struct cam_hw_intf *hw_intf = NULL;
list_for_each_entry(sfe_rd_res, &ctx->res_list_ife_in_rd,
list) {
@@ -9002,10 +9070,21 @@ static int cam_isp_blob_sfe_rd_update(
CAM_DBG(CAM_ISP, "SFE RM config for port: 0x%x",
wm_config->port_type);
hw_intf = cam_ife_hw_mgr_get_hw_intf(blob_info->base_info);
if (!hw_intf || blob_info->base_info->split_id >= CAM_ISP_HW_SPLIT_MAX) {
CAM_ERR(CAM_ISP,
"Invalid base %u type %u", blob_info->base_info->idx,
blob_info->base_info->hw_type);
return -EINVAL;
}
if (!sfe_rd_res->hw_res[blob_info->base_info->split_id])
return 0;
rc = cam_isp_add_cmd_buf_update(
sfe_rd_res, blob_type,
sfe_rd_res->hw_res[blob_info->base_info->split_id], hw_intf,
blob_type,
CAM_ISP_HW_CMD_FE_UPDATE_BUS_RD,
blob_info->base_info->idx,
(void *)cmd_buf_addr,
kmd_buf_remain_size,
(void *)wm_config,
@@ -9069,6 +9148,7 @@ static int cam_isp_blob_vfe_out_update(
struct cam_kmd_buf_info *kmd_buf_info;
struct cam_ife_hw_mgr_ctx *ctx = NULL;
struct cam_isp_hw_mgr_res *isp_out_res;
struct cam_hw_intf *hw_intf = NULL;
bool is_sfe_rd = false;
uint32_t res_id_out, i;
uint32_t total_used_bytes = 0;
@@ -9154,10 +9234,21 @@ static int cam_isp_blob_vfe_out_update(
isp_out_res = &ctx->res_list_ife_out[res_id_out];
}
hw_intf = cam_ife_hw_mgr_get_hw_intf(blob_info->base_info);
if (!hw_intf || blob_info->base_info->split_id >= CAM_ISP_HW_SPLIT_MAX) {
CAM_ERR(CAM_ISP,
"Invalid base %u type %u", blob_info->base_info->idx,
blob_info->base_info->hw_type);
return rc;
}
if (!isp_out_res->hw_res[blob_info->base_info->split_id])
return 0;
rc = cam_isp_add_cmd_buf_update(
isp_out_res, blob_type,
isp_out_res->hw_res[blob_info->base_info->split_id], hw_intf,
blob_type,
CAM_ISP_HW_CMD_WM_CONFIG_UPDATE,
blob_info->base_info->idx,
(void *)cmd_buf_addr,
kmd_buf_remain_size,
(void *)wm_config,
@@ -9240,6 +9331,7 @@ static int cam_isp_blob_bw_limit_update(
struct cam_kmd_buf_info *kmd_buf_info;
struct cam_ife_hw_mgr_ctx *ctx = NULL;
struct cam_isp_hw_mgr_res *isp_out_res;
struct cam_hw_intf *hw_intf = NULL;
uint32_t res_id_out, i;
uint32_t total_used_bytes = 0;
uint32_t kmd_buf_remain_size;
@@ -9305,10 +9397,21 @@ static int cam_isp_blob_bw_limit_update(
else
isp_out_res = &ctx->res_list_ife_out[res_id_out];
hw_intf = cam_ife_hw_mgr_get_hw_intf(blob_info->base_info);
if (!hw_intf || blob_info->base_info->split_id >= CAM_ISP_HW_SPLIT_MAX) {
CAM_ERR(CAM_ISP,
"Invalid base %u type %u", blob_info->base_info->idx,
blob_info->base_info->hw_type);
return rc;
}
if (!isp_out_res->hw_res[blob_info->base_info->split_id])
return 0;
rc = cam_isp_add_cmd_buf_update(
isp_out_res, blob_type,
isp_out_res->hw_res[blob_info->base_info->split_id], hw_intf,
blob_type,
CAM_ISP_HW_CMD_WM_BW_LIMIT_CONFIG,
blob_info->base_info->idx,
(void *)cmd_buf_addr,
kmd_buf_remain_size,
(void *)wm_bw_limit_cfg,
@@ -9347,6 +9450,7 @@ static int cam_isp_hw_mgr_add_cmd_buf_util(
uint32_t total_used_bytes = 0;
uint32_t kmd_buf_remain_size;
struct cam_kmd_buf_info *kmd_buf_info;
struct cam_hw_intf *hw_intf = NULL;
uint32_t *cmd_buf_addr;
int rc = 0;
@@ -9360,8 +9464,21 @@ static int cam_isp_hw_mgr_add_cmd_buf_util(
}
cmd_buf_addr = kmd_buf_info->cpu_addr + (kmd_buf_info->used_bytes / 4);
rc = cam_isp_add_cmd_buf_update(hw_mgr_res, blob_type,
hw_cmd_type, blob_info->base_info->idx, (void *)cmd_buf_addr,
hw_intf = cam_ife_hw_mgr_get_hw_intf(blob_info->base_info);
if (!hw_intf || blob_info->base_info->split_id >= CAM_ISP_HW_SPLIT_MAX) {
CAM_ERR(CAM_ISP,
"Invalid base %u type %u", blob_info->base_info->idx,
blob_info->base_info->hw_type);
return rc;
}
if (!hw_mgr_res->hw_res[blob_info->base_info->split_id])
return 0;
rc = cam_isp_add_cmd_buf_update(
hw_mgr_res->hw_res[blob_info->base_info->split_id], hw_intf, blob_type,
hw_cmd_type, (void *)cmd_buf_addr,
kmd_buf_remain_size, data, &total_used_bytes);
if (rc) {
CAM_ERR(CAM_ISP, "Add cmd buffer failed idx: %d",
@@ -11687,6 +11804,7 @@ static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
struct list_head *res_list_ife_rd_tmp = NULL;
struct cam_isp_cmd_buf_count cmd_buf_count = {0};
struct cam_isp_check_io_cfg_for_scratch check_for_scratch = {0};
struct cam_isp_io_buf_info io_buf_info = {0};
if (!hw_mgr_priv || !prepare_hw_update_args) {
CAM_ERR(CAM_ISP, "Invalid args");
@@ -11779,30 +11897,30 @@ static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
}
/* get IO buffers */
if (ctx->base[i].hw_type == CAM_ISP_HW_TYPE_VFE)
rc = cam_isp_add_io_buffers(
hw_mgr->mgr_common.img_iommu_hdl,
hw_mgr->mgr_common.img_iommu_hdl_secure,
prepare, ctx->base[i].idx,
&prepare_hw_data->kmd_cmd_buff_info, ctx->res_list_ife_out,
res_list_ife_rd_tmp,
CAM_ISP_IFE_OUT_RES_BASE,
(CAM_ISP_IFE_OUT_RES_BASE + max_ife_out_res),
fill_ife_fence,
CAM_ISP_HW_TYPE_VFE, &frame_header_info,
&check_for_scratch);
else if (ctx->base[i].hw_type == CAM_ISP_HW_TYPE_SFE)
rc = cam_isp_add_io_buffers(
hw_mgr->mgr_common.img_iommu_hdl,
hw_mgr->mgr_common.img_iommu_hdl_secure,
prepare, ctx->base[i].idx,
&prepare_hw_data->kmd_cmd_buff_info, ctx->res_list_sfe_out,
&ctx->res_list_ife_in_rd,
CAM_ISP_SFE_OUT_RES_BASE,
(CAM_ISP_SFE_OUT_RES_BASE + max_sfe_out_res),
fill_sfe_fence,
CAM_ISP_HW_TYPE_SFE, &frame_header_info,
&check_for_scratch);
io_buf_info.frame_hdr = &frame_header_info;
io_buf_info.scratch_check_cfg = &check_for_scratch;
io_buf_info.prepare = prepare;
io_buf_info.kmd_buf_info = &prepare_hw_data->kmd_cmd_buff_info;
io_buf_info.iommu_hdl = hw_mgr->mgr_common.img_iommu_hdl;
io_buf_info.sec_iommu_hdl = hw_mgr->mgr_common.img_iommu_hdl_secure;
io_buf_info.base = &ctx->base[i];
if (ctx->base[i].hw_type == CAM_ISP_HW_TYPE_VFE) {
io_buf_info.fill_fence = fill_ife_fence;
io_buf_info.out_base = CAM_ISP_IFE_OUT_RES_BASE;
io_buf_info.out_max = (CAM_ISP_IFE_OUT_RES_BASE + max_ife_out_res);
io_buf_info.res_list_isp_out = ctx->res_list_ife_out;
io_buf_info.res_list_in_rd = res_list_ife_rd_tmp;
rc = cam_isp_add_io_buffers(&io_buf_info);
} else if (ctx->base[i].hw_type == CAM_ISP_HW_TYPE_SFE) {
io_buf_info.fill_fence = fill_sfe_fence;
io_buf_info.out_base = CAM_ISP_SFE_OUT_RES_BASE;
io_buf_info.out_max = (CAM_ISP_SFE_OUT_RES_BASE + max_sfe_out_res);
io_buf_info.res_list_in_rd = &ctx->res_list_ife_in_rd;
io_buf_info.res_list_isp_out = ctx->res_list_sfe_out;
rc = cam_isp_add_io_buffers(&io_buf_info);
}
if (rc) {
CAM_ERR(CAM_ISP,
"Failed in io buffers, i=%d, rc=%d hw_type=%s",

View File

@@ -46,6 +46,17 @@ static int cam_tfe_hw_mgr_event_handler(
uint32_t evt_id,
void *evt_info);
static void *cam_tfe_hw_mgr_get_hw_intf(
struct cam_isp_ctx_base_info *base,
struct cam_tfe_hw_mgr_ctx *ctx)
{
struct cam_tfe_hw_mgr *hw_mgr;
hw_mgr = ctx->hw_mgr;
return hw_mgr->tfe_devices[base->idx]->hw_intf;
}
static int cam_tfe_mgr_regspace_data_cb(uint32_t reg_base_type,
void *hw_mgr_ctx, struct cam_hw_soc_info **soc_info_ptr,
uint32_t *reg_base_idx)
@@ -3644,6 +3655,7 @@ static int cam_isp_tfe_blob_hfr_update(
struct cam_kmd_buf_info *kmd_buf_info;
struct cam_tfe_hw_mgr_ctx *ctx = NULL;
struct cam_isp_hw_mgr_res *hw_mgr_res;
struct cam_hw_intf *hw_intf;
uint32_t res_id_out, i;
uint32_t total_used_bytes = 0;
uint32_t kmd_buf_remain_size;
@@ -3695,9 +3707,13 @@ static int cam_isp_tfe_blob_hfr_update(
total_used_bytes/4;
hw_mgr_res = &ctx->res_list_tfe_out[res_id_out];
if (!hw_mgr_res->hw_res[blob_info->base_info->split_id])
return 0;
hw_intf = hw_mgr->tfe_devices[base->idx]->hw_intf;
rc = cam_isp_add_cmd_buf_update(
hw_mgr_res, blob_type, CAM_ISP_HW_CMD_GET_HFR_UPDATE,
blob_info->base_info->idx,
hw_mgr_res->hw_res[blob_info->base_info->split_id], hw_intf,
blob_type, CAM_ISP_HW_CMD_GET_HFR_UPDATE,
(void *)cmd_buf_addr,
kmd_buf_remain_size,
(void *)port_hfr_config,
@@ -4362,6 +4378,7 @@ static int cam_tfe_mgr_prepare_hw_update(void *hw_mgr_priv,
struct cam_isp_frame_header_info frame_header_info;
struct cam_isp_change_base_args change_base_info = {0};
struct cam_isp_check_io_cfg_for_scratch check_for_scratch = {0};
struct cam_isp_io_buf_info io_buf_info = {0};
if (!hw_mgr_priv || !prepare_hw_update_args) {
CAM_ERR(CAM_ISP, "Invalid args");
@@ -4441,14 +4458,19 @@ static int cam_tfe_mgr_prepare_hw_update(void *hw_mgr_priv,
frame_header_info.frame_header_enable = false;
/* get IO buffers */
rc = cam_isp_add_io_buffers(hw_mgr->mgr_common.img_iommu_hdl,
hw_mgr->mgr_common.img_iommu_hdl_secure,
prepare, ctx->base[i].idx,
&prepare_hw_data->kmd_cmd_buff_info, ctx->res_list_tfe_out,
NULL, CAM_ISP_TFE_OUT_RES_BASE,
CAM_TFE_HW_OUT_RES_MAX, fill_fence,
CAM_ISP_HW_TYPE_TFE,
&frame_header_info, &check_for_scratch);
io_buf_info.frame_hdr = &frame_header_info;
io_buf_info.scratch_check_cfg = &check_for_scratch;
io_buf_info.prepare = prepare;
io_buf_info.kmd_buf_info = &prepare_hw_data->kmd_cmd_buff_info;
io_buf_info.res_list_ife_in_rd = NULL;
io_buf_info.iommu_hdl = hw_mgr->mgr_common.img_iommu_hdl;
io_buf_info.sec_iommu_hdl = hw_mgr->mgr_common.img_iommu_hdl_secure;
io_buf_info.base = &ctx->base[i];
io_buf_info.fill_fence = fill_fence;
io_buf_info.out_base = CAM_ISP_TFE_OUT_RES_BASE;
io_buf_info.out_max = CAM_TFE_HW_OUT_RES_MAX;
io_buf_info.res_list_isp_out = ctx->res_list_ife_out;
rc = cam_isp_add_io_buffers(&io_buf_info);
if (rc) {
CAM_ERR(CAM_ISP,

View File

@@ -191,27 +191,19 @@ end:
}
int cam_isp_add_cmd_buf_update(
struct cam_isp_hw_mgr_res *hw_mgr_res,
struct cam_isp_resource_node *res,
struct cam_hw_intf *hw_intf,
uint32_t cmd_type,
uint32_t hw_cmd_type,
uint32_t base_idx,
uint32_t *cmd_buf_addr,
uint32_t kmd_buf_remain_size,
void *cmd_update_data,
uint32_t *bytes_used)
{
int rc = 0;
struct cam_isp_resource_node *res;
struct cam_isp_hw_get_cmd_update cmd_update;
uint32_t i;
uint32_t total_used_bytes = 0;
if (hw_mgr_res->res_type == CAM_ISP_RESOURCE_UNINT) {
CAM_ERR(CAM_ISP, "VFE out resource:0x%X type:%d not valid",
hw_mgr_res->res_id, hw_mgr_res->res_type);
return -EINVAL;
}
cmd_update.cmd_type = hw_cmd_type;
cmd_update.cmd.cmd_buf_addr = cmd_buf_addr;
cmd_update.cmd.size = kmd_buf_remain_size;
@@ -222,30 +214,20 @@ int cam_isp_add_cmd_buf_update(
cmd_update.cmd.cmd_buf_addr,
cmd_update.cmd.size);
for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
if (!hw_mgr_res->hw_res[i])
continue;
cmd_update.res = res;
rc = hw_intf->hw_ops.process_cmd(
hw_intf->hw_priv,
cmd_update.cmd_type, &cmd_update,
sizeof(struct cam_isp_hw_get_cmd_update));
if (hw_mgr_res->hw_res[i]->hw_intf->hw_idx != base_idx)
continue;
res = hw_mgr_res->hw_res[i];
cmd_update.res = res;
rc = res->hw_intf->hw_ops.process_cmd(
res->hw_intf->hw_priv,
cmd_update.cmd_type, &cmd_update,
sizeof(struct cam_isp_hw_get_cmd_update));
if (rc) {
CAM_ERR(CAM_ISP, "get buf cmd error:%d",
res->res_id);
rc = -ENOMEM;
return rc;
}
total_used_bytes += cmd_update.cmd.used_bytes;
if (rc) {
CAM_ERR(CAM_ISP, "get buf cmd error:%d cmd %d", hw_intf->hw_idx,
cmd_update.cmd_type);
rc = -ENOMEM;
return rc;
}
total_used_bytes += cmd_update.cmd.used_bytes;
*bytes_used = total_used_bytes;
CAM_DBG(CAM_ISP, "total_used_bytes %u", total_used_bytes);
return rc;
@@ -710,491 +692,388 @@ static void cam_isp_validate_for_ife_scratch(
}
}
int cam_isp_add_io_buffers(
int iommu_hdl,
int sec_iommu_hdl,
struct cam_hw_prepare_update_args *prepare,
uint32_t base_idx,
struct cam_kmd_buf_info *kmd_buf_info,
struct cam_isp_hw_mgr_res *res_list_isp_out,
struct list_head *res_list_in_rd,
uint32_t out_base,
uint32_t out_max,
bool fill_fence,
enum cam_isp_hw_type hw_type,
struct cam_isp_frame_header_info *frame_header_info,
struct cam_isp_check_io_cfg_for_scratch *scratch_check_cfg)
static inline void cam_isp_update_hw_entries_util(
enum cam_isp_cdm_bl_type cdm_bl_type,
struct cam_kmd_buf_info *kmd_buf_info,
uint32_t used_bytes,
uint32_t offset,
struct cam_hw_prepare_update_args *prepare)
{
int rc = 0;
dma_addr_t io_addr[CAM_PACKET_MAX_PLANES];
struct cam_buf_io_cfg *io_cfg;
struct cam_isp_resource_node *res;
struct cam_isp_hw_mgr_res *hw_mgr_res;
struct cam_isp_hw_mgr_res *hw_mgr_res_temp;
struct cam_isp_hw_get_cmd_update update_buf;
struct cam_isp_hw_get_wm_update wm_update;
struct cam_isp_hw_get_wm_update bus_rd_update;
struct cam_hw_fence_map_entry *out_map_entries = NULL;
struct cam_hw_fence_map_entry *in_map_entries;
struct cam_isp_hw_get_cmd_update secure_mode;
uint32_t kmd_buf_remain_size;
uint32_t i, j, num_out_buf, num_in_buf;
uint32_t res_id_out, res_id_in, plane_id;
uint32_t io_cfg_used_bytes, num_ent;
dma_addr_t *image_buf_addr;
uint32_t *image_buf_offset;
uint64_t iova_addr;
size_t size;
int32_t hdl;
int mmu_hdl;
bool is_buf_secure, found = false;
uint32_t mode;
uint32_t num_ent;
io_cfg = (struct cam_buf_io_cfg *) ((uint8_t *)
&prepare->packet->payload +
prepare->packet->io_configs_offset);
num_out_buf = prepare->num_out_map_entries;
num_in_buf = prepare->num_in_map_entries;
io_cfg_used_bytes = 0;
num_ent = prepare->num_hw_update_entries;
prepare->hw_update_entries[num_ent].handle = kmd_buf_info->handle;
prepare->hw_update_entries[num_ent].len = used_bytes;
prepare->hw_update_entries[num_ent].offset = offset;
prepare->hw_update_entries[num_ent].flags = cdm_bl_type;
/* Max one hw entries required for each base */
if (prepare->num_hw_update_entries + 1 >=
prepare->max_hw_update_entries) {
CAM_ERR(CAM_ISP, "Insufficient HW entries :%d %d",
prepare->num_hw_update_entries,
prepare->max_hw_update_entries);
return -EINVAL;
}
num_ent++;
prepare->num_hw_update_entries = num_ent;
for (i = 0; i < prepare->packet->num_io_configs; i++) {
CAM_DBG(CAM_ISP, "======= io config idx %d ============", i);
CAM_DBG(CAM_REQ,
"i %d req_id %llu resource_type:%d fence:%d direction %d",
i, prepare->packet->header.request_id,
io_cfg[i].resource_type, io_cfg[i].fence,
io_cfg[i].direction);
CAM_DBG(CAM_ISP, "format: %d", io_cfg[i].format);
CAM_DBG(CAM_ISP, "Handle: 0x%x len: %u offset: %u flags: %u num_ent: %u",
prepare->hw_update_entries[num_ent - 1].handle,
prepare->hw_update_entries[num_ent - 1].len,
prepare->hw_update_entries[num_ent - 1].offset,
prepare->hw_update_entries[num_ent - 1].flags,
num_ent - 1);
}
if (io_cfg[i].direction == CAM_BUF_OUTPUT) {
if (io_cfg[i].resource_type < out_base ||
io_cfg[i].resource_type >= out_max)
continue;
static int cam_isp_io_buf_get_entries_util(
struct cam_isp_io_buf_info *buf_info,
struct cam_buf_io_cfg *io_cfg,
struct cam_isp_hw_mgr_res **hw_mgr_res)
{
uint32_t res_id;
uint32_t num_entries;
struct cam_hw_fence_map_entry *map_entries = NULL;
struct cam_isp_hw_mgr_res *hw_mgr_res_temp;
bool found = false;
res_id_out = io_cfg[i].resource_type & 0xFF;
if ((hw_type == CAM_ISP_HW_TYPE_SFE) &&
(scratch_check_cfg->validate_for_sfe)) {
struct cam_isp_sfe_scratch_buf_res_info *sfe_res_info =
&scratch_check_cfg->sfe_scratch_res_info;
CAM_DBG(CAM_REQ,
"req_id %llu resource_type:%d fence:%d direction %d format %d",
buf_info->prepare->packet->header.request_id,
io_cfg->resource_type, io_cfg->fence,
io_cfg->direction, io_cfg->format);
cam_isp_validate_for_sfe_scratch(sfe_res_info,
io_cfg[i].resource_type, out_base);
}
if (io_cfg->direction == CAM_BUF_OUTPUT) {
res_id = io_cfg->resource_type & 0xFF;
if ((hw_type == CAM_ISP_HW_TYPE_VFE) &&
(scratch_check_cfg->validate_for_ife)) {
struct cam_isp_ife_scratch_buf_res_info *ife_res_info =
&scratch_check_cfg->ife_scratch_res_info;
if (io_cfg->resource_type < buf_info->out_base ||
io_cfg->resource_type >= buf_info->out_max)
return -ENOMSG;
cam_isp_validate_for_ife_scratch(ife_res_info,
io_cfg[i].resource_type);
}
if ((buf_info->base->hw_type == CAM_ISP_HW_TYPE_SFE) &&
(buf_info->scratch_check_cfg->validate_for_sfe)) {
cam_isp_validate_for_sfe_scratch(
&buf_info->scratch_check_cfg->sfe_scratch_res_info,
io_cfg->resource_type, buf_info->out_base);
} else if ((buf_info->base->hw_type == CAM_ISP_HW_TYPE_VFE) &&
(buf_info->scratch_check_cfg->validate_for_ife)) {
cam_isp_validate_for_ife_scratch(
&buf_info->scratch_check_cfg->ife_scratch_res_info,
io_cfg->resource_type);
}
*hw_mgr_res = &buf_info->res_list_isp_out[res_id];
if ((*hw_mgr_res)->res_type == CAM_ISP_RESOURCE_UNINT) {
CAM_ERR(CAM_ISP, "io res id:%d not valid",
io_cfg->resource_type);
return -EINVAL;
}
} else if (io_cfg->direction == CAM_BUF_INPUT) {
found = false;
res_id = io_cfg->resource_type;
if (!buf_info->res_list_in_rd) {
CAM_DBG(CAM_ISP,
"configure output io with fill fence %d",
fill_fence);
out_map_entries =
&prepare->out_map_entries[num_out_buf];
if (fill_fence) {
if (num_out_buf <
prepare->max_out_map_entries) {
out_map_entries->resource_handle =
io_cfg[i].resource_type;
out_map_entries->sync_id =
io_cfg[i].fence;
num_out_buf++;
} else {
CAM_ERR(CAM_ISP, "ln_out:%d max_ln:%d",
num_out_buf,
prepare->max_out_map_entries);
return -EINVAL;
}
}
"No BUS Read supported hw_type %d io_cfg %d req:%d type:%d fence:%d",
buf_info->base->hw_type,
buf_info->prepare->packet->num_io_configs,
buf_info->prepare->packet->header.request_id,
io_cfg->resource_type, io_cfg->fence);
return -ENOMSG;
}
if (buf_info->base->hw_type != CAM_ISP_HW_TYPE_SFE)
res_id = CAM_ISP_HW_VFE_IN_RD;
hw_mgr_res = &res_list_isp_out[res_id_out];
if (hw_mgr_res->res_type == CAM_ISP_RESOURCE_UNINT) {
CAM_ERR(CAM_ISP, "io res id:%d not valid",
io_cfg[i].resource_type);
return -EINVAL;
list_for_each_entry_safe((*hw_mgr_res), hw_mgr_res_temp,
buf_info->res_list_in_rd, list) {
if ((*hw_mgr_res)->res_id == res_id) {
found = true;
break;
}
} else if (io_cfg[i].direction == CAM_BUF_INPUT) {
res_id_in = io_cfg[i].resource_type;
found = false;
if (!res_list_in_rd) {
CAM_DBG(CAM_ISP,
"No BUS Read supported hw_type %d io_cfg %d %d req:%d type:%d fence:%d",
hw_type,
prepare->packet->num_io_configs, i,
prepare->packet->header.request_id,
io_cfg[i].resource_type, io_cfg[i].fence);
continue;
}
if (hw_type != CAM_ISP_HW_TYPE_SFE)
res_id_in = CAM_ISP_HW_VFE_IN_RD;
}
list_for_each_entry_safe(hw_mgr_res, hw_mgr_res_temp,
res_list_in_rd, list) {
if (hw_mgr_res->res_id == res_id_in) {
found = true;
break;
}
}
if (!found) {
CAM_ERR(CAM_ISP, "No Read resource");
return -EINVAL;
}
CAM_DBG(CAM_ISP,
"configure input io with fill fence %d",
fill_fence);
in_map_entries =
&prepare->in_map_entries[num_in_buf];
if (fill_fence) {
if (num_in_buf < prepare->max_in_map_entries) {
in_map_entries->resource_handle =
io_cfg[i].resource_type;
in_map_entries->sync_id =
io_cfg[i].fence;
num_in_buf++;
} else {
CAM_ERR(CAM_ISP, "ln_in:%d imax_ln:%d",
num_in_buf,
prepare->max_in_map_entries);
return -EINVAL;
}
}
} else {
CAM_ERR(CAM_ISP, "Invalid io config direction :%d",
io_cfg[i].direction);
if (!found) {
CAM_ERR(CAM_ISP, "No Read resource");
return -EINVAL;
}
CAM_DBG(CAM_ISP, "setup mem io");
for (j = 0; j < CAM_ISP_HW_SPLIT_MAX &&
io_cfg[i].direction == CAM_BUF_OUTPUT; j++) {
if (!hw_mgr_res->hw_res[j])
continue;
} else {
CAM_ERR(CAM_ISP, "Invalid io config direction :%d",
io_cfg->direction);
return -EINVAL;
}
if (hw_mgr_res->hw_res[j]->hw_intf->hw_idx != base_idx)
continue;
if (buf_info->fill_fence) {
if (io_cfg->direction == CAM_BUF_OUTPUT &&
(buf_info->prepare->num_out_map_entries <
buf_info->prepare->max_out_map_entries)) {
num_entries = buf_info->prepare->num_out_map_entries;
map_entries = &buf_info->prepare->out_map_entries[num_entries];
buf_info->prepare->num_out_map_entries++;
} else if (io_cfg->direction == CAM_BUF_INPUT &&
(buf_info->prepare->num_in_map_entries <
buf_info->prepare->max_in_map_entries)) {
num_entries = buf_info->prepare->num_in_map_entries;
map_entries = &buf_info->prepare->in_map_entries[num_entries];
buf_info->prepare->num_in_map_entries++;
res = hw_mgr_res->hw_res[j];
if (res->res_id != io_cfg[i].resource_type) {
CAM_ERR(CAM_ISP,
"wm err res id:%d io res id:%d",
res->res_id, io_cfg[i].resource_type);
return -EINVAL;
}
} else {
CAM_ERR(CAM_ISP, "dir: %d max_ln:%d max_out: %u in: %u out %u",
io_cfg->direction,
buf_info->prepare->max_in_map_entries,
buf_info->prepare->max_out_map_entries,
buf_info->prepare->num_in_map_entries,
buf_info->prepare->num_out_map_entries);
return -EINVAL;
}
map_entries->resource_handle = io_cfg->resource_type;
map_entries->sync_id = io_cfg->fence;
memset(io_addr, 0, sizeof(io_addr));
}
for (plane_id = 0; plane_id < CAM_PACKET_MAX_PLANES;
plane_id++) {
if (!io_cfg[i].mem_handle[plane_id])
break;
return 0;
}
hdl = io_cfg[i].mem_handle[plane_id];
secure_mode.cmd_type =
CAM_ISP_HW_CMD_GET_WM_SECURE_MODE;
secure_mode.res = res;
secure_mode.data = (void *)&mode;
rc = res->hw_intf->hw_ops.process_cmd(
res->hw_intf->hw_priv,
CAM_ISP_HW_CMD_GET_WM_SECURE_MODE,
&secure_mode,
sizeof(
struct cam_isp_hw_get_cmd_update));
if (rc)
return -EINVAL;
static int cam_isp_add_io_buffers_util(
struct cam_isp_io_buf_info *buf_info,
struct cam_buf_io_cfg *io_cfg,
struct cam_isp_resource_node *res)
{
uint32_t secure_mode_cmd;
uint32_t bus_update_cmd;
int rc = 0;
dma_addr_t io_addr[CAM_PACKET_MAX_PLANES];
struct cam_isp_hw_get_cmd_update update_buf;
struct cam_isp_hw_get_wm_update bus_port_update;
struct cam_hw_fence_map_entry *out_map_entry = NULL;
uint32_t kmd_buf_remain_size;
uint32_t plane_id, num_entries;
dma_addr_t *image_buf_addr;
uint32_t *image_buf_offset;
size_t size;
int mmu_hdl;
bool is_buf_secure;
struct cam_isp_hw_get_cmd_update secure_mode;
uint32_t mode = 0;
is_buf_secure = cam_mem_is_secure_buf(hdl);
if ((mode == CAM_SECURE_MODE_SECURE) &&
is_buf_secure) {
mmu_hdl = sec_iommu_hdl;
} else if (
(mode == CAM_SECURE_MODE_NON_SECURE) &&
(!is_buf_secure)) {
mmu_hdl = iommu_hdl;
} else {
CAM_ERR_RATE_LIMIT(CAM_ISP,
"Invalid hdl: port mode[%u], buf mode[%u]",
mode, is_buf_secure);
return -EINVAL;
}
if (io_cfg->direction == CAM_BUF_OUTPUT) {
secure_mode_cmd = CAM_ISP_HW_CMD_GET_WM_SECURE_MODE;
bus_update_cmd = CAM_ISP_HW_CMD_GET_BUF_UPDATE;
} else if (io_cfg->direction == CAM_BUF_INPUT) {
secure_mode_cmd = CAM_ISP_HW_CMD_GET_RM_SECURE_MODE;
bus_update_cmd = CAM_ISP_HW_CMD_GET_BUF_UPDATE_RM;
rc = cam_mem_get_io_buf(
io_cfg[i].mem_handle[plane_id],
mmu_hdl, &io_addr[plane_id], &size, NULL);
if (rc) {
CAM_ERR(CAM_ISP,
"no io addr for plane%d",
plane_id);
rc = -ENOMEM;
return rc;
}
} else {
CAM_ERR(CAM_ISP, "Invalid dir %d", io_cfg->direction);
return -EINVAL;
}
/* need to update with offset */
io_addr[plane_id] +=
io_cfg[i].offsets[plane_id];
CAM_DBG(CAM_ISP,
"get io_addr for plane %d: 0x%llx, mem_hdl=0x%x",
plane_id, io_addr[plane_id],
io_cfg[i].mem_handle[plane_id]);
if (!res) {
CAM_ERR(CAM_ISP, "invalid res, io res id:%d split_id :%d",
io_cfg->resource_type, buf_info->base->split_id);
return -EINVAL;
}
CAM_DBG(CAM_ISP,
"mmu_hdl=0x%x, size=%d, end=0x%x",
mmu_hdl, (int)size,
io_addr[plane_id]+size);
if (res->res_id != io_cfg->resource_type) {
CAM_ERR(CAM_ISP, "err res id:%d io res id:%d",
res->res_id, io_cfg->resource_type);
return -EINVAL;
}
}
if (!plane_id) {
CAM_ERR(CAM_ISP, "No valid planes for res%d",
res->res_id);
rc = -ENOMEM;
return rc;
}
memset(io_addr, 0, sizeof(io_addr));
for (plane_id = 0; plane_id < CAM_PACKET_MAX_PLANES; plane_id++) {
if (!io_cfg->mem_handle[plane_id])
break;
secure_mode.cmd_type = secure_mode_cmd;
secure_mode.res = res;
secure_mode.data = (void *)&mode;
rc = res->hw_intf->hw_ops.process_cmd(
res->hw_intf->hw_priv, secure_mode_cmd,
&secure_mode, sizeof(struct cam_isp_hw_get_cmd_update));
if (rc) {
CAM_ERR(CAM_ISP, "Get secure mode failed cmd_type %d res_id %d",
secure_mode_cmd, res->res_id);
return -EINVAL;
}
if ((kmd_buf_info->used_bytes + io_cfg_used_bytes) <
kmd_buf_info->size) {
kmd_buf_remain_size = kmd_buf_info->size -
(kmd_buf_info->used_bytes +
io_cfg_used_bytes);
} else {
CAM_ERR(CAM_ISP,
"no free kmd memory for base %d",
base_idx);
rc = -ENOMEM;
return rc;
}
update_buf.res = res;
update_buf.cmd_type = CAM_ISP_HW_CMD_GET_BUF_UPDATE;
update_buf.cmd.cmd_buf_addr = kmd_buf_info->cpu_addr +
kmd_buf_info->used_bytes/4 +
io_cfg_used_bytes/4;
wm_update.image_buf = io_addr;
wm_update.num_buf = plane_id;
wm_update.io_cfg = &io_cfg[i];
wm_update.frame_header = 0;
wm_update.fh_enabled = false;
is_buf_secure = cam_mem_is_secure_buf(io_cfg->mem_handle[plane_id]);
for (plane_id = 0; plane_id < CAM_PACKET_MAX_PLANES;
plane_id++)
wm_update.image_buf_offset[plane_id] = 0;
if ((mode == CAM_SECURE_MODE_SECURE) &&
is_buf_secure) {
mmu_hdl = buf_info->sec_iommu_hdl;
} else if ((mode == CAM_SECURE_MODE_NON_SECURE) &&
!is_buf_secure) {
mmu_hdl = buf_info->iommu_hdl;
} else {
CAM_ERR_RATE_LIMIT(CAM_ISP,
"Invalid hdl: port mode[%u], buf mode[%u]",
mode, is_buf_secure);
return -EINVAL;
}
iova_addr = frame_header_info->frame_header_iova_addr;
if ((frame_header_info->frame_header_enable) &&
!(frame_header_info->frame_header_res_id)) {
wm_update.frame_header = iova_addr;
wm_update.local_id =
prepare->packet->header.request_id;
}
rc = cam_mem_get_io_buf(io_cfg->mem_handle[plane_id],
mmu_hdl, &io_addr[plane_id], &size, NULL);
if (rc) {
CAM_ERR(CAM_ISP, "no io addr for plane%d", plane_id);
rc = -ENOMEM;
return rc;
}
update_buf.cmd.size = kmd_buf_remain_size;
update_buf.wm_update = &wm_update;
update_buf.use_scratch_cfg = false;
/* need to update with offset */
io_addr[plane_id] += io_cfg->offsets[plane_id];
CAM_DBG(CAM_ISP, "get io_addr for plane %d: 0x%llx, mem_hdl=0x%x",
plane_id, io_addr[plane_id], io_cfg->mem_handle[plane_id]);
CAM_DBG(CAM_ISP, "cmd buffer 0x%pK, size %d",
update_buf.cmd.cmd_buf_addr,
update_buf.cmd.size);
rc = res->hw_intf->hw_ops.process_cmd(
res->hw_intf->hw_priv,
CAM_ISP_HW_CMD_GET_BUF_UPDATE, &update_buf,
sizeof(struct cam_isp_hw_get_cmd_update));
CAM_DBG(CAM_ISP, "mmu_hdl=0x%x, size=%d, end=0x%x",
mmu_hdl, (int)size, io_addr[plane_id]+size);
}
if (rc) {
CAM_ERR(CAM_ISP, "get buf cmd error:%d",
res->res_id);
rc = -ENOMEM;
return rc;
}
if (!plane_id) {
CAM_ERR(CAM_ISP, "No valid planes for res%d", res->res_id);
rc = -ENOMEM;
return rc;
}
if (wm_update.fh_enabled) {
frame_header_info->frame_header_res_id =
res->res_id;
CAM_DBG(CAM_ISP,
"Frame header enabled for res: 0x%x iova: %pK",
frame_header_info->frame_header_res_id,
wm_update.frame_header);
}
if ((buf_info->kmd_buf_info->used_bytes) <
buf_info->kmd_buf_info->size) {
kmd_buf_remain_size = buf_info->kmd_buf_info->size -
(buf_info->kmd_buf_info->used_bytes);
} else {
CAM_ERR(CAM_ISP, "no free kmd memory for base %d", buf_info->base->idx);
rc = -ENOMEM;
return rc;
}
update_buf.res = res;
update_buf.cmd_type = bus_update_cmd;
update_buf.cmd.cmd_buf_addr = buf_info->kmd_buf_info->cpu_addr +
buf_info->kmd_buf_info->used_bytes/4;
bus_port_update.image_buf = io_addr;
bus_port_update.num_buf = plane_id;
bus_port_update.io_cfg = io_cfg;
bus_port_update.frame_header = 0;
bus_port_update.fh_enabled = false;
io_cfg_used_bytes += update_buf.cmd.used_bytes;
for (plane_id = 0; plane_id < CAM_PACKET_MAX_PLANES; plane_id++)
bus_port_update.image_buf_offset[plane_id] = 0;
if (!out_map_entries) {
CAM_ERR(CAM_ISP, "out_map_entries is NULL");
if ((buf_info->frame_hdr->frame_header_enable) &&
!(buf_info->frame_hdr->frame_header_res_id)) {
bus_port_update.frame_header =
buf_info->frame_hdr->frame_header_iova_addr;
bus_port_update.local_id =
buf_info->prepare->packet->header.request_id;
}
update_buf.cmd.size = kmd_buf_remain_size;
if (io_cfg->direction == CAM_BUF_OUTPUT)
update_buf.wm_update = &bus_port_update;
else if (io_cfg->direction == CAM_BUF_INPUT)
update_buf.rm_update = &bus_port_update;
update_buf.use_scratch_cfg = false;
CAM_DBG(CAM_ISP, "cmd buffer 0x%pK, size %d",
update_buf.cmd.cmd_buf_addr, update_buf.cmd.size);
rc = res->hw_intf->hw_ops.process_cmd(
res->hw_intf->hw_priv,
bus_update_cmd, &update_buf,
sizeof(struct cam_isp_hw_get_cmd_update));
if (rc) {
CAM_ERR(CAM_ISP, "get buf cmd error:%d",
res->res_id);
rc = -ENOMEM;
return rc;
}
if (bus_port_update.fh_enabled) {
buf_info->frame_hdr->frame_header_res_id = res->res_id;
CAM_DBG(CAM_ISP,
"Frame header enabled for res: 0x%x iova: %pK",
buf_info->frame_hdr->frame_header_res_id,
bus_port_update.frame_header);
}
buf_info->kmd_buf_info->used_bytes += update_buf.cmd.used_bytes;
buf_info->kmd_buf_info->offset += update_buf.cmd.used_bytes;
if (io_cfg->direction == CAM_BUF_OUTPUT) {
if (buf_info->fill_fence) {
num_entries = buf_info->prepare->num_out_map_entries - 1;
out_map_entry =
&buf_info->prepare->out_map_entries[num_entries];
if (!out_map_entry) {
CAM_ERR(CAM_ISP, "out_map_entry is NULL");
rc = -EINVAL;
return rc;
}
image_buf_addr = out_map_entries->image_buf_addr;
image_buf_offset = wm_update.image_buf_offset;
if (j == CAM_ISP_HW_SPLIT_LEFT) {
for (plane_id = 0;
plane_id < CAM_PACKET_MAX_PLANES;
image_buf_addr = out_map_entry->image_buf_addr;
image_buf_offset = bus_port_update.image_buf_offset;
if (buf_info->base->split_id == CAM_ISP_HW_SPLIT_LEFT) {
for (plane_id = 0; plane_id < CAM_PACKET_MAX_PLANES;
plane_id++)
image_buf_addr[plane_id] =
io_addr[plane_id] +
image_buf_addr[plane_id] = io_addr[plane_id] +
image_buf_offset[plane_id];
}
}
for (j = 0; j < CAM_ISP_HW_SPLIT_MAX &&
io_cfg[i].direction == CAM_BUF_INPUT; j++) {
if (!hw_mgr_res->hw_res[j])
continue;
}
if (hw_mgr_res->hw_res[j]->hw_intf->hw_idx != base_idx)
continue;
return rc;
}
res = hw_mgr_res->hw_res[j];
int cam_isp_add_io_buffers(struct cam_isp_io_buf_info *io_info)
{
int rc = 0;
struct cam_buf_io_cfg *io_cfg = NULL;
struct cam_isp_hw_mgr_res *hw_mgr_res = NULL;
uint32_t i;
uint32_t curr_used_bytes = 0;
uint32_t bytes_updated = 0;
uint32_t curr_offset = 0;
struct cam_isp_resource_node *res = NULL;
memset(io_addr, 0, sizeof(io_addr));
io_cfg = (struct cam_buf_io_cfg *) ((uint8_t *)
&io_info->prepare->packet->payload +
io_info->prepare->packet->io_configs_offset);
curr_used_bytes = io_info->kmd_buf_info->used_bytes;
curr_offset = io_info->kmd_buf_info->offset;
for (plane_id = 0; plane_id < CAM_PACKET_MAX_PLANES;
plane_id++) {
if (!io_cfg[i].mem_handle[plane_id])
break;
/* Max one hw entries required for each base */
if (io_info->prepare->num_hw_update_entries + 1 >=
io_info->prepare->max_hw_update_entries) {
CAM_ERR(CAM_ISP, "Insufficient HW entries :%d %d",
io_info->prepare->num_hw_update_entries,
io_info->prepare->max_hw_update_entries);
return -EINVAL;
}
hdl = io_cfg[i].mem_handle[plane_id];
secure_mode.cmd_type =
CAM_ISP_HW_CMD_GET_RM_SECURE_MODE;
secure_mode.res = res;
secure_mode.data = (void *)&mode;
rc = res->hw_intf->hw_ops.process_cmd(
res->hw_intf->hw_priv,
CAM_ISP_HW_CMD_GET_RM_SECURE_MODE,
&secure_mode,
sizeof(
struct cam_isp_hw_get_cmd_update));
if (rc)
return -EINVAL;
for (i = 0; i < io_info->prepare->packet->num_io_configs; i++) {
is_buf_secure = cam_mem_is_secure_buf(hdl);
if ((mode == CAM_SECURE_MODE_SECURE) &&
is_buf_secure) {
mmu_hdl = sec_iommu_hdl;
} else if (
(mode == CAM_SECURE_MODE_NON_SECURE) &&
(!is_buf_secure)) {
mmu_hdl = iommu_hdl;
} else {
CAM_ERR_RATE_LIMIT(CAM_ISP,
"Invalid hdl: port mode[%u], buf mode[%u]",
mode, is_buf_secure);
return -EINVAL;
}
rc = cam_isp_io_buf_get_entries_util(io_info, &io_cfg[i], &hw_mgr_res);
rc = cam_mem_get_io_buf(
io_cfg[i].mem_handle[plane_id],
mmu_hdl, &io_addr[plane_id], &size, NULL);
if (rc) {
CAM_ERR(CAM_ISP,
"no io addr for plane%d",
plane_id);
rc = -ENOMEM;
return rc;
}
if (rc == -ENOMSG) {
rc = 0;
continue;
} else if (rc) {
CAM_ERR(CAM_ISP, "io_cfg[%d] failed rc %d", i, rc);
return rc;
}
/* need to update with offset */
io_addr[plane_id] +=
io_cfg[i].offsets[plane_id];
CAM_DBG(CAM_ISP,
"get io_addr for plane %d: 0x%llx, mem_hdl=0x%x",
plane_id, io_addr[plane_id],
io_cfg[i].mem_handle[plane_id]);
if (!hw_mgr_res) {
CAM_ERR(CAM_ISP, "hw_mgr_res is NULL i:%d", i);
return -EINVAL;
}
CAM_DBG(CAM_ISP,
"mmu_hdl=0x%x, size=%d, end=0x%x",
mmu_hdl, (int)size,
io_addr[plane_id]+size);
res = hw_mgr_res->hw_res[io_info->base->split_id];
if (!res)
continue;
rc = cam_isp_add_io_buffers_util(io_info, &io_cfg[i], res);
}
if (!plane_id) {
CAM_ERR(CAM_ISP, "No valid planes for res%d",
res->res_id);
rc = -ENOMEM;
return rc;
}
if ((kmd_buf_info->used_bytes + io_cfg_used_bytes) <
kmd_buf_info->size) {
kmd_buf_remain_size = kmd_buf_info->size -
(kmd_buf_info->used_bytes +
io_cfg_used_bytes);
} else {
CAM_ERR(CAM_ISP,
"no free kmd memory for base %d",
base_idx);
rc = -ENOMEM;
return rc;
}
update_buf.res = res;
update_buf.cmd_type = CAM_ISP_HW_CMD_GET_BUF_UPDATE_RM;
update_buf.cmd.cmd_buf_addr = kmd_buf_info->cpu_addr +
kmd_buf_info->used_bytes/4 +
io_cfg_used_bytes/4;
bus_rd_update.image_buf = io_addr;
bus_rd_update.num_buf = plane_id;
bus_rd_update.io_cfg = &io_cfg[i];
update_buf.cmd.size = kmd_buf_remain_size;
update_buf.use_scratch_cfg = false;
update_buf.rm_update = &bus_rd_update;
CAM_DBG(CAM_ISP, "cmd buffer 0x%pK, size %d",
update_buf.cmd.cmd_buf_addr,
update_buf.cmd.size);
rc = res->hw_intf->hw_ops.process_cmd(
res->hw_intf->hw_priv,
CAM_ISP_HW_CMD_GET_BUF_UPDATE_RM, &update_buf,
sizeof(struct cam_isp_hw_get_cmd_update));
if (rc) {
CAM_ERR(CAM_ISP, "get buf cmd error:%d",
res->res_id);
rc = -ENOMEM;
return rc;
}
io_cfg_used_bytes += update_buf.cmd.used_bytes;
if (rc) {
CAM_ERR(CAM_ISP, "io_cfg[%d] failed rc %d", i, rc);
return rc;
}
}
bytes_updated = io_info->kmd_buf_info->used_bytes - curr_used_bytes;
CAM_DBG(CAM_ISP, "io_cfg_used_bytes %d, fill_fence %d",
io_cfg_used_bytes, fill_fence);
if (io_cfg_used_bytes) {
/* Update the HW entries */
num_ent = prepare->num_hw_update_entries;
prepare->hw_update_entries[num_ent].handle =
kmd_buf_info->handle;
prepare->hw_update_entries[num_ent].len =
io_cfg_used_bytes;
prepare->hw_update_entries[num_ent].offset =
kmd_buf_info->offset;
prepare->hw_update_entries[num_ent].flags = CAM_ISP_IOCFG_BL;
CAM_DBG(CAM_ISP,
"num_ent=%d handle=0x%x, len=%u, offset=%u",
num_ent,
prepare->hw_update_entries[num_ent].handle,
prepare->hw_update_entries[num_ent].len,
prepare->hw_update_entries[num_ent].offset);
num_ent++;
bytes_updated, io_info->fill_fence);
kmd_buf_info->used_bytes += io_cfg_used_bytes;
kmd_buf_info->offset += io_cfg_used_bytes;
prepare->num_hw_update_entries = num_ent;
}
if (fill_fence) {
prepare->num_out_map_entries = num_out_buf;
prepare->num_in_map_entries = num_in_buf;
}
if (bytes_updated)
cam_isp_update_hw_entries_util(CAM_ISP_IOCFG_BL, io_info->kmd_buf_info,
bytes_updated, curr_offset, io_info->prepare);
return rc;
}

View File

@@ -115,6 +115,41 @@ struct cam_isp_cmd_buf_count {
uint32_t sfe_cnt;
};
/*
* struct cam_isp_io_buf_info
*
* @frame_hdr: Frame header related params
* @scratch_check_cfg: Validate info for IFE/SFE scratch buffers
* @prepare: Contain the packet and HW update variables
* @kmd_buf_info: Kmd buffer to store the change base command
* @res_list_isp_out: IFE/SFE out resource list
* @res_list_ife_in_rd: IFE/SFE in rd resource list
* @base: Base info for IFE/SFE
* @iommu_hdl: Iommu handle to get the IO buf from memory manager
* @sec_iommu_hdl: Secure iommu handle to get the IO buf from
* memory manager
* @out_base: Base value of ISP resource (IFE/SFE)
* @out_max: Max of supported ISP resources(IFE/SFE)
* @fill_fence: If true, Fence map table will be filled
* @return: 0 for success
* -EINVAL for Fail
*/
struct cam_isp_io_buf_info {
struct cam_isp_frame_header_info *frame_hdr;
struct cam_isp_check_io_cfg_for_scratch *scratch_check_cfg;
struct cam_hw_prepare_update_args *prepare;
struct cam_kmd_buf_info *kmd_buf_info;
struct cam_isp_hw_mgr_res *res_list_isp_out;
struct list_head *res_list_in_rd;
struct cam_isp_ctx_base_info *base;
int iommu_hdl;
int sec_iommu_hdl;
uint32_t out_base;
uint32_t out_max;
bool fill_fence;
};
/*
* cam_isp_add_change_base()
*
@@ -141,7 +176,7 @@ int cam_isp_add_change_base(
* @brief Add command buffer in the HW entries list for given
* Blob Data.
*
* @hw_mgr_res: HW resource to get the update from
* @res: ISP HW resource to get the update from
* @cmd_type: Cmd type to get update for
* @hw_cmd_type: HW Cmd type corresponding to cmd_type
* @base_idx: Base hardware index
@@ -156,10 +191,10 @@ int cam_isp_add_change_base(
* otherwise returns bytes used
*/
int cam_isp_add_cmd_buf_update(
struct cam_isp_hw_mgr_res *hw_mgr_res,
struct cam_isp_resource_node *res,
struct cam_hw_intf *hw_intf,
uint32_t cmd_type,
uint32_t hw_cmd_type,
uint32_t base_idx,
uint32_t *cmd_buf_addr,
uint32_t kmd_buf_remain_size,
void *cmd_update_data,
@@ -224,37 +259,11 @@ int cam_isp_add_command_buffers(
* processe the io configurations based on the base
* index and update the HW entries list
*
* @iommu_hdl: Iommu handle to get the IO buf from memory manager
* @sec_iommu_hdl: Secure iommu handle to get the IO buf from
* memory manager
* @prepare: Contain the packet and HW update variables
* @base_idx: Base or dev index of the IFE/VFE HW instance
* @kmd_buf_info: Kmd buffer to store the change base command
* @res_list_isp_out: IFE/SFE out resource list
* @res_list_ife_in_rd: IFE/SFE in rd resource list
* @out_base: Base value of ISP resource (IFE/SFE)
* @out_max: Max of supported ISP resources(IFE/SFE)
* @fill_fence: If true, Fence map table will be filled
* @hw_type: HW type for this ctx base (IFE/SFE)
* @frame_header_info: Frame header related params
* @scratch_check_cfg: Validate info for IFE/SFE scratch buffers
* @io_info: Io buffer information
* @return: 0 for success
* -EINVAL for Fail
*/
int cam_isp_add_io_buffers(
int iommu_hdl,
int sec_iommu_hdl,
struct cam_hw_prepare_update_args *prepare,
uint32_t base_idx,
struct cam_kmd_buf_info *kmd_buf_info,
struct cam_isp_hw_mgr_res *res_list_isp_out,
struct list_head *res_list_ife_in_rd,
uint32_t out_base,
uint32_t out_max,
bool fill_fence,
enum cam_isp_hw_type hw_type,
struct cam_isp_frame_header_info *frame_header_info,
struct cam_isp_check_io_cfg_for_scratch *scratch_check_cfg);
int cam_isp_add_io_buffers(struct cam_isp_io_buf_info *io_info);
/*
* cam_isp_add_reg_update()