@@ -4933,10 +4933,20 @@ static int cam_ife_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
ife_ctx->sfe_info.scratch_config =
kzalloc(sizeof(struct cam_sfe_scratch_buf_cfg), GFP_KERNEL);
if (!ife_ctx->sfe_info.scratch_config) {
- CAM_ERR(CAM_ISP, "Failed to allocate scratch config");
+ CAM_ERR(CAM_ISP, "Failed to allocate SFE scratch config");
rc = -ENOMEM;
goto free_cdm_cmd;
}
+
+ ife_ctx->sfe_info.ife_scratch_config =
+ kzalloc(sizeof(struct cam_ife_scratch_buf_cfg), GFP_KERNEL);
+ if (!ife_ctx->sfe_info.ife_scratch_config) {
+ CAM_ERR(CAM_ISP, "Failed to allocate IFE scratch config");
+ rc = -ENOMEM;
+ kfree(ife_ctx->sfe_info.scratch_config);
+ goto free_cdm_cmd;
+ }
+
/* Set scratch by default at stream on */
ife_ctx->sfe_info.skip_scratch_cfg_streamon = false;
}
@@ -6236,6 +6246,10 @@ static int cam_ife_mgr_stop_hw(void *hw_mgr_priv, void *stop_hw_args)
if (ctx->sfe_info.scratch_config)
memset(ctx->sfe_info.scratch_config, 0,
sizeof(struct cam_sfe_scratch_buf_cfg));
+
+ if (ctx->sfe_info.ife_scratch_config)
+ memset(ctx->sfe_info.ife_scratch_config, 0,
+ sizeof(struct cam_ife_scratch_buf_cfg));
}
ctx->sfe_info.skip_scratch_cfg_streamon = false;

@@ -6890,7 +6904,9 @@ static int cam_ife_mgr_release_hw(void *hw_mgr_priv,
ctx->num_acq_sfe_out = 0;

kfree(ctx->sfe_info.scratch_config);
+ kfree(ctx->sfe_info.ife_scratch_config);
ctx->sfe_info.scratch_config = NULL;
+ ctx->sfe_info.ife_scratch_config = NULL;

memset(&ctx->flags, 0, sizeof(struct cam_ife_hw_mgr_ctx_flags));
atomic_set(&ctx->overflow_pending, 0);
@@ -7252,32 +7268,125 @@ end:
return rc;
}

+static int cam_isp_scratch_buf_update_util(
+ struct cam_isp_sfe_scratch_buf_info *buffer_info,
+ struct cam_ife_sfe_scratch_buf_info *port_info)
+{
+ int rc = 0;
+ int mmu_hdl;
+ size_t size;
+ dma_addr_t io_addr;
+ bool is_buf_secure;
+
+ is_buf_secure = cam_mem_is_secure_buf(buffer_info->mem_handle);
+ if (is_buf_secure) {
+ port_info->is_secure = true;
+ mmu_hdl = g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure;
+ } else {
+ port_info->is_secure = false;
+ mmu_hdl = g_ife_hw_mgr.mgr_common.img_iommu_hdl;
+ }
+
+ rc = cam_mem_get_io_buf(buffer_info->mem_handle,
+ mmu_hdl, &io_addr, &size, NULL);
+ if (rc) {
+ CAM_ERR(CAM_ISP,
+ "no scratch buf addr for res: 0x%x",
+ buffer_info->resource_type);
+ rc = -ENOMEM;
+ return rc;
+ }
+
+ port_info->res_id = buffer_info->resource_type;
+ port_info->io_addr = io_addr + buffer_info->offset;
+ port_info->width = buffer_info->width;
+ port_info->height = buffer_info->height;
+ port_info->stride = buffer_info->stride;
+ port_info->slice_height = buffer_info->slice_height;
+ port_info->offset = 0;
+ port_info->config_done = true;
+
+ CAM_DBG(CAM_ISP,
+ "res_id: 0x%x w: 0x%x h: 0x%x s: 0x%x sh: 0x%x addr: 0x%x",
+ port_info->res_id, port_info->width,
+ port_info->height, port_info->stride,
+ port_info->slice_height, port_info->io_addr);
+
+ return rc;
+}
+
+static int cam_isp_blob_ife_scratch_buf_update(
+ struct cam_isp_sfe_init_scratch_buf_config *scratch_config,
+ struct cam_hw_prepare_update_args *prepare)
+{
+ int rc = 0, i;
+ uint32_t res_id_out;
+ struct cam_ife_hw_mgr_ctx *ctx = NULL;
+ struct cam_isp_sfe_scratch_buf_info *buffer_info;
+ struct cam_ife_sfe_scratch_buf_info *port_info;
+ struct cam_isp_hw_mgr_res *ife_out_res;
+ struct cam_ife_hw_mgr *ife_hw_mgr;
+ struct cam_ife_scratch_buf_cfg *ife_scratch_config;
+
+ ctx = prepare->ctxt_to_hw_map;
+ ife_hw_mgr = ctx->hw_mgr;
+ ife_scratch_config = ctx->sfe_info.ife_scratch_config;
+
+ for (i = 0; i < scratch_config->num_ports; i++) {
+ buffer_info = &scratch_config->port_scratch_cfg[i];
+ if (!cam_ife_hw_mgr_is_ife_out_port(buffer_info->resource_type))
+ continue;
+
+ res_id_out = buffer_info->resource_type & 0xFF;
+
+ CAM_DBG(CAM_ISP, "scratch config idx: %d res: 0x%x",
+ i, buffer_info->resource_type);
+
+ ife_out_res = &ctx->res_list_ife_out[res_id_out];
+ if (!ife_out_res->hw_res[0]) {
+ CAM_ERR(CAM_ISP,
+ "IFE rsrc_type: 0x%x not acquired, failing scratch config",
+ buffer_info->resource_type);
+ return -EINVAL;
+ }
+
+ if (ife_scratch_config->num_config >= CAM_IFE_SCRATCH_NUM_MAX) {
+ CAM_ERR(CAM_ISP,
+ "Incoming num of scratch buffers: %u exceeds max: %u",
+ ife_scratch_config->num_config, CAM_IFE_SCRATCH_NUM_MAX);
+ return -EINVAL;
+ }
+
+ port_info = &ife_scratch_config->buf_info[ife_scratch_config->num_config++];
+ rc = cam_isp_scratch_buf_update_util(buffer_info, port_info);
+ if (rc)
+ goto end;
+ }
+
+end:
+ return rc;
+}
+
static int cam_isp_blob_sfe_scratch_buf_update(
struct cam_isp_sfe_init_scratch_buf_config *scratch_config,
struct cam_hw_prepare_update_args *prepare)
{
- int rc, i, mmu_hdl;
+ int rc = 0, i;
uint32_t res_id_out;
- bool is_buf_secure;
- dma_addr_t io_addr;
- size_t size;
struct cam_ife_hw_mgr_ctx *ctx = NULL;
struct cam_isp_sfe_scratch_buf_info *buffer_info;
- struct cam_sfe_scratch_buf_info *port_info;
+ struct cam_ife_sfe_scratch_buf_info *port_info;
struct cam_isp_hw_mgr_res *sfe_out_res;
struct cam_ife_hw_mgr *ife_hw_mgr;

ctx = prepare->ctxt_to_hw_map;
ife_hw_mgr = ctx->hw_mgr;
- if (scratch_config->num_ports != ctx->sfe_info.num_fetches)
- CAM_WARN(CAM_ISP,
- "Getting scratch buffer for %u ports on ctx: %u with num_fetches: %u",
- scratch_config->num_ports, ctx->ctx_index, ctx->sfe_info.num_fetches);
-
- CAM_DBG(CAM_ISP, "num_ports: %u", scratch_config->num_ports);

for (i = 0; i < scratch_config->num_ports; i++) {
buffer_info = &scratch_config->port_scratch_cfg[i];
+ if (!cam_ife_hw_mgr_is_sfe_out_port(buffer_info->resource_type))
+ continue;
+
res_id_out = buffer_info->resource_type & 0xFF;

CAM_DBG(CAM_ISP, "scratch config idx: %d res: 0x%x",
@@ -7298,42 +7407,22 @@ static int cam_isp_blob_sfe_scratch_buf_update(
}

port_info = &ctx->sfe_info.scratch_config->buf_info[res_id_out];
- is_buf_secure = cam_mem_is_secure_buf(buffer_info->mem_handle);
- if (is_buf_secure) {
- port_info->is_secure = true;
- mmu_hdl = ife_hw_mgr->mgr_common.img_iommu_hdl_secure;
- } else {
- port_info->is_secure = false;
- mmu_hdl = ife_hw_mgr->mgr_common.img_iommu_hdl;
- }
+ rc = cam_isp_scratch_buf_update_util(buffer_info, port_info);
+ if (rc)
+ goto end;

- rc = cam_mem_get_io_buf(buffer_info->mem_handle,
- mmu_hdl, &io_addr, &size, NULL);
- if (rc) {
- CAM_ERR(CAM_ISP,
- "no scratch buf addr for res: 0x%x",
- buffer_info->resource_type);
- rc = -ENOMEM;
- return rc;
- }
+ ctx->sfe_info.scratch_config->num_config++;
+ }

- port_info->res_id = buffer_info->resource_type;
- port_info->io_addr = io_addr + buffer_info->offset;
- port_info->width = buffer_info->width;
- port_info->height = buffer_info->height;
- port_info->stride = buffer_info->stride;
- port_info->slice_height = buffer_info->slice_height;
- port_info->offset = 0;
- port_info->config_done = true;
- CAM_DBG(CAM_ISP,
- "res_id: 0x%x w: 0x%x h: 0x%x s: 0x%x sh: 0x%x addr: 0x%x",
- port_info->res_id, port_info->width,
- port_info->height, port_info->stride,
- port_info->slice_height, port_info->io_addr);
+ if (ctx->sfe_info.scratch_config->num_config != ctx->sfe_info.num_fetches) {
+ CAM_ERR(CAM_ISP,
+ "Mismatch in number of scratch buffers provided: %u expected: %u",
+ ctx->sfe_info.scratch_config->num_config, ctx->sfe_info.num_fetches);
+ rc = -EINVAL;
}

- ctx->sfe_info.scratch_config->num_config = scratch_config->num_ports;
- return 0;
+end:
+ return rc;
}

static inline int __cam_isp_sfe_send_cache_config(
@@ -8231,7 +8320,7 @@ static int cam_ife_hw_mgr_update_scratch_offset(
struct cam_isp_vfe_wm_config *wm_config)
{
uint32_t res_id;
- struct cam_sfe_scratch_buf_info *port_info;
+ struct cam_ife_sfe_scratch_buf_info *port_info;

if ((wm_config->port_type - CAM_ISP_SFE_OUT_RES_RDI_0) >=
ctx->sfe_info.num_fetches)
@@ -8627,6 +8716,63 @@ static int cam_isp_blob_ife_init_config_update(
return rc;
}

+static int cam_isp_validate_scratch_buffer_blob(
+ uint32_t blob_size,
+ struct cam_ife_hw_mgr_ctx *ife_mgr_ctx,
+ struct cam_isp_sfe_init_scratch_buf_config *scratch_config)
+{
+ if (!(ife_mgr_ctx->flags.is_sfe_fs ||
+ ife_mgr_ctx->flags.is_sfe_shdr)) {
+ CAM_ERR(CAM_ISP,
+ "Not SFE sHDR/FS context: %u scratch buf blob not supported",
+ ife_mgr_ctx->ctx_index);
+ return -EINVAL;
+ }
+
+ if (blob_size <
+ sizeof(struct cam_isp_sfe_init_scratch_buf_config)) {
+ CAM_ERR(CAM_ISP, "Invalid blob size %u", blob_size);
+ return -EINVAL;
+ }
+
+ if ((scratch_config->num_ports >
+ (CAM_SFE_FE_RDI_NUM_MAX + CAM_IFE_SCRATCH_NUM_MAX)) ||
+ (scratch_config->num_ports == 0)) {
+ CAM_ERR(CAM_ISP,
+ "Invalid num_ports %u in scratch buf config",
+ scratch_config->num_ports);
+ return -EINVAL;
+ }
+
+ /* Check for integer overflow */
+ if (scratch_config->num_ports != 1) {
+ if (sizeof(struct cam_isp_sfe_scratch_buf_info) >
+ ((UINT_MAX -
+ sizeof(struct cam_isp_sfe_init_scratch_buf_config)) /
+ (scratch_config->num_ports - 1))) {
+ CAM_ERR(CAM_ISP,
+ "Max size exceeded in scratch config num_ports: %u size per port: %lu",
+ scratch_config->num_ports,
+ sizeof(struct cam_isp_sfe_scratch_buf_info));
+ return -EINVAL;
+ }
+ }
+
+ if (blob_size <
+ (sizeof(struct cam_isp_sfe_init_scratch_buf_config) +
+ (scratch_config->num_ports - 1) *
+ sizeof(struct cam_isp_sfe_scratch_buf_info))) {
+ CAM_ERR(CAM_ISP, "Invalid blob size: %u expected: %lu",
+ blob_size,
+ sizeof(struct cam_isp_sfe_init_scratch_buf_config) +
+ (scratch_config->num_ports - 1) *
+ sizeof(struct cam_isp_sfe_scratch_buf_info));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int cam_isp_packet_generic_blob_handler(void *user_data,
uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data)
{
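The cam_isp_validate_scratch_buffer_blob() helper added above centralizes the checks both blob handlers need: the blob must hold a cam_isp_sfe_init_scratch_buf_config header plus (num_ports - 1) trailing cam_isp_sfe_scratch_buf_info entries, and the (num_ports - 1) * sizeof(entry) product is guarded against 32-bit wrap-around by dividing UINT_MAX before multiplying. A minimal standalone sketch of that arithmetic, using stand-in struct definitions rather than the real cam_isp UAPI types:

	#include <limits.h>
	#include <stdint.h>

	struct scratch_buf_info { uint32_t fields[8]; };      /* stand-in entry */
	struct scratch_buf_config {                           /* stand-in header */
		uint32_t num_ports;
		struct scratch_buf_info port_scratch_cfg[1];  /* one entry inlined */
	};

	/* Returns 0 when blob_size can carry num_ports entries without overflow */
	static int validate_blob_size(uint32_t blob_size, uint32_t num_ports)
	{
		uint32_t base = (uint32_t)sizeof(struct scratch_buf_config);
		uint32_t per = (uint32_t)sizeof(struct scratch_buf_info);

		if (num_ports == 0)
			return -1;
		/* division first, so (num_ports - 1) * per cannot wrap UINT_MAX */
		if (num_ports != 1 && per > (UINT_MAX - base) / (num_ports - 1))
			return -1;
		if (blob_size < base + (num_ports - 1) * per)
			return -1;
		return 0;
	}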
@@ -9136,6 +9282,23 @@ static int cam_isp_packet_generic_blob_handler(void *user_data,
if (rc)
CAM_ERR(CAM_ISP, "Discard initial frames update failed rc: %d", rc);

+ }
+ break;
+ case CAM_ISP_GENERIC_BLOB_TYPE_SFE_SCRATCH_BUF_CFG: {
+ struct cam_isp_sfe_init_scratch_buf_config *scratch_config;
+
+ scratch_config =
+ (struct cam_isp_sfe_init_scratch_buf_config *)blob_data;
+
+ rc = cam_isp_validate_scratch_buffer_blob(blob_size,
+ ife_mgr_ctx, scratch_config);
+ if (rc)
+ return rc;
+
+ rc = cam_isp_blob_ife_scratch_buf_update(
+ scratch_config, prepare);
+ if (rc)
+ CAM_ERR(CAM_ISP, "IFE scratch buffer update failed rc: %d", rc);
}
break;
case CAM_ISP_GENERIC_BLOB_TYPE_SFE_CLOCK_CONFIG:
@@ -9143,7 +9306,6 @@ static int cam_isp_packet_generic_blob_handler(void *user_data,
case CAM_ISP_GENERIC_BLOB_TYPE_SFE_OUT_CONFIG:
case CAM_ISP_GENERIC_BLOB_TYPE_SFE_HFR_CONFIG:
case CAM_ISP_GENERIC_BLOB_TYPE_SFE_FE_CONFIG:
- case CAM_ISP_GENERIC_BLOB_TYPE_SFE_SCRATCH_BUF_CFG:
case CAM_ISP_GENERIC_BLOB_TYPE_SFE_EXP_ORDER_CFG:
case CAM_ISP_GENERIC_BLOB_TYPE_FPS_CONFIG:
break;
@@ -9481,65 +9643,18 @@ static int cam_sfe_packet_generic_blob_handler(void *user_data,
case CAM_ISP_GENERIC_BLOB_TYPE_SFE_SCRATCH_BUF_CFG: {
struct cam_isp_sfe_init_scratch_buf_config *scratch_config;

- if (!(ife_mgr_ctx->flags.is_sfe_fs ||
- ife_mgr_ctx->flags.is_sfe_shdr)) {
- CAM_ERR(CAM_ISP,
- "Not SFE sHDR/FS context: %u scratch buf blob not supported",
- ife_mgr_ctx->ctx_index);
- return -EINVAL;
- }
-
- if (blob_size <
- sizeof(struct cam_isp_sfe_init_scratch_buf_config)) {
- CAM_ERR(CAM_ISP, "Invalid blob size %u", blob_size);
- return -EINVAL;
- }
-
scratch_config =
(struct cam_isp_sfe_init_scratch_buf_config *)blob_data;

- if (scratch_config->num_ports > CAM_SFE_FE_RDI_NUM_MAX ||
- scratch_config->num_ports == 0) {
- CAM_ERR(CAM_ISP,
- "Invalid num_ports %u in scratch buf config",
- scratch_config->num_ports);
- return -EINVAL;
- }
-
- /* Check for integer overflow */
- if (scratch_config->num_ports != 1) {
- if (sizeof(struct cam_isp_sfe_scratch_buf_info) >
- ((UINT_MAX -
- sizeof(
- struct cam_isp_sfe_init_scratch_buf_config)) /
- (scratch_config->num_ports - 1))) {
- CAM_ERR(CAM_ISP,
- "Max size exceeded in scratch config num_ports: %u size per port: %lu",
- scratch_config->num_ports,
- sizeof(
- struct cam_isp_sfe_scratch_buf_info));
- return -EINVAL;
- }
- }
-
- if (blob_size <
- (sizeof(struct cam_isp_sfe_init_scratch_buf_config) +
- (scratch_config->num_ports - 1) *
- sizeof(struct cam_isp_sfe_scratch_buf_info))) {
- CAM_ERR(CAM_ISP, "Invalid blob size: %u expected: %lu",
- blob_size,
- sizeof(
- struct cam_isp_sfe_init_scratch_buf_config) +
- (scratch_config->num_ports - 1) *
- sizeof(
- struct cam_isp_sfe_scratch_buf_info));
- return -EINVAL;
- }
+ rc = cam_isp_validate_scratch_buffer_blob(blob_size,
+ ife_mgr_ctx, scratch_config);
+ if (rc)
+ return rc;

rc = cam_isp_blob_sfe_scratch_buf_update(
scratch_config, prepare);
if (rc)
- CAM_ERR(CAM_ISP, "SFE scratch buffer update failed");
+ CAM_ERR(CAM_ISP, "SFE scratch buffer update failed rc: %d", rc);
}
break;
case CAM_ISP_GENERIC_BLOB_TYPE_SFE_FE_CONFIG: {
@@ -9712,14 +9827,14 @@ static inline bool cam_isp_sfe_validate_for_scratch_buf_config(
}

static int cam_isp_sfe_send_scratch_buf_upd(
- uint32_t remaining_size,
- enum cam_isp_hw_cmd_type cmd_type,
- struct cam_isp_resource_node *hw_res,
- struct cam_sfe_scratch_buf_info *buf_info,
- uint32_t *cpu_addr,
- uint32_t *used_bytes)
-{
- int rc;
+ uint32_t remaining_size,
+ enum cam_isp_hw_cmd_type cmd_type,
+ struct cam_isp_resource_node *hw_res,
+ struct cam_ife_sfe_scratch_buf_info *buf_info,
+ uint32_t *cpu_addr,
+ uint32_t *used_bytes)
+{
+ int rc, i;
struct cam_isp_hw_get_cmd_update update_buf;
struct cam_isp_hw_get_wm_update wm_update;
dma_addr_t io_addr[CAM_PACKET_MAX_PLANES];
@@ -9731,7 +9846,14 @@ static int cam_isp_sfe_send_scratch_buf_upd(
update_buf.use_scratch_cfg = true;

wm_update.num_buf = 1;
- io_addr[0] = buf_info->io_addr;
+ /*
+ * Update same scratch buffer for different planes,
+ * when used for IFE clients, same scratch buffer
+ * is configured to both per plane clients
+ */
+ for (i = 0; i < CAM_PACKET_MAX_PLANES; i++)
+ io_addr[i] = buf_info->io_addr;
+
wm_update.image_buf = io_addr;
wm_update.width = buf_info->width;
wm_update.height = buf_info->height;
@@ -9747,13 +9869,12 @@ static int cam_isp_sfe_send_scratch_buf_upd(
cmd_type, &update_buf,
sizeof(struct cam_isp_hw_get_cmd_update));
if (rc) {
- CAM_ERR(CAM_ISP, "get buf cmd error:%d",
- hw_res->res_id);
- rc = -ENOMEM;
+ CAM_ERR(CAM_ISP, "Failed to send cmd: %u res: %u rc: %d",
+ cmd_type, hw_res->res_id, rc);
return rc;
}

- CAM_DBG(CAM_ISP, "Scratch buf configured for: 0x%x",
+ CAM_DBG(CAM_ISP, "Scratch buf configured for res: 0x%x",
hw_res->res_id);

/* Update used bytes if update is via CDM */
@@ -9777,8 +9898,8 @@ static int cam_isp_sfe_add_scratch_buffer_cfg(
uint32_t used_bytes = 0, remain_size = 0;
uint32_t io_cfg_used_bytes;
uint32_t *cpu_addr = NULL;
- struct cam_sfe_scratch_buf_info *buf_info;
- struct cam_isp_hw_mgr_res *hw_mgr_res;
+ struct cam_ife_sfe_scratch_buf_info *buf_info;
+ struct cam_isp_hw_mgr_res *hw_mgr_res;

if (prepare->num_hw_update_entries + 1 >=
prepare->max_hw_update_entries) {
@@ -9916,6 +10037,87 @@ static int cam_isp_sfe_add_scratch_buffer_cfg(
return rc;
}

+static int cam_isp_ife_add_scratch_buffer_cfg(
+ uint32_t base_idx,
+ uint32_t scratch_cfg_mask,
+ struct cam_hw_prepare_update_args *prepare,
+ struct cam_kmd_buf_info *kmd_buf_info,
+ struct cam_isp_hw_mgr_res *res_list_isp_out,
+ struct cam_ife_hw_mgr_ctx *ctx)
+{
+ int i, j, res_id, rc = 0;
+ uint32_t used_bytes = 0, remain_size = 0, io_cfg_used_bytes;
+ uint32_t *cpu_addr = NULL;
+ struct cam_ife_sfe_scratch_buf_info *buf_info;
+ struct cam_isp_hw_mgr_res *hw_mgr_res;
+
+ if (prepare->num_hw_update_entries + 1 >=
+ prepare->max_hw_update_entries) {
+ CAM_ERR(CAM_ISP, "Insufficient HW entries :%d %d",
+ prepare->num_hw_update_entries,
+ prepare->max_hw_update_entries);
+ return -EINVAL;
+ }
+
+ io_cfg_used_bytes = 0;
+
+ /* Update scratch buffer for IFE WMs */
+ for (i = 0; i < ctx->sfe_info.ife_scratch_config->num_config; i++) {
+ /*
+ * Configure scratch only if the bit mask is not set for the given port,
+ * this is determined after parsing all the IO config buffers
+ */
+ if ((BIT(i) & scratch_cfg_mask))
+ continue;
+
+ res_id = ctx->sfe_info.ife_scratch_config->buf_info[i].res_id & 0xFF;
+
+ hw_mgr_res = &res_list_isp_out[res_id];
+ for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) {
+ if (!hw_mgr_res->hw_res[j])
+ continue;
+
+ if (hw_mgr_res->hw_res[j]->hw_intf->hw_idx != base_idx)
+ continue;
+
+ if ((kmd_buf_info->used_bytes + io_cfg_used_bytes) <
+ kmd_buf_info->size) {
+ remain_size = kmd_buf_info->size -
+ (kmd_buf_info->used_bytes +
+ io_cfg_used_bytes);
+ } else {
+ CAM_ERR(CAM_ISP,
+ "no free kmd memory for base %u",
+ base_idx);
+ rc = -ENOMEM;
+ return rc;
+ }
+
+ cpu_addr = kmd_buf_info->cpu_addr +
+ kmd_buf_info->used_bytes / 4 + io_cfg_used_bytes / 4;
+ buf_info = &ctx->sfe_info.ife_scratch_config->buf_info[i];
+ CAM_DBG(CAM_ISP, "WM res_id: 0x%x io_addr: %pK",
+ hw_mgr_res->hw_res[j]->res_id, buf_info->io_addr);
+
+ rc = cam_isp_sfe_send_scratch_buf_upd(
+ remain_size,
+ CAM_ISP_HW_CMD_GET_BUF_UPDATE,
+ hw_mgr_res->hw_res[j], buf_info,
+ cpu_addr, &used_bytes);
+ if (rc)
+ return rc;
+
+ io_cfg_used_bytes += used_bytes;
+ }
+ }
+
+ if (io_cfg_used_bytes)
+ cam_ife_mgr_update_hw_entries_util(
+ CAM_ISP_IOCFG_BL, io_cfg_used_bytes, kmd_buf_info, prepare);
+
+ return rc;
+}
+
static int cam_ife_mgr_csid_add_reg_update(struct cam_ife_hw_mgr_ctx *ctx,
struct cam_hw_prepare_update_args *prepare,
struct cam_kmd_buf_info *kmd_buf)
@@ -10120,6 +10322,32 @@ static int cam_ife_hw_mgr_update_cmd_buffer(
return rc;
}

+static void cam_ife_hw_mgr_check_if_scratch_is_needed(
+ struct cam_ife_hw_mgr_ctx *ctx,
+ struct cam_isp_check_io_cfg_for_scratch *check_for_scratch)
+{
+ /* Validate for scratch buffer use-cases sHDR/FS */
+ if (!((ctx->flags.is_sfe_fs) || (ctx->flags.is_sfe_shdr)))
+ return;
+
+ /* For SFE use number of fetches = number of scratch buffers needed */
+ check_for_scratch->sfe_scratch_res_info.num_active_fe_rdis =
+ ctx->sfe_info.num_fetches;
+ check_for_scratch->validate_for_sfe = true;
+
+ /* Check if IFE has any scratch buffer */
+ if (ctx->sfe_info.ife_scratch_config->num_config) {
+ int i;
+
+ check_for_scratch->validate_for_ife = true;
+ for (i = 0; i < ctx->sfe_info.ife_scratch_config->num_config; i++) {
+ check_for_scratch->ife_scratch_res_info.ife_scratch_resources[i] =
+ ctx->sfe_info.ife_scratch_config->buf_info[i].res_id;
+ check_for_scratch->ife_scratch_res_info.num_ports++;
+ }
+ }
+}
+
static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
void *prepare_hw_update_args)
{
@@ -10137,8 +10365,8 @@ static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
struct cam_isp_prepare_hw_update_data *prepare_hw_data;
struct cam_isp_frame_header_info frame_header_info;
struct list_head *res_list_ife_rd_tmp = NULL;
- struct cam_isp_check_sfe_fe_io_cfg sfe_fe_chk_cfg;
struct cam_isp_cmd_buf_count cmd_buf_count = {0};
+ struct cam_isp_check_io_cfg_for_scratch check_for_scratch = {0};

if (!hw_mgr_priv || !prepare_hw_update_args) {
CAM_ERR(CAM_ISP, "Invalid args");
@@ -10211,6 +10439,8 @@ static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
else
prepare_hw_data->packet_opcode_type = CAM_ISP_PACKET_UPDATE_DEV;

+ cam_ife_hw_mgr_check_if_scratch_is_needed(ctx, &check_for_scratch);
+
for (i = 0; i < ctx->num_base; i++) {

memset(&frame_header_info, 0,
@@ -10230,15 +10460,6 @@ static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
goto end;
}

- sfe_fe_chk_cfg.sfe_fe_enabled = false;
- if ((ctx->base[i].hw_type == CAM_ISP_HW_TYPE_SFE) &&
- ((ctx->flags.is_sfe_fs) || (ctx->flags.is_sfe_shdr))) {
- sfe_fe_chk_cfg.sfe_fe_enabled = true;
- sfe_fe_chk_cfg.sfe_rdi_cfg_mask = 0;
- sfe_fe_chk_cfg.num_active_fe_rdis =
- ctx->sfe_info.num_fetches;
- }
-
/* get IO buffers */
if (ctx->base[i].hw_type == CAM_ISP_HW_TYPE_VFE)
rc = cam_isp_add_io_buffers(
@@ -10251,7 +10472,7 @@ static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
(CAM_ISP_IFE_OUT_RES_BASE + max_ife_out_res),
fill_ife_fence,
CAM_ISP_HW_TYPE_VFE, &frame_header_info,
- &sfe_fe_chk_cfg);
+ &check_for_scratch);
else if (ctx->base[i].hw_type == CAM_ISP_HW_TYPE_SFE)
rc = cam_isp_add_io_buffers(
hw_mgr->mgr_common.img_iommu_hdl,
@@ -10262,7 +10483,7 @@ static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
CAM_ISP_SFE_OUT_RES_BASE,
CAM_ISP_SFE_OUT_RES_MAX, fill_sfe_fence,
CAM_ISP_HW_TYPE_SFE, &frame_header_info,
- &sfe_fe_chk_cfg);
+ &check_for_scratch);
if (rc) {
CAM_ERR(CAM_ISP,
"Failed in io buffers, i=%d, rc=%d hw_type=%s",
@@ -10271,33 +10492,27 @@ static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
goto end;
}

- /* fence map table entries need to fill only once in the loop */
- if ((ctx->base[i].hw_type ==
- CAM_ISP_HW_TYPE_SFE) &&
- fill_sfe_fence)
- fill_sfe_fence = false;
- else if ((ctx->base[i].hw_type ==
- CAM_ISP_HW_TYPE_VFE) &&
- fill_ife_fence)
- fill_ife_fence = false;
-
/*
* Add scratch buffer if there no output buffer for RDI WMs/RMs
* only for UPDATE packets. For INIT we could have ePCR enabled
* based on that decide to configure scratch via AHB at
* stream on or not
*/
- if (sfe_fe_chk_cfg.sfe_fe_enabled) {
- if ((sfe_fe_chk_cfg.sfe_rdi_cfg_mask) !=
+ if ((check_for_scratch.validate_for_sfe) &&
+ (ctx->base[i].hw_type == CAM_ISP_HW_TYPE_SFE) && (fill_sfe_fence)) {
+ struct cam_isp_sfe_scratch_buf_res_info *sfe_res_info =
+ &check_for_scratch.sfe_scratch_res_info;
+
+ if ((sfe_res_info->sfe_rdi_cfg_mask) !=
((1 << ctx->sfe_info.num_fetches) - 1)) {
if (prepare_hw_data->packet_opcode_type ==
CAM_ISP_PACKET_UPDATE_DEV) {
CAM_DBG(CAM_ISP,
- "Adding scratch buffer cfg_mask expected: 0x%x actual: 0x%x",
+ "Adding SFE scratch buffer cfg_mask expected: 0x%x actual: 0x%x",
((1 << ctx->sfe_info.num_fetches) - 1),
- sfe_fe_chk_cfg.sfe_rdi_cfg_mask);
+ sfe_res_info->sfe_rdi_cfg_mask);
rc = cam_isp_sfe_add_scratch_buffer_cfg(
- ctx->base[i].idx, sfe_fe_chk_cfg.sfe_rdi_cfg_mask,
+ ctx->base[i].idx, sfe_res_info->sfe_rdi_cfg_mask,
prepare, &kmd_buf, ctx->res_list_sfe_out,
&ctx->res_list_ife_in_rd, ctx);
if (rc)
@@ -10309,7 +10524,36 @@ static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
}
}

- memset(&sfe_fe_chk_cfg, 0, sizeof(sfe_fe_chk_cfg));
+ if ((check_for_scratch.validate_for_ife) &&
+ (ctx->base[i].hw_type == CAM_ISP_HW_TYPE_VFE) && (fill_ife_fence)) {
+ struct cam_isp_ife_scratch_buf_res_info *ife_res_info =
+ &check_for_scratch.ife_scratch_res_info;
+
+ /* Config IFE scratch only for update packets only */
+ if ((ife_res_info->ife_scratch_cfg_mask) !=
+ ((1 << ife_res_info->num_ports) - 1)) {
+ if (prepare_hw_data->packet_opcode_type ==
+ CAM_ISP_PACKET_UPDATE_DEV) {
+ CAM_DBG(CAM_ISP,
+ "Adding IFE scratch buffer cfg_mask expected: 0x%x actual: 0x%x",
+ ((1 << ife_res_info->num_ports) - 1),
+ ife_res_info->ife_scratch_cfg_mask);
+ rc = cam_isp_ife_add_scratch_buffer_cfg(
+ ctx->base[i].idx,
+ ife_res_info->ife_scratch_cfg_mask, prepare,
+ &kmd_buf, ctx->res_list_ife_out, ctx);
+ if (rc)
+ goto end;
+ }
+ }
+ }
+
+ /* fence map table entries need to fill only once in the loop */
+ if ((ctx->base[i].hw_type == CAM_ISP_HW_TYPE_SFE) && fill_sfe_fence)
+ fill_sfe_fence = false;
+ else if ((ctx->base[i].hw_type == CAM_ISP_HW_TYPE_VFE) && fill_ife_fence)
+ fill_ife_fence = false;
+
if (frame_header_info.frame_header_res_id &&
frame_header_enable) {
frame_header_enable = false;
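In the loop above, cam_isp_add_io_buffers() records, for every scratch-capable port that received a real output buffer in the IO config, a bit in sfe_rdi_cfg_mask / ife_scratch_cfg_mask inside cam_isp_check_io_cfg_for_scratch, and scratch programming is added only when the mask is not fully populated, i.e. not equal to (1 << num_ports) - 1. A small illustration of that bookkeeping with hypothetical helper names (not part of the driver):

	#include <stdbool.h>
	#include <stdint.h>

	/* set while parsing IO configs: port_idx got a real user buffer */
	static inline void mark_port_has_real_buf(uint32_t *cfg_mask, uint32_t port_idx)
	{
		*cfg_mask |= (1U << port_idx);
	}

	/* scratch is programmed only for the bits left clear */
	static inline bool scratch_needed(uint32_t cfg_mask, uint32_t num_ports)
	{
		return cfg_mask != ((1U << num_ports) - 1U);
	}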
@@ -10757,6 +11001,45 @@ int cam_isp_config_csid_rup_aup(
return rc;
}

+static int cam_ife_mgr_configure_scratch_for_ife(
+ struct cam_ife_hw_mgr_ctx *ctx)
+{
+ int i, j, rc = 0;
+ uint32_t res_id;
+ struct cam_isp_hw_mgr_res *hw_mgr_res;
+ struct cam_ife_sfe_scratch_buf_info *port_info;
+ struct cam_ife_scratch_buf_cfg *ife_buf_info;
+ struct cam_isp_hw_mgr_res *res_list_ife_out = NULL;
+
+ ife_buf_info = ctx->sfe_info.ife_scratch_config;
+ res_list_ife_out = ctx->res_list_ife_out;
+
+ for (i = 0; i < ife_buf_info->num_config; i++) {
+ res_id = ife_buf_info->buf_info[i].res_id & 0xFF;
+ port_info = &ife_buf_info->buf_info[i];
+ hw_mgr_res = &res_list_ife_out[res_id];
+
+ for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) {
+ /* j = 1 is not valid for this use-case */
+ if (!hw_mgr_res->hw_res[j])
+ continue;
+
+ CAM_DBG(CAM_ISP,
+ "Configure scratch for IFE res: 0x%x io_addr %pK",
+ ife_buf_info->buf_info[i].res_id, port_info->io_addr);
+
+ rc = cam_isp_sfe_send_scratch_buf_upd(0x0,
+ CAM_ISP_HW_CMD_BUF_UPDATE,
+ hw_mgr_res->hw_res[j], port_info,
+ NULL, NULL);
+ if (rc)
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
/*
* Scratch buffer is for sHDR/FS usescases involing SFE RDI0-2
* There is no possibility of dual in this case, hence
@@ -10767,10 +11050,10 @@ static int cam_ife_mgr_prog_default_settings(
bool need_rup_aup, struct cam_ife_hw_mgr_ctx *ctx)
{
int i, j, res_id, rc = 0;
- struct cam_isp_hw_mgr_res *hw_mgr_res;
- struct cam_sfe_scratch_buf_info *buf_info;
- struct list_head *res_list_in_rd = NULL;
- struct cam_isp_hw_mgr_res *res_list_sfe_out = NULL;
+ struct cam_isp_hw_mgr_res *hw_mgr_res;
+ struct cam_ife_sfe_scratch_buf_info *buf_info;
+ struct list_head *res_list_in_rd = NULL;
+ struct cam_isp_hw_mgr_res *res_list_sfe_out = NULL;

res_list_in_rd = &ctx->res_list_ife_in_rd;
res_list_sfe_out = ctx->res_list_sfe_out;
@@ -10842,6 +11125,13 @@ static int cam_ife_mgr_prog_default_settings(
}
}

+ /* Check for IFE scratch buffer */
+ if (ctx->sfe_info.ife_scratch_config->num_config) {
+ rc = cam_ife_mgr_configure_scratch_for_ife(ctx);
+ if (rc)
+ return rc;
+ }
+
/* Program rup & aup only at run time */
if (need_rup_aup) {
rc = cam_isp_config_csid_rup_aup(ctx);
@@ -12015,14 +12305,50 @@ static int cam_ife_hw_mgr_handle_hw_eof(
return 0;
}

+static bool cam_ife_hw_mgr_last_consumed_addr_check(
+ uint32_t last_consumed_addr, struct cam_ife_sfe_scratch_buf_info *buf_info)
+{
+ dma_addr_t final_addr;
+ uint32_t cmp_addr = 0;
+
+ final_addr = buf_info->io_addr + buf_info->offset;
+ cmp_addr = cam_smmu_is_expanded_memory() ?
+ CAM_36BIT_INTF_GET_IOVA_BASE(final_addr) : final_addr;
+ if (cmp_addr == last_consumed_addr)
+ return true;
+
+ return false;
+}
+
+static int cam_ife_hw_mgr_check_ife_scratch_buf_done(
+ struct cam_ife_scratch_buf_cfg *scratch_cfg,
+ uint32_t res_id, uint32_t last_consumed_addr)
+{
+ int rc = 0, i;
+ struct cam_ife_sfe_scratch_buf_info *buf_info;
+
+ for (i = 0; i < scratch_cfg->num_config; i++) {
+ if (scratch_cfg->buf_info[i].res_id == res_id) {
+ buf_info = &scratch_cfg->buf_info[i];
+
+ if (cam_ife_hw_mgr_last_consumed_addr_check(last_consumed_addr, buf_info)) {
+ CAM_DBG(CAM_ISP,
+ "IFE res:0x%x buf done for scratch - skip ctx notify",
+ buf_info->res_id);
+ rc = -EAGAIN;
+ }
+ }
+ }
+
+ return rc;
+}
+
static int cam_ife_hw_mgr_check_rdi_scratch_buf_done(
- const uint32_t ctx_index, struct cam_sfe_scratch_buf_cfg *scratch_cfg,
+ struct cam_sfe_scratch_buf_cfg *scratch_cfg,
uint32_t res_id, uint32_t last_consumed_addr)
{
int rc = 0;
- struct cam_sfe_scratch_buf_info *buf_info;
- dma_addr_t final_addr;
- uint32_t cmp_addr = 0;
+ struct cam_ife_sfe_scratch_buf_info *buf_info;

switch (res_id) {
case CAM_ISP_SFE_OUT_RES_RDI_0:
@@ -12032,12 +12358,10 @@ static int cam_ife_hw_mgr_check_rdi_scratch_buf_done(
if (!buf_info->config_done)
return 0;

- final_addr = buf_info->io_addr + buf_info->offset;
- cmp_addr = cam_smmu_is_expanded_memory() ?
- CAM_36BIT_INTF_GET_IOVA_BASE(final_addr) : final_addr;
- if (cmp_addr == last_consumed_addr) {
- CAM_DBG(CAM_ISP, "SFE RDI%u buf done for scratch - skip ctx notify",
- (res_id - CAM_ISP_SFE_OUT_RES_BASE));
+ if (cam_ife_hw_mgr_last_consumed_addr_check(last_consumed_addr, buf_info)) {
+ CAM_DBG(CAM_ISP,
+ "SFE RDI: 0x%x buf done for scratch - skip ctx notify",
+ buf_info->res_id);
rc = -EAGAIN;
}
break;
@@ -12048,6 +12372,32 @@ static int cam_ife_hw_mgr_check_rdi_scratch_buf_done(
return rc;
}

+static int cam_ife_hw_mgr_check_for_scratch_buf_done(
+ struct cam_ife_hw_mgr_ctx *ife_hw_mgr_ctx,
+ enum cam_isp_hw_type hw_type,
+ uint32_t res_id, uint32_t last_consumed_addr)
+{
+ int rc = 0;
+
+ switch (hw_type) {
+ case CAM_ISP_HW_TYPE_VFE:
+ if (ife_hw_mgr_ctx->sfe_info.ife_scratch_config->num_config)
+ rc = cam_ife_hw_mgr_check_ife_scratch_buf_done(
+ ife_hw_mgr_ctx->sfe_info.ife_scratch_config,
+ res_id, last_consumed_addr);
+ break;
+ case CAM_ISP_HW_TYPE_SFE:
+ rc = cam_ife_hw_mgr_check_rdi_scratch_buf_done(
+ ife_hw_mgr_ctx->sfe_info.scratch_config,
+ res_id, last_consumed_addr);
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
static int cam_ife_hw_mgr_handle_hw_buf_done(
struct cam_ife_hw_mgr_ctx *ife_hw_mgr_ctx,
struct cam_isp_hw_event_info *event_info)
@@ -12066,30 +12416,29 @@ static int cam_ife_hw_mgr_handle_hw_buf_done(
ife_hwr_irq_wm_done_cb = ife_hw_mgr_ctx->common.event_cb;
compdone_evt_info = (struct cam_isp_hw_compdone_event_info *)event_info->event_data;
- buf_done_event_data.num_handles = compdone_evt_info->num_res;
+ buf_done_event_data.num_handles = 0;

for (i = 0; i < compdone_evt_info->num_res; i++) {
- buf_done_event_data.resource_handle[i] =
- compdone_evt_info->res_id[i];
- buf_done_event_data.last_consumed_addr[i] =
- compdone_evt_info->last_consumed_addr[i];
-
- CAM_DBG(CAM_ISP, "Buf done for %s: %d res_id: 0x%x last consumed addr: 0x%x",
- ((event_info->hw_type == CAM_ISP_HW_TYPE_SFE) ? "SFE" : "IFE"),
- event_info->hw_idx, compdone_evt_info->res_id[i],
- compdone_evt_info->last_consumed_addr[i]);
-
- if (cam_ife_hw_mgr_is_shdr_fs_rdi_res(compdone_evt_info->res_id[i],
- ife_hw_mgr_ctx->flags.is_sfe_shdr,
- ife_hw_mgr_ctx->flags.is_sfe_fs)) {
- rc = cam_ife_hw_mgr_check_rdi_scratch_buf_done(
- ife_hw_mgr_ctx->ctx_index,
- ife_hw_mgr_ctx->sfe_info.scratch_config,
- compdone_evt_info->res_id[i],
+ CAM_DBG(CAM_ISP,
+ "Buf done for %s: %d res_id: 0x%x last consumed addr: 0x%x ctx: %u",
+ ((event_info->hw_type == CAM_ISP_HW_TYPE_SFE) ? "SFE" : "IFE"),
+ event_info->hw_idx, compdone_evt_info->res_id[i],
+ compdone_evt_info->last_consumed_addr[i], ife_hw_mgr_ctx->ctx_index);
+
+ /* Check scratch for sHDR/FS use-cases */
+ if (ife_hw_mgr_ctx->flags.is_sfe_fs || ife_hw_mgr_ctx->flags.is_sfe_shdr) {
+ rc = cam_ife_hw_mgr_check_for_scratch_buf_done(ife_hw_mgr_ctx,
+ event_info->hw_type, compdone_evt_info->res_id[i],
compdone_evt_info->last_consumed_addr[i]);
if (rc)
- goto end;
+ continue;
}
+
+ buf_done_event_data.resource_handle[buf_done_event_data.num_handles] =
+ compdone_evt_info->res_id[i];
+ buf_done_event_data.last_consumed_addr[buf_done_event_data.num_handles] =
+ compdone_evt_info->last_consumed_addr[i];
+ buf_done_event_data.num_handles++;
}

@@ -12097,12 +12446,13 @@ static int cam_ife_hw_mgr_handle_hw_buf_done(
return 0;

if (buf_done_event_data.num_handles > 0 && ife_hwr_irq_wm_done_cb) {
- CAM_DBG(CAM_ISP, "Notify ISP context");
+ CAM_DBG(CAM_ISP,
+ "Notify ISP context for %u handles in ctx: %u",
+ buf_done_event_data.num_handles, ife_hw_mgr_ctx->ctx_index);
ife_hwr_irq_wm_done_cb(ife_hw_mgr_ctx->common.cb_priv,
CAM_ISP_HW_EVENT_DONE, (void *)&buf_done_event_data);
}

-end:
return 0;
}
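With the change above, the completion handler no longer copies every res_id straight into buf_done_event_data; entries whose last consumed address matches a programmed scratch buffer are skipped and num_handles is compacted, so the ISP context is notified only for real user buffers. A standalone sketch of that compaction, with hypothetical types and a callback standing in for the driver's scratch-config lookup:

	#include <stdbool.h>
	#include <stdint.h>

	#define MAX_HANDLES 8

	struct done_event {
		uint32_t num_handles;
		uint32_t resource_handle[MAX_HANDLES];
		uint32_t last_consumed_addr[MAX_HANDLES];
	};

	/* is_scratch_addr() stands in for the per-resource scratch lookup */
	static void compact_done_event(struct done_event *out,
		const uint32_t *res_id, const uint32_t *addr, uint32_t num,
		bool (*is_scratch_addr)(uint32_t res_id, uint32_t addr))
	{
		uint32_t i;

		out->num_handles = 0;
		for (i = 0; i < num && out->num_handles < MAX_HANDLES; i++) {
			if (is_scratch_addr(res_id[i], addr[i]))
				continue;	/* scratch consumed: skip ctx notify */
			out->resource_handle[out->num_handles] = res_id[i];
			out->last_consumed_addr[out->num_handles] = addr[i];
			out->num_handles++;
		}
	}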