
msm: camera: isp: Add support for IFE scratch buffer

Add support to configure an IFE scratch buffer for sHDR/FS use-cases.
The BW limiter is enabled on an IFE output port to achieve the
desired spread in frame timing. When there is no packet in KMD to
apply, or a request has no buffer for this IFE output port, configure
the scratch buffer instead. This ensures the BW limiter comes into
effect and the frame is spread to achieve the desired FPS.

CRs-Fixed: 3045706
Change-Id: Id723e7c3d53e65cd7824827b12e86502e8bd91f0
Signed-off-by: Karthik Anantha Ram <[email protected]>
Karthik Anantha Ram, 3 years ago
commit 8a9fa2f908
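
The change below boils down to a per-request check: every IFE output
port that was given a scratch buffer is compared against the incoming
IO configs, and any port that did not receive a real buffer in this
request is programmed with its scratch buffer so the WM still writes
and the BW limiter keeps spreading the frame. A minimal sketch of that
decision, assuming a hypothetical helper program_ife_scratch() (the
actual driver path is cam_isp_ife_add_scratch_buffer_cfg() in the diff
below):

static void ife_scratch_fill_missing_bufs(
	struct cam_ife_scratch_buf_cfg *cfg, uint32_t io_cfg_mask)
{
	uint32_t i;

	for (i = 0; i < cfg->num_config; i++) {
		/* This port already got a real output buffer */
		if (io_cfg_mask & BIT(i))
			continue;

		/*
		 * No buffer for this IFE port: program its scratch
		 * buffer so the WM still writes and the BW limiter
		 * keeps the frame spread at the desired FPS.
		 */
		program_ife_scratch(&cfg->buf_info[i]);
	}
}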

+ 526 - 176
drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c

@@ -4933,10 +4933,20 @@ static int cam_ife_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
 		ife_ctx->sfe_info.scratch_config =
 			kzalloc(sizeof(struct cam_sfe_scratch_buf_cfg), GFP_KERNEL);
 		if (!ife_ctx->sfe_info.scratch_config) {
-			CAM_ERR(CAM_ISP, "Failed to allocate scratch config");
+			CAM_ERR(CAM_ISP, "Failed to allocate SFE scratch config");
 			rc = -ENOMEM;
 			goto free_cdm_cmd;
 		}
+
+		ife_ctx->sfe_info.ife_scratch_config =
+			kzalloc(sizeof(struct cam_ife_scratch_buf_cfg), GFP_KERNEL);
+		if (!ife_ctx->sfe_info.ife_scratch_config) {
+			CAM_ERR(CAM_ISP, "Failed to allocate IFE scratch config");
+			rc = -ENOMEM;
+			kfree(ife_ctx->sfe_info.scratch_config);
+			goto free_cdm_cmd;
+		}
+
 		/* Set scratch by default at stream on */
 		ife_ctx->sfe_info.skip_scratch_cfg_streamon = false;
 	}
@@ -6236,6 +6246,10 @@ static int cam_ife_mgr_stop_hw(void *hw_mgr_priv, void *stop_hw_args)
 		if (ctx->sfe_info.scratch_config)
 			memset(ctx->sfe_info.scratch_config, 0,
 				sizeof(struct cam_sfe_scratch_buf_cfg));
+
+		if (ctx->sfe_info.ife_scratch_config)
+			memset(ctx->sfe_info.ife_scratch_config, 0,
+				sizeof(struct cam_ife_scratch_buf_cfg));
 	}
 	ctx->sfe_info.skip_scratch_cfg_streamon = false;
 
@@ -6890,7 +6904,9 @@ static int cam_ife_mgr_release_hw(void *hw_mgr_priv,
 	ctx->num_acq_sfe_out = 0;
 
 	kfree(ctx->sfe_info.scratch_config);
+	kfree(ctx->sfe_info.ife_scratch_config);
 	ctx->sfe_info.scratch_config = NULL;
+	ctx->sfe_info.ife_scratch_config = NULL;
 
 	memset(&ctx->flags, 0, sizeof(struct cam_ife_hw_mgr_ctx_flags));
 	atomic_set(&ctx->overflow_pending, 0);
@@ -7252,32 +7268,125 @@ end:
 	return rc;
 }
 
+static int cam_isp_scratch_buf_update_util(
+	struct cam_isp_sfe_scratch_buf_info   *buffer_info,
+	struct cam_ife_sfe_scratch_buf_info   *port_info)
+{
+	int                   rc = 0;
+	int                   mmu_hdl;
+	size_t                size;
+	dma_addr_t            io_addr;
+	bool                  is_buf_secure;
+
+	is_buf_secure = cam_mem_is_secure_buf(buffer_info->mem_handle);
+	if (is_buf_secure) {
+		port_info->is_secure = true;
+		mmu_hdl = g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure;
+	} else {
+		port_info->is_secure = false;
+		mmu_hdl = g_ife_hw_mgr.mgr_common.img_iommu_hdl;
+	}
+
+	rc = cam_mem_get_io_buf(buffer_info->mem_handle,
+		mmu_hdl, &io_addr, &size, NULL);
+	if (rc) {
+		CAM_ERR(CAM_ISP,
+			"no scratch buf addr for res: 0x%x",
+			buffer_info->resource_type);
+		rc = -ENOMEM;
+		return rc;
+	}
+
+	port_info->res_id = buffer_info->resource_type;
+	port_info->io_addr = io_addr + buffer_info->offset;
+	port_info->width = buffer_info->width;
+	port_info->height = buffer_info->height;
+	port_info->stride = buffer_info->stride;
+	port_info->slice_height = buffer_info->slice_height;
+	port_info->offset = 0;
+	port_info->config_done = true;
+
+	CAM_DBG(CAM_ISP,
+		"res_id: 0x%x w: 0x%x h: 0x%x s: 0x%x sh: 0x%x addr: 0x%x",
+		port_info->res_id, port_info->width,
+		port_info->height, port_info->stride,
+		port_info->slice_height, port_info->io_addr);
+
+	return rc;
+}
+
+static int cam_isp_blob_ife_scratch_buf_update(
+	struct cam_isp_sfe_init_scratch_buf_config  *scratch_config,
+	struct cam_hw_prepare_update_args           *prepare)
+{
+	int rc = 0, i;
+	uint32_t                               res_id_out;
+	struct cam_ife_hw_mgr_ctx             *ctx = NULL;
+	struct cam_isp_sfe_scratch_buf_info   *buffer_info;
+	struct cam_ife_sfe_scratch_buf_info   *port_info;
+	struct cam_isp_hw_mgr_res             *ife_out_res;
+	struct cam_ife_hw_mgr                 *ife_hw_mgr;
+	struct cam_ife_scratch_buf_cfg        *ife_scratch_config;
+
+	ctx = prepare->ctxt_to_hw_map;
+	ife_hw_mgr = ctx->hw_mgr;
+	ife_scratch_config = ctx->sfe_info.ife_scratch_config;
+
+	for (i = 0; i < scratch_config->num_ports; i++) {
+		buffer_info = &scratch_config->port_scratch_cfg[i];
+		if (!cam_ife_hw_mgr_is_ife_out_port(buffer_info->resource_type))
+			continue;
+
+		res_id_out = buffer_info->resource_type & 0xFF;
+
+		CAM_DBG(CAM_ISP, "scratch config idx: %d res: 0x%x",
+			i, buffer_info->resource_type);
+
+		ife_out_res = &ctx->res_list_ife_out[res_id_out];
+		if (!ife_out_res->hw_res[0]) {
+			CAM_ERR(CAM_ISP,
+				"IFE rsrc_type: 0x%x not acquired, failing scratch config",
+				buffer_info->resource_type);
+			return -EINVAL;
+		}
+
+		if (ife_scratch_config->num_config >= CAM_IFE_SCRATCH_NUM_MAX) {
+			CAM_ERR(CAM_ISP,
+				"Incoming num of scratch buffers: %u exceeds max: %u",
+				ife_scratch_config->num_config, CAM_IFE_SCRATCH_NUM_MAX);
+			return -EINVAL;
+		}
+
+		port_info = &ife_scratch_config->buf_info[ife_scratch_config->num_config++];
+		rc = cam_isp_scratch_buf_update_util(buffer_info, port_info);
+		if (rc)
+			goto end;
+	}
+
+end:
+	return rc;
+}
+
 static int cam_isp_blob_sfe_scratch_buf_update(
 	struct cam_isp_sfe_init_scratch_buf_config  *scratch_config,
 	struct cam_hw_prepare_update_args           *prepare)
 {
-	int rc, i, mmu_hdl;
+	int rc = 0, i;
 	uint32_t                               res_id_out;
-	bool                                   is_buf_secure;
-	dma_addr_t                             io_addr;
-	size_t                                 size;
 	struct cam_ife_hw_mgr_ctx             *ctx = NULL;
 	struct cam_isp_sfe_scratch_buf_info   *buffer_info;
-	struct cam_sfe_scratch_buf_info       *port_info;
+	struct cam_ife_sfe_scratch_buf_info   *port_info;
 	struct cam_isp_hw_mgr_res             *sfe_out_res;
 	struct cam_ife_hw_mgr                 *ife_hw_mgr;
 
 	ctx = prepare->ctxt_to_hw_map;
 	ife_hw_mgr = ctx->hw_mgr;
-	if (scratch_config->num_ports != ctx->sfe_info.num_fetches)
-		CAM_WARN(CAM_ISP,
-			"Getting scratch buffer for %u ports on ctx: %u with num_fetches: %u",
-			scratch_config->num_ports, ctx->ctx_index, ctx->sfe_info.num_fetches);
-
-	CAM_DBG(CAM_ISP, "num_ports: %u", scratch_config->num_ports);
 
 	for (i = 0; i < scratch_config->num_ports; i++) {
 		buffer_info = &scratch_config->port_scratch_cfg[i];
+		if (!cam_ife_hw_mgr_is_sfe_out_port(buffer_info->resource_type))
+			continue;
+
 		res_id_out = buffer_info->resource_type & 0xFF;
 
 		CAM_DBG(CAM_ISP, "scratch config idx: %d res: 0x%x",
@@ -7298,42 +7407,22 @@ static int cam_isp_blob_sfe_scratch_buf_update(
 		}
 
 		port_info = &ctx->sfe_info.scratch_config->buf_info[res_id_out];
-		is_buf_secure = cam_mem_is_secure_buf(buffer_info->mem_handle);
-		if (is_buf_secure) {
-			port_info->is_secure = true;
-			mmu_hdl = ife_hw_mgr->mgr_common.img_iommu_hdl_secure;
-		} else {
-			port_info->is_secure = false;
-			mmu_hdl = ife_hw_mgr->mgr_common.img_iommu_hdl;
-		}
+		rc = cam_isp_scratch_buf_update_util(buffer_info, port_info);
+		if (rc)
+			goto end;
 
-		rc = cam_mem_get_io_buf(buffer_info->mem_handle,
-			mmu_hdl, &io_addr, &size, NULL);
-		if (rc) {
-			CAM_ERR(CAM_ISP,
-				"no scratch buf addr for res: 0x%x",
-				buffer_info->resource_type);
-			rc = -ENOMEM;
-			return rc;
-		}
+		ctx->sfe_info.scratch_config->num_config++;
+	}
 
-		port_info->res_id = buffer_info->resource_type;
-		port_info->io_addr = io_addr + buffer_info->offset;
-		port_info->width = buffer_info->width;
-		port_info->height = buffer_info->height;
-		port_info->stride = buffer_info->stride;
-		port_info->slice_height = buffer_info->slice_height;
-		port_info->offset = 0;
-		port_info->config_done = true;
-		CAM_DBG(CAM_ISP,
-			"res_id: 0x%x w: 0x%x h: 0x%x s: 0x%x sh: 0x%x addr: 0x%x",
-			port_info->res_id, port_info->width,
-			port_info->height, port_info->stride,
-			port_info->slice_height, port_info->io_addr);
+	if (ctx->sfe_info.scratch_config->num_config != ctx->sfe_info.num_fetches) {
+		CAM_ERR(CAM_ISP,
+			"Mismatch in number of scratch buffers provided: %u expected: %u",
+			ctx->sfe_info.scratch_config->num_config, ctx->sfe_info.num_fetches);
+		rc = -EINVAL;
 	}
 
-	ctx->sfe_info.scratch_config->num_config = scratch_config->num_ports;
-	return 0;
+end:
+	return rc;
 }
 
 static inline int __cam_isp_sfe_send_cache_config(
@@ -8231,7 +8320,7 @@ static int cam_ife_hw_mgr_update_scratch_offset(
 	struct cam_isp_vfe_wm_config          *wm_config)
 {
 	uint32_t res_id;
-	struct cam_sfe_scratch_buf_info       *port_info;
+	struct cam_ife_sfe_scratch_buf_info       *port_info;
 
 	if ((wm_config->port_type - CAM_ISP_SFE_OUT_RES_RDI_0) >=
 		ctx->sfe_info.num_fetches)
@@ -8627,6 +8716,63 @@ static int cam_isp_blob_ife_init_config_update(
 	return rc;
 }
 
+static int cam_isp_validate_scratch_buffer_blob(
+	uint32_t blob_size,
+	struct cam_ife_hw_mgr_ctx *ife_mgr_ctx,
+	struct cam_isp_sfe_init_scratch_buf_config *scratch_config)
+{
+	if (!(ife_mgr_ctx->flags.is_sfe_fs ||
+		ife_mgr_ctx->flags.is_sfe_shdr)) {
+		CAM_ERR(CAM_ISP,
+			"Not SFE sHDR/FS context: %u scratch buf blob not supported",
+			ife_mgr_ctx->ctx_index);
+		return -EINVAL;
+	}
+
+	if (blob_size <
+		sizeof(struct cam_isp_sfe_init_scratch_buf_config)) {
+		CAM_ERR(CAM_ISP, "Invalid blob size %u", blob_size);
+		return -EINVAL;
+	}
+
+	if ((scratch_config->num_ports >
+		(CAM_SFE_FE_RDI_NUM_MAX + CAM_IFE_SCRATCH_NUM_MAX)) ||
+		(scratch_config->num_ports == 0)) {
+		CAM_ERR(CAM_ISP,
+			"Invalid num_ports %u in scratch buf config",
+			scratch_config->num_ports);
+		return -EINVAL;
+	}
+
+	/* Check for integer overflow */
+	if (scratch_config->num_ports != 1) {
+		if (sizeof(struct cam_isp_sfe_scratch_buf_info) >
+			((UINT_MAX -
+			sizeof(struct cam_isp_sfe_init_scratch_buf_config)) /
+			(scratch_config->num_ports - 1))) {
+			CAM_ERR(CAM_ISP,
+				"Max size exceeded in scratch config num_ports: %u size per port: %lu",
+				scratch_config->num_ports,
+				sizeof(struct cam_isp_sfe_scratch_buf_info));
+			return -EINVAL;
+		}
+	}
+
+	if (blob_size <
+		(sizeof(struct cam_isp_sfe_init_scratch_buf_config) +
+		(scratch_config->num_ports - 1) *
+		sizeof(struct cam_isp_sfe_scratch_buf_info))) {
+		CAM_ERR(CAM_ISP, "Invalid blob size: %u expected: %lu",
+			blob_size,
+			sizeof(struct cam_isp_sfe_init_scratch_buf_config) +
+			(scratch_config->num_ports - 1) *
+			sizeof(struct cam_isp_sfe_scratch_buf_info));
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int cam_isp_packet_generic_blob_handler(void *user_data,
 	uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data)
 {
@@ -9136,6 +9282,23 @@ static int cam_isp_packet_generic_blob_handler(void *user_data,
 		if (rc)
 			CAM_ERR(CAM_ISP, "Discard initial frames update failed rc: %d", rc);
 
+	}
+		break;
+	case CAM_ISP_GENERIC_BLOB_TYPE_SFE_SCRATCH_BUF_CFG: {
+		struct cam_isp_sfe_init_scratch_buf_config *scratch_config;
+
+		scratch_config =
+			(struct cam_isp_sfe_init_scratch_buf_config *)blob_data;
+
+		rc = cam_isp_validate_scratch_buffer_blob(blob_size,
+			ife_mgr_ctx, scratch_config);
+		if (rc)
+			return rc;
+
+		rc = cam_isp_blob_ife_scratch_buf_update(
+			scratch_config, prepare);
+		if (rc)
+			CAM_ERR(CAM_ISP, "IFE scratch buffer update failed rc: %d", rc);
 	}
 		break;
 	case CAM_ISP_GENERIC_BLOB_TYPE_SFE_CLOCK_CONFIG:
@@ -9143,7 +9306,6 @@ static int cam_isp_packet_generic_blob_handler(void *user_data,
 	case CAM_ISP_GENERIC_BLOB_TYPE_SFE_OUT_CONFIG:
 	case CAM_ISP_GENERIC_BLOB_TYPE_SFE_HFR_CONFIG:
 	case CAM_ISP_GENERIC_BLOB_TYPE_SFE_FE_CONFIG:
-	case CAM_ISP_GENERIC_BLOB_TYPE_SFE_SCRATCH_BUF_CFG:
 	case CAM_ISP_GENERIC_BLOB_TYPE_SFE_EXP_ORDER_CFG:
 	case CAM_ISP_GENERIC_BLOB_TYPE_FPS_CONFIG:
 		break;
@@ -9481,65 +9643,18 @@ static int cam_sfe_packet_generic_blob_handler(void *user_data,
 	case CAM_ISP_GENERIC_BLOB_TYPE_SFE_SCRATCH_BUF_CFG: {
 		struct cam_isp_sfe_init_scratch_buf_config *scratch_config;
 
-		if (!(ife_mgr_ctx->flags.is_sfe_fs ||
-			ife_mgr_ctx->flags.is_sfe_shdr)) {
-			CAM_ERR(CAM_ISP,
-				"Not SFE sHDR/FS context: %u scratch buf blob not supported",
-				ife_mgr_ctx->ctx_index);
-			return -EINVAL;
-		}
-
-		if (blob_size <
-			sizeof(struct cam_isp_sfe_init_scratch_buf_config)) {
-			CAM_ERR(CAM_ISP, "Invalid blob size %u", blob_size);
-			return -EINVAL;
-		}
-
 		scratch_config =
 			(struct cam_isp_sfe_init_scratch_buf_config *)blob_data;
 
-		if (scratch_config->num_ports > CAM_SFE_FE_RDI_NUM_MAX ||
-			scratch_config->num_ports == 0) {
-			CAM_ERR(CAM_ISP,
-				"Invalid num_ports %u in scratch buf config",
-				scratch_config->num_ports);
-			return -EINVAL;
-		}
-
-		/* Check for integer overflow */
-		if (scratch_config->num_ports != 1) {
-			if (sizeof(struct cam_isp_sfe_scratch_buf_info) >
-				((UINT_MAX -
-				sizeof(
-				struct cam_isp_sfe_init_scratch_buf_config)) /
-				(scratch_config->num_ports - 1))) {
-				CAM_ERR(CAM_ISP,
-					"Max size exceeded in scratch config num_ports: %u size per port: %lu",
-					scratch_config->num_ports,
-					sizeof(
-					struct cam_isp_sfe_scratch_buf_info));
-				return -EINVAL;
-			}
-		}
-
-		if (blob_size <
-			(sizeof(struct cam_isp_sfe_init_scratch_buf_config) +
-			(scratch_config->num_ports - 1) *
-			sizeof(struct cam_isp_sfe_scratch_buf_info))) {
-			CAM_ERR(CAM_ISP, "Invalid blob size: %u expected: %lu",
-				blob_size,
-				sizeof(
-				struct cam_isp_sfe_init_scratch_buf_config) +
-				(scratch_config->num_ports - 1) *
-				sizeof(
-				struct cam_isp_sfe_scratch_buf_info));
-			return -EINVAL;
-		}
+		rc = cam_isp_validate_scratch_buffer_blob(blob_size,
+			ife_mgr_ctx, scratch_config);
+		if (rc)
+			return rc;
 
 		rc = cam_isp_blob_sfe_scratch_buf_update(
 			scratch_config, prepare);
 		if (rc)
-			CAM_ERR(CAM_ISP, "SFE scratch buffer update failed");
+			CAM_ERR(CAM_ISP, "SFE scratch buffer update failed rc: %d", rc);
 	}
 		break;
 	case CAM_ISP_GENERIC_BLOB_TYPE_SFE_FE_CONFIG: {
@@ -9712,14 +9827,14 @@ static inline bool cam_isp_sfe_validate_for_scratch_buf_config(
 }
 
 static int cam_isp_sfe_send_scratch_buf_upd(
-	uint32_t                         remaining_size,
-	enum cam_isp_hw_cmd_type         cmd_type,
-	struct cam_isp_resource_node    *hw_res,
-	struct cam_sfe_scratch_buf_info *buf_info,
-	uint32_t                        *cpu_addr,
-	uint32_t                        *used_bytes)
-{
-	int rc;
+	uint32_t                             remaining_size,
+	enum cam_isp_hw_cmd_type             cmd_type,
+	struct cam_isp_resource_node        *hw_res,
+	struct cam_ife_sfe_scratch_buf_info *buf_info,
+	uint32_t                            *cpu_addr,
+	uint32_t                            *used_bytes)
+{
+	int rc, i;
 	struct cam_isp_hw_get_cmd_update   update_buf;
 	struct cam_isp_hw_get_wm_update    wm_update;
 	dma_addr_t                         io_addr[CAM_PACKET_MAX_PLANES];
@@ -9731,7 +9846,14 @@ static int cam_isp_sfe_send_scratch_buf_upd(
 	update_buf.use_scratch_cfg = true;
 
 	wm_update.num_buf = 1;
-	io_addr[0] = buf_info->io_addr;
+	/*
+	 * Use the same scratch buffer for all planes; when used
+	 * for IFE clients, the same scratch buffer is configured
+	 * for both per-plane clients.
+	 */
+	for (i = 0; i < CAM_PACKET_MAX_PLANES; i++)
+		io_addr[i] = buf_info->io_addr;
+
 	wm_update.image_buf = io_addr;
 	wm_update.width = buf_info->width;
 	wm_update.height = buf_info->height;
@@ -9747,13 +9869,12 @@ static int cam_isp_sfe_send_scratch_buf_upd(
 		cmd_type, &update_buf,
 		sizeof(struct cam_isp_hw_get_cmd_update));
 	if (rc) {
-		CAM_ERR(CAM_ISP, "get buf cmd error:%d",
-			hw_res->res_id);
-		rc = -ENOMEM;
+		CAM_ERR(CAM_ISP, "Failed to send cmd: %u res: %u rc: %d",
+			cmd_type, hw_res->res_id, rc);
 		return rc;
 	}
 
-	CAM_DBG(CAM_ISP, "Scratch buf configured for: 0x%x",
+	CAM_DBG(CAM_ISP, "Scratch buf configured for res: 0x%x",
 		hw_res->res_id);
 
 	/* Update used bytes if update is via CDM */
@@ -9777,8 +9898,8 @@ static int cam_isp_sfe_add_scratch_buffer_cfg(
 	uint32_t used_bytes = 0, remain_size = 0;
 	uint32_t io_cfg_used_bytes;
 	uint32_t *cpu_addr = NULL;
-	struct cam_sfe_scratch_buf_info   *buf_info;
-	struct cam_isp_hw_mgr_res         *hw_mgr_res;
+	struct cam_ife_sfe_scratch_buf_info *buf_info;
+	struct cam_isp_hw_mgr_res *hw_mgr_res;
 
 	if (prepare->num_hw_update_entries + 1 >=
 			prepare->max_hw_update_entries) {
@@ -9916,6 +10037,87 @@ static int cam_isp_sfe_add_scratch_buffer_cfg(
 	return rc;
 }
 
+static int cam_isp_ife_add_scratch_buffer_cfg(
+	uint32_t                              base_idx,
+	uint32_t                              scratch_cfg_mask,
+	struct cam_hw_prepare_update_args    *prepare,
+	struct cam_kmd_buf_info              *kmd_buf_info,
+	struct cam_isp_hw_mgr_res            *res_list_isp_out,
+	struct cam_ife_hw_mgr_ctx            *ctx)
+{
+	int i, j, res_id, rc = 0;
+	uint32_t used_bytes = 0, remain_size = 0, io_cfg_used_bytes;
+	uint32_t *cpu_addr = NULL;
+	struct cam_ife_sfe_scratch_buf_info *buf_info;
+	struct cam_isp_hw_mgr_res *hw_mgr_res;
+
+	if (prepare->num_hw_update_entries + 1 >=
+		prepare->max_hw_update_entries) {
+		CAM_ERR(CAM_ISP, "Insufficient  HW entries :%d %d",
+			prepare->num_hw_update_entries,
+			prepare->max_hw_update_entries);
+		return -EINVAL;
+	}
+
+	io_cfg_used_bytes = 0;
+
+	/* Update scratch buffer for IFE WMs */
+	for (i = 0; i < ctx->sfe_info.ife_scratch_config->num_config; i++) {
+		/*
+		 * Configure scratch only if the bit mask is not set for the given port,
+		 * this is determined after parsing all the IO config buffers
+		 */
+		if ((BIT(i) & scratch_cfg_mask))
+			continue;
+
+		res_id = ctx->sfe_info.ife_scratch_config->buf_info[i].res_id & 0xFF;
+
+		hw_mgr_res = &res_list_isp_out[res_id];
+		for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) {
+			if (!hw_mgr_res->hw_res[j])
+				continue;
+
+			if (hw_mgr_res->hw_res[j]->hw_intf->hw_idx != base_idx)
+				continue;
+
+			if ((kmd_buf_info->used_bytes + io_cfg_used_bytes) <
+				kmd_buf_info->size) {
+				remain_size = kmd_buf_info->size -
+				(kmd_buf_info->used_bytes +
+					io_cfg_used_bytes);
+			} else {
+				CAM_ERR(CAM_ISP,
+					"no free kmd memory for base %u",
+					base_idx);
+				rc = -ENOMEM;
+				return rc;
+			}
+
+			cpu_addr = kmd_buf_info->cpu_addr +
+				kmd_buf_info->used_bytes / 4 + io_cfg_used_bytes / 4;
+			buf_info = &ctx->sfe_info.ife_scratch_config->buf_info[i];
+			CAM_DBG(CAM_ISP, "WM res_id: 0x%x io_addr: %pK",
+				hw_mgr_res->hw_res[j]->res_id, buf_info->io_addr);
+
+			rc = cam_isp_sfe_send_scratch_buf_upd(
+				remain_size,
+				CAM_ISP_HW_CMD_GET_BUF_UPDATE,
+				hw_mgr_res->hw_res[j], buf_info,
+				cpu_addr, &used_bytes);
+			if (rc)
+				return rc;
+
+			io_cfg_used_bytes += used_bytes;
+		}
+	}
+
+	if (io_cfg_used_bytes)
+		cam_ife_mgr_update_hw_entries_util(
+			CAM_ISP_IOCFG_BL, io_cfg_used_bytes, kmd_buf_info, prepare);
+
+	return rc;
+}
+
 static int cam_ife_mgr_csid_add_reg_update(struct cam_ife_hw_mgr_ctx *ctx,
 	struct cam_hw_prepare_update_args *prepare,
 	struct cam_kmd_buf_info *kmd_buf)
@@ -10120,6 +10322,32 @@ static int cam_ife_hw_mgr_update_cmd_buffer(
 	return rc;
 }
 
+static void cam_ife_hw_mgr_check_if_scratch_is_needed(
+	struct cam_ife_hw_mgr_ctx               *ctx,
+	struct cam_isp_check_io_cfg_for_scratch *check_for_scratch)
+{
+	/* Validate for scratch buffer use-cases sHDR/FS */
+	if (!((ctx->flags.is_sfe_fs) || (ctx->flags.is_sfe_shdr)))
+		return;
+
+	/* For SFE use number of fetches = number of scratch buffers needed */
+	check_for_scratch->sfe_scratch_res_info.num_active_fe_rdis =
+		ctx->sfe_info.num_fetches;
+	check_for_scratch->validate_for_sfe = true;
+
+	/* Check if IFE has any scratch buffer */
+	if (ctx->sfe_info.ife_scratch_config->num_config) {
+		int i;
+
+		check_for_scratch->validate_for_ife = true;
+		for (i = 0; i < ctx->sfe_info.ife_scratch_config->num_config; i++) {
+			check_for_scratch->ife_scratch_res_info.ife_scratch_resources[i] =
+				ctx->sfe_info.ife_scratch_config->buf_info[i].res_id;
+			check_for_scratch->ife_scratch_res_info.num_ports++;
+		}
+	}
+}
+
 static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
 	void *prepare_hw_update_args)
 {
@@ -10137,8 +10365,8 @@ static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
 	struct cam_isp_prepare_hw_update_data   *prepare_hw_data;
 	struct cam_isp_frame_header_info         frame_header_info;
 	struct list_head                        *res_list_ife_rd_tmp = NULL;
-	struct cam_isp_check_sfe_fe_io_cfg       sfe_fe_chk_cfg;
 	struct cam_isp_cmd_buf_count             cmd_buf_count = {0};
+	struct cam_isp_check_io_cfg_for_scratch  check_for_scratch = {0};
 
 	if (!hw_mgr_priv || !prepare_hw_update_args) {
 		CAM_ERR(CAM_ISP, "Invalid args");
@@ -10211,6 +10439,8 @@ static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
 	else
 		prepare_hw_data->packet_opcode_type = CAM_ISP_PACKET_UPDATE_DEV;
 
+	cam_ife_hw_mgr_check_if_scratch_is_needed(ctx, &check_for_scratch);
+
 	for (i = 0; i < ctx->num_base; i++) {
 
 		memset(&frame_header_info, 0,
@@ -10230,15 +10460,6 @@ static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
 			goto end;
 		}
 
-		sfe_fe_chk_cfg.sfe_fe_enabled = false;
-		if ((ctx->base[i].hw_type == CAM_ISP_HW_TYPE_SFE) &&
-			((ctx->flags.is_sfe_fs) || (ctx->flags.is_sfe_shdr))) {
-			sfe_fe_chk_cfg.sfe_fe_enabled = true;
-			sfe_fe_chk_cfg.sfe_rdi_cfg_mask = 0;
-			sfe_fe_chk_cfg.num_active_fe_rdis =
-				ctx->sfe_info.num_fetches;
-		}
-
 		/* get IO buffers */
 		if (ctx->base[i].hw_type == CAM_ISP_HW_TYPE_VFE)
 			rc = cam_isp_add_io_buffers(
@@ -10251,7 +10472,7 @@ static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
 				(CAM_ISP_IFE_OUT_RES_BASE + max_ife_out_res),
 				fill_ife_fence,
 				CAM_ISP_HW_TYPE_VFE, &frame_header_info,
-				&sfe_fe_chk_cfg);
+				&check_for_scratch);
 		else if (ctx->base[i].hw_type == CAM_ISP_HW_TYPE_SFE)
 			rc = cam_isp_add_io_buffers(
 				hw_mgr->mgr_common.img_iommu_hdl,
@@ -10262,7 +10483,7 @@ static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
 				CAM_ISP_SFE_OUT_RES_BASE,
 				CAM_ISP_SFE_OUT_RES_MAX, fill_sfe_fence,
 				CAM_ISP_HW_TYPE_SFE, &frame_header_info,
-				&sfe_fe_chk_cfg);
+				&check_for_scratch);
 		if (rc) {
 			CAM_ERR(CAM_ISP,
 				"Failed in io buffers, i=%d, rc=%d hw_type=%s",
@@ -10271,33 +10492,27 @@ static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
 			goto end;
 		}
 
-		/* fence map table entries need to fill only once in the loop */
-		if ((ctx->base[i].hw_type ==
-			CAM_ISP_HW_TYPE_SFE) &&
-			fill_sfe_fence)
-			fill_sfe_fence = false;
-		else if ((ctx->base[i].hw_type ==
-			CAM_ISP_HW_TYPE_VFE) &&
-			fill_ife_fence)
-			fill_ife_fence = false;
-
 		/*
 		 * Add scratch buffer if there no output buffer for RDI WMs/RMs
 		 * only for UPDATE packets. For INIT we could have ePCR enabled
 		 * based on that decide to configure scratch via AHB at
 		 * stream on or not
 		 */
-		if (sfe_fe_chk_cfg.sfe_fe_enabled) {
-			if ((sfe_fe_chk_cfg.sfe_rdi_cfg_mask) !=
+		if ((check_for_scratch.validate_for_sfe) &&
+			(ctx->base[i].hw_type == CAM_ISP_HW_TYPE_SFE) && (fill_sfe_fence)) {
+			struct cam_isp_sfe_scratch_buf_res_info *sfe_res_info =
+				&check_for_scratch.sfe_scratch_res_info;
+
+			if ((sfe_res_info->sfe_rdi_cfg_mask) !=
 				((1 << ctx->sfe_info.num_fetches) - 1)) {
 				if (prepare_hw_data->packet_opcode_type ==
 					CAM_ISP_PACKET_UPDATE_DEV) {
 					CAM_DBG(CAM_ISP,
-						"Adding scratch buffer cfg_mask expected: 0x%x actual: 0x%x",
+						"Adding SFE scratch buffer cfg_mask expected: 0x%x actual: 0x%x",
 						((1 << ctx->sfe_info.num_fetches) - 1),
-						sfe_fe_chk_cfg.sfe_rdi_cfg_mask);
+						sfe_res_info->sfe_rdi_cfg_mask);
 					rc = cam_isp_sfe_add_scratch_buffer_cfg(
-						ctx->base[i].idx, sfe_fe_chk_cfg.sfe_rdi_cfg_mask,
+						ctx->base[i].idx, sfe_res_info->sfe_rdi_cfg_mask,
 						prepare, &kmd_buf, ctx->res_list_sfe_out,
 						&ctx->res_list_ife_in_rd, ctx);
 					if (rc)
@@ -10309,7 +10524,36 @@ static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
 			}
 		}
 
-		memset(&sfe_fe_chk_cfg, 0, sizeof(sfe_fe_chk_cfg));
+		if ((check_for_scratch.validate_for_ife) &&
+			(ctx->base[i].hw_type == CAM_ISP_HW_TYPE_VFE) && (fill_ife_fence)) {
+			struct cam_isp_ife_scratch_buf_res_info *ife_res_info =
+				&check_for_scratch.ife_scratch_res_info;
+
+			/* Config IFE scratch for update packets only */
+			if ((ife_res_info->ife_scratch_cfg_mask) !=
+				((1 << ife_res_info->num_ports) - 1)) {
+				if (prepare_hw_data->packet_opcode_type ==
+					CAM_ISP_PACKET_UPDATE_DEV) {
+					CAM_DBG(CAM_ISP,
+						"Adding IFE scratch buffer cfg_mask expected: 0x%x actual: 0x%x",
+						((1 << ife_res_info->num_ports) - 1),
+						ife_res_info->ife_scratch_cfg_mask);
+					rc = cam_isp_ife_add_scratch_buffer_cfg(
+						ctx->base[i].idx,
+						ife_res_info->ife_scratch_cfg_mask, prepare,
+						&kmd_buf, ctx->res_list_ife_out, ctx);
+					if (rc)
+						goto end;
+				}
+			}
+		}
+
+		/* fence map table entries need to fill only once in the loop */
+		if ((ctx->base[i].hw_type == CAM_ISP_HW_TYPE_SFE) && fill_sfe_fence)
+			fill_sfe_fence = false;
+		else if ((ctx->base[i].hw_type == CAM_ISP_HW_TYPE_VFE) && fill_ife_fence)
+			fill_ife_fence = false;
+
 		if (frame_header_info.frame_header_res_id &&
 			frame_header_enable) {
 			frame_header_enable = false;
@@ -10757,6 +11001,45 @@ int cam_isp_config_csid_rup_aup(
 	return rc;
 }
 
+static int cam_ife_mgr_configure_scratch_for_ife(
+	struct cam_ife_hw_mgr_ctx *ctx)
+{
+	int i, j, rc = 0;
+	uint32_t res_id;
+	struct cam_isp_hw_mgr_res           *hw_mgr_res;
+	struct cam_ife_sfe_scratch_buf_info *port_info;
+	struct cam_ife_scratch_buf_cfg      *ife_buf_info;
+	struct cam_isp_hw_mgr_res           *res_list_ife_out = NULL;
+
+	ife_buf_info = ctx->sfe_info.ife_scratch_config;
+	res_list_ife_out = ctx->res_list_ife_out;
+
+	for (i = 0; i < ife_buf_info->num_config; i++) {
+		res_id = ife_buf_info->buf_info[i].res_id & 0xFF;
+		port_info = &ife_buf_info->buf_info[i];
+		hw_mgr_res = &res_list_ife_out[res_id];
+
+		for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) {
+			/* j = 1 is not valid for this use-case */
+			if (!hw_mgr_res->hw_res[j])
+				continue;
+
+			CAM_DBG(CAM_ISP,
+				"Configure scratch for IFE res: 0x%x io_addr %pK",
+				ife_buf_info->buf_info[i].res_id, port_info->io_addr);
+
+			rc = cam_isp_sfe_send_scratch_buf_upd(0x0,
+				CAM_ISP_HW_CMD_BUF_UPDATE,
+				hw_mgr_res->hw_res[j], port_info,
+				NULL, NULL);
+			if (rc)
+				return rc;
+		}
+	}
+
+	return rc;
+}
+
 /*
  * Scratch buffer is for sHDR/FS usescases involing SFE RDI0-2
  * There is no possibility of dual in this case, hence
@@ -10767,10 +11050,10 @@ static int cam_ife_mgr_prog_default_settings(
 	bool need_rup_aup, struct cam_ife_hw_mgr_ctx *ctx)
 {
 	int i, j, res_id, rc = 0;
-	struct cam_isp_hw_mgr_res       *hw_mgr_res;
-	struct cam_sfe_scratch_buf_info *buf_info;
-	struct list_head                *res_list_in_rd = NULL;
-	struct cam_isp_hw_mgr_res       *res_list_sfe_out = NULL;
+	struct cam_isp_hw_mgr_res           *hw_mgr_res;
+	struct cam_ife_sfe_scratch_buf_info *buf_info;
+	struct list_head                    *res_list_in_rd = NULL;
+	struct cam_isp_hw_mgr_res           *res_list_sfe_out = NULL;
 
 	res_list_in_rd = &ctx->res_list_ife_in_rd;
 	res_list_sfe_out = ctx->res_list_sfe_out;
@@ -10842,6 +11125,13 @@ static int cam_ife_mgr_prog_default_settings(
 		}
 	}
 
+	/* Check for IFE scratch buffer */
+	if (ctx->sfe_info.ife_scratch_config->num_config) {
+		rc = cam_ife_mgr_configure_scratch_for_ife(ctx);
+		if (rc)
+			return rc;
+	}
+
 	/* Program rup & aup only at run time */
 	if (need_rup_aup) {
 		rc = cam_isp_config_csid_rup_aup(ctx);
@@ -12015,14 +12305,50 @@ static int cam_ife_hw_mgr_handle_hw_eof(
 	return 0;
 }
 
+static bool cam_ife_hw_mgr_last_consumed_addr_check(
+	uint32_t last_consumed_addr, struct cam_ife_sfe_scratch_buf_info *buf_info)
+{
+	dma_addr_t final_addr;
+	uint32_t cmp_addr = 0;
+
+	final_addr = buf_info->io_addr + buf_info->offset;
+	cmp_addr = cam_smmu_is_expanded_memory() ?
+		CAM_36BIT_INTF_GET_IOVA_BASE(final_addr) : final_addr;
+	if (cmp_addr == last_consumed_addr)
+		return true;
+
+	return false;
+}
+
+static int cam_ife_hw_mgr_check_ife_scratch_buf_done(
+	struct cam_ife_scratch_buf_cfg *scratch_cfg,
+	uint32_t res_id, uint32_t last_consumed_addr)
+{
+	int rc = 0, i;
+	struct cam_ife_sfe_scratch_buf_info *buf_info;
+
+	for (i = 0; i < scratch_cfg->num_config; i++) {
+		if (scratch_cfg->buf_info[i].res_id == res_id) {
+			buf_info = &scratch_cfg->buf_info[i];
+
+			if (cam_ife_hw_mgr_last_consumed_addr_check(last_consumed_addr, buf_info)) {
+				CAM_DBG(CAM_ISP,
+					"IFE res:0x%x buf done for scratch - skip ctx notify",
+					buf_info->res_id);
+				rc = -EAGAIN;
+			}
+		}
+	}
+
+	return rc;
+}
+
 static int cam_ife_hw_mgr_check_rdi_scratch_buf_done(
-	const uint32_t ctx_index, struct cam_sfe_scratch_buf_cfg *scratch_cfg,
+	struct cam_sfe_scratch_buf_cfg *scratch_cfg,
 	uint32_t res_id, uint32_t last_consumed_addr)
 {
 	int rc = 0;
-	struct cam_sfe_scratch_buf_info *buf_info;
-	dma_addr_t final_addr;
-	uint32_t cmp_addr = 0;
+	struct cam_ife_sfe_scratch_buf_info *buf_info;
 
 	switch (res_id) {
 	case CAM_ISP_SFE_OUT_RES_RDI_0:
@@ -12032,12 +12358,10 @@ static int cam_ife_hw_mgr_check_rdi_scratch_buf_done(
 		if (!buf_info->config_done)
 			return 0;
 
-		final_addr = buf_info->io_addr + buf_info->offset;
-		cmp_addr = cam_smmu_is_expanded_memory() ?
-			CAM_36BIT_INTF_GET_IOVA_BASE(final_addr) : final_addr;
-		if (cmp_addr == last_consumed_addr) {
-			CAM_DBG(CAM_ISP, "SFE RDI%u buf done for scratch - skip ctx notify",
-				(res_id - CAM_ISP_SFE_OUT_RES_BASE));
+		if (cam_ife_hw_mgr_last_consumed_addr_check(last_consumed_addr, buf_info)) {
+			CAM_DBG(CAM_ISP,
+				"SFE RDI: 0x%x buf done for scratch - skip ctx notify",
+				buf_info->res_id);
 			rc = -EAGAIN;
 		}
 		break;
@@ -12048,6 +12372,32 @@ static int cam_ife_hw_mgr_check_rdi_scratch_buf_done(
 	return rc;
 }
 
+static int cam_ife_hw_mgr_check_for_scratch_buf_done(
+	struct cam_ife_hw_mgr_ctx *ife_hw_mgr_ctx,
+	enum cam_isp_hw_type hw_type,
+	uint32_t res_id, uint32_t last_consumed_addr)
+{
+	int rc = 0;
+
+	switch (hw_type) {
+	case CAM_ISP_HW_TYPE_VFE:
+		if (ife_hw_mgr_ctx->sfe_info.ife_scratch_config->num_config)
+			rc = cam_ife_hw_mgr_check_ife_scratch_buf_done(
+				ife_hw_mgr_ctx->sfe_info.ife_scratch_config,
+				res_id, last_consumed_addr);
+		break;
+	case CAM_ISP_HW_TYPE_SFE:
+		rc = cam_ife_hw_mgr_check_rdi_scratch_buf_done(
+			ife_hw_mgr_ctx->sfe_info.scratch_config,
+			res_id, last_consumed_addr);
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
+
 static int cam_ife_hw_mgr_handle_hw_buf_done(
 	struct cam_ife_hw_mgr_ctx        *ife_hw_mgr_ctx,
 	struct cam_isp_hw_event_info     *event_info)
@@ -12066,30 +12416,29 @@ static int cam_ife_hw_mgr_handle_hw_buf_done(
 
 	ife_hwr_irq_wm_done_cb = ife_hw_mgr_ctx->common.event_cb;
 	compdone_evt_info = (struct cam_isp_hw_compdone_event_info *)event_info->event_data;
-	buf_done_event_data.num_handles = compdone_evt_info->num_res;
+	buf_done_event_data.num_handles = 0;
 
 	for (i = 0; i < compdone_evt_info->num_res; i++) {
-		buf_done_event_data.resource_handle[i] =
-			compdone_evt_info->res_id[i];
-		buf_done_event_data.last_consumed_addr[i] =
-			compdone_evt_info->last_consumed_addr[i];
-
-		CAM_DBG(CAM_ISP, "Buf done for %s: %d res_id: 0x%x last consumed addr: 0x%x",
-		((event_info->hw_type == CAM_ISP_HW_TYPE_SFE) ? "SFE" : "IFE"),
-		event_info->hw_idx, compdone_evt_info->res_id[i],
-		compdone_evt_info->last_consumed_addr[i]);
-
-		if (cam_ife_hw_mgr_is_shdr_fs_rdi_res(compdone_evt_info->res_id[i],
-			ife_hw_mgr_ctx->flags.is_sfe_shdr,
-			ife_hw_mgr_ctx->flags.is_sfe_fs)) {
-			rc = cam_ife_hw_mgr_check_rdi_scratch_buf_done(
-				ife_hw_mgr_ctx->ctx_index,
-				ife_hw_mgr_ctx->sfe_info.scratch_config,
-				compdone_evt_info->res_id[i],
+		CAM_DBG(CAM_ISP,
+			"Buf done for %s: %d res_id: 0x%x last consumed addr: 0x%x ctx: %u",
+			((event_info->hw_type == CAM_ISP_HW_TYPE_SFE) ? "SFE" : "IFE"),
+			event_info->hw_idx, compdone_evt_info->res_id[i],
+			compdone_evt_info->last_consumed_addr[i], ife_hw_mgr_ctx->ctx_index);
+
+		/* Check scratch for sHDR/FS use-cases */
+		if (ife_hw_mgr_ctx->flags.is_sfe_fs || ife_hw_mgr_ctx->flags.is_sfe_shdr) {
+			rc = cam_ife_hw_mgr_check_for_scratch_buf_done(ife_hw_mgr_ctx,
+				event_info->hw_type, compdone_evt_info->res_id[i],
 				compdone_evt_info->last_consumed_addr[i]);
 			if (rc)
-				goto end;
+				continue;
 		}
+
+		buf_done_event_data.resource_handle[buf_done_event_data.num_handles] =
+			compdone_evt_info->res_id[i];
+		buf_done_event_data.last_consumed_addr[buf_done_event_data.num_handles] =
+			compdone_evt_info->last_consumed_addr[i];
+		buf_done_event_data.num_handles++;
 	}
 
 
@@ -12097,12 +12446,13 @@ static int cam_ife_hw_mgr_handle_hw_buf_done(
 		return 0;
 
 	if (buf_done_event_data.num_handles > 0 && ife_hwr_irq_wm_done_cb) {
-		CAM_DBG(CAM_ISP, "Notify ISP context");
+		CAM_DBG(CAM_ISP,
+			"Notify ISP context for %u handles in ctx: %u",
+			buf_done_event_data.num_handles, ife_hw_mgr_ctx->ctx_index);
 		ife_hwr_irq_wm_done_cb(ife_hw_mgr_ctx->common.cb_priv,
 			CAM_ISP_HW_EVENT_DONE, (void *)&buf_done_event_data);
 	}
 
-end:
 	return 0;
 }
 

+ 24 - 8
drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h

@@ -81,7 +81,7 @@ struct cam_ife_hw_mgr_ctx_pf_info {
 };
 
 /**
- * struct cam_sfe_scratch_buf_info - Scratch buf info
+ * struct cam_ife_sfe_scratch_buf_info - Scratch buf info
  *
  * @width: Width in pixels
  * @height: Height in pixels
@@ -93,7 +93,7 @@ struct cam_ife_hw_mgr_ctx_pf_info {
  * @config_done: To indicate if RDIx received scratch cfg
  * @is_secure: secure scratch buffer
  */
-struct cam_sfe_scratch_buf_info {
+struct cam_ife_sfe_scratch_buf_info {
 	uint32_t   width;
 	uint32_t   height;
 	uint32_t   stride;
@@ -108,29 +108,45 @@ struct cam_sfe_scratch_buf_info {
 /**
  * struct cam_sfe_scratch_buf_cfg - Scratch buf info
  *
- * @num_configs: Number of buffer configs [max of 3 currently]
+ * @num_config  : Total number of scratch buffers provided
  * @curr_num_exp: Current num of exposures
- * @buf_info: Info on each of the buffers
+ * @buf_info    : Info on each of the buffers
  *
  */
 struct cam_sfe_scratch_buf_cfg {
-	uint32_t                        num_config;
-	uint32_t                        curr_num_exp;
-	struct cam_sfe_scratch_buf_info buf_info[
+	uint32_t                            num_config;
+	uint32_t                            curr_num_exp;
+	struct cam_ife_sfe_scratch_buf_info buf_info[
 		CAM_SFE_FE_RDI_NUM_MAX];
 };
 
+/**
+ * struct cam_ife_scratch_buf_cfg - IFE scratch buf info
+ *
+ * @num_config: Total number of scratch buffers provided
+ * @buf_info  : Info on each of the buffers
+ *
+ */
+struct cam_ife_scratch_buf_cfg {
+	uint32_t                            num_config;
+	struct cam_ife_sfe_scratch_buf_info buf_info[
+		CAM_IFE_SCRATCH_NUM_MAX];
+};
+
+
 /**
  * struct cam_ife_hw_mgr_sfe_info - SFE info
  *
  * @skip_scratch_cfg_streamon: Determine if scratch cfg needs to be programmed at stream on
  * @num_fetches:               Indicate number of SFE fetches for this stream
- * @scratch_config:            Scratch buffer config if any for this stream
+ * @scratch_config:            Scratch buffer config if any for SFE ports
+ * @ife_scratch_config:        Scratch buffer config if any for IFE ports
  */
 struct cam_ife_hw_mgr_sfe_info {
 	bool                            skip_scratch_cfg_streamon;
 	uint32_t                        num_fetches;
 	struct cam_sfe_scratch_buf_cfg *scratch_config;
+	struct cam_ife_scratch_buf_cfg *ife_scratch_config;
 };
 
 /**

+ 59 - 20
drivers/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c

@@ -678,20 +678,51 @@ int cam_sfe_add_command_buffers(
 	return rc;
 }
 
+static void cam_isp_validate_for_sfe_scratch(
+	struct cam_isp_sfe_scratch_buf_res_info *sfe_res_info,
+	uint32_t res_type, uint32_t out_base)
+{
+	uint32_t res_id_out = res_type & 0xFF;
+
+	if ((res_id_out) < ((out_base & 0xFF) +
+		sfe_res_info->num_active_fe_rdis)) {
+		CAM_DBG(CAM_ISP,
+			"Buffer found for SFE port: 0x%x - skip scratch buffer",
+			res_type);
+		sfe_res_info->sfe_rdi_cfg_mask |= (1 << res_id_out);
+	}
+}
+
+static void cam_isp_validate_for_ife_scratch(
+	struct cam_isp_ife_scratch_buf_res_info *ife_res_info,
+	uint32_t res_type)
+{
+	int i;
+
+	for (i = 0; i < ife_res_info->num_ports; i++) {
+		if (res_type == ife_res_info->ife_scratch_resources[i]) {
+			CAM_DBG(CAM_ISP,
+				"Buffer found for IFE port: 0x%x - skip scratch buffer",
+				res_type);
+			ife_res_info->ife_scratch_cfg_mask |= (1 << i);
+		}
+	}
+}
+
 int cam_isp_add_io_buffers(
-	int                                   iommu_hdl,
-	int                                   sec_iommu_hdl,
-	struct cam_hw_prepare_update_args    *prepare,
-	uint32_t                              base_idx,
-	struct cam_kmd_buf_info              *kmd_buf_info,
-	struct cam_isp_hw_mgr_res            *res_list_isp_out,
-	struct list_head                     *res_list_in_rd,
-	uint32_t                              out_base,
-	uint32_t                              out_max,
-	bool                                  fill_fence,
-	enum cam_isp_hw_type                  hw_type,
-	struct cam_isp_frame_header_info     *frame_header_info,
-	struct cam_isp_check_sfe_fe_io_cfg   *check_sfe_fe_cfg)
+	int                                      iommu_hdl,
+	int                                      sec_iommu_hdl,
+	struct cam_hw_prepare_update_args       *prepare,
+	uint32_t                                 base_idx,
+	struct cam_kmd_buf_info                 *kmd_buf_info,
+	struct cam_isp_hw_mgr_res               *res_list_isp_out,
+	struct list_head                        *res_list_in_rd,
+	uint32_t                                 out_base,
+	uint32_t                                 out_max,
+	bool                                     fill_fence,
+	enum cam_isp_hw_type                     hw_type,
+	struct cam_isp_frame_header_info        *frame_header_info,
+	struct cam_isp_check_io_cfg_for_scratch *scratch_check_cfg)
 {
 	int                                 rc = 0;
 	dma_addr_t                          io_addr[CAM_PACKET_MAX_PLANES];
@@ -750,14 +781,22 @@ int cam_isp_add_io_buffers(
 				continue;
 
 			res_id_out = io_cfg[i].resource_type & 0xFF;
-			if (check_sfe_fe_cfg->sfe_fe_enabled &&
-				(res_id_out < ((out_base & 0xFF) +
-				 check_sfe_fe_cfg->num_active_fe_rdis))) {
-				CAM_DBG(CAM_ISP,
-					"SFE Write/Fetch engine cfg skip scratch buffer for res 0x%x",
+			if ((hw_type == CAM_ISP_HW_TYPE_SFE)  &&
+				(scratch_check_cfg->validate_for_sfe)) {
+				struct cam_isp_sfe_scratch_buf_res_info *sfe_res_info =
+					&scratch_check_cfg->sfe_scratch_res_info;
+
+				cam_isp_validate_for_sfe_scratch(sfe_res_info,
+					io_cfg[i].resource_type, out_base);
+			}
+
+			if ((hw_type == CAM_ISP_HW_TYPE_VFE) &&
+				(scratch_check_cfg->validate_for_ife)) {
+				struct cam_isp_ife_scratch_buf_res_info *ife_res_info =
+					&scratch_check_cfg->ife_scratch_res_info;
+
+				cam_isp_validate_for_ife_scratch(ife_res_info,
 					io_cfg[i].resource_type);
-				check_sfe_fe_cfg->sfe_rdi_cfg_mask |=
-					1 << res_id_out;
 			}
 
 			CAM_DBG(CAM_ISP,

+ 48 - 21
drivers/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h

@@ -50,19 +50,46 @@ struct cam_isp_frame_header_info {
 };
 
 /*
- * struct cam_isp_check_sfe_fe_io_cfg
- *
- * @sfe_fe_enabled     : True if SFE fetch engine is enabled
- * @num_active_fe_rdis : To indicate active RMs/RDIs
- * @sfe_rdi_cfg_mask   : To indicate IO buf cfg for RDIs
+ * struct cam_isp_sfe_scratch_buf_res_info
  *
+ * @num_active_fe_rdis    : To indicate active RMs/RDIs
+ * @sfe_rdi_cfg_mask      : Output mask to mark if the given RDI res has been
+ *                          provided with IO cfg buffer
  */
-struct cam_isp_check_sfe_fe_io_cfg {
-	bool                     sfe_fe_enabled;
+struct cam_isp_sfe_scratch_buf_res_info {
 	uint32_t                 num_active_fe_rdis;
 	uint32_t                 sfe_rdi_cfg_mask;
 };
 
+/*
+ * struct cam_isp_ife_scratch_buf_res_info
+ *
+ * @num_ports             : Number of ports for which scratch buffer is provided
+ * @ife_scratch_resources : IFE resources that have been provided a scratch buffer
+ * @ife_scratch_cfg_mask  : Output mask to mark if the given client has been
+ *                          provided with IO cfg buffer
+ */
+struct cam_isp_ife_scratch_buf_res_info {
+	uint32_t                 num_ports;
+	uint32_t                 ife_scratch_resources[CAM_IFE_SCRATCH_NUM_MAX];
+	uint32_t                 ife_scratch_cfg_mask;
+};
+
+/*
+ * struct cam_isp_check_io_cfg_for_scratch
+ *
+ * @sfe_scratch_res_info  : SFE scratch buffer validation info
+ * @ife_scratch_res_info  : IFE scratch buffer validation info
+ * @validate_for_sfe      : Validate for SFE clients, check if scratch is needed
+ * @validate_for_ife      : Validate for IFE clients, check if scratch is needed
+ */
+struct cam_isp_check_io_cfg_for_scratch {
+	struct cam_isp_sfe_scratch_buf_res_info sfe_scratch_res_info;
+	struct cam_isp_ife_scratch_buf_res_info ife_scratch_res_info;
+	bool                                    validate_for_sfe;
+	bool                                    validate_for_ife;
+};
+
 /*
  * struct cam_isp_change_base_args
  *
@@ -209,24 +236,24 @@ int cam_isp_add_command_buffers(
  * @fill_fence:            If true, Fence map table will be filled
  * @hw_type:               HW type for this ctx base (IFE/SFE)
  * @frame_header_info:     Frame header related params
- * @check_sfe_fe_cfg:      Validate if sfe fetch received IO cfg
+ * @scratch_check_cfg:     Validate info for IFE/SFE scratch buffers
  * @return:                0 for success
  *                         -EINVAL for Fail
  */
 int cam_isp_add_io_buffers(
-	int                                   iommu_hdl,
-	int                                   sec_iommu_hdl,
-	struct cam_hw_prepare_update_args    *prepare,
-	uint32_t                              base_idx,
-	struct cam_kmd_buf_info              *kmd_buf_info,
-	struct cam_isp_hw_mgr_res            *res_list_isp_out,
-	struct list_head                     *res_list_ife_in_rd,
-	uint32_t                              out_base,
-	uint32_t                              out_max,
-	bool                                  fill_fence,
-	enum cam_isp_hw_type                  hw_type,
-	struct cam_isp_frame_header_info     *frame_header_info,
-	struct cam_isp_check_sfe_fe_io_cfg   *check_sfe_fe_cfg);
+	int                                      iommu_hdl,
+	int                                      sec_iommu_hdl,
+	struct cam_hw_prepare_update_args       *prepare,
+	uint32_t                                 base_idx,
+	struct cam_kmd_buf_info                 *kmd_buf_info,
+	struct cam_isp_hw_mgr_res               *res_list_isp_out,
+	struct list_head                        *res_list_ife_in_rd,
+	uint32_t                                 out_base,
+	uint32_t                                 out_max,
+	bool                                     fill_fence,
+	enum cam_isp_hw_type                     hw_type,
+	struct cam_isp_frame_header_info        *frame_header_info,
+	struct cam_isp_check_io_cfg_for_scratch *scratch_check_cfg);
 
 /*
  * cam_isp_add_reg_update()

+ 11 - 9
drivers/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h

@@ -13,15 +13,17 @@
 #include "cam_hw_mgr_intf.h"
 
 /* MAX IFE instance */
-#define CAM_IFE_HW_NUM_MAX      8
-#define CAM_SFE_HW_NUM_MAX      2
-#define CAM_IFE_RDI_NUM_MAX     4
-#define CAM_SFE_RDI_NUM_MAX     5
-#define CAM_SFE_FE_RDI_NUM_MAX  3
-#define CAM_ISP_BW_CONFIG_V1    1
-#define CAM_ISP_BW_CONFIG_V2    2
-#define CAM_TFE_HW_NUM_MAX      3
-#define CAM_TFE_RDI_NUM_MAX     3
+#define CAM_IFE_HW_NUM_MAX       8
+#define CAM_SFE_HW_NUM_MAX       2
+#define CAM_IFE_RDI_NUM_MAX      4
+#define CAM_SFE_RDI_NUM_MAX      5
+#define CAM_SFE_FE_RDI_NUM_MAX   3
+#define CAM_ISP_BW_CONFIG_V1     1
+#define CAM_ISP_BW_CONFIG_V2     2
+#define CAM_TFE_HW_NUM_MAX       3
+#define CAM_TFE_RDI_NUM_MAX      3
+#define CAM_IFE_SCRATCH_NUM_MAX  2
+
 
 /* maximum context numbers for TFE */
 #define CAM_TFE_CTX_MAX      4

+ 1 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c

@@ -536,6 +536,7 @@ int cam_vfe_process_cmd(void *hw_priv, uint32_t cmd_type,
 	case CAM_ISP_HW_CMD_IFE_BUS_DEBUG_CFG:
 	case CAM_ISP_HW_CMD_WM_BW_LIMIT_CONFIG:
 	case CAM_ISP_HW_BUS_MINI_DUMP:
+	case CAM_ISP_HW_CMD_BUF_UPDATE:
 		rc = core_info->vfe_bus->hw_ops.process_cmd(
 			core_info->vfe_bus->bus_priv, cmd_type, cmd_args,
 			arg_size);

+ 207 - 20
drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c

@@ -194,6 +194,7 @@ struct cam_vfe_bus_ver3_vfe_out_data {
 	uint32_t                         secure_mode;
 	void                            *priv;
 	uint32_t                         mid[CAM_VFE_BUS_VER3_MAX_MID_PER_PORT];
+	bool                             limiter_enabled;
 };
 
 struct cam_vfe_bus_ver3_priv {
@@ -1976,6 +1977,7 @@ static int cam_vfe_bus_ver3_acquire_vfe_out(void *bus_priv, void *acquire_args,
 		out_acquire_args->disable_ubwc_comp;
 	rsrc_data->priv = acq_args->priv;
 	rsrc_data->bus_priv = ver3_bus_priv;
+	rsrc_data->limiter_enabled = false;
 	comp_acq_args.composite_mask = (1ULL << vfe_out_res_id);
 
 	/* for some hw versions, buf done is not received from vfe but
@@ -3131,6 +3133,165 @@ end:
 	return rc;
 }
 
+static int cam_vfe_bus_ver3_config_ubwc_regs(
+	struct cam_vfe_bus_ver3_wm_resource_data *wm_data)
+{
+	struct cam_vfe_bus_ver3_reg_offset_ubwc_client *ubwc_regs =
+		(struct cam_vfe_bus_ver3_reg_offset_ubwc_client *)
+		wm_data->hw_regs->ubwc_regs;
+
+	cam_io_w_mb(wm_data->packer_cfg, wm_data->common_data->mem_base +
+		wm_data->hw_regs->packer_cfg);
+	CAM_DBG(CAM_ISP, "WM:%d packer cfg:0x%x",
+		wm_data->index, wm_data->packer_cfg);
+
+	cam_io_w_mb(wm_data->ubwc_meta_cfg,
+		wm_data->common_data->mem_base + ubwc_regs->meta_cfg);
+	CAM_DBG(CAM_ISP, "WM:%d meta stride:0x%x",
+		wm_data->index, wm_data->ubwc_meta_cfg);
+
+	if (wm_data->common_data->disable_ubwc_comp) {
+		wm_data->ubwc_mode_cfg &= ~ubwc_regs->ubwc_comp_en_bit;
+		CAM_DBG(CAM_ISP,
+			"Force disable UBWC compression on VFE:%d WM:%d",
+			wm_data->common_data->core_index, wm_data->index);
+	}
+
+	cam_io_w_mb(wm_data->ubwc_mode_cfg,
+		wm_data->common_data->mem_base + ubwc_regs->mode_cfg);
+	CAM_DBG(CAM_ISP, "WM:%d ubwc_mode_cfg:0x%x",
+		wm_data->index, wm_data->ubwc_mode_cfg);
+
+	cam_io_w_mb(wm_data->ubwc_ctrl_2,
+		wm_data->common_data->mem_base + ubwc_regs->ctrl_2);
+	CAM_DBG(CAM_ISP, "WM:%d ubwc_ctrl_2:0x%x",
+		wm_data->index, wm_data->ubwc_ctrl_2);
+
+	cam_io_w_mb(wm_data->ubwc_lossy_threshold_0,
+		wm_data->common_data->mem_base + ubwc_regs->lossy_thresh0);
+	CAM_DBG(CAM_ISP, "WM:%d lossy_thresh0: 0x%x",
+		wm_data->index, wm_data->ubwc_lossy_threshold_0);
+
+	cam_io_w_mb(wm_data->ubwc_lossy_threshold_1,
+		wm_data->common_data->mem_base + ubwc_regs->lossy_thresh1);
+	CAM_DBG(CAM_ISP, "WM:%d lossy_thresh1:0x%x",
+		wm_data->index, wm_data->ubwc_lossy_threshold_1);
+
+	cam_io_w_mb(wm_data->ubwc_offset_lossy_variance,
+		wm_data->common_data->mem_base + ubwc_regs->off_lossy_var);
+	CAM_DBG(CAM_ISP, "WM:%d off_lossy_var:0x%x",
+	wm_data->index, wm_data->ubwc_offset_lossy_variance);
+
+	/*
+	 * If limit value >= 0xFFFF, limit configured by
+	 * generic limiter blob
+	 */
+	if (wm_data->ubwc_bandwidth_limit < 0xFFFF) {
+		cam_io_w_mb(wm_data->ubwc_bandwidth_limit,
+			wm_data->common_data->mem_base + ubwc_regs->bw_limit);
+		CAM_DBG(CAM_ISP, "WM:%d ubwc bw limit:0x%x",
+			wm_data->index, wm_data->ubwc_bandwidth_limit);
+	}
+
+	return 0;
+}
+
+static int cam_vfe_bus_ver3_config_wm(void *priv, void *cmd_args,
+	uint32_t arg_size)
+{
+	struct cam_vfe_bus_ver3_priv *bus_priv;
+	struct cam_isp_hw_get_cmd_update *update_buf;
+	struct cam_vfe_bus_ver3_vfe_out_data *vfe_out_data = NULL;
+	struct cam_vfe_bus_ver3_wm_resource_data *wm_data = NULL;
+	struct cam_vfe_bus_ver3_reg_offset_ubwc_client *ubwc_regs;
+	uint32_t i, val, iova_addr, iova_offset, stride;
+	dma_addr_t iova;
+
+	bus_priv = (struct cam_vfe_bus_ver3_priv  *) priv;
+	update_buf = (struct cam_isp_hw_get_cmd_update *) cmd_args;
+
+	vfe_out_data = (struct cam_vfe_bus_ver3_vfe_out_data *)
+		update_buf->res->res_priv;
+	if (!vfe_out_data) {
+		CAM_ERR(CAM_ISP, "Invalid data");
+		return -EINVAL;
+	}
+
+	if (!vfe_out_data->limiter_enabled)
+		CAM_WARN(CAM_ISP,
+			"Configuring scratch for VFE out_type: %u, with no BW limiter enabled",
+			vfe_out_data->out_type);
+
+	for (i = 0; i < vfe_out_data->num_wm; i++) {
+		wm_data = vfe_out_data->wm_res[i].res_priv;
+		ubwc_regs = (struct cam_vfe_bus_ver3_reg_offset_ubwc_client *)
+			wm_data->hw_regs->ubwc_regs;
+
+		stride =  update_buf->wm_update->stride;
+		val = stride;
+		val = ALIGNUP(val, 16);
+		if (val != stride &&
+			val != wm_data->stride)
+			CAM_WARN(CAM_SFE, "Warning stride %u expected %u",
+				stride, val);
+
+		if (wm_data->stride != val || !wm_data->init_cfg_done) {
+			cam_io_w_mb(stride, wm_data->common_data->mem_base +
+				wm_data->hw_regs->image_cfg_2);
+			wm_data->stride = val;
+			CAM_DBG(CAM_ISP, "WM:%d image stride 0x%x",
+				wm_data->index, stride);
+		}
+
+		/* WM Image address */
+		iova = update_buf->wm_update->image_buf[i];
+		if (cam_smmu_is_expanded_memory()) {
+			iova_addr = CAM_36BIT_INTF_GET_IOVA_BASE(iova);
+			iova_offset = CAM_36BIT_INTF_GET_IOVA_OFFSET(iova);
+
+			cam_io_w_mb(iova_addr, wm_data->common_data->mem_base +
+				wm_data->hw_regs->image_addr);
+			cam_io_w_mb(iova_offset, wm_data->common_data->mem_base +
+				wm_data->hw_regs->addr_cfg);
+
+			CAM_DBG(CAM_ISP, "WM:%d image address 0x%x 0x%x",
+				wm_data->index, iova_addr, iova_offset);
+		} else {
+			iova_addr = iova;
+			cam_io_w_mb(iova_addr, wm_data->common_data->mem_base +
+				wm_data->hw_regs->image_addr);
+			CAM_DBG(CAM_ISP, "WM:%d image address 0x%X",
+				wm_data->index, iova_addr);
+		}
+
+		if (wm_data->en_ubwc) {
+			if (!wm_data->hw_regs->ubwc_regs) {
+				CAM_ERR(CAM_ISP,
+					"No UBWC register to configure for WM: %u",
+					wm_data->index);
+				return -EINVAL;
+			}
+
+			if (wm_data->ubwc_updated) {
+				wm_data->ubwc_updated = false;
+				cam_vfe_bus_ver3_config_ubwc_regs(wm_data);
+			}
+
+			cam_io_w_mb(iova_addr, wm_data->common_data->mem_base +
+				ubwc_regs->meta_addr);
+			CAM_DBG(CAM_ISP, "WM:%d meta address 0x%x",
+				wm_data->index, iova_addr);
+		}
+
+		/* enable the WM */
+		cam_io_w_mb(wm_data->en_cfg, wm_data->common_data->mem_base +
+			wm_data->hw_regs->cfg);
+		CAM_DBG(CAM_ISP, "WM:%d en_cfg 0x%x", wm_data->index, wm_data->en_cfg);
+	}
+
+	return 0;
+}
+
 static int cam_vfe_bus_ver3_update_wm(void *priv, void *cmd_args,
 	uint32_t arg_size)
 {
@@ -3145,7 +3306,7 @@ static int cam_vfe_bus_ver3_update_wm(void *priv, void *cmd_args,
 	uint32_t num_regval_pairs = 0;
 	uint32_t i, j, size = 0;
 	uint32_t frame_inc = 0, val;
-	uint32_t iova_addr, iova_offset, image_buf_offset = 0;
+	uint32_t iova_addr, iova_offset, image_buf_offset = 0, stride, slice_h;
 	dma_addr_t iova;
 
 	bus_priv = (struct cam_vfe_bus_ver3_priv  *) priv;
@@ -3159,7 +3320,8 @@ static int cam_vfe_bus_ver3_update_wm(void *priv, void *cmd_args,
 	}
 
 	cdm_util_ops = vfe_out_data->cdm_util_ops;
-	if (update_buf->wm_update->num_buf != vfe_out_data->num_wm) {
+	if ((update_buf->wm_update->num_buf != vfe_out_data->num_wm) &&
+		(!(update_buf->use_scratch_cfg))) {
 		CAM_ERR(CAM_ISP,
 			"Failed! Invalid number buffers:%d required:%d",
 			update_buf->wm_update->num_buf, vfe_out_data->num_wm);
@@ -3167,7 +3329,17 @@ static int cam_vfe_bus_ver3_update_wm(void *priv, void *cmd_args,
 	}
 
 	reg_val_pair = &vfe_out_data->common_data->io_buf_update[0];
-	io_cfg = update_buf->wm_update->io_cfg;
+	if (update_buf->use_scratch_cfg) {
+		CAM_DBG(CAM_ISP, "Using scratch for IFE out_type: %u",
+			vfe_out_data->out_type);
+
+		if (!vfe_out_data->limiter_enabled)
+			CAM_WARN(CAM_ISP,
+				"Configuring scratch for VFE out_type: %u, with no BW limiter enabled",
+				vfe_out_data->out_type);
+	} else {
+		io_cfg = update_buf->wm_update->io_cfg;
+	}
 
 	for (i = 0, j = 0; i < vfe_out_data->num_wm; i++) {
 		if (j >= (MAX_REG_VAL_PAIR_SIZE - MAX_BUF_UPDATE_REG_NUM * 2)) {
@@ -3228,17 +3400,25 @@ static int cam_vfe_bus_ver3_update_wm(void *priv, void *cmd_args,
 			wm_data->index, reg_val_pair[j-1]);
 
 		/* For initial configuration program all bus registers */
-		val = io_cfg->planes[i].plane_stride;
+		if (update_buf->use_scratch_cfg) {
+			stride = update_buf->wm_update->stride;
+			slice_h = update_buf->wm_update->slice_height;
+		} else {
+			stride = io_cfg->planes[i].plane_stride;
+			slice_h = io_cfg->planes[i].slice_height;
+		}
+
+		val = stride;
 		CAM_DBG(CAM_ISP, "before stride %d", val);
 		val = ALIGNUP(val, 16);
-		if (val != io_cfg->planes[i].plane_stride)
+		if (val != stride)
 			CAM_DBG(CAM_ISP, "Warning stride %u expected %u",
-				io_cfg->planes[i].plane_stride, val);
+				stride, val);
 
 		if (wm_data->stride != val || !wm_data->init_cfg_done) {
 			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
 				wm_data->hw_regs->image_cfg_2,
-				io_cfg->planes[i].plane_stride);
+				stride);
 			wm_data->stride = val;
 			CAM_DBG(CAM_ISP, "WM:%d image stride 0x%X",
 				wm_data->index, reg_val_pair[j-1]);
@@ -3266,19 +3446,20 @@ static int cam_vfe_bus_ver3_update_wm(void *priv, void *cmd_args,
 				update_buf->wm_update->image_buf[i]);
 		}
 
+		frame_inc = stride * slice_h;
 		if (wm_data->en_ubwc) {
-			frame_inc = ALIGNUP(io_cfg->planes[i].plane_stride *
-				io_cfg->planes[i].slice_height, 4096);
-			frame_inc += io_cfg->planes[i].meta_size;
-			CAM_DBG(CAM_ISP,
-				"WM:%d frm %d: ht: %d stride %d meta: %d",
-				wm_data->index, frame_inc,
-				io_cfg->planes[i].slice_height,
-				io_cfg->planes[i].plane_stride,
-				io_cfg->planes[i].meta_size);
-		} else {
-			frame_inc = io_cfg->planes[i].plane_stride *
-				io_cfg->planes[i].slice_height;
+			frame_inc = ALIGNUP(stride *
+				slice_h, 4096);
+
+			if (!update_buf->use_scratch_cfg) {
+				frame_inc += io_cfg->planes[i].meta_size;
+				CAM_DBG(CAM_ISP,
+					"WM:%d frm %d: ht: %d stride %d meta: %d",
+					wm_data->index, frame_inc,
+					io_cfg->planes[i].slice_height,
+					io_cfg->planes[i].plane_stride,
+					io_cfg->planes[i].meta_size);
+			}
 		}
 
 		if (!(wm_data->en_cfg & (0x3 << 16))) {
@@ -3288,7 +3469,7 @@ static int cam_vfe_bus_ver3_update_wm(void *priv, void *cmd_args,
 				wm_data->index, reg_val_pair[j-1]);
 		}
 
-		if (wm_data->en_ubwc)
+		if ((wm_data->en_ubwc) && (!update_buf->use_scratch_cfg))
 			image_buf_offset = io_cfg->planes[i].meta_size;
 		else if (wm_data->en_cfg & (0x3 << 16))
 			image_buf_offset = wm_data->offset;
@@ -3736,6 +3917,7 @@ static int cam_vfe_bus_update_bw_limiter(
 	uint32_t                                  counter_limit = 0, reg_val = 0;
 	uint32_t                                 *reg_val_pair, num_regval_pairs = 0;
 	uint32_t                                  i, j, size = 0;
+	bool                                      limiter_enabled = false;
 
 	bus_priv         = (struct cam_vfe_bus_ver3_priv  *) priv;
 	wm_config_update = (struct cam_isp_hw_get_cmd_update *) cmd_args;
@@ -3791,6 +3973,7 @@ static int cam_vfe_bus_update_bw_limiter(
 		if (wm_bw_limit_cfg->enable_limiter && counter_limit) {
 			reg_val = 1;
 			reg_val |= (counter_limit << 1);
+			limiter_enabled = true;
 		} else {
 			reg_val = 0;
 		}
@@ -3831,6 +4014,7 @@ add_reg_pair:
 		wm_config_update->cmd.used_bytes = 0;
 	}
 
+	vfe_out_data->limiter_enabled = limiter_enabled;
 	return 0;
 }
 
@@ -4041,6 +4225,9 @@ static int cam_vfe_bus_ver3_process_cmd(
 			"disabled" : "enabled");
 		rc = 0;
 		break;
+	case CAM_ISP_HW_CMD_BUF_UPDATE:
+		rc = cam_vfe_bus_ver3_config_wm(priv, cmd_args, arg_size);
+		break;
 	case CAM_ISP_HW_CMD_WM_BW_LIMIT_CONFIG:
 		rc = cam_vfe_bus_update_bw_limiter(priv, cmd_args, arg_size);
 		break;