
msm: camera: tfe: Add support to TFE driver

TFE is the thin front end hardware that captures and processes the
real-time image. Support is added to enable the TFE
hardware.

CRs-Fixed: 2594541
Change-Id: I0ab2eff7924e9e01f6a0fcec772d3a7dda229b37
Signed-off-by: Ravikishore Pampana <[email protected]>
Signed-off-by: Trishansh Bhardwaj <[email protected]>
Trishansh Bhardwaj 5 years ago
parent
commit
cb2e8943da
43 changed files with 18,155 additions and 30 deletions
  1. drivers/Makefile (+16, -0)
  2. drivers/cam_isp/cam_isp_context.c (+62, -9)
  3. drivers/cam_isp/cam_isp_context.h (+7, -2)
  4. drivers/cam_isp/cam_isp_dev.c (+23, -4)
  5. drivers/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c (+7, -8)
  6. drivers/cam_isp/isp_hw_mgr/cam_tfe_hw_mgr.c (+5282, -0)
  7. drivers/cam_isp/isp_hw_mgr/cam_tfe_hw_mgr.h (+196, -0)
  8. drivers/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h (+4, -2)
  9. drivers/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h (+14, -5)
  10. drivers/cam_isp/isp_hw_mgr/isp_hw/include/cam_tfe_csid_hw_intf.h (+179, -0)
  11. drivers/cam_isp/isp_hw_mgr/isp_hw/include/cam_tfe_hw_intf.h (+253, -0)
  12. drivers/cam_isp/isp_hw_mgr/isp_hw/include/cam_top_tpg_hw_intf.h (+74, -0)
  13. drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/Makefile (+15, -0)
  14. drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid530.c (+51, -0)
  15. drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid530.h (+225, -0)
  16. drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_core.c (+2824, -0)
  17. drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_core.h (+412, -0)
  18. drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_dev.c (+139, -0)
  19. drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_dev.h (+16, -0)
  20. drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_soc.c (+209, -0)
  21. drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_soc.h (+119, -0)
  22. drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/Makefile (+13, -0)
  23. drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe.c (+42, -0)
  24. drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe530.h (+813, -0)
  25. drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_bus.c (+2149, -0)
  26. drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_bus.h (+240, -0)
  27. drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_core.c (+2529, -0)
  28. drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_core.h (+272, -0)
  29. drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_dev.c (+197, -0)
  30. drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_dev.h (+38, -0)
  31. drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_irq.h (+31, -0)
  32. drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_soc.c (+240, -0)
  33. drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_soc.h (+117, -0)
  34. drivers/cam_isp/isp_hw_mgr/isp_hw/top_tpg/Makefile (+15, -0)
  35. drivers/cam_isp/isp_hw_mgr/isp_hw/top_tpg/cam_top_tpg_core.c (+671, -0)
  36. drivers/cam_isp/isp_hw_mgr/isp_hw/top_tpg/cam_top_tpg_core.h (+153, -0)
  37. drivers/cam_isp/isp_hw_mgr/isp_hw/top_tpg/cam_top_tpg_dev.c (+140, -0)
  38. drivers/cam_isp/isp_hw_mgr/isp_hw/top_tpg/cam_top_tpg_dev.h (+12, -0)
  39. drivers/cam_isp/isp_hw_mgr/isp_hw/top_tpg/cam_top_tpg_soc.c (+152, -0)
  40. drivers/cam_isp/isp_hw_mgr/isp_hw/top_tpg/cam_top_tpg_soc.h (+78, -0)
  41. drivers/cam_isp/isp_hw_mgr/isp_hw/top_tpg/cam_top_tpg_v1.c (+53, -0)
  42. drivers/cam_isp/isp_hw_mgr/isp_hw/top_tpg/cam_top_tpg_v1.h (+56, -0)
  43. drivers/camera_main.c (+17, -0)

+ 16 - 0
drivers/Makefile

@@ -183,6 +183,22 @@ camera-$(CONFIG_SPECTRA_OPE) += \
 	cam_ope/ope_hw_mgr/ope_hw/bus_rd/ope_bus_rd.o\
 	cam_ope/ope_hw_mgr/ope_hw/bus_wr/ope_bus_wr.o
 
+camera-$(CONFIG_SPECTRA_CAMERA) += \
+	cam_isp/isp_hw_mgr/cam_tfe_hw_mgr.o \
+	cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_soc.o \
+	cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_dev.o \
+	cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_core.o \
+	cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_bus.o \
+	cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe.o \
+	cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_dev.o \
+	cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_soc.o \
+	cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_core.o \
+	cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid530.o \
+	cam_isp/isp_hw_mgr/isp_hw/top_tpg/cam_top_tpg_dev.o \
+	cam_isp/isp_hw_mgr/isp_hw/top_tpg/cam_top_tpg_soc.o \
+	cam_isp/isp_hw_mgr/isp_hw/top_tpg/cam_top_tpg_core.o \
+	cam_isp/isp_hw_mgr/isp_hw/top_tpg/cam_top_tpg_v1.o
+
 camera-y += camera_main.o
 
 obj-$(CONFIG_SPECTRA_CAMERA) += camera.o

+ 62 - 9
drivers/cam_isp/cam_isp_context.c

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/debugfs.h>
@@ -376,6 +376,38 @@ static const char *__cam_isp_resource_handle_id_to_type(
 	}
 }
 
+static const char *__cam_isp_tfe_resource_handle_id_to_type(
+	uint32_t resource_handle)
+{
+	switch (resource_handle) {
+	case CAM_ISP_TFE_OUT_RES_FULL:
+		return "FULL";
+	case CAM_ISP_TFE_OUT_RES_RAW_DUMP:
+		return "RAW_DUMP";
+	case CAM_ISP_TFE_OUT_RES_PDAF:
+		return "PDAF";
+	case CAM_ISP_TFE_OUT_RES_RDI_0:
+		return "RDI_0";
+	case CAM_ISP_TFE_OUT_RES_RDI_1:
+		return "RDI_1";
+	case CAM_ISP_TFE_OUT_RES_RDI_2:
+		return "RDI_2";
+	case CAM_ISP_TFE_OUT_RES_STATS_HDR_BE:
+		return "STATS_HDR_BE";
+	case CAM_ISP_TFE_OUT_RES_STATS_HDR_BHIST:
+		return "STATS_HDR_BHIST";
+	case CAM_ISP_TFE_OUT_RES_STATS_TL_BG:
+		return "STATS_TL_BG";
+	case CAM_ISP_TFE_OUT_RES_STATS_BF:
+		return "STATS_BF";
+	case CAM_ISP_TFE_OUT_RES_STATS_AWB_BG:
+		return "STATS_AWB_BG";
+	default:
+		return "CAM_ISP_Invalid_Resource_Type";
+	}
+}
+
 static uint64_t __cam_isp_ctx_get_event_ts(uint32_t evt_id, void *evt_data)
 {
 	uint64_t ts = 0;
@@ -471,9 +503,11 @@ static void __cam_isp_ctx_send_sof_timestamp(
 }
 
 static void __cam_isp_ctx_handle_buf_done_fail_log(
-	uint64_t request_id, struct cam_isp_ctx_req *req_isp)
+	uint64_t request_id, struct cam_isp_ctx_req *req_isp,
+	uint32_t isp_device_type)
 {
 	int i;
+	const char *handle_type;
 
 	if (req_isp->num_fence_map_out >= CAM_ISP_CTX_RES_MAX) {
 		CAM_ERR(CAM_ISP,
@@ -490,10 +524,18 @@ static void __cam_isp_ctx_handle_buf_done_fail_log(
 		"Resource Handles that fail to generate buf_done in prev frame");
 	for (i = 0; i < req_isp->num_fence_map_out; i++) {
 		if (req_isp->fence_map_out[i].sync_id != -1) {
+			if (isp_device_type == CAM_IFE_DEVICE_TYPE)
+				handle_type =
+				__cam_isp_resource_handle_id_to_type(
+				req_isp->fence_map_out[i].resource_handle);
+			else
+				handle_type =
+				__cam_isp_tfe_resource_handle_id_to_type(
+				req_isp->fence_map_out[i].resource_handle);
+
 			CAM_WARN(CAM_ISP,
 			"Resource_Handle: [%s][0x%x] Sync_ID: [0x%x]",
-			__cam_isp_resource_handle_id_to_type(
-			req_isp->fence_map_out[i].resource_handle),
+			handle_type,
 			req_isp->fence_map_out[i].resource_handle,
 			req_isp->fence_map_out[i].sync_id);
 		}
@@ -512,6 +554,7 @@ static int __cam_isp_ctx_handle_buf_done_for_request(
 	struct cam_isp_ctx_req  *req_isp;
 	struct cam_context *ctx = ctx_isp->base;
 	uint64_t buf_done_req_id;
+	const char *handle_type;
 
 	trace_cam_buf_done("ISP", ctx, req);
 
@@ -541,11 +584,18 @@ static int __cam_isp_ctx_handle_buf_done_for_request(
 		}
 
 		if (req_isp->fence_map_out[j].sync_id == -1) {
+			if (ctx_isp->isp_device_type == CAM_IFE_DEVICE_TYPE)
+				handle_type =
+				__cam_isp_resource_handle_id_to_type(
+				req_isp->fence_map_out[j].resource_handle);
+			else
+				handle_type =
+				__cam_isp_tfe_resource_handle_id_to_type(
+				req_isp->fence_map_out[j].resource_handle);
+
 			CAM_WARN(CAM_ISP,
 				"Duplicate BUF_DONE for req %lld : i=%d, j=%d, res=%s",
-				req->request_id, i, j,
-				__cam_isp_resource_handle_id_to_type(
-				done->resource_handle[i]));
+				req->request_id, i, j, handle_type);
 
 			if (done_next_req) {
 				done_next_req->resource_handle
@@ -1967,7 +2017,8 @@ static int __cam_isp_ctx_apply_req_in_activated_state(
 			active_req_isp =
 				(struct cam_isp_ctx_req *) active_req->req_priv;
 			__cam_isp_ctx_handle_buf_done_fail_log(
-				active_req->request_id, active_req_isp);
+				active_req->request_id, active_req_isp,
+				ctx_isp->isp_device_type);
 		}
 
 		rc = -EFAULT;
@@ -4354,7 +4405,8 @@ int cam_isp_context_init(struct cam_isp_context *ctx,
 	struct cam_context *ctx_base,
 	struct cam_req_mgr_kmd_ops *crm_node_intf,
 	struct cam_hw_mgr_intf *hw_intf,
-	uint32_t ctx_id)
+	uint32_t ctx_id,
+	uint32_t isp_device_type)
 
 {
 	int rc = -1;
@@ -4378,6 +4430,7 @@ int cam_isp_context_init(struct cam_isp_context *ctx,
 	ctx->substate_machine = cam_isp_ctx_activated_state_machine;
 	ctx->substate_machine_irq = cam_isp_ctx_activated_state_machine_irq;
 	ctx->init_timestamp = jiffies_to_msecs(jiffies);
+	ctx->isp_device_type = isp_device_type;
 
 	for (i = 0; i < CAM_CTX_REQ_MAX; i++) {
 		ctx->req_base[i].req_priv = &ctx->req_isp[i];

+ 7 - 2
drivers/cam_isp/cam_isp_context.h

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_ISP_CONTEXT_H_
@@ -10,6 +10,7 @@
 #include <linux/spinlock.h>
 #include <media/cam_isp.h>
 #include <media/cam_defs.h>
+#include <media/cam_tfe.h>
 
 #include "cam_context.h"
 #include "cam_isp_hw_mgr_intf.h"
@@ -177,6 +178,7 @@ struct cam_isp_context_state_monitor {
  * @init_received:             Indicate whether init config packet is received
  * @split_acquire:             Indicate whether a separate acquire is expected
  * @init_timestamp:            Timestamp at which this context is initialized
+ * @isp_device_type:           ISP device type
  *
  */
 struct cam_isp_context {
@@ -207,6 +209,7 @@ struct cam_isp_context {
 	bool                                  init_received;
 	bool                                  split_acquire;
 	unsigned int                          init_timestamp;
+	uint32_t                              isp_device_type;
 };
 
 /**
@@ -218,13 +221,15 @@ struct cam_isp_context {
  * @bridge_ops:         Bridge callback function
  * @hw_intf:            ISP hw manager interface
  * @ctx_id:             ID for this context
+ * @isp_device_type:    ISP device type
  *
  */
 int cam_isp_context_init(struct cam_isp_context *ctx,
 	struct cam_context *ctx_base,
 	struct cam_req_mgr_kmd_ops *bridge_ops,
 	struct cam_hw_mgr_intf *hw_intf,
-	uint32_t ctx_id);
+	uint32_t ctx_id,
+	uint32_t isp_device_type);
 
 /**
  * cam_isp_context_deinit()

+ 23 - 4
drivers/cam_isp/cam_isp_dev.c

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/delay.h>
@@ -117,12 +117,30 @@ static int cam_isp_dev_probe(struct platform_device *pdev)
 	int i;
 	struct cam_hw_mgr_intf         hw_mgr_intf;
 	struct cam_node               *node;
+	const char                    *compat_str = NULL;
+	uint32_t                       isp_device_type;
+
 	int iommu_hdl = -1;
 
+	/* "arch-compat" identifies whether this ISP node is an IFE or a TFE */
+	rc = of_property_read_string_index(pdev->dev.of_node, "arch-compat", 0,
+		(const char **)&compat_str);
+	if (rc)
+		return rc;
+
 	g_isp_dev.sd.internal_ops = &cam_isp_subdev_internal_ops;
 	/* Initialize the v4l2 subdevice first. (create cam_node) */
-	rc = cam_subdev_probe(&g_isp_dev.sd, pdev, CAM_ISP_DEV_NAME,
+	if (strnstr(compat_str, "ife", strlen(compat_str))) {
+		rc = cam_subdev_probe(&g_isp_dev.sd, pdev, CAM_ISP_DEV_NAME,
 		CAM_IFE_DEVICE_TYPE);
+		isp_device_type = CAM_IFE_DEVICE_TYPE;
+	} else if (strnstr(compat_str, "tfe", strlen(compat_str))) {
+		rc = cam_subdev_probe(&g_isp_dev.sd, pdev, CAM_ISP_DEV_NAME,
+		CAM_TFE_DEVICE_TYPE);
+		isp_device_type = CAM_TFE_DEVICE_TYPE;
+	} else {
+		CAM_ERR(CAM_ISP, "Invalid ISP hw type %s", compat_str);
+		rc = -EINVAL;
+		goto err;
+	}
+
 	if (rc) {
 		CAM_ERR(CAM_ISP, "ISP cam_subdev_probe failed!");
 		goto err;
@@ -130,7 +148,7 @@ static int cam_isp_dev_probe(struct platform_device *pdev)
 	node = (struct cam_node *) g_isp_dev.sd.token;
 
 	memset(&hw_mgr_intf, 0, sizeof(hw_mgr_intf));
-	rc = cam_isp_hw_mgr_init(pdev->dev.of_node, &hw_mgr_intf, &iommu_hdl);
+	rc = cam_isp_hw_mgr_init(compat_str, &hw_mgr_intf, &iommu_hdl);
 	if (rc != 0) {
 		CAM_ERR(CAM_ISP, "Can not initialize ISP HW manager!");
 		goto unregister;
@@ -141,7 +159,8 @@ static int cam_isp_dev_probe(struct platform_device *pdev)
 			&g_isp_dev.ctx[i],
 			&node->crm_node_intf,
 			&node->hw_mgr_intf,
-			i);
+			i,
+			isp_device_type);
 		if (rc) {
 			CAM_ERR(CAM_ISP, "ISP context init failed!");
 			goto unregister;

+ 7 - 8
drivers/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c

@@ -1,26 +1,25 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
  */
 
 #include "cam_isp_hw_mgr_intf.h"
 #include "cam_ife_hw_mgr.h"
 #include "cam_debug_util.h"
+#include "cam_tfe_hw_mgr.h"
 
 
-int cam_isp_hw_mgr_init(struct device_node *of_node,
+int cam_isp_hw_mgr_init(const char   *device_name_str,
 	struct cam_hw_mgr_intf *hw_mgr, int *iommu_hdl)
 {
 	int rc = 0;
-	const char *compat_str = NULL;
 
-	rc = of_property_read_string_index(of_node, "arch-compat", 0,
-		(const char **)&compat_str);
-
-	if (strnstr(compat_str, "ife", strlen(compat_str)))
+	if (strnstr(device_name_str, "ife", strlen(device_name_str)))
 		rc = cam_ife_hw_mgr_init(hw_mgr, iommu_hdl);
+	else if (strnstr(device_name_str, "tfe", strlen(device_name_str)))
+		rc = cam_tfe_hw_mgr_init(hw_mgr, iommu_hdl);
 	else {
-		CAM_ERR(CAM_ISP, "Invalid ISP hw type");
+		CAM_ERR(CAM_ISP, "Invalid ISP hw type: %s", device_name_str);
 		rc = -EINVAL;
 	}
 

+ 5282 - 0
drivers/cam_isp/isp_hw_mgr/cam_tfe_hw_mgr.c

@@ -0,0 +1,5282 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <media/cam_tfe.h>
+#include "cam_smmu_api.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_isp_hw_mgr_intf.h"
+#include "cam_isp_hw.h"
+#include "cam_tfe_csid_hw_intf.h"
+#include "cam_tfe_hw_intf.h"
+#include "cam_isp_packet_parser.h"
+#include "cam_tfe_hw_mgr.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_packet_util.h"
+#include "cam_debug_util.h"
+#include "cam_cpas_api.h"
+#include "cam_mem_mgr_api.h"
+#include "cam_common_util.h"
+#include "cam_compat.h"
+
+#define CAM_TFE_HW_ENTRIES_MAX  20
+#define CAM_TFE_HW_CONFIG_TIMEOUT 60
+
+#define TZ_SVC_SMMU_PROGRAM 0x15
+#define TZ_SAFE_SYSCALL_ID  0x3
+#define CAM_TFE_SAFE_DISABLE 0
+#define CAM_TFE_SAFE_ENABLE 1
+#define SMMU_SE_TFE 0
+
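+/* Singleton TFE hw manager state, shared by all contexts via ctx_mutex */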
+static struct cam_tfe_hw_mgr g_tfe_hw_mgr;
+
+static int cam_tfe_hw_mgr_event_handler(
+	void                                *priv,
+	uint32_t                             evt_id,
+	void                                *evt_info);
+
+static int cam_tfe_mgr_regspace_data_cb(uint32_t reg_base_type,
+	void *hw_mgr_ctx, struct cam_hw_soc_info **soc_info_ptr,
+	uint32_t *reg_base_idx)
+{
+	int rc = 0;
+	struct cam_isp_hw_mgr_res *hw_mgr_res;
+	struct cam_hw_soc_info    *soc_info = NULL;
+	struct cam_isp_resource_node       *res;
+	struct cam_tfe_hw_mgr_ctx *ctx =
+		(struct cam_tfe_hw_mgr_ctx *) hw_mgr_ctx;
+
+	*soc_info_ptr = NULL;
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in, list) {
+		if (hw_mgr_res->res_id != CAM_ISP_HW_TFE_IN_CAMIF)
+			continue;
+
+		switch (reg_base_type) {
+		case CAM_REG_DUMP_BASE_TYPE_CAMNOC:
+		case CAM_REG_DUMP_BASE_TYPE_ISP_LEFT:
+			if (!hw_mgr_res->hw_res[CAM_ISP_HW_SPLIT_LEFT])
+				continue;
+
+			res = hw_mgr_res->hw_res[CAM_ISP_HW_SPLIT_LEFT];
+			rc = res->hw_intf->hw_ops.process_cmd(
+				res->hw_intf->hw_priv,
+				CAM_ISP_HW_CMD_QUERY_REGSPACE_DATA,
+				&soc_info, sizeof(void *));
+
+			if (rc) {
+				CAM_ERR(CAM_ISP,
+					"Failed in regspace data query split idx: %d rc : %d",
+					CAM_ISP_HW_SPLIT_LEFT, rc);
+				return rc;
+			}
+
+			if (reg_base_type == CAM_REG_DUMP_BASE_TYPE_ISP_LEFT)
+				*reg_base_idx = 0;
+			else
+				*reg_base_idx = 1;
+
+			*soc_info_ptr = soc_info;
+			break;
+		case CAM_REG_DUMP_BASE_TYPE_ISP_RIGHT:
+			if (!hw_mgr_res->hw_res[CAM_ISP_HW_SPLIT_RIGHT])
+				continue;
+
+			res = hw_mgr_res->hw_res[CAM_ISP_HW_SPLIT_RIGHT];
+			rc = res->hw_intf->hw_ops.process_cmd(
+				res->hw_intf->hw_priv,
+				CAM_ISP_HW_CMD_QUERY_REGSPACE_DATA,
+				&soc_info, sizeof(void *));
+
+			if (rc) {
+				CAM_ERR(CAM_ISP,
+					"Failed in regspace data query split idx: %d rc : %d",
+					CAM_ISP_HW_SPLIT_RIGHT, rc);
+				return rc;
+			}
+
+			*reg_base_idx = 0;
+			*soc_info_ptr = soc_info;
+			break;
+		default:
+			CAM_ERR(CAM_ISP,
+				"Unrecognized reg base type: %d",
+				reg_base_type);
+			return -EINVAL;
+		}
+	}
+
+	return rc;
+}
+
+static int cam_tfe_mgr_handle_reg_dump(struct cam_tfe_hw_mgr_ctx *ctx,
+	struct cam_cmd_buf_desc *reg_dump_buf_desc, uint32_t num_reg_dump_buf,
+	uint32_t meta_type)
+{
+	int rc = 0, i;
+
+	if (!num_reg_dump_buf || !reg_dump_buf_desc) {
+		CAM_DBG(CAM_ISP,
+			"Invalid args for reg dump req_id: [%llu] ctx idx: [%u] meta_type: [%u] num_reg_dump_buf: [%u] reg_dump_buf_desc: [%pK]",
+			ctx->applied_req_id, ctx->ctx_index, meta_type,
+			num_reg_dump_buf, reg_dump_buf_desc);
+		return rc;
+	}
+
+	if (!atomic_read(&ctx->cdm_done))
+		CAM_WARN_RATE_LIMIT(CAM_ISP,
+			"Reg dump values might be from more than one request");
+
+	for (i = 0; i < num_reg_dump_buf; i++) {
+		CAM_DBG(CAM_ISP, "Reg dump cmd meta data: %u req_type: %u",
+			reg_dump_buf_desc[i].meta_data, meta_type);
+		if (reg_dump_buf_desc[i].meta_data == meta_type) {
+			rc = cam_soc_util_reg_dump_to_cmd_buf(ctx,
+				&reg_dump_buf_desc[i],
+				ctx->applied_req_id,
+				cam_tfe_mgr_regspace_data_cb);
+			if (rc) {
+				CAM_ERR(CAM_ISP,
+					"Reg dump failed at idx: %d, rc: %d req_id: %llu meta type: %u",
+					i, rc, ctx->applied_req_id, meta_type);
+				return rc;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int cam_tfe_mgr_get_hw_caps(void *hw_mgr_priv,
+	void *hw_caps_args)
+{
+	int rc = 0;
+	int i;
+	uint32_t num_dev = 0;
+	struct cam_tfe_hw_mgr                  *hw_mgr = hw_mgr_priv;
+	struct cam_query_cap_cmd               *query = hw_caps_args;
+	struct cam_isp_tfe_query_cap_cmd        query_isp;
+
+	CAM_DBG(CAM_ISP, "enter");
+
+	if (copy_from_user(&query_isp,
+		u64_to_user_ptr(query->caps_handle),
+		sizeof(struct cam_isp_tfe_query_cap_cmd))) {
+		rc = -EFAULT;
+		return rc;
+	}
+
+	query_isp.device_iommu.non_secure = hw_mgr->mgr_common.img_iommu_hdl;
+	query_isp.device_iommu.secure = hw_mgr->mgr_common.img_iommu_hdl_secure;
+	query_isp.cdm_iommu.non_secure = hw_mgr->mgr_common.cmd_iommu_hdl;
+	query_isp.cdm_iommu.secure = hw_mgr->mgr_common.cmd_iommu_hdl_secure;
+
+	for (i = 0; i < CAM_TFE_CSID_HW_NUM_MAX; i++) {
+		if (!hw_mgr->csid_devices[i])
+			continue;
+
+		query_isp.dev_caps[i].hw_type = CAM_ISP_TFE_HW_TFE;
+		query_isp.dev_caps[i].hw_version.major = 5;
+		query_isp.dev_caps[i].hw_version.minor = 3;
+		query_isp.dev_caps[i].hw_version.incr = 0;
+
+		/*
+		 * device number is based on number of full tfe
+		 * if pix is not supported, set reserve to 1
+		 */
+		if (hw_mgr->tfe_csid_dev_caps[i].num_pix) {
+			query_isp.dev_caps[i].hw_version.reserved = 0;
+			num_dev++;
+		} else
+			query_isp.dev_caps[i].hw_version.reserved = 1;
+	}
+
+	query_isp.num_dev = num_dev;
+
+	if (copy_to_user(u64_to_user_ptr(query->caps_handle),
+		&query_isp, sizeof(struct cam_isp_tfe_query_cap_cmd)))
+		rc = -EFAULT;
+
+	CAM_DBG(CAM_ISP, "exit rc :%d", rc);
+
+	return rc;
+}
+
+static int cam_tfe_hw_mgr_is_rdi_res(uint32_t res_id)
+{
+	int rc = 0;
+
+	switch (res_id) {
+	case CAM_ISP_TFE_OUT_RES_RDI_0:
+	case CAM_ISP_TFE_OUT_RES_RDI_1:
+	case CAM_ISP_TFE_OUT_RES_RDI_2:
+		rc = 1;
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
+
+static int cam_tfe_hw_mgr_convert_rdi_out_res_id_to_in_res(int res_id)
+{
+	if (res_id == CAM_ISP_TFE_OUT_RES_RDI_0)
+		return CAM_ISP_HW_TFE_IN_RDI0;
+	else if (res_id == CAM_ISP_TFE_OUT_RES_RDI_1)
+		return CAM_ISP_HW_TFE_IN_RDI1;
+	else if (res_id == CAM_ISP_TFE_OUT_RES_RDI_2)
+		return CAM_ISP_HW_TFE_IN_RDI2;
+
+	return CAM_ISP_HW_TFE_IN_MAX;
+}
+
+static int cam_tfe_hw_mgr_reset_csid_res(
+	struct cam_isp_hw_mgr_res   *isp_hw_res)
+{
+	int i;
+	int rc = 0;
+	struct cam_hw_intf      *hw_intf;
+	struct cam_tfe_csid_reset_cfg_args  csid_reset_args;
+
+	csid_reset_args.reset_type = CAM_TFE_CSID_RESET_PATH;
+
+	for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+		if (!isp_hw_res->hw_res[i])
+			continue;
+		csid_reset_args.node_res = isp_hw_res->hw_res[i];
+		hw_intf = isp_hw_res->hw_res[i]->hw_intf;
+		CAM_DBG(CAM_ISP, "Resetting csid hardware %d",
+			hw_intf->hw_idx);
+		if (hw_intf->hw_ops.reset) {
+			rc = hw_intf->hw_ops.reset(hw_intf->hw_priv,
+				&csid_reset_args,
+				sizeof(struct cam_tfe_csid_reset_cfg_args));
+			if (rc <= 0)
+				goto err;
+		}
+	}
+
+	return 0;
+err:
+	CAM_ERR(CAM_ISP, "RESET HW res failed: (type:%d, id:%d)",
+		isp_hw_res->res_type, isp_hw_res->res_id);
+	return rc;
+}
+
+static int cam_tfe_hw_mgr_init_hw_res(
+	struct cam_isp_hw_mgr_res   *isp_hw_res)
+{
+	int i;
+	int rc = -EINVAL;
+	struct cam_hw_intf      *hw_intf;
+
+	for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+		if (!isp_hw_res->hw_res[i])
+			continue;
+		hw_intf = isp_hw_res->hw_res[i]->hw_intf;
+		CAM_DBG(CAM_ISP, "hw type %d hw index:%d",
+			hw_intf->hw_type, hw_intf->hw_idx);
+		if (hw_intf->hw_ops.init) {
+			rc = hw_intf->hw_ops.init(hw_intf->hw_priv,
+				isp_hw_res->hw_res[i],
+				sizeof(struct cam_isp_resource_node));
+			if (rc)
+				goto err;
+		}
+	}
+
+	return 0;
+err:
+	CAM_ERR(CAM_ISP, "INIT HW res failed: (type:%d, id:%d)",
+		isp_hw_res->res_type, isp_hw_res->res_id);
+	return rc;
+}
+
+static int cam_tfe_hw_mgr_start_hw_res(
+	struct cam_isp_hw_mgr_res   *isp_hw_res,
+	struct cam_tfe_hw_mgr_ctx   *ctx)
+{
+	int i;
+	int rc = -EINVAL;
+	struct cam_hw_intf      *hw_intf;
+
+	/* Start slave (which is right split) first */
+	for (i = CAM_ISP_HW_SPLIT_MAX - 1; i >= 0; i--) {
+		if (!isp_hw_res->hw_res[i])
+			continue;
+		hw_intf = isp_hw_res->hw_res[i]->hw_intf;
+		if (hw_intf->hw_ops.start) {
+			rc = hw_intf->hw_ops.start(hw_intf->hw_priv,
+				isp_hw_res->hw_res[i],
+				sizeof(struct cam_isp_resource_node));
+			if (rc) {
+				CAM_ERR(CAM_ISP, "Can not start HW resources");
+				goto err;
+			}
+			CAM_DBG(CAM_ISP, "Start hw type:%d HW idx %d Res %d",
+				hw_intf->hw_type,
+				hw_intf->hw_idx,
+				isp_hw_res->hw_res[i]->res_id);
+		} else {
+			CAM_ERR(CAM_ISP, "function null");
+			goto err;
+		}
+	}
+
+	return 0;
+err:
+	CAM_ERR(CAM_ISP, "Start hw res failed (type:%d, id:%d)",
+		isp_hw_res->res_type, isp_hw_res->res_id);
+	return rc;
+}
+
+static void cam_tfe_hw_mgr_stop_hw_res(
+	struct cam_isp_hw_mgr_res   *isp_hw_res)
+{
+	int i;
+	struct cam_hw_intf      *hw_intf;
+	uint32_t dummy_args;
+
+	for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+		if (!isp_hw_res->hw_res[i])
+			continue;
+		hw_intf = isp_hw_res->hw_res[i]->hw_intf;
+
+		if (isp_hw_res->hw_res[i]->res_state !=
+			CAM_ISP_RESOURCE_STATE_STREAMING)
+			continue;
+
+		if (hw_intf->hw_ops.stop)
+			hw_intf->hw_ops.stop(hw_intf->hw_priv,
+				isp_hw_res->hw_res[i],
+				sizeof(struct cam_isp_resource_node));
+		else
+			CAM_ERR(CAM_ISP, "stop null");
+		if (hw_intf->hw_ops.process_cmd &&
+			isp_hw_res->res_type == CAM_ISP_RESOURCE_TFE_OUT) {
+			hw_intf->hw_ops.process_cmd(hw_intf->hw_priv,
+				CAM_ISP_HW_CMD_STOP_BUS_ERR_IRQ,
+				&dummy_args, sizeof(dummy_args));
+		}
+	}
+}
+
+static void cam_tfe_hw_mgr_deinit_hw_res(
+	struct cam_isp_hw_mgr_res   *isp_hw_res)
+{
+	int i;
+	struct cam_hw_intf      *hw_intf;
+
+	for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+		if (!isp_hw_res->hw_res[i])
+			continue;
+		hw_intf = isp_hw_res->hw_res[i]->hw_intf;
+		if (hw_intf->hw_ops.deinit)
+			hw_intf->hw_ops.deinit(hw_intf->hw_priv,
+				isp_hw_res->hw_res[i],
+				sizeof(struct cam_isp_resource_node));
+	}
+}
+
+static void cam_tfe_hw_mgr_deinit_hw(
+	struct cam_tfe_hw_mgr_ctx *ctx)
+{
+	struct cam_isp_hw_mgr_res *hw_mgr_res;
+
+	if (!ctx->init_done) {
+		CAM_WARN(CAM_ISP, "ctx is not in init state");
+		return;
+	}
+
+	/* Deinit TFE CSID hw */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_csid, list) {
+		CAM_DBG(CAM_ISP, "Going to DeInit TFE CSID");
+		cam_tfe_hw_mgr_deinit_hw_res(hw_mgr_res);
+	}
+
+	/* Deinit TFE HW */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in, list) {
+		cam_tfe_hw_mgr_deinit_hw_res(hw_mgr_res);
+	}
+
+	if (ctx->is_tpg)
+		cam_tfe_hw_mgr_deinit_hw_res(&ctx->res_list_tpg);
+
+	ctx->init_done = false;
+}
+
+static int cam_tfe_hw_mgr_init_hw(
+	struct cam_tfe_hw_mgr_ctx *ctx)
+{
+	struct cam_isp_hw_mgr_res *hw_mgr_res;
+	int rc = 0;
+
+	if (ctx->is_tpg) {
+		CAM_DBG(CAM_ISP, "INIT TPG ... in ctx id:%d",
+			ctx->ctx_index);
+		rc = cam_tfe_hw_mgr_init_hw_res(&ctx->res_list_tpg);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Can not INIT TFE TPG(id :%d)",
+				ctx->res_list_tpg.hw_res[0]->hw_intf->hw_idx);
+			goto deinit;
+		}
+	}
+
+	CAM_DBG(CAM_ISP, "INIT TFE csid ... in ctx id:%d",
+		ctx->ctx_index);
+	/* INIT TFE csid */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_csid, list) {
+		rc = cam_tfe_hw_mgr_init_hw_res(hw_mgr_res);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Can not INIT TFE CSID(id :%d)",
+				 hw_mgr_res->res_id);
+			goto deinit;
+		}
+	}
+
+	/* INIT TFE IN */
+	CAM_DBG(CAM_ISP, "INIT TFE in resource ctx id:%d",
+		ctx->ctx_index);
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in, list) {
+		rc = cam_tfe_hw_mgr_init_hw_res(hw_mgr_res);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Can not INIT TFE SRC (%d)",
+				 hw_mgr_res->res_id);
+			goto deinit;
+		}
+	}
+
+	return rc;
+deinit:
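+	/* Mark init_done so cam_tfe_hw_mgr_deinit_hw() does not early-return */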
+	ctx->init_done = true;
+	cam_tfe_hw_mgr_deinit_hw(ctx);
+	return rc;
+}
+
+static int cam_tfe_hw_mgr_put_res(
+	struct list_head                *src_list,
+	struct cam_isp_hw_mgr_res      **res)
+{
+	struct cam_isp_hw_mgr_res *res_ptr  = NULL;
+
+	res_ptr = *res;
+	if (res_ptr)
+		list_add_tail(&res_ptr->list, src_list);
+
+	return 0;
+}
+
+static int cam_tfe_hw_mgr_get_res(
+	struct list_head                *src_list,
+	struct cam_isp_hw_mgr_res      **res)
+{
+	int rc = 0;
+	struct cam_isp_hw_mgr_res *res_ptr  = NULL;
+
+	if (!list_empty(src_list)) {
+		res_ptr = list_first_entry(src_list,
+			struct cam_isp_hw_mgr_res, list);
+		list_del_init(&res_ptr->list);
+	} else {
+		CAM_ERR(CAM_ISP, "No more free tfe hw mgr ctx");
+		rc = -EINVAL;
+	}
+	*res = res_ptr;
+
+	return rc;
+}
+
+static int cam_tfe_hw_mgr_free_hw_res(
+	struct cam_isp_hw_mgr_res   *isp_hw_res)
+{
+	int rc = 0;
+	int i;
+	struct cam_hw_intf      *hw_intf;
+
+	for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+		if (!isp_hw_res->hw_res[i])
+			continue;
+		hw_intf = isp_hw_res->hw_res[i]->hw_intf;
+		if (hw_intf->hw_ops.release) {
+			rc = hw_intf->hw_ops.release(hw_intf->hw_priv,
+				isp_hw_res->hw_res[i],
+				sizeof(struct cam_isp_resource_node));
+			if (rc)
+				CAM_ERR(CAM_ISP,
+					"Release hw resource id %d failed",
+					isp_hw_res->res_id);
+			isp_hw_res->hw_res[i] = NULL;
+		} else
+			CAM_ERR(CAM_ISP, "Release null");
+	}
+	/* caller should make sure the resource is in a list */
+	list_del_init(&isp_hw_res->list);
+	memset(isp_hw_res, 0, sizeof(*isp_hw_res));
+	INIT_LIST_HEAD(&isp_hw_res->list);
+
+	return 0;
+}
+
+static int cam_tfe_mgr_csid_stop_hw(
+	struct cam_tfe_hw_mgr_ctx *ctx, struct list_head  *stop_list,
+		uint32_t  base_idx, uint32_t stop_cmd)
+{
+	struct cam_isp_hw_mgr_res      *hw_mgr_res;
+	struct cam_isp_resource_node   *isp_res;
+	struct cam_isp_resource_node   *stop_res[CAM_TFE_CSID_PATH_RES_MAX];
+	struct cam_tfe_csid_hw_stop_args    stop;
+	struct cam_hw_intf             *hw_intf;
+	uint32_t i, cnt;
+
+	cnt = 0;
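+	/* Collect all streaming CSID paths on this base_idx, then stop them in one call */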
+	list_for_each_entry(hw_mgr_res, stop_list, list) {
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!hw_mgr_res->hw_res[i] ||
+				(hw_mgr_res->hw_res[i]->res_state !=
+				CAM_ISP_RESOURCE_STATE_STREAMING))
+				continue;
+
+			isp_res = hw_mgr_res->hw_res[i];
+			if (isp_res->hw_intf->hw_idx != base_idx)
+				continue;
+			CAM_DBG(CAM_ISP, "base_idx %d res_id %d cnt %u",
+				base_idx, isp_res->res_id, cnt);
+			stop_res[cnt] = isp_res;
+			cnt++;
+		}
+	}
+
+	if (cnt) {
+		hw_intf =  stop_res[0]->hw_intf;
+		stop.num_res = cnt;
+		stop.node_res = stop_res;
+		stop.stop_cmd = stop_cmd;
+		hw_intf->hw_ops.stop(hw_intf->hw_priv, &stop, sizeof(stop));
+	}
+
+	return 0;
+}
+
+static int cam_tfe_hw_mgr_release_hw_for_ctx(
+	struct cam_tfe_hw_mgr_ctx  *tfe_ctx)
+{
+	uint32_t                          i;
+	int rc = 0;
+	struct cam_isp_hw_mgr_res        *hw_mgr_res;
+	struct cam_isp_hw_mgr_res        *hw_mgr_res_temp;
+	struct cam_hw_intf               *hw_intf;
+
+	/* tfe out resource */
+	for (i = 0; i < CAM_TFE_HW_OUT_RES_MAX; i++)
+		cam_tfe_hw_mgr_free_hw_res(&tfe_ctx->res_list_tfe_out[i]);
+
+	/* tfe in resource */
+	list_for_each_entry_safe(hw_mgr_res, hw_mgr_res_temp,
+		&tfe_ctx->res_list_tfe_in, list) {
+		cam_tfe_hw_mgr_free_hw_res(hw_mgr_res);
+		cam_tfe_hw_mgr_put_res(&tfe_ctx->free_res_list, &hw_mgr_res);
+	}
+
+	/* tfe csid resource */
+	list_for_each_entry_safe(hw_mgr_res, hw_mgr_res_temp,
+		&tfe_ctx->res_list_tfe_csid, list) {
+		cam_tfe_hw_mgr_free_hw_res(hw_mgr_res);
+		cam_tfe_hw_mgr_put_res(&tfe_ctx->free_res_list, &hw_mgr_res);
+	}
+
+	/* release tpg resource */
+	if (tfe_ctx->is_tpg) {
+		hw_intf = tfe_ctx->res_list_tpg.hw_res[0]->hw_intf;
+		if (hw_intf->hw_ops.release) {
+			rc = hw_intf->hw_ops.release(hw_intf->hw_priv,
+				tfe_ctx->res_list_tpg.hw_res[0],
+				sizeof(struct cam_isp_resource_node));
+			if (rc)
+				CAM_ERR(CAM_ISP,
+					"TPG Release hw failed");
+			tfe_ctx->res_list_tpg.hw_res[0] = NULL;
+		} else
+			CAM_ERR(CAM_ISP, "TPG resource Release null");
+	}
+
+	/* clean up the callback function */
+	tfe_ctx->common.cb_priv = NULL;
+	memset(tfe_ctx->common.event_cb, 0, sizeof(tfe_ctx->common.event_cb));
+
+	CAM_DBG(CAM_ISP, "release context completed ctx id:%d",
+		tfe_ctx->ctx_index);
+
+	return 0;
+}
+
+static int cam_tfe_hw_mgr_put_ctx(
+	struct list_head                 *src_list,
+	struct cam_tfe_hw_mgr_ctx       **tfe_ctx)
+{
+	struct cam_tfe_hw_mgr_ctx *ctx_ptr  = NULL;
+
+	mutex_lock(&g_tfe_hw_mgr.ctx_mutex);
+	ctx_ptr = *tfe_ctx;
+	if (ctx_ptr)
+		list_add_tail(&ctx_ptr->list, src_list);
+	*tfe_ctx = NULL;
+	mutex_unlock(&g_tfe_hw_mgr.ctx_mutex);
+	return 0;
+}
+
+static int cam_tfe_hw_mgr_get_ctx(
+	struct list_head                *src_list,
+	struct cam_tfe_hw_mgr_ctx       **tfe_ctx)
+{
+	int rc                              = 0;
+	struct cam_tfe_hw_mgr_ctx *ctx_ptr  = NULL;
+
+	mutex_lock(&g_tfe_hw_mgr.ctx_mutex);
+	if (!list_empty(src_list)) {
+		ctx_ptr = list_first_entry(src_list,
+			struct cam_tfe_hw_mgr_ctx, list);
+		list_del_init(&ctx_ptr->list);
+	} else {
+		CAM_ERR(CAM_ISP, "No more free tfe hw mgr ctx");
+		rc = -EINVAL;
+	}
+	*tfe_ctx = ctx_ptr;
+	mutex_unlock(&g_tfe_hw_mgr.ctx_mutex);
+
+	return rc;
+}
+
+static void cam_tfe_hw_mgr_dump_all_ctx(void)
+{
+	uint32_t i;
+	struct cam_tfe_hw_mgr_ctx       *ctx;
+	struct cam_isp_hw_mgr_res       *hw_mgr_res;
+
+	mutex_lock(&g_tfe_hw_mgr.ctx_mutex);
+	list_for_each_entry(ctx, &g_tfe_hw_mgr.used_ctx_list, list) {
+		CAM_INFO_RATE_LIMIT(CAM_ISP,
+			"ctx id:%d is_dual:%d is_tpg:%d num_base:%d rdi only:%d",
+			ctx->ctx_index, ctx->is_dual, ctx->is_tpg,
+			ctx->num_base, ctx->is_rdi_only_context);
+
+		if (ctx->res_list_tpg.res_type == CAM_ISP_RESOURCE_TPG) {
+			CAM_INFO_RATE_LIMIT(CAM_ISP,
+				"Acquired TPG HW:%d",
+				ctx->res_list_tpg.hw_res[0]->hw_intf->hw_idx);
+		}
+
+		list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_csid,
+			list) {
+			for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+				if (!hw_mgr_res->hw_res[i])
+					continue;
+
+				CAM_INFO_RATE_LIMIT(CAM_ISP,
+					"csid:%d res_type:%d res_id:%d res_state:%d",
+					hw_mgr_res->hw_res[i]->hw_intf->hw_idx,
+					hw_mgr_res->hw_res[i]->res_type,
+					hw_mgr_res->hw_res[i]->res_id,
+					hw_mgr_res->hw_res[i]->res_state);
+			}
+		}
+
+		list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in,
+			list) {
+			for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+				if (!hw_mgr_res->hw_res[i])
+					continue;
+
+				CAM_INFO_RATE_LIMIT(CAM_ISP,
+					"TFE IN:%d res_type:%d res_id:%d res_state:%d",
+					hw_mgr_res->hw_res[i]->hw_intf->hw_idx,
+					hw_mgr_res->hw_res[i]->res_type,
+					hw_mgr_res->hw_res[i]->res_id,
+					hw_mgr_res->hw_res[i]->res_state);
+			}
+		}
+	}
+	mutex_unlock(&g_tfe_hw_mgr.ctx_mutex);
+
+}
+
+static void cam_tfe_mgr_add_base_info(
+	struct cam_tfe_hw_mgr_ctx       *ctx,
+	enum cam_isp_hw_split_id         split_id,
+	uint32_t                         base_idx)
+{
+	uint32_t    i;
+
+	if (!ctx->num_base) {
+		ctx->base[0].split_id = split_id;
+		ctx->base[0].idx      = base_idx;
+		ctx->num_base++;
+		CAM_DBG(CAM_ISP,
+			"Add split id = %d for base idx = %d num_base=%d",
+			split_id, base_idx, ctx->num_base);
+	} else {
+		/*Check if base index already exists in the list */
+		for (i = 0; i < ctx->num_base; i++) {
+			if (ctx->base[i].idx == base_idx) {
+				if (split_id != CAM_ISP_HW_SPLIT_MAX &&
+					ctx->base[i].split_id ==
+						CAM_ISP_HW_SPLIT_MAX)
+					ctx->base[i].split_id = split_id;
+
+				break;
+			}
+		}
+
+		if (i == ctx->num_base) {
+			ctx->base[ctx->num_base].split_id = split_id;
+			ctx->base[ctx->num_base].idx      = base_idx;
+			ctx->num_base++;
+			CAM_DBG(CAM_ISP,
+				"Add split_id=%d for base idx=%d num_base=%d",
+				 split_id, base_idx, ctx->num_base);
+		}
+	}
+}
+
+static int cam_tfe_mgr_process_base_info(
+	struct cam_tfe_hw_mgr_ctx        *ctx)
+{
+	struct cam_isp_hw_mgr_res        *hw_mgr_res;
+	struct cam_isp_resource_node     *res = NULL;
+	uint32_t i;
+
+	if (list_empty(&ctx->res_list_tfe_in)) {
+		CAM_ERR(CAM_ISP, "tfe in list empty");
+		return -ENODEV;
+	}
+
+	/* TFE in resources */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in, list) {
+		if (hw_mgr_res->res_type == CAM_ISP_RESOURCE_UNINT)
+			continue;
+
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!hw_mgr_res->hw_res[i])
+				continue;
+
+			res = hw_mgr_res->hw_res[i];
+			cam_tfe_mgr_add_base_info(ctx, i,
+					res->hw_intf->hw_idx);
+			CAM_DBG(CAM_ISP, "add base info for hw %d",
+				res->hw_intf->hw_idx);
+		}
+	}
+	CAM_DBG(CAM_ISP, "ctx base num = %d", ctx->num_base);
+
+	return 0;
+}
+
+static int cam_tfe_hw_mgr_acquire_res_tfe_out_rdi(
+	struct cam_tfe_hw_mgr_ctx         *tfe_ctx,
+	struct cam_isp_hw_mgr_res         *tfe_in_res,
+	struct cam_isp_tfe_in_port_info   *in_port)
+{
+	int rc = -EINVAL;
+	struct cam_tfe_acquire_args              tfe_acquire;
+	struct cam_isp_tfe_out_port_info         *out_port = NULL;
+	struct cam_isp_hw_mgr_res                *tfe_out_res;
+	struct cam_hw_intf                       *hw_intf;
+	uint32_t  i, tfe_out_res_id, tfe_in_res_id;
+
+	/* take left resource */
+	tfe_in_res_id = tfe_in_res->hw_res[0]->res_id;
+
+	switch (tfe_in_res_id) {
+	case CAM_ISP_HW_TFE_IN_RDI0:
+		tfe_out_res_id = CAM_ISP_TFE_OUT_RES_RDI_0;
+		break;
+	case CAM_ISP_HW_TFE_IN_RDI1:
+		tfe_out_res_id = CAM_ISP_TFE_OUT_RES_RDI_1;
+		break;
+	case CAM_ISP_HW_TFE_IN_RDI2:
+		tfe_out_res_id = CAM_ISP_TFE_OUT_RES_RDI_2;
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "invalid resource type");
+		goto err;
+	}
+	CAM_DBG(CAM_ISP, "tfe_in_res_id = %d, tfe_out_res_id = %d",
+		tfe_in_res_id, tfe_out_res_id);
+
+	tfe_acquire.rsrc_type = CAM_ISP_RESOURCE_TFE_OUT;
+	tfe_acquire.tasklet = tfe_ctx->common.tasklet_info;
+
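+	/* The low byte of a TFE out resource id indexes res_list_tfe_out */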
+	tfe_out_res = &tfe_ctx->res_list_tfe_out[tfe_out_res_id & 0xFF];
+	for (i = 0; i < in_port->num_out_res; i++) {
+		out_port = &in_port->data[i];
+
+		CAM_DBG(CAM_ISP, "i = %d, tfe_out_res_id = %d, out_port: %d",
+			i, tfe_out_res_id, out_port->res_id);
+
+		if (tfe_out_res_id != out_port->res_id)
+			continue;
+
+		tfe_acquire.tfe_out.cdm_ops = tfe_ctx->cdm_ops;
+		tfe_acquire.priv = tfe_ctx;
+		tfe_acquire.tfe_out.out_port_info = out_port;
+		tfe_acquire.tfe_out.split_id = CAM_ISP_HW_SPLIT_LEFT;
+		tfe_acquire.tfe_out.unique_id = tfe_ctx->ctx_index;
+		tfe_acquire.tfe_out.is_dual = 0;
+		tfe_acquire.event_cb = cam_tfe_hw_mgr_event_handler;
+		hw_intf = tfe_in_res->hw_res[0]->hw_intf;
+		rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
+			&tfe_acquire,
+			sizeof(struct cam_tfe_acquire_args));
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Can not acquire out resource 0x%x",
+				 out_port->res_id);
+			goto err;
+		}
+		break;
+	}
+
+	if (i == in_port->num_out_res) {
+		CAM_ERR(CAM_ISP,
+			"Cannot acquire out resource, i=%d, num_out_res=%d",
+			i, in_port->num_out_res);
+		goto err;
+	}
+
+	tfe_out_res->hw_res[0] = tfe_acquire.tfe_out.rsrc_node;
+	tfe_out_res->is_dual_isp = 0;
+	tfe_out_res->res_id = tfe_out_res_id;
+	tfe_out_res->res_type = CAM_ISP_RESOURCE_TFE_OUT;
+	tfe_in_res->num_children++;
+
+	return 0;
+err:
+	return rc;
+}
+
+static int cam_tfe_hw_mgr_acquire_res_tfe_out_pixel(
+	struct cam_tfe_hw_mgr_ctx           *tfe_ctx,
+	struct cam_isp_hw_mgr_res           *tfe_in_res,
+	struct cam_isp_tfe_in_port_info     *in_port)
+{
+	int rc = -EINVAL;
+	uint32_t  i, j, k;
+	struct cam_tfe_acquire_args               tfe_acquire;
+	struct cam_isp_tfe_out_port_info          *out_port;
+	struct cam_isp_hw_mgr_res                *tfe_out_res;
+	struct cam_hw_intf                       *hw_intf;
+
+	for (i = 0; i < in_port->num_out_res; i++) {
+		out_port = &in_port->data[i];
+		k = out_port->res_id & 0xFF;
+		if (k >= CAM_TFE_HW_OUT_RES_MAX) {
+			CAM_ERR(CAM_ISP, "invalid output resource type 0x%x",
+				 out_port->res_id);
+			continue;
+		}
+
+		if (cam_tfe_hw_mgr_is_rdi_res(out_port->res_id))
+			continue;
+
+		CAM_DBG(CAM_ISP, "res_type 0x%x", out_port->res_id);
+
+		tfe_out_res = &tfe_ctx->res_list_tfe_out[k];
+		tfe_out_res->is_dual_isp = in_port->usage_type;
+
+		tfe_acquire.rsrc_type = CAM_ISP_RESOURCE_TFE_OUT;
+		tfe_acquire.tasklet = tfe_ctx->common.tasklet_info;
+		tfe_acquire.tfe_out.cdm_ops = tfe_ctx->cdm_ops;
+		tfe_acquire.priv = tfe_ctx;
+		tfe_acquire.tfe_out.out_port_info =  out_port;
+		tfe_acquire.tfe_out.is_dual       = tfe_in_res->is_dual_isp;
+		tfe_acquire.tfe_out.unique_id     = tfe_ctx->ctx_index;
+		tfe_acquire.event_cb = cam_tfe_hw_mgr_event_handler;
+
+		for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) {
+			if (!tfe_in_res->hw_res[j])
+				continue;
+
+			hw_intf = tfe_in_res->hw_res[j]->hw_intf;
+
+			if (j == CAM_ISP_HW_SPLIT_LEFT) {
+				tfe_acquire.tfe_out.split_id  =
+					CAM_ISP_HW_SPLIT_LEFT;
+				if (tfe_in_res->is_dual_isp)
+					tfe_acquire.tfe_out.is_master   = 1;
+				else
+					tfe_acquire.tfe_out.is_master   = 0;
+			} else {
+				tfe_acquire.tfe_out.split_id  =
+					CAM_ISP_HW_SPLIT_RIGHT;
+				tfe_acquire.tfe_out.is_master       = 0;
+			}
+
+			rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
+				&tfe_acquire,
+				sizeof(struct cam_tfe_acquire_args));
+			if (rc) {
+				CAM_ERR(CAM_ISP,
+					"Can not acquire out resource 0x%x",
+					out_port->res_id);
+				goto err;
+			}
+
+			tfe_out_res->hw_res[j] =
+				tfe_acquire.tfe_out.rsrc_node;
+			CAM_DBG(CAM_ISP, "resource type :0x%x res id:0x%x",
+				tfe_out_res->hw_res[j]->res_type,
+				tfe_out_res->hw_res[j]->res_id);
+
+		}
+		tfe_out_res->res_type = CAM_ISP_RESOURCE_TFE_OUT;
+		tfe_out_res->res_id = out_port->res_id;
+		tfe_in_res->num_children++;
+	}
+
+	return 0;
+err:
+	/* release resource at the entry function */
+	return rc;
+}
+
+static int cam_tfe_hw_mgr_acquire_res_tfe_out(
+	struct cam_tfe_hw_mgr_ctx         *tfe_ctx,
+	struct cam_isp_tfe_in_port_info   *in_port)
+{
+	int rc = -EINVAL;
+	struct cam_isp_hw_mgr_res       *tfe_in_res;
+
+	list_for_each_entry(tfe_in_res, &tfe_ctx->res_list_tfe_in, list) {
+		if (tfe_in_res->num_children)
+			continue;
+
+		switch (tfe_in_res->res_id) {
+		case CAM_ISP_HW_TFE_IN_CAMIF:
+			rc = cam_tfe_hw_mgr_acquire_res_tfe_out_pixel(tfe_ctx,
+				tfe_in_res, in_port);
+			break;
+		case CAM_ISP_HW_TFE_IN_RDI0:
+		case CAM_ISP_HW_TFE_IN_RDI1:
+		case CAM_ISP_HW_TFE_IN_RDI2:
+			rc = cam_tfe_hw_mgr_acquire_res_tfe_out_rdi(tfe_ctx,
+				tfe_in_res, in_port);
+			break;
+		default:
+			CAM_ERR(CAM_ISP, "Unknown TFE SRC resource: %d",
+				tfe_in_res->res_id);
+			break;
+		}
+		if (rc)
+			goto err;
+	}
+
+	return 0;
+err:
+	/* release resource on entry function */
+	return rc;
+}
+
+static int cam_tfe_hw_mgr_acquire_res_tfe_in(
+	struct cam_tfe_hw_mgr_ctx         *tfe_ctx,
+	struct cam_isp_tfe_in_port_info   *in_port,
+	uint32_t                          *pdaf_enable)
+{
+	int rc                = -EINVAL;
+	int i;
+	struct cam_isp_hw_mgr_res                  *csid_res;
+	struct cam_isp_hw_mgr_res                  *tfe_src_res;
+	struct cam_tfe_acquire_args                 tfe_acquire;
+	struct cam_hw_intf                         *hw_intf;
+	struct cam_tfe_hw_mgr                      *tfe_hw_mgr;
+
+	tfe_hw_mgr = tfe_ctx->hw_mgr;
+
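+	/* For each leaf CSID path (no children yet), reserve the matching TFE input */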
+	list_for_each_entry(csid_res, &tfe_ctx->res_list_tfe_csid, list) {
+		if (csid_res->num_children)
+			continue;
+
+		rc = cam_tfe_hw_mgr_get_res(&tfe_ctx->free_res_list,
+			&tfe_src_res);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "No more free hw mgr resource");
+			goto err;
+		}
+		cam_tfe_hw_mgr_put_res(&tfe_ctx->res_list_tfe_in,
+			&tfe_src_res);
+		tfe_src_res->hw_res[0] = NULL;
+		tfe_src_res->hw_res[1] = NULL;
+
+		tfe_acquire.rsrc_type = CAM_ISP_RESOURCE_TFE_IN;
+		tfe_acquire.tasklet = tfe_ctx->common.tasklet_info;
+		tfe_acquire.tfe_in.cdm_ops = tfe_ctx->cdm_ops;
+		tfe_acquire.tfe_in.in_port = in_port;
+		tfe_acquire.tfe_in.camif_pd_enable = *pdaf_enable;
+		tfe_acquire.priv = tfe_ctx;
+		tfe_acquire.event_cb = cam_tfe_hw_mgr_event_handler;
+
+		switch (csid_res->res_id) {
+		case CAM_TFE_CSID_PATH_RES_IPP:
+			tfe_acquire.tfe_in.res_id =
+				CAM_ISP_HW_TFE_IN_CAMIF;
+
+			if (csid_res->is_dual_isp)
+				tfe_acquire.tfe_in.sync_mode =
+				CAM_ISP_HW_SYNC_MASTER;
+			else
+				tfe_acquire.tfe_in.sync_mode =
+				CAM_ISP_HW_SYNC_NONE;
+
+			break;
+		case CAM_TFE_CSID_PATH_RES_RDI_0:
+			tfe_acquire.tfe_in.res_id = CAM_ISP_HW_TFE_IN_RDI0;
+			tfe_acquire.tfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE;
+			break;
+		case CAM_TFE_CSID_PATH_RES_RDI_1:
+			tfe_acquire.tfe_in.res_id = CAM_ISP_HW_TFE_IN_RDI1;
+			tfe_acquire.tfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE;
+			break;
+		case CAM_TFE_CSID_PATH_RES_RDI_2:
+			tfe_acquire.tfe_in.res_id = CAM_ISP_HW_TFE_IN_RDI2;
+			tfe_acquire.tfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE;
+			break;
+		default:
+			CAM_ERR(CAM_ISP, "Wrong TFE CSID Resource Node");
+			goto err;
+		}
+		tfe_src_res->res_type = tfe_acquire.rsrc_type;
+		tfe_src_res->res_id = tfe_acquire.tfe_in.res_id;
+		tfe_src_res->is_dual_isp = csid_res->is_dual_isp;
+
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!csid_res->hw_res[i])
+				continue;
+
+			hw_intf = tfe_hw_mgr->tfe_devices[
+				csid_res->hw_res[i]->hw_intf->hw_idx];
+
+			/* fill in more acquire information as needed */
+			/* slave CAMIF resource */
+			if (i == CAM_ISP_HW_SPLIT_RIGHT &&
+				tfe_src_res->is_dual_isp) {
+				tfe_acquire.tfe_in.sync_mode =
+				CAM_ISP_HW_SYNC_SLAVE;
+				tfe_acquire.tfe_in.dual_tfe_sync_sel_idx =
+					csid_res->hw_res[0]->hw_intf->hw_idx;
+			} else if (i == CAM_ISP_HW_SPLIT_LEFT &&
+				tfe_src_res->is_dual_isp)
+				tfe_acquire.tfe_in.dual_tfe_sync_sel_idx =
+					csid_res->hw_res[1]->hw_intf->hw_idx;
+
+			rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
+					&tfe_acquire,
+					sizeof(struct cam_tfe_acquire_args));
+			if (rc) {
+				CAM_ERR(CAM_ISP,
+					"Can not acquire TFE HW res %d",
+					csid_res->res_id);
+				goto err;
+			}
+			tfe_src_res->hw_res[i] = tfe_acquire.tfe_in.rsrc_node;
+			CAM_DBG(CAM_ISP,
+				"acquire success TFE:%d  res type :0x%x res id:0x%x",
+				hw_intf->hw_idx,
+				tfe_src_res->hw_res[i]->res_type,
+				tfe_src_res->hw_res[i]->res_id);
+
+		}
+		csid_res->num_children++;
+	}
+
+	return 0;
+err:
+	/* release resource at the entry function */
+	return rc;
+}
+
+static int cam_tfe_hw_mgr_acquire_res_tfe_csid_pxl(
+	struct cam_tfe_hw_mgr_ctx              *tfe_ctx,
+	struct cam_isp_tfe_in_port_info        *in_port)
+{
+	int rc = -EINVAL;
+	int i, j;
+	uint32_t acquired_cnt = 0;
+	struct cam_tfe_hw_mgr                        *tfe_hw_mgr;
+	struct cam_isp_hw_mgr_res                    *csid_res;
+	struct cam_hw_intf                           *hw_intf;
+	struct cam_tfe_csid_hw_reserve_resource_args  csid_acquire;
+	enum cam_tfe_csid_path_res_id                 path_res_id;
+	struct cam_isp_hw_mgr_res        *csid_res_temp, *csid_res_iterator;
+	struct cam_isp_tfe_out_port_info        *out_port = NULL;
+
+	tfe_hw_mgr = tfe_ctx->hw_mgr;
+	/* get csid resource */
+	path_res_id = CAM_TFE_CSID_PATH_RES_IPP;
+
+	rc = cam_tfe_hw_mgr_get_res(&tfe_ctx->free_res_list, &csid_res);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "No more free hw mgr resource");
+		goto end;
+	}
+
+	csid_res_temp = csid_res;
+
+	csid_acquire.res_type = CAM_ISP_RESOURCE_PIX_PATH;
+	csid_acquire.res_id = path_res_id;
+	csid_acquire.in_port = in_port;
+	csid_acquire.out_port = in_port->data;
+	csid_acquire.node_res = NULL;
+	csid_acquire.event_cb_prv = tfe_ctx;
+	csid_acquire.event_cb = cam_tfe_hw_mgr_event_handler;
+	if (in_port->num_out_res)
+		out_port = &(in_port->data[0]);
+
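+	/* When the TPG drives this context, pick the CSID PHY from the TPG hw index */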
+	if (tfe_ctx->is_tpg) {
+		if (tfe_ctx->res_list_tpg.hw_res[0]->hw_intf->hw_idx == 0)
+			csid_acquire.phy_sel = CAM_ISP_TFE_IN_RES_PHY_0;
+		else
+			csid_acquire.phy_sel = CAM_ISP_TFE_IN_RES_PHY_1;
+	}
+
+	if (in_port->usage_type)
+		csid_acquire.sync_mode = CAM_ISP_HW_SYNC_MASTER;
+	else
+		csid_acquire.sync_mode = CAM_ISP_HW_SYNC_NONE;
+
+	/* Try acquiring CSID resource from previously acquired HW */
+	list_for_each_entry(csid_res_iterator, &tfe_ctx->res_list_tfe_csid,
+		list) {
+
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!csid_res_iterator->hw_res[i])
+				continue;
+
+			if (csid_res_iterator->is_secure == 1 ||
+				(csid_res_iterator->is_secure == 0 &&
+				in_port->num_out_res &&
+				out_port->secure_mode == 1))
+				continue;
+
+			hw_intf = csid_res_iterator->hw_res[i]->hw_intf;
+			csid_acquire.master_idx = hw_intf->hw_idx;
+
+			rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
+				&csid_acquire, sizeof(csid_acquire));
+			if (rc) {
+				CAM_DBG(CAM_ISP,
+					"No tfe csid resource from hw %d",
+					hw_intf->hw_idx);
+				continue;
+			}
+
+			csid_res_temp->hw_res[acquired_cnt++] =
+				csid_acquire.node_res;
+
+			CAM_DBG(CAM_ISP,
+				"acquired from old csid(%s)=%d CSID rsrc successfully",
+				(i == 0) ? "left" : "right",
+				hw_intf->hw_idx);
+
+			if (in_port->usage_type && acquired_cnt == 1 &&
+				path_res_id == CAM_TFE_CSID_PATH_RES_IPP)
+				/*
+				 * Continue to acquire Right for IPP.
+				 * Dual TFE for RDI is not currently
+				 * supported.
+				 */
+				continue;
+
+			if (acquired_cnt)
+				/*
+				 * If successfully acquired CSID from
+				 * previously acquired HW, skip the next
+				 * part
+				 */
+				goto acquire_successful;
+		}
+	}
+
+	/*
+	 * If successfully acquired CSID from
+	 * previously acquired HW, skip the next
+	 * part
+	 */
+	if (acquired_cnt)
+		goto acquire_successful;
+
+	/* Acquire Left if not already acquired */
+	if (in_port->usage_type) {
+		for (i = 0; i < CAM_TFE_CSID_HW_NUM_MAX; i++) {
+			if (!tfe_hw_mgr->csid_devices[i])
+				continue;
+
+			hw_intf = tfe_hw_mgr->csid_devices[i];
+			csid_acquire.master_idx = hw_intf->hw_idx;
+			rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
+				&csid_acquire, sizeof(csid_acquire));
+			if (rc)
+				continue;
+			else {
+				csid_res_temp->hw_res[acquired_cnt++] =
+					csid_acquire.node_res;
+				break;
+			}
+		}
+
+		if (i == CAM_TFE_CSID_HW_NUM_MAX || !csid_acquire.node_res) {
+			CAM_ERR(CAM_ISP,
+				"Can not acquire tfe csid path resource %d",
+				path_res_id);
+			goto put_res;
+		}
+	} else {
+		for (i = (CAM_TFE_CSID_HW_NUM_MAX - 1); i >= 0; i--) {
+			if (!tfe_hw_mgr->csid_devices[i])
+				continue;
+
+			hw_intf = tfe_hw_mgr->csid_devices[i];
+			csid_acquire.master_idx = hw_intf->hw_idx;
+			rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
+				&csid_acquire, sizeof(csid_acquire));
+			if (rc)
+				continue;
+			else {
+				csid_res_temp->hw_res[acquired_cnt++] =
+					csid_acquire.node_res;
+				break;
+			}
+		}
+
+		if (i == -1 || !csid_acquire.node_res) {
+			CAM_ERR(CAM_ISP,
+				"Can not acquire tfe csid path resource %d",
+				path_res_id);
+			goto put_res;
+		}
+	}
+acquire_successful:
+	CAM_DBG(CAM_ISP, "CSID path left acquired success. is_dual %d",
+		in_port->usage_type);
+
+	csid_res_temp->res_type = CAM_ISP_RESOURCE_PIX_PATH;
+	csid_res_temp->res_id = path_res_id;
+
+	if (in_port->usage_type) {
+		csid_res_temp->is_dual_isp = 1;
+		tfe_ctx->is_dual = true;
+		tfe_ctx->master_hw_idx =
+			csid_res_temp->hw_res[0]->hw_intf->hw_idx;
+	} else
+		csid_res_temp->is_dual_isp = 0;
+
+	if (in_port->num_out_res)
+		csid_res_temp->is_secure = out_port->secure_mode;
+
+	cam_tfe_hw_mgr_put_res(&tfe_ctx->res_list_tfe_csid, &csid_res);
+
+	/*
+	 * Acquire Right if not already acquired.
+	 * Dual TFE for RDI is not currently supported.
+	 */
+	if (in_port->usage_type && (path_res_id == CAM_TFE_CSID_PATH_RES_IPP)
+		&& (acquired_cnt == 1)) {
+		memset(&csid_acquire, 0, sizeof(csid_acquire));
+		csid_acquire.node_res = NULL;
+		csid_acquire.res_type = CAM_ISP_RESOURCE_PIX_PATH;
+		csid_acquire.res_id = path_res_id;
+		csid_acquire.in_port = in_port;
+		csid_acquire.master_idx =
+			csid_res_temp->hw_res[0]->hw_intf->hw_idx;
+		csid_acquire.sync_mode = CAM_ISP_HW_SYNC_SLAVE;
+		csid_acquire.node_res = NULL;
+		csid_acquire.out_port = in_port->data;
+		csid_acquire.event_cb_prv = tfe_ctx;
+		csid_acquire.event_cb = cam_tfe_hw_mgr_event_handler;
+
+		if (tfe_ctx->is_tpg) {
+			if (tfe_ctx->res_list_tpg.hw_res[0]->hw_intf->hw_idx
+				== 0)
+				csid_acquire.phy_sel = CAM_ISP_TFE_IN_RES_PHY_0;
+			else
+				csid_acquire.phy_sel = CAM_ISP_TFE_IN_RES_PHY_1;
+		}
+
+		for (j = 0; j < CAM_TFE_CSID_HW_NUM_MAX; j++) {
+			if (!tfe_hw_mgr->csid_devices[j])
+				continue;
+
+			if (j == csid_res_temp->hw_res[0]->hw_intf->hw_idx)
+				continue;
+
+			hw_intf = tfe_hw_mgr->csid_devices[j];
+			rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
+				&csid_acquire, sizeof(csid_acquire));
+			if (rc)
+				continue;
+			else
+				break;
+		}
+
+		if (j == CAM_TFE_CSID_HW_NUM_MAX) {
+			CAM_ERR(CAM_ISP,
+				"Can not acquire tfe csid pixel resource");
+			goto end;
+		}
+		csid_res_temp->hw_res[1] = csid_acquire.node_res;
+		CAM_DBG(CAM_ISP, "CSID right acquired success is_dual %d",
+			in_port->usage_type);
+	}
+
+	return 0;
+put_res:
+	cam_tfe_hw_mgr_put_res(&tfe_ctx->free_res_list, &csid_res);
+end:
+	return rc;
+}
+
+static int cam_tfe_hw_mgr_acquire_tpg(
+	struct cam_tfe_hw_mgr_ctx               *tfe_ctx,
+	struct cam_isp_tfe_in_port_info        **in_port,
+	uint32_t                                 num_inport)
+{
+	int rc = -EINVAL;
+	uint32_t i, j = 0;
+	struct cam_tfe_hw_mgr                        *tfe_hw_mgr;
+	struct cam_hw_intf                           *hw_intf;
+	struct cam_top_tpg_hw_reserve_resource_args   tpg_reserve;
+
+	tfe_hw_mgr = tfe_ctx->hw_mgr;
+	/* Ensure node_res is NULL in case no TPG device is reserved below */
+	tpg_reserve.node_res = NULL;
+
+	for (i = 0; i < CAM_TOP_TPG_HW_NUM_MAX; i++) {
+		if (!tfe_hw_mgr->tpg_devices[i])
+			continue;
+
+		hw_intf = tfe_hw_mgr->tpg_devices[i];
+		tpg_reserve.num_inport = num_inport;
+		tpg_reserve.node_res = NULL;
+		for (j = 0; j < num_inport; j++)
+			tpg_reserve.in_port[j] = in_port[j];
+
+		rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
+			&tpg_reserve, sizeof(tpg_reserve));
+		if (!rc)
+			break;
+	}
+
+	if (i == CAM_TOP_TPG_HW_NUM_MAX || !tpg_reserve.node_res) {
+		CAM_ERR(CAM_ISP, "Can not acquire tfe TPG");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	tfe_ctx->res_list_tpg.res_type = CAM_ISP_RESOURCE_TPG;
+	tfe_ctx->res_list_tpg.hw_res[0] = tpg_reserve.node_res;
+
+end:
+	return rc;
+}
+
+static enum cam_tfe_csid_path_res_id
+	cam_tfe_hw_mgr_get_tfe_csid_rdi_res_type(
+	uint32_t                 out_port_type)
+{
+	enum cam_tfe_csid_path_res_id path_id;
+
+	CAM_DBG(CAM_ISP, "out_port_type %x", out_port_type);
+	switch (out_port_type) {
+	case CAM_ISP_TFE_OUT_RES_RDI_0:
+		path_id = CAM_TFE_CSID_PATH_RES_RDI_0;
+		break;
+	case CAM_ISP_TFE_OUT_RES_RDI_1:
+		path_id = CAM_TFE_CSID_PATH_RES_RDI_1;
+		break;
+	case CAM_ISP_TFE_OUT_RES_RDI_2:
+		path_id = CAM_TFE_CSID_PATH_RES_RDI_2;
+		break;
+	default:
+		path_id = CAM_TFE_CSID_PATH_RES_MAX;
+		CAM_DBG(CAM_ISP, "Not an RDI out resource, out_port_type: 0x%x",
+			out_port_type);
+		break;
+	}
+
+	CAM_DBG(CAM_ISP, "out_port %x path_id %d", out_port_type, path_id);
+
+	return path_id;
+}
+
+static int cam_tfe_hw_mgr_acquire_res_tfe_csid_rdi(
+	struct cam_tfe_hw_mgr_ctx         *tfe_ctx,
+	struct cam_isp_tfe_in_port_info   *in_port)
+{
+	int rc = -EINVAL;
+	int i, j;
+
+	struct cam_tfe_hw_mgr               *tfe_hw_mgr;
+	struct cam_isp_hw_mgr_res           *csid_res;
+	struct cam_hw_intf                  *hw_intf;
+	struct cam_isp_tfe_out_port_info    *out_port;
+	struct cam_tfe_csid_hw_reserve_resource_args  csid_acquire;
+	struct cam_isp_hw_mgr_res             *csid_res_iterator;
+	enum cam_tfe_csid_path_res_id        path_res_id;
+
+	tfe_hw_mgr = tfe_ctx->hw_mgr;
+
+	for (j = 0; j < in_port->num_out_res; j++) {
+		out_port = &in_port->data[j];
+		path_res_id = cam_tfe_hw_mgr_get_tfe_csid_rdi_res_type(
+			out_port->res_id);
+
+		if (path_res_id == CAM_TFE_CSID_PATH_RES_MAX)
+			continue;
+
+		rc = cam_tfe_hw_mgr_get_res(&tfe_ctx->free_res_list, &csid_res);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "No more free hw mgr resource");
+			goto end;
+		}
+
+		memset(&csid_acquire, 0, sizeof(csid_acquire));
+		csid_acquire.res_type = CAM_ISP_RESOURCE_PIX_PATH;
+		csid_acquire.res_id = path_res_id;
+		csid_acquire.in_port = in_port;
+		csid_acquire.out_port = in_port->data;
+		csid_acquire.sync_mode = CAM_ISP_HW_SYNC_NONE;
+		csid_acquire.node_res = NULL;
+
+		if (tfe_ctx->is_tpg) {
+			if (tfe_ctx->res_list_tpg.hw_res[0]->hw_intf->hw_idx ==
+				0)
+				csid_acquire.phy_sel = CAM_ISP_TFE_IN_RES_PHY_0;
+			else
+				csid_acquire.phy_sel = CAM_ISP_TFE_IN_RES_PHY_1;
+		}
+
+		/* Try acquiring CSID resource from previously acquired HW */
+		list_for_each_entry(csid_res_iterator,
+			&tfe_ctx->res_list_tfe_csid, list) {
+
+			for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+				if (!csid_res_iterator->hw_res[i])
+					continue;
+
+				if (csid_res_iterator->is_secure == 1 ||
+					(csid_res_iterator->is_secure == 0 &&
+					in_port->num_out_res &&
+					out_port->secure_mode == 1))
+					continue;
+
+				hw_intf = csid_res_iterator->hw_res[i]->hw_intf;
+
+				rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
+					&csid_acquire, sizeof(csid_acquire));
+				if (rc) {
+					CAM_DBG(CAM_ISP,
+						"No tfe csid resource from hw %d",
+						hw_intf->hw_idx);
+					continue;
+				}
+
+				if (csid_acquire.node_res == NULL) {
+					CAM_ERR(CAM_ISP,
+						"Acquire RDI:%d rsrc failed",
+						path_res_id);
+					goto put_res;
+				}
+
+				csid_res->hw_res[0] = csid_acquire.node_res;
+
+				CAM_DBG(CAM_ISP,
+					"acquired from old csid(%s)=%d CSID rsrc successfully",
+					(i == 0) ? "left" : "right",
+					hw_intf->hw_idx);
+				/*
+				 * If successfully acquired CSID from
+				 * previously acquired HW, skip the next
+				 * part
+				 */
+				goto acquire_successful;
+			}
+		}
+
+		/* Acquire if not already acquired */
+		if (tfe_ctx->is_dual) {
+			for (i = 0; i < CAM_TFE_CSID_HW_NUM_MAX; i++) {
+				if (!tfe_hw_mgr->csid_devices[i])
+					continue;
+
+				hw_intf = tfe_hw_mgr->csid_devices[i];
+				rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
+					&csid_acquire, sizeof(csid_acquire));
+				if (rc)
+					continue;
+				else {
+					csid_res->hw_res[0] =
+						csid_acquire.node_res;
+					break;
+				}
+			}
+
+			if (i == CAM_TFE_CSID_HW_NUM_MAX ||
+				!csid_acquire.node_res) {
+				CAM_ERR(CAM_ISP,
+					"Can not acquire tfe csid rdi path%d",
+					path_res_id);
+
+				rc = -EINVAL;
+				goto put_res;
+			}
+		} else {
+			for (i = CAM_TFE_CSID_HW_NUM_MAX - 1; i >= 0; i--) {
+				if (!tfe_hw_mgr->csid_devices[i])
+					continue;
+
+				hw_intf = tfe_hw_mgr->csid_devices[i];
+				rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
+					&csid_acquire, sizeof(csid_acquire));
+				if (rc)
+					continue;
+				else {
+					csid_res->hw_res[0] =
+						csid_acquire.node_res;
+					break;
+				}
+			}
+
+			if (i == -1 || !csid_acquire.node_res) {
+				CAM_ERR(CAM_ISP,
+					"Can not acquire tfe csid rdi path %d",
+					path_res_id);
+
+				rc = -EINVAL;
+				goto put_res;
+			}
+		}
+
+acquire_successful:
+		CAM_DBG(CAM_ISP, "CSID path :%d acquired success", path_res_id);
+		csid_res->res_type = CAM_ISP_RESOURCE_PIX_PATH;
+		csid_res->res_id = path_res_id;
+		csid_res->hw_res[1] = NULL;
+		csid_res->is_dual_isp = 0;
+
+		if (in_port->num_out_res)
+			csid_res->is_secure = out_port->secure_mode;
+
+		cam_tfe_hw_mgr_put_res(&tfe_ctx->res_list_tfe_csid, &csid_res);
+	}
+
+	return 0;
+put_res:
+	cam_tfe_hw_mgr_put_res(&tfe_ctx->free_res_list, &csid_res);
+end:
+	return rc;
+}
+
+static int cam_tfe_hw_mgr_preprocess_port(
+	struct cam_tfe_hw_mgr_ctx       *tfe_ctx,
+	struct cam_isp_tfe_in_port_info *in_port,
+	int                             *ipp_count,
+	int                             *rdi_count,
+	int                             *pdaf_enable)
+{
+	int ipp_num        = 0;
+	int rdi_num        = 0;
+	bool rdi2_enable   = false;
+	uint32_t i;
+	struct cam_isp_tfe_out_port_info      *out_port;
+	struct cam_tfe_hw_mgr                 *tfe_hw_mgr;
+
+	tfe_hw_mgr = tfe_ctx->hw_mgr;
+
+	for (i = 0; i < in_port->num_out_res; i++) {
+		out_port = &in_port->data[i];
+		CAM_DBG(CAM_ISP, "out_res id %d", out_port->res_id);
+
+		if (cam_tfe_hw_mgr_is_rdi_res(out_port->res_id)) {
+			rdi_num++;
+			if (out_port->res_id == CAM_ISP_TFE_OUT_RES_RDI_2)
+				rdi2_enable = true;
+		} else {
+			ipp_num++;
+			if (out_port->res_id == CAM_ISP_TFE_OUT_RES_PDAF)
+				*pdaf_enable = 1;
+		}
+	}
+
+	if (*pdaf_enable && rdi2_enable) {
+		CAM_ERR(CAM_ISP, "invalid out ports: both RDI2 and PDAF enabled");
+		return -EINVAL;
+	}
+
+	*ipp_count = ipp_num;
+	*rdi_count = rdi_num;
+
+	CAM_DBG(CAM_ISP, "rdi: %d ipp: %d pdaf:%d", rdi_num, ipp_num,
+		*pdaf_enable);
+
+	return 0;
+}
+
+static int cam_tfe_mgr_acquire_hw_for_ctx(
+	struct cam_tfe_hw_mgr_ctx              *tfe_ctx,
+	struct cam_isp_tfe_in_port_info        *in_port,
+	uint32_t *num_pix_port, uint32_t  *num_rdi_port,
+	uint32_t *pdaf_enable)
+{
+	int rc                                    = -EINVAL;
+	int is_dual_isp                           = 0;
+	int ipp_count                             = 0;
+	int rdi_count                             = 0;
+
+	is_dual_isp = in_port->usage_type;
+
+	cam_tfe_hw_mgr_preprocess_port(tfe_ctx, in_port, &ipp_count,
+		&rdi_count, pdaf_enable);
+
+	if (!ipp_count && !rdi_count) {
+		CAM_ERR(CAM_ISP, "No PIX or RDI resource");
+		return -EINVAL;
+	}
+
+	if (ipp_count) {
+		/* get tfe csid IPP resource */
+		rc = cam_tfe_hw_mgr_acquire_res_tfe_csid_pxl(tfe_ctx,
+			in_port);
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"Acquire TFE CSID IPP resource Failed");
+			goto err;
+		}
+	}
+
+	if (rdi_count) {
+		/* get tfe csid rdi resource */
+		rc = cam_tfe_hw_mgr_acquire_res_tfe_csid_rdi(tfe_ctx, in_port);
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"Acquire TFE CSID RDI resource Failed");
+			goto err;
+		}
+	}
+
+	rc = cam_tfe_hw_mgr_acquire_res_tfe_in(tfe_ctx, in_port, pdaf_enable);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Acquire TFE IN resource Failed");
+		goto err;
+	}
+
+	CAM_DBG(CAM_ISP, "Acquiring TFE OUT resource...");
+	rc = cam_tfe_hw_mgr_acquire_res_tfe_out(tfe_ctx, in_port);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Acquire TFE OUT resource Failed");
+		goto err;
+	}
+
+	*num_pix_port += ipp_count;
+	*num_rdi_port += rdi_count;
+
+	return 0;
+err:
+	/* resources are released at the acquire entry function */
+	return rc;
+}
+
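+/*
+ * CDM completion callback: on a successful BL commit it signals
+ * config_done, marks the CDM idle and, if enabled, dumps per-request
+ * registers; failures are only logged.
+ */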
+void cam_tfe_cam_cdm_callback(uint32_t handle, void *userdata,
+	enum cam_cdm_cb_status status, uint64_t cookie)
+{
+	struct cam_isp_prepare_hw_update_data *hw_update_data = NULL;
+	struct cam_tfe_hw_mgr_ctx *ctx = NULL;
+
+	if (!userdata) {
+		CAM_ERR(CAM_ISP, "Invalid args");
+		return;
+	}
+
+	hw_update_data = (struct cam_isp_prepare_hw_update_data *)userdata;
+	ctx = (struct cam_tfe_hw_mgr_ctx *)hw_update_data->isp_mgr_ctx;
+
+	if (status == CAM_CDM_CB_STATUS_BL_SUCCESS) {
+		complete_all(&ctx->config_done_complete);
+		atomic_set(&ctx->cdm_done, 1);
+		if (g_tfe_hw_mgr.debug_cfg.per_req_reg_dump)
+			cam_tfe_mgr_handle_reg_dump(ctx,
+				hw_update_data->reg_dump_buf_desc,
+				hw_update_data->num_reg_dump_buf,
+				CAM_ISP_TFE_PACKET_META_REG_DUMP_PER_REQUEST);
+		CAM_DBG(CAM_ISP,
+			"Called by CDM hdl=%x, udata=%pK, status=%d, cookie=%llu ctx_index=%d",
+			 handle, userdata, status, cookie, ctx->ctx_index);
+	} else {
+		CAM_WARN(CAM_ISP,
+			"Called by CDM hdl=%x, udata=%pK, status=%d, cookie=%llu",
+			 handle, userdata, status, cookie);
+	}
+}
+
+/* entry function: acquire_hw */
+static int cam_tfe_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
+{
+	struct cam_tfe_hw_mgr *tfe_hw_mgr            = hw_mgr_priv;
+	struct cam_hw_acquire_args *acquire_args     = acquire_hw_args;
+	int rc                                       = -EINVAL;
+	int i, j;
+	struct cam_tfe_hw_mgr_ctx          *tfe_ctx;
+	struct cam_isp_tfe_in_port_info    *in_port = NULL;
+	struct cam_cdm_acquire_data         cdm_acquire;
+	uint32_t                            num_pix_port_per_in = 0;
+	uint32_t                            num_rdi_port_per_in = 0;
+	uint32_t                            pdaf_enable = 0;
+	uint32_t                            total_pix_port = 0;
+	uint32_t                            total_rdi_port = 0;
+	uint32_t                            in_port_length = 0;
+	uint32_t                            total_in_port_length = 0;
+	struct cam_isp_tfe_acquire_hw_info *acquire_hw_info = NULL;
+	struct cam_isp_tfe_in_port_info
+		*tpg_inport[CAM_TOP_TPG_MAX_SUPPORTED_DT] = {0, 0, 0, 0};
+
+	CAM_DBG(CAM_ISP, "Enter...");
+
+	if (!acquire_args || acquire_args->num_acq <= 0) {
+		CAM_ERR(CAM_ISP, "Nothing to acquire. Seems like error");
+		return -EINVAL;
+	}
+
+	/* get the tfe ctx */
+	rc = cam_tfe_hw_mgr_get_ctx(&tfe_hw_mgr->free_ctx_list, &tfe_ctx);
+	if (rc || !tfe_ctx) {
+		CAM_ERR(CAM_ISP, "Get tfe hw context failed");
+		goto err;
+	}
+
+	tfe_ctx->common.cb_priv = acquire_args->context_data;
+	for (i = 0; i < CAM_ISP_HW_EVENT_MAX; i++)
+		tfe_ctx->common.event_cb[i] = acquire_args->event_cb;
+
+	tfe_ctx->hw_mgr = tfe_hw_mgr;
+
+	memcpy(cdm_acquire.identifier, "tfe", sizeof("tfe"));
+	cdm_acquire.cell_index = 0;
+	cdm_acquire.handle = 0;
+	cdm_acquire.userdata = tfe_ctx;
+	cdm_acquire.priority = CAM_CDM_BL_FIFO_0;
+	cdm_acquire.base_array_cnt = CAM_TFE_HW_NUM_MAX;
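+	/* Map the register base of every available TFE into the CDM */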
+	for (i = 0, j = 0; i < CAM_TFE_HW_NUM_MAX; i++) {
+		if (tfe_hw_mgr->cdm_reg_map[i])
+			cdm_acquire.base_array[j++] =
+				tfe_hw_mgr->cdm_reg_map[i];
+	}
+	cdm_acquire.base_array_cnt = j;
+
+	cdm_acquire.id = CAM_CDM_VIRTUAL;
+	cdm_acquire.cam_cdm_callback = cam_tfe_cam_cdm_callback;
+	rc = cam_cdm_acquire(&cdm_acquire);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Failed to acquire the CDM HW");
+		goto free_ctx;
+	}
+
+	CAM_DBG(CAM_ISP, "Successfully acquired the CDM HW hdl=%x",
+		cdm_acquire.handle);
+	tfe_ctx->cdm_handle = cdm_acquire.handle;
+	tfe_ctx->cdm_ops = cdm_acquire.ops;
+	atomic_set(&tfe_ctx->cdm_done, 1);
+
+	acquire_hw_info = (struct cam_isp_tfe_acquire_hw_info *)
+		acquire_args->acquire_info;
+	in_port = (struct cam_isp_tfe_in_port_info *)
+		((uint8_t *)&acquire_hw_info->data +
+		 acquire_hw_info->input_info_offset);
+
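+	/*
+	 * In port entries are variable length: each header is followed by
+	 * num_out_res out port entries (one is embedded in the struct), so
+	 * the stride is sizeof(in_port_info) +
+	 * (num_out_res - 1) * sizeof(out_port_info).
+	 */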
+	/* Check if any in port has dual TFE usage */
+	tfe_ctx->is_dual = false;
+	for (i = 0; i < acquire_hw_info->num_inputs; i++) {
+		if (in_port->usage_type)
+			tfe_ctx->is_dual = true;
+
+		in_port_length =
+			sizeof(struct cam_isp_tfe_in_port_info) +
+			(in_port->num_out_res - 1) *
+			sizeof(struct cam_isp_tfe_out_port_info);
+		total_in_port_length += in_port_length;
+		if (total_in_port_length >
+			acquire_hw_info->input_info_size) {
+			CAM_ERR(CAM_ISP,
+				"buffer size is not enough %d %d",
+				total_in_port_length,
+				acquire_hw_info->input_info_size);
+			rc = -EINVAL;
+			goto free_cdm;
+		}
+
+		in_port = (struct cam_isp_tfe_in_port_info *)
+			((uint8_t *)in_port + in_port_length);
+	}
+
+	in_port_length = 0;
+	total_in_port_length = 0;
+	in_port = (struct cam_isp_tfe_in_port_info *)
+		((uint8_t *)&acquire_hw_info->data +
+		 acquire_hw_info->input_info_offset);
+
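+	/*
+	 * If the first in port is TPG fed, every in port must be TPG fed;
+	 * collect them and acquire the TPG HW once for all of them.
+	 */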
+	if (in_port->res_id == CAM_ISP_TFE_IN_RES_TPG) {
+		if (acquire_hw_info->num_inputs >
+			CAM_TOP_TPG_MAX_SUPPORTED_DT) {
+			CAM_ERR(CAM_ISP, "too many in ports:%d for TPG",
+				acquire_hw_info->num_inputs);
+			rc = -EINVAL;
+			goto free_cdm;
+		}
+
+		for (i = 0; i < acquire_hw_info->num_inputs; i++) {
+			if (in_port->res_id != CAM_ISP_TFE_IN_RES_TPG) {
+				CAM_ERR(CAM_ISP,
+					"Invalid in port:%d res id:0x%x",
+					i, in_port->res_id);
+				rc = -EINVAL;
+				goto free_cdm;
+			}
+
+			tpg_inport[i] = in_port;
+			in_port_length =
+				sizeof(struct cam_isp_tfe_in_port_info) +
+				(in_port->num_out_res - 1) *
+				sizeof(struct cam_isp_tfe_out_port_info);
+			total_in_port_length += in_port_length;
+			if (total_in_port_length >
+				acquire_hw_info->input_info_size) {
+				CAM_ERR(CAM_ISP,
+					"buffer size is not enough %d %d",
+					total_in_port_length,
+					acquire_hw_info->input_info_size);
+				rc = -EINVAL;
+				goto free_cdm;
+			}
+
+			in_port = (struct cam_isp_tfe_in_port_info *)
+				((uint8_t *)in_port + in_port_length);
+		}
+
+		rc = cam_tfe_hw_mgr_acquire_tpg(tfe_ctx, tpg_inport,
+			acquire_hw_info->num_inputs);
+		if (rc)
+			goto free_cdm;
+
+		tfe_ctx->is_tpg = true;
+	}
+
+	in_port = (struct cam_isp_tfe_in_port_info *)
+		((uint8_t *)&acquire_hw_info->data +
+		 acquire_hw_info->input_info_offset);
+	in_port_length = 0;
+	total_in_port_length = 0;
+
+	/* acquire HW resources */
+	for (i = 0; i < acquire_hw_info->num_inputs; i++) {
+
+		if (in_port->num_out_res > CAM_TFE_HW_OUT_RES_MAX) {
+			CAM_ERR(CAM_ISP, "too many output res %d",
+				in_port->num_out_res);
+			rc = -EINVAL;
+			goto free_res;
+		}
+
+		in_port_length = sizeof(struct cam_isp_tfe_in_port_info) +
+			(in_port->num_out_res - 1) *
+			sizeof(struct cam_isp_tfe_out_port_info);
+		total_in_port_length += in_port_length;
+
+		if (total_in_port_length > acquire_hw_info->input_info_size) {
+			CAM_ERR(CAM_ISP, "buffer size is not enough");
+			rc = -EINVAL;
+			goto free_res;
+		}
+		CAM_DBG(CAM_ISP, "in_res_id %x", in_port->res_id);
+		rc = cam_tfe_mgr_acquire_hw_for_ctx(tfe_ctx, in_port,
+			&num_pix_port_per_in, &num_rdi_port_per_in,
+			&pdaf_enable);
+		total_pix_port += num_pix_port_per_in;
+		total_rdi_port += num_rdi_port_per_in;
+
+		if (rc) {
+			CAM_ERR(CAM_ISP, "can not acquire resource");
+			goto free_res;
+		}
+		in_port = (struct cam_isp_tfe_in_port_info *)
+			((uint8_t *)in_port + in_port_length);
+	}
+
+	/* Check whether context has only RDI resource */
+	if (!total_pix_port) {
+		tfe_ctx->is_rdi_only_context = 1;
+		CAM_DBG(CAM_ISP, "RDI only context");
+	} else
+		tfe_ctx->is_rdi_only_context = 0;
+
+	/* Process base info */
+	rc = cam_tfe_mgr_process_base_info(tfe_ctx);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Process base info failed");
+		goto free_res;
+	}
+
+	acquire_args->ctxt_to_hw_map = tfe_ctx;
+	tfe_ctx->ctx_in_use = 1;
+
+	cam_tfe_hw_mgr_put_ctx(&tfe_hw_mgr->used_ctx_list, &tfe_ctx);
+
+	CAM_DBG(CAM_ISP, "Exit...(success)");
+
+	return 0;
+free_res:
+	cam_tfe_hw_mgr_release_hw_for_ctx(tfe_ctx);
+	tfe_ctx->ctx_in_use = 0;
+	tfe_ctx->is_rdi_only_context = 0;
+	tfe_ctx->cdm_ops = NULL;
+	tfe_ctx->init_done = false;
+	tfe_ctx->is_dual = false;
+	tfe_ctx->is_tpg  = false;
+	tfe_ctx->res_list_tpg.res_type = CAM_ISP_RESOURCE_MAX;
+free_cdm:
+	/* release the CDM before clearing the handle */
+	cam_cdm_release(tfe_ctx->cdm_handle);
+	tfe_ctx->cdm_handle = 0;
+free_ctx:
+	cam_tfe_hw_mgr_put_ctx(&tfe_hw_mgr->free_ctx_list, &tfe_ctx);
+err:
+	/* Dump all the currently acquired HW */
+	cam_tfe_hw_mgr_dump_all_ctx();
+
+	CAM_ERR_RATE_LIMIT(CAM_ISP, "Exit...(rc=%d)", rc);
+	return rc;
+}
+
+/* entry function: acquire_dev */
+static int cam_tfe_mgr_acquire_dev(void *hw_mgr_priv, void *acquire_hw_args)
+{
+	struct cam_tfe_hw_mgr *tfe_hw_mgr            = hw_mgr_priv;
+	struct cam_hw_acquire_args *acquire_args     = acquire_hw_args;
+	int rc                                       = -EINVAL;
+	int i, j;
+	struct cam_tfe_hw_mgr_ctx         *tfe_ctx;
+	struct cam_isp_tfe_in_port_info   *in_port = NULL;
+	struct cam_isp_resource           *isp_resource = NULL;
+	struct cam_cdm_acquire_data        cdm_acquire;
+	uint32_t                           num_pix_port_per_in = 0;
+	uint32_t                           num_rdi_port_per_in = 0;
+	uint32_t                           pdaf_enable         = 0;
+	uint32_t                           total_pix_port = 0;
+	uint32_t                           total_rdi_port = 0;
+	uint32_t                           in_port_length = 0;
+
+	CAM_DBG(CAM_ISP, "Enter...");
+
+	if (!acquire_args || acquire_args->num_acq <= 0) {
+		CAM_ERR(CAM_ISP, "Nothing to acquire. Seems like error");
+		return -EINVAL;
+	}
+
+	/* get the tfe ctx */
+	rc = cam_tfe_hw_mgr_get_ctx(&tfe_hw_mgr->free_ctx_list, &tfe_ctx);
+	if (rc || !tfe_ctx) {
+		CAM_ERR(CAM_ISP, "Get tfe hw context failed");
+		goto err;
+	}
+
+	tfe_ctx->common.cb_priv = acquire_args->context_data;
+	for (i = 0; i < CAM_ISP_HW_EVENT_MAX; i++)
+		tfe_ctx->common.event_cb[i] = acquire_args->event_cb;
+
+	tfe_ctx->hw_mgr = tfe_hw_mgr;
+
+	memcpy(cdm_acquire.identifier, "tfe", sizeof("tfe"));
+	cdm_acquire.cell_index = 0;
+	cdm_acquire.handle = 0;
+	cdm_acquire.userdata = tfe_ctx;
+	cdm_acquire.base_array_cnt = CAM_TFE_HW_NUM_MAX;
+	for (i = 0, j = 0; i < CAM_TFE_HW_NUM_MAX; i++) {
+		if (tfe_hw_mgr->cdm_reg_map[i])
+			cdm_acquire.base_array[j++] =
+				tfe_hw_mgr->cdm_reg_map[i];
+	}
+	cdm_acquire.base_array_cnt = j;
+
+	cdm_acquire.id = CAM_CDM_VIRTUAL;
+	cdm_acquire.cam_cdm_callback = cam_tfe_cam_cdm_callback;
+	rc = cam_cdm_acquire(&cdm_acquire);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Failed to acquire the CDM HW");
+		goto free_ctx;
+	}
+
+	CAM_DBG(CAM_ISP, "Successfully acquired the CDM HW hdl=%x",
+		cdm_acquire.handle);
+	tfe_ctx->cdm_handle = cdm_acquire.handle;
+	tfe_ctx->cdm_ops = cdm_acquire.ops;
+	atomic_set(&tfe_ctx->cdm_done, 1);
+
+	isp_resource = (struct cam_isp_resource *)acquire_args->acquire_info;
+
+	/* acquire HW resources */
+	for (i = 0; i < acquire_args->num_acq; i++) {
+		if (isp_resource[i].resource_id != CAM_ISP_RES_ID_PORT)
+			continue;
+
+		CAM_DBG(CAM_ISP, "acquire no = %d total = %d", i,
+			acquire_args->num_acq);
+		CAM_DBG(CAM_ISP,
+			"start copy from user handle %lld with len = %d",
+			isp_resource[i].res_hdl,
+			isp_resource[i].length);
+
+		in_port_length = sizeof(struct cam_isp_tfe_in_port_info);
+
+		if (in_port_length > isp_resource[i].length) {
+			CAM_ERR(CAM_ISP, "buffer size is not enough");
+			rc = -EINVAL;
+			goto free_res;
+		}
+
+		in_port = memdup_user(
+			u64_to_user_ptr(isp_resource[i].res_hdl),
+			isp_resource[i].length);
+		if (!IS_ERR(in_port)) {
+			if (in_port->num_out_res > CAM_TFE_HW_OUT_RES_MAX) {
+				CAM_ERR(CAM_ISP, "too many output res %d",
+					in_port->num_out_res);
+				rc = -EINVAL;
+				kfree(in_port);
+				goto free_res;
+			}
+
+			in_port_length =
+				sizeof(struct cam_isp_tfe_in_port_info) +
+				(in_port->num_out_res - 1) *
+				sizeof(struct cam_isp_tfe_out_port_info);
+			if (in_port_length > isp_resource[i].length) {
+				CAM_ERR(CAM_ISP, "buffer size is not enough");
+				rc = -EINVAL;
+				kfree(in_port);
+				goto free_res;
+			}
+
+			rc = cam_tfe_mgr_acquire_hw_for_ctx(tfe_ctx, in_port,
+				&num_pix_port_per_in, &num_rdi_port_per_in,
+				&pdaf_enable);
+			total_pix_port += num_pix_port_per_in;
+			total_rdi_port += num_rdi_port_per_in;
+
+			kfree(in_port);
+			if (rc) {
+				CAM_ERR(CAM_ISP, "can not acquire resource");
+				goto free_res;
+			}
+		} else {
+			CAM_ERR(CAM_ISP,
+				"Copy from user failed with in_port = %pK",
+				in_port);
+			rc = -EFAULT;
+			goto free_res;
+		}
+	}
+
+	/* Check whether context has only RDI resource */
+	if (!total_pix_port) {
+		tfe_ctx->is_rdi_only_context = 1;
+		CAM_DBG(CAM_ISP, "RDI only context");
+	} else
+		tfe_ctx->is_rdi_only_context = 0;
+
+	/* Process base info */
+	rc = cam_tfe_mgr_process_base_info(tfe_ctx);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Process base info failed");
+		goto free_res;
+	}
+
+	acquire_args->ctxt_to_hw_map = tfe_ctx;
+	tfe_ctx->ctx_in_use = 1;
+
+	cam_tfe_hw_mgr_put_ctx(&tfe_hw_mgr->used_ctx_list, &tfe_ctx);
+
+	CAM_DBG(CAM_ISP, "Exit...(success)");
+
+	return 0;
+free_res:
+	cam_tfe_hw_mgr_release_hw_for_ctx(tfe_ctx);
+	cam_cdm_release(tfe_ctx->cdm_handle);
+free_ctx:
+	cam_tfe_hw_mgr_put_ctx(&tfe_hw_mgr->free_ctx_list, &tfe_ctx);
+err:
+	CAM_ERR_RATE_LIMIT(CAM_ISP, "Exit...(rc=%d)", rc);
+	return rc;
+}
+
+/* entry function: acquire, dispatches to acquire_hw or acquire_dev */
+static int cam_tfe_mgr_acquire(void *hw_mgr_priv,
+	void *acquire_hw_args)
+{
+	struct cam_hw_acquire_args *acquire_args     = acquire_hw_args;
+	int rc                                       = -EINVAL;
+
+	CAM_DBG(CAM_ISP, "Enter...");
+
+	if (!acquire_args || acquire_args->num_acq <= 0) {
+		CAM_ERR(CAM_ISP, "Nothing to acquire. Seems like error");
+		return -EINVAL;
+	}
+
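+	/*
+	 * num_acq equal to CAM_API_COMPAT_CONSTANT marks the newer
+	 * acquire_hw UAPI; any other value takes the legacy acquire_dev
+	 * path.
+	 */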
+	if (acquire_args->num_acq == CAM_API_COMPAT_CONSTANT)
+		rc = cam_tfe_mgr_acquire_hw(hw_mgr_priv, acquire_hw_args);
+	else
+		rc = cam_tfe_mgr_acquire_dev(hw_mgr_priv, acquire_hw_args);
+
+	CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc);
+	return rc;
+}
+
+static const char *cam_tfe_util_usage_data_to_string(
+	uint32_t usage_data)
+{
+	switch (usage_data) {
+	case CAM_ISP_TFE_USAGE_LEFT_PX:
+		return "LEFT_PX";
+	case CAM_ISP_TFE_USAGE_RIGHT_PX:
+		return "RIGHT_PX";
+	case CAM_ISP_TFE_USAGE_RDI:
+		return "RDI";
+	default:
+		return "USAGE_INVALID";
+	}
+}
+
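+/*
+ * Pick the AXI BW votes from the blob that belong to this resource:
+ * left/right CAMIF votes are copied once per split and RDI votes are
+ * matched to the corresponding RDI input path.
+ */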
+static int cam_tfe_classify_vote_info(
+	struct cam_isp_hw_mgr_res            *hw_mgr_res,
+	struct cam_isp_bw_config_internal_v2 *bw_config,
+	struct cam_axi_vote                  *isp_vote,
+	uint32_t                              split_idx,
+	bool                                 *camif_l_bw_updated,
+	bool                                 *camif_r_bw_updated)
+{
+	int                                   rc = 0, i, j = 0;
+
+	if (hw_mgr_res->res_id == CAM_ISP_HW_TFE_IN_CAMIF) {
+		if (split_idx == CAM_ISP_HW_SPLIT_LEFT) {
+			if (*camif_l_bw_updated)
+				return rc;
+
+			for (i = 0; i < bw_config->num_paths; i++) {
+				if (bw_config->axi_path[i].usage_data ==
+					CAM_ISP_TFE_USAGE_LEFT_PX) {
+					memcpy(&isp_vote->axi_path[j],
+						&bw_config->axi_path[i],
+						sizeof(struct
+						cam_axi_per_path_bw_vote));
+					j++;
+				}
+			}
+			isp_vote->num_paths = j;
+
+			*camif_l_bw_updated = true;
+		} else {
+			if (*camif_r_bw_updated)
+				return rc;
+
+			for (i = 0; i < bw_config->num_paths; i++) {
+				if (bw_config->axi_path[i].usage_data ==
+					CAM_ISP_TFE_USAGE_RIGHT_PX) {
+					memcpy(&isp_vote->axi_path[j],
+						&bw_config->axi_path[i],
+						sizeof(struct
+						cam_axi_per_path_bw_vote));
+					j++;
+				}
+			}
+			isp_vote->num_paths = j;
+
+			*camif_r_bw_updated = true;
+		}
+	} else if ((hw_mgr_res->res_id >= CAM_ISP_HW_TFE_IN_RDI0)
+		&& (hw_mgr_res->res_id <=
+		CAM_ISP_HW_TFE_IN_RDI2)) {
+		for (i = 0; i < bw_config->num_paths; i++) {
+			if ((bw_config->axi_path[i].usage_data ==
+				CAM_ISP_TFE_USAGE_RDI) &&
+				((bw_config->axi_path[i].path_data_type -
+				CAM_AXI_PATH_DATA_IFE_RDI0) ==
+				(hw_mgr_res->res_id -
+				CAM_ISP_HW_TFE_IN_RDI0))) {
+				memcpy(&isp_vote->axi_path[j],
+					&bw_config->axi_path[i],
+					sizeof(struct
+					cam_axi_per_path_bw_vote));
+				j++;
+			}
+		}
+		isp_vote->num_paths = j;
+
+	} else {
+		if (hw_mgr_res->hw_res[split_idx]) {
+			CAM_ERR(CAM_ISP, "Invalid res_id %u, split_idx: %u",
+				hw_mgr_res->res_id, split_idx);
+			rc = -EINVAL;
+			return rc;
+		}
+	}
+
+	for (i = 0; i < isp_vote->num_paths; i++) {
+		CAM_DBG(CAM_PERF,
+			"CLASSIFY_VOTE [%s] [%s] [%s] [%llu] [%llu] [%llu]",
+			cam_tfe_util_usage_data_to_string(
+			isp_vote->axi_path[i].usage_data),
+			cam_cpas_axi_util_path_type_to_string(
+			isp_vote->axi_path[i].path_data_type),
+			cam_cpas_axi_util_trans_type_to_string(
+			isp_vote->axi_path[i].transac_type),
+			isp_vote->axi_path[i].camnoc_bw,
+			isp_vote->axi_path[i].mnoc_ab_bw,
+			isp_vote->axi_path[i].mnoc_ib_bw);
+	}
+
+	return rc;
+}
+
+static int cam_isp_tfe_blob_bw_update(
+	struct cam_isp_bw_config_internal_v2  *bw_config,
+	struct cam_tfe_hw_mgr_ctx             *ctx)
+{
+	struct cam_isp_hw_mgr_res             *hw_mgr_res;
+	struct cam_hw_intf                    *hw_intf;
+	struct cam_tfe_bw_update_args          bw_upd_args;
+	int                                    rc = -EINVAL;
+	uint32_t                               i, split_idx;
+	bool                                   camif_l_bw_updated = false;
+	bool                                   camif_r_bw_updated = false;
+
+	for (i = 0; i < bw_config->num_paths; i++) {
+		CAM_DBG(CAM_PERF,
+			"ISP_BLOB usage_type=%u [%s] [%s] [%s] [%llu] [%llu] [%llu]",
+			bw_config->usage_type,
+			cam_tfe_util_usage_data_to_string(
+			bw_config->axi_path[i].usage_data),
+			cam_cpas_axi_util_path_type_to_string(
+			bw_config->axi_path[i].path_data_type),
+			cam_cpas_axi_util_trans_type_to_string(
+			bw_config->axi_path[i].transac_type),
+			bw_config->axi_path[i].camnoc_bw,
+			bw_config->axi_path[i].mnoc_ab_bw,
+			bw_config->axi_path[i].mnoc_ib_bw);
+	}
+
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in, list) {
+		for (split_idx = 0; split_idx < CAM_ISP_HW_SPLIT_MAX;
+			split_idx++) {
+			if (!hw_mgr_res->hw_res[split_idx])
+				continue;
+
+			memset(&bw_upd_args.isp_vote, 0,
+				sizeof(struct cam_axi_vote));
+			rc = cam_tfe_classify_vote_info(hw_mgr_res, bw_config,
+				&bw_upd_args.isp_vote, split_idx,
+				&camif_l_bw_updated, &camif_r_bw_updated);
+			if (rc)
+				return rc;
+
+			if (!bw_upd_args.isp_vote.num_paths)
+				continue;
+
+			hw_intf = hw_mgr_res->hw_res[split_idx]->hw_intf;
+			if (hw_intf && hw_intf->hw_ops.process_cmd) {
+				bw_upd_args.node_res =
+					hw_mgr_res->hw_res[split_idx];
+
+				rc = hw_intf->hw_ops.process_cmd(
+					hw_intf->hw_priv,
+					CAM_ISP_HW_CMD_BW_UPDATE_V2,
+					&bw_upd_args,
+					sizeof(
+					struct cam_tfe_bw_update_args));
+				if (rc)
+					CAM_ERR(CAM_ISP,
+						"BW Update failed rc: %d", rc);
+			} else {
+				CAM_WARN(CAM_ISP, "NULL hw_intf!");
+			}
+		}
+	}
+
+	return rc;
+}
+
+/* entry function: config_hw */
+static int cam_tfe_mgr_config_hw(void *hw_mgr_priv,
+	void *config_hw_args)
+{
+	int rc = -EINVAL, i, skip = 0;
+	struct cam_hw_config_args *cfg;
+	struct cam_hw_update_entry *cmd;
+	struct cam_cdm_bl_request *cdm_cmd;
+	struct cam_tfe_hw_mgr_ctx *ctx;
+	struct cam_isp_prepare_hw_update_data *hw_update_data;
+
+	CAM_DBG(CAM_ISP, "Enter");
+	if (!hw_mgr_priv || !config_hw_args) {
+		CAM_ERR(CAM_ISP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	cfg = config_hw_args;
+	ctx = (struct cam_tfe_hw_mgr_ctx *)cfg->ctxt_to_hw_map;
+	if (!ctx) {
+		CAM_ERR(CAM_ISP, "Invalid context is used");
+		return -EPERM;
+	}
+
+	if (!ctx->ctx_in_use || !ctx->cdm_cmd) {
+		CAM_ERR(CAM_ISP, "Invalid context parameters");
+		return -EPERM;
+	}
+	if (atomic_read(&ctx->overflow_pending))
+		return -EINVAL;
+
+	hw_update_data = (struct cam_isp_prepare_hw_update_data  *) cfg->priv;
+
+	for (i = 0; i < CAM_TFE_HW_NUM_MAX; i++) {
+		if (hw_update_data->bw_config_valid[i]) {
+			CAM_DBG(CAM_ISP,
+				"ctx:%pK ctx_index:%d i:%d bw_config_version:%d",
+				ctx, ctx->ctx_index, i,
+				hw_update_data->bw_config_version);
+			if (hw_update_data->bw_config_version ==
+				CAM_ISP_BW_CONFIG_V2) {
+				rc = cam_isp_tfe_blob_bw_update(
+					&hw_update_data->bw_config_v2[i], ctx);
+				if (rc)
+					CAM_ERR(CAM_ISP,
+					"Bandwidth Update Failed rc: %d", rc);
+			} else {
+				CAM_ERR(CAM_ISP,
+					"Invalid bw config version: %d",
+					hw_update_data->bw_config_version);
+			}
+		}
+	}
+
+	CAM_DBG(CAM_ISP,
+		"Enter ctx id:%d num_hw_upd_entries %d request id: %llu",
+		ctx->ctx_index, cfg->num_hw_update_entries, cfg->request_id);
+
+	if (cfg->num_hw_update_entries > 0) {
+		cdm_cmd = ctx->cdm_cmd;
+		cdm_cmd->cmd_arrary_count = cfg->num_hw_update_entries;
+		cdm_cmd->type = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE;
+		cdm_cmd->flag = true;
+		cdm_cmd->userdata = hw_update_data;
+		cdm_cmd->cookie = cfg->request_id;
+		cdm_cmd->gen_irq_arb = false;
+
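+		/*
+		 * On reapply, IQ BLs that were already committed are
+		 * skipped and only the remaining entries are re-submitted.
+		 */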
+		for (i = 0; i < cfg->num_hw_update_entries; i++) {
+			cmd = (cfg->hw_update_entries + i);
+			if (cfg->reapply && cmd->flags == CAM_ISP_IQ_BL) {
+				skip++;
+				continue;
+			}
+
+			if (cmd->flags == CAM_ISP_UNUSED_BL ||
+				cmd->flags >= CAM_ISP_BL_MAX)
+				CAM_ERR(CAM_ISP, "Unexpected BL type %d",
+					cmd->flags);
+
+			cdm_cmd->cmd[i - skip].bl_addr.mem_handle = cmd->handle;
+			cdm_cmd->cmd[i - skip].offset = cmd->offset;
+			cdm_cmd->cmd[i - skip].len = cmd->len;
+			cdm_cmd->cmd[i - skip].arbitrate = false;
+		}
+		cdm_cmd->cmd_arrary_count = cfg->num_hw_update_entries - skip;
+
+		reinit_completion(&ctx->config_done_complete);
+		ctx->applied_req_id = cfg->request_id;
+
+		CAM_DBG(CAM_ISP, "Submit to CDM");
+		atomic_set(&ctx->cdm_done, 0);
+		rc = cam_cdm_submit_bls(ctx->cdm_handle, cdm_cmd);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Failed to apply the configs");
+			return rc;
+		}
+
+		if (cfg->init_packet) {
+			rc = wait_for_completion_timeout(
+				&ctx->config_done_complete,
+				msecs_to_jiffies(CAM_TFE_HW_CONFIG_TIMEOUT));
+			if (rc <= 0) {
+				CAM_ERR(CAM_ISP,
+					"config done completion timeout for req_id=%llu rc=%d ctx_index %d",
+					cfg->request_id, rc, ctx->ctx_index);
+				if (rc == 0)
+					rc = -ETIMEDOUT;
+			} else {
+				rc = 0;
+				CAM_DBG(CAM_ISP,
+					"config done Success for req_id=%llu ctx_index %d",
+					cfg->request_id, ctx->ctx_index);
+			}
+		}
+	} else {
+		CAM_ERR(CAM_ISP, "No commands to config");
+	}
+	CAM_DBG(CAM_ISP, "Exit: Config Done: %llu",  cfg->request_id);
+
+	return rc;
+}
+
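+/*
+ * Overflow recovery stop: halt the CSIDs immediately and stop the TFE
+ * in/out resources, but keep the CDM streaming and the HW initialized
+ * so that the context can be restarted.
+ */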
+static int cam_tfe_mgr_stop_hw_in_overflow(void *stop_hw_args)
+{
+	int                               rc        = 0;
+	struct cam_hw_stop_args          *stop_args = stop_hw_args;
+	struct cam_isp_hw_mgr_res        *hw_mgr_res;
+	struct cam_tfe_hw_mgr_ctx        *ctx;
+	uint32_t                          i, master_base_idx = 0;
+
+	if (!stop_hw_args) {
+		CAM_ERR(CAM_ISP, "Invalid arguments");
+		return -EINVAL;
+	}
+	ctx = (struct cam_tfe_hw_mgr_ctx *)stop_args->ctxt_to_hw_map;
+	if (!ctx || !ctx->ctx_in_use) {
+		CAM_ERR(CAM_ISP, "Invalid context is used");
+		return -EPERM;
+	}
+
+	CAM_DBG(CAM_ISP, "Enter...ctx id:%d",
+		ctx->ctx_index);
+
+	if (!ctx->num_base) {
+		CAM_ERR(CAM_ISP, "Number of bases are zero");
+		return -EINVAL;
+	}
+
+	/* get master base index first */
+	for (i = 0; i < ctx->num_base; i++) {
+		if (ctx->base[i].split_id == CAM_ISP_HW_SPLIT_LEFT) {
+			master_base_idx = ctx->base[i].idx;
+			break;
+		}
+	}
+
+	if (i == ctx->num_base)
+		master_base_idx = ctx->base[0].idx;
+
+	/* stop the master CSID path first */
+	cam_tfe_mgr_csid_stop_hw(ctx, &ctx->res_list_tfe_csid,
+		master_base_idx, CAM_TFE_CSID_HALT_IMMEDIATELY);
+
+	/* Stop rest of the CSID paths  */
+	for (i = 0; i < ctx->num_base; i++) {
+		if (i == master_base_idx)
+			continue;
+
+		cam_tfe_mgr_csid_stop_hw(ctx, &ctx->res_list_tfe_csid,
+			ctx->base[i].idx, CAM_TFE_CSID_HALT_IMMEDIATELY);
+	}
+
+	/* TFE in resources */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in, list) {
+		cam_tfe_hw_mgr_stop_hw_res(hw_mgr_res);
+	}
+
+	/* TFE out resources */
+	for (i = 0; i < CAM_TFE_HW_OUT_RES_MAX; i++)
+		cam_tfe_hw_mgr_stop_hw_res(&ctx->res_list_tfe_out[i]);
+
+	if (ctx->is_tpg)
+		cam_tfe_hw_mgr_stop_hw_res(&ctx->res_list_tpg);
+
+	/* Stop tasklet for context */
+	cam_tasklet_stop(ctx->common.tasklet_info);
+	CAM_DBG(CAM_ISP, "Exit...ctx id:%d rc :%d",
+		ctx->ctx_index, rc);
+
+	return rc;
+}
+
+static int cam_tfe_mgr_bw_control(struct cam_tfe_hw_mgr_ctx *ctx,
+	enum cam_tfe_bw_control_action action)
+{
+	struct cam_isp_hw_mgr_res             *hw_mgr_res;
+	struct cam_hw_intf                    *hw_intf;
+	struct cam_tfe_bw_control_args         bw_ctrl_args;
+	int                                    rc = -EINVAL;
+	uint32_t                               i;
+
+	CAM_DBG(CAM_ISP, "Enter...ctx id:%d", ctx->ctx_index);
+
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in, list) {
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!hw_mgr_res->hw_res[i])
+				continue;
+
+			hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+			if (hw_intf && hw_intf->hw_ops.process_cmd) {
+				bw_ctrl_args.node_res =
+					hw_mgr_res->hw_res[i];
+				bw_ctrl_args.action = action;
+
+				rc = hw_intf->hw_ops.process_cmd(
+					hw_intf->hw_priv,
+					CAM_ISP_HW_CMD_BW_CONTROL,
+					&bw_ctrl_args,
+					sizeof(struct cam_tfe_bw_control_args));
+				if (rc)
+					CAM_ERR(CAM_ISP, "BW Update failed");
+			} else
+				CAM_WARN(CAM_ISP, "NULL hw_intf!");
+		}
+	}
+
+	return rc;
+}
+
+static int cam_tfe_mgr_pause_hw(struct cam_tfe_hw_mgr_ctx *ctx)
+{
+	return cam_tfe_mgr_bw_control(ctx, CAM_TFE_BW_CONTROL_EXCLUDE);
+}
+
+/* entry function: stop_hw */
+static int cam_tfe_mgr_stop_hw(void *hw_mgr_priv, void *stop_hw_args)
+{
+	int                               rc        = 0;
+	struct cam_hw_stop_args          *stop_args = stop_hw_args;
+	struct cam_isp_stop_args         *stop_isp;
+	struct cam_isp_hw_mgr_res        *hw_mgr_res;
+	struct cam_tfe_hw_mgr_ctx        *ctx;
+	enum cam_tfe_csid_halt_cmd        csid_halt_type;
+	uint32_t                          i, master_base_idx = 0;
+
+	if (!hw_mgr_priv || !stop_hw_args) {
+		CAM_ERR(CAM_ISP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_tfe_hw_mgr_ctx *)stop_args->ctxt_to_hw_map;
+	if (!ctx || !ctx->ctx_in_use) {
+		CAM_ERR(CAM_ISP, "Invalid context is used");
+		return -EPERM;
+	}
+
+	CAM_DBG(CAM_ISP, "Enter...ctx id:%d", ctx->ctx_index);
+	stop_isp = (struct cam_isp_stop_args    *)stop_args->args;
+
+	/* Set the csid halt command */
+	if (stop_isp->hw_stop_cmd == CAM_ISP_HW_STOP_AT_FRAME_BOUNDARY)
+		csid_halt_type = CAM_TFE_CSID_HALT_AT_FRAME_BOUNDARY;
+	else
+		csid_halt_type = CAM_TFE_CSID_HALT_IMMEDIATELY;
+
+	/* Note: stop resource will remove the irq mask from the hardware */
+
+	if (!ctx->num_base) {
+		CAM_ERR(CAM_ISP, "number of bases are zero");
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ISP, "Halting CSIDs");
+
+	/* get master base index first */
+	for (i = 0; i < ctx->num_base; i++) {
+		if (ctx->base[i].split_id == CAM_ISP_HW_SPLIT_LEFT) {
+			master_base_idx = ctx->base[i].idx;
+			break;
+		}
+	}
+
+	/*
+	 * If Context does not have PIX resources and has only RDI resource
+	 * then take the first base index.
+	 */
+	if (i == ctx->num_base)
+		master_base_idx = ctx->base[0].idx;
+	CAM_DBG(CAM_ISP, "Stopping master CSID idx %d", master_base_idx);
+
+	/* Stop the master CSID path first */
+	cam_tfe_mgr_csid_stop_hw(ctx, &ctx->res_list_tfe_csid,
+		master_base_idx, csid_halt_type);
+
+	/* stop rest of the CSID paths  */
+	for (i = 0; i < ctx->num_base; i++) {
+		if (ctx->base[i].idx == master_base_idx)
+			continue;
+		CAM_DBG(CAM_ISP, "Stopping CSID idx %d i %d master %d",
+			ctx->base[i].idx, i, master_base_idx);
+
+		cam_tfe_mgr_csid_stop_hw(ctx, &ctx->res_list_tfe_csid,
+			ctx->base[i].idx, csid_halt_type);
+	}
+
+	CAM_DBG(CAM_ISP, "Going to stop TFE Out");
+
+	/* TFE out resources */
+	for (i = 0; i < CAM_TFE_HW_OUT_RES_MAX; i++)
+		cam_tfe_hw_mgr_stop_hw_res(&ctx->res_list_tfe_out[i]);
+
+	CAM_DBG(CAM_ISP, "Going to stop TFE IN");
+
+	/* TFE in resources */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in, list) {
+		cam_tfe_hw_mgr_stop_hw_res(hw_mgr_res);
+	}
+
+	cam_tasklet_stop(ctx->common.tasklet_info);
+
+	cam_tfe_mgr_pause_hw(ctx);
+
+	wait_for_completion(&ctx->config_done_complete);
+
+	if (stop_isp->stop_only)
+		goto end;
+
+	if (cam_cdm_stream_off(ctx->cdm_handle))
+		CAM_ERR(CAM_ISP, "CDM stream off failed %d", ctx->cdm_handle);
+
+	if (ctx->is_tpg)
+		cam_tfe_hw_mgr_stop_hw_res(&ctx->res_list_tpg);
+
+	cam_tfe_hw_mgr_deinit_hw(ctx);
+
+	CAM_DBG(CAM_ISP,
+		"Stop success for ctx id:%d rc :%d", ctx->ctx_index, rc);
+
+	mutex_lock(&g_tfe_hw_mgr.ctx_mutex);
+	atomic_dec_return(&g_tfe_hw_mgr.active_ctx_cnt);
+	mutex_unlock(&g_tfe_hw_mgr.ctx_mutex);
+
+end:
+	return rc;
+}
+
+static int cam_tfe_mgr_reset_tfe_hw(struct cam_tfe_hw_mgr *hw_mgr,
+	uint32_t hw_idx)
+{
+	uint32_t i = 0;
+	struct cam_hw_intf             *tfe_hw_intf;
+	uint32_t tfe_reset_type;
+
+	if (!hw_mgr) {
+		CAM_DBG(CAM_ISP, "Invalid arguments");
+		return -EINVAL;
+	}
+	/* Reset TFE HW*/
+	tfe_reset_type = CAM_TFE_HW_RESET_HW;
+
+	for (i = 0; i < CAM_TFE_HW_NUM_MAX; i++) {
+		if (!hw_mgr->tfe_devices[i] ||
+			hw_idx != hw_mgr->tfe_devices[i]->hw_idx)
+			continue;
+		CAM_DBG(CAM_ISP, "TFE (id = %d) reset", hw_idx);
+		tfe_hw_intf = hw_mgr->tfe_devices[i];
+		tfe_hw_intf->hw_ops.reset(tfe_hw_intf->hw_priv,
+			&tfe_reset_type, sizeof(tfe_reset_type));
+		break;
+	}
+
+	CAM_DBG(CAM_ISP, "Exit Successfully");
+	return 0;
+}
+
+static int cam_tfe_mgr_restart_hw(void *start_hw_args)
+{
+	int                               rc = -EINVAL;
+	struct cam_hw_start_args         *start_args = start_hw_args;
+	struct cam_tfe_hw_mgr_ctx        *ctx;
+	struct cam_isp_hw_mgr_res        *hw_mgr_res;
+	uint32_t                          i;
+
+	if (!start_hw_args) {
+		CAM_ERR(CAM_ISP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_tfe_hw_mgr_ctx *)start_args->ctxt_to_hw_map;
+	if (!ctx || !ctx->ctx_in_use) {
+		CAM_ERR(CAM_ISP, "Invalid context is used");
+		return -EPERM;
+	}
+
+	CAM_DBG(CAM_ISP, "START TFE OUT ... in ctx id:%d", ctx->ctx_index);
+
+	cam_tasklet_start(ctx->common.tasklet_info);
+
+	/* start the TFE out devices */
+	for (i = 0; i < CAM_TFE_HW_OUT_RES_MAX; i++) {
+		rc = cam_tfe_hw_mgr_start_hw_res(
+			&ctx->res_list_tfe_out[i], ctx);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Can not start TFE OUT (%d)", i);
+			goto err;
+		}
+	}
+
+	CAM_DBG(CAM_ISP, "START TFE SRC ... in ctx id:%d", ctx->ctx_index);
+
+	/* Start the TFE in devices */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in, list) {
+		rc = cam_tfe_hw_mgr_start_hw_res(hw_mgr_res, ctx);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Can not start TFE IN (%d)",
+				 hw_mgr_res->res_id);
+			goto err;
+		}
+	}
+
+	CAM_DBG(CAM_ISP, "START CSID HW ... in ctx id:%d", ctx->ctx_index);
+	/* Start the TFE CSID HW devices */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_csid, list) {
+		rc = cam_tfe_hw_mgr_start_hw_res(hw_mgr_res, ctx);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Can not start TFE CSID (%d)",
+				 hw_mgr_res->res_id);
+			goto err;
+		}
+	}
+
+	CAM_DBG(CAM_ISP, "Exit...(success)");
+	return 0;
+
+err:
+	cam_tfe_mgr_stop_hw_in_overflow(start_hw_args);
+	CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc);
+	return rc;
+}
+
+static int cam_tfe_mgr_start_hw(void *hw_mgr_priv, void *start_hw_args)
+{
+	int                               rc = -EINVAL;
+	struct cam_isp_start_args        *start_isp = start_hw_args;
+	struct cam_hw_stop_args           stop_args;
+	struct cam_isp_stop_args          stop_isp;
+	struct cam_tfe_hw_mgr_ctx        *ctx;
+	struct cam_isp_hw_mgr_res        *hw_mgr_res;
+	struct cam_isp_resource_node     *rsrc_node = NULL;
+	uint32_t                          i, camif_debug;
+	bool                              res_rdi_context_set = false;
+	uint32_t                          primary_rdi_in_res;
+	uint32_t                          primary_rdi_out_res;
+
+	primary_rdi_in_res = CAM_ISP_HW_TFE_IN_MAX;
+	primary_rdi_out_res = CAM_ISP_TFE_OUT_RES_MAX;
+
+	if (!hw_mgr_priv || !start_isp) {
+		CAM_ERR(CAM_ISP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_tfe_hw_mgr_ctx *)
+		start_isp->hw_config.ctxt_to_hw_map;
+	if (!ctx || !ctx->ctx_in_use) {
+		CAM_ERR(CAM_ISP, "Invalid context is used");
+		return -EPERM;
+	}
+
+	if ((!ctx->init_done) && start_isp->start_only) {
+		CAM_ERR(CAM_ISP, "Invalid args init_done %d start_only %d",
+			ctx->init_done, start_isp->start_only);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ISP, "Enter... ctx id:%d",
+		ctx->ctx_index);
+
+	/* bandwidth update should be done at the hw layer */
+
+	cam_tasklet_start(ctx->common.tasklet_info);
+
+	if (ctx->init_done && start_isp->start_only)
+		goto start_only;
+
+	/* set current csid debug information to CSID HW */
+	for (i = 0; i < CAM_TFE_CSID_HW_NUM_MAX; i++) {
+		if (g_tfe_hw_mgr.csid_devices[i])
+			rc = g_tfe_hw_mgr.csid_devices[i]->hw_ops.process_cmd(
+				g_tfe_hw_mgr.csid_devices[i]->hw_priv,
+				CAM_TFE_CSID_SET_CSID_DEBUG,
+				&g_tfe_hw_mgr.debug_cfg.csid_debug,
+				sizeof(g_tfe_hw_mgr.debug_cfg.csid_debug));
+	}
+
+	camif_debug = g_tfe_hw_mgr.debug_cfg.camif_debug;
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in, list) {
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!hw_mgr_res->hw_res[i])
+				continue;
+
+			rsrc_node = hw_mgr_res->hw_res[i];
+			if (rsrc_node->process_cmd && (rsrc_node->res_id ==
+				CAM_ISP_HW_TFE_IN_CAMIF)) {
+				rc = hw_mgr_res->hw_res[i]->process_cmd(
+					hw_mgr_res->hw_res[i],
+					CAM_ISP_HW_CMD_SET_CAMIF_DEBUG,
+					&camif_debug,
+					sizeof(camif_debug));
+			}
+		}
+	}
+
+	rc = cam_tfe_hw_mgr_init_hw(ctx);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Init failed");
+		goto tasklet_stop;
+	}
+
+	ctx->init_done = true;
+
+	mutex_lock(&g_tfe_hw_mgr.ctx_mutex);
+	atomic_fetch_inc(&g_tfe_hw_mgr.active_ctx_cnt);
+	mutex_unlock(&g_tfe_hw_mgr.ctx_mutex);
+
+	CAM_DBG(CAM_ISP, "start cdm interface");
+	rc = cam_cdm_stream_on(ctx->cdm_handle);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Can not start cdm (%d)",
+			 ctx->cdm_handle);
+		goto deinit_hw;
+	}
+
+start_only:
+	/* Apply initial configuration */
+	CAM_DBG(CAM_ISP, "Config HW");
+	rc = cam_tfe_mgr_config_hw(hw_mgr_priv, &start_isp->hw_config);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Config HW failed");
+		goto cdm_streamoff;
+	}
+
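+	/*
+	 * Resources are started downstream first (TFE out, TFE in, CSID,
+	 * then TPG) so that each consumer is ready before its producer
+	 * starts streaming.
+	 */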
+	CAM_DBG(CAM_ISP, "START TFE OUT ... in ctx id:%d",
+		ctx->ctx_index);
+	/* start the TFE out devices */
+	for (i = 0; i < CAM_TFE_HW_OUT_RES_MAX; i++) {
+		hw_mgr_res = &ctx->res_list_tfe_out[i];
+		switch (hw_mgr_res->res_id) {
+		case CAM_ISP_TFE_OUT_RES_RDI_0:
+		case CAM_ISP_TFE_OUT_RES_RDI_1:
+		case CAM_ISP_TFE_OUT_RES_RDI_2:
+			if (!res_rdi_context_set && ctx->is_rdi_only_context) {
+				hw_mgr_res->hw_res[0]->rdi_only_ctx =
+					ctx->is_rdi_only_context;
+				res_rdi_context_set = true;
+				primary_rdi_out_res = hw_mgr_res->res_id;
+			}
+		}
+
+		rc = cam_tfe_hw_mgr_start_hw_res(
+			&ctx->res_list_tfe_out[i], ctx);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Can not start TFE OUT (%d)",
+				 i);
+			goto err;
+		}
+	}
+
+	if (primary_rdi_out_res < CAM_ISP_TFE_OUT_RES_MAX)
+		primary_rdi_in_res =
+			cam_tfe_hw_mgr_convert_rdi_out_res_id_to_in_res(
+			primary_rdi_out_res);
+
+	CAM_DBG(CAM_ISP, "START TFE IN ... in ctx id:%d",
+		ctx->ctx_index);
+	/* Start the TFE in resources devices */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in, list) {
+		/*
+		 * if rdi only context has two rdi resources then only one irq
+		 * subscription should be sufficient
+		 */
+		if (primary_rdi_in_res == hw_mgr_res->res_id)
+			hw_mgr_res->hw_res[0]->rdi_only_ctx =
+				ctx->is_rdi_only_context;
+
+		rc = cam_tfe_hw_mgr_start_hw_res(hw_mgr_res, ctx);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Can not start TFE in resource (%d)",
+				 hw_mgr_res->res_id);
+			goto err;
+		}
+	}
+
+	CAM_DBG(CAM_ISP, "START CSID HW ... in ctx id:%d",
+		ctx->ctx_index);
+	/* Start the TFE CSID HW devices */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_csid, list) {
+		rc = cam_tfe_hw_mgr_start_hw_res(hw_mgr_res, ctx);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Can not start TFE CSID (%d)",
+				 hw_mgr_res->res_id);
+			goto err;
+		}
+	}
+
+	if (ctx->is_tpg) {
+		CAM_DBG(CAM_ISP, "START TPG HW ... in ctx id:%d",
+			ctx->ctx_index);
+		rc = cam_tfe_hw_mgr_start_hw_res(&ctx->res_list_tpg, ctx);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Can not start TFE TPG (%d)",
+				ctx->res_list_tpg.res_id);
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	stop_isp.stop_only = false;
+	stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_IMMEDIATELY;
+	stop_args.ctxt_to_hw_map = start_isp->hw_config.ctxt_to_hw_map;
+	stop_args.args = (void *)(&stop_isp);
+
+	cam_tfe_mgr_stop_hw(hw_mgr_priv, &stop_args);
+	CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc);
+	return rc;
+
+cdm_streamoff:
+	cam_cdm_stream_off(ctx->cdm_handle);
+
+deinit_hw:
+	cam_tfe_hw_mgr_deinit_hw(ctx);
+
+tasklet_stop:
+	cam_tasklet_stop(ctx->common.tasklet_info);
+
+	return rc;
+}
+
+static int cam_tfe_mgr_read(void *hw_mgr_priv, void *read_args)
+{
+	return -EPERM;
+}
+
+static int cam_tfe_mgr_write(void *hw_mgr_priv, void *write_args)
+{
+	return -EPERM;
+}
+
+static int cam_tfe_mgr_reset(void *hw_mgr_priv, void *hw_reset_args)
+{
+	struct cam_tfe_hw_mgr            *hw_mgr = hw_mgr_priv;
+	struct cam_hw_reset_args         *reset_args = hw_reset_args;
+	struct cam_tfe_hw_mgr_ctx        *ctx;
+	struct cam_isp_hw_mgr_res        *hw_mgr_res;
+	int                               rc = 0, i = 0;
+
+	if (!hw_mgr_priv || !hw_reset_args) {
+		CAM_ERR(CAM_ISP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_tfe_hw_mgr_ctx *)reset_args->ctxt_to_hw_map;
+	if (!ctx || !ctx->ctx_in_use) {
+		CAM_ERR(CAM_ISP, "Invalid context is used");
+		return -EPERM;
+	}
+
+	CAM_DBG(CAM_ISP, "Reset CSID and TFE");
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_csid, list) {
+		rc = cam_tfe_hw_mgr_reset_csid_res(hw_mgr_res);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Failed to reset CSID:%d rc: %d",
+				hw_mgr_res->res_id, rc);
+			goto end;
+		}
+	}
+
+	for (i = 0; i < ctx->num_base; i++) {
+		rc = cam_tfe_mgr_reset_tfe_hw(hw_mgr, ctx->base[i].idx);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Failed to reset TFE:%d rc: %d",
+				ctx->base[i].idx, rc);
+			goto end;
+		}
+	}
+
+end:
+	return rc;
+}
+
+static int cam_tfe_mgr_release_hw(void *hw_mgr_priv,
+					void *release_hw_args)
+{
+	int                               rc           = 0;
+	struct cam_hw_release_args       *release_args = release_hw_args;
+	struct cam_tfe_hw_mgr            *hw_mgr       = hw_mgr_priv;
+	struct cam_tfe_hw_mgr_ctx        *ctx;
+	uint32_t                          i;
+
+	if (!hw_mgr_priv || !release_hw_args) {
+		CAM_ERR(CAM_ISP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_tfe_hw_mgr_ctx *)release_args->ctxt_to_hw_map;
+	if (!ctx || !ctx->ctx_in_use) {
+		CAM_ERR(CAM_ISP, "Invalid context is used");
+		return -EPERM;
+	}
+
+	CAM_DBG(CAM_ISP, "Enter...ctx id:%d",
+		ctx->ctx_index);
+
+	if (ctx->init_done)
+		cam_tfe_hw_mgr_deinit_hw(ctx);
+
+	/* stop HW should already have been called before this point */
+	cam_tfe_hw_mgr_release_hw_for_ctx(ctx);
+
+	/* reset base info */
+	ctx->num_base = 0;
+	memset(ctx->base, 0, sizeof(ctx->base));
+
+	/* release cdm handle */
+	cam_cdm_release(ctx->cdm_handle);
+
+	/* clean context */
+	list_del_init(&ctx->list);
+	ctx->ctx_in_use = 0;
+	ctx->is_rdi_only_context = 0;
+	ctx->cdm_handle = 0;
+	ctx->cdm_ops = NULL;
+	ctx->init_done = false;
+	ctx->is_dual = false;
+	ctx->is_tpg  = false;
+	ctx->res_list_tpg.res_type = CAM_ISP_RESOURCE_MAX;
+	atomic_set(&ctx->overflow_pending, 0);
+	for (i = 0; i < CAM_TFE_HW_NUM_MAX; i++) {
+		ctx->sof_cnt[i] = 0;
+		ctx->eof_cnt[i] = 0;
+		ctx->epoch_cnt[i] = 0;
+	}
+	CAM_DBG(CAM_ISP, "Exit...ctx id:%d",
+		ctx->ctx_index);
+	cam_tfe_hw_mgr_put_ctx(&hw_mgr->free_ctx_list, &ctx);
+	return rc;
+}
+
+static int cam_isp_tfe_blob_hfr_update(
+	uint32_t                                  blob_type,
+	struct cam_isp_generic_blob_info         *blob_info,
+	struct cam_isp_tfe_resource_hfr_config   *hfr_config,
+	struct cam_hw_prepare_update_args        *prepare)
+{
+	struct cam_isp_tfe_port_hfr_config    *port_hfr_config;
+	struct cam_kmd_buf_info               *kmd_buf_info;
+	struct cam_tfe_hw_mgr_ctx             *ctx = NULL;
+	struct cam_isp_hw_mgr_res             *hw_mgr_res;
+	uint32_t                               res_id_out, i;
+	uint32_t                               total_used_bytes = 0;
+	uint32_t                               kmd_buf_remain_size;
+	uint32_t                              *cmd_buf_addr;
+	uint32_t                               bytes_used = 0;
+	int                                    num_ent, rc = 0;
+
+	ctx = prepare->ctxt_to_hw_map;
+	CAM_DBG(CAM_ISP, "num_ports= %d", hfr_config->num_ports);
+
+	/* Max one hw entries required for hfr config update */
+	if (prepare->num_hw_update_entries + 1 >=
+			prepare->max_hw_update_entries) {
+		CAM_ERR(CAM_ISP, "Insufficient HW entries: used %d max %d",
+			prepare->num_hw_update_entries,
+			prepare->max_hw_update_entries);
+		return -EINVAL;
+	}
+
+	kmd_buf_info = blob_info->kmd_buf_info;
+	for (i = 0; i < hfr_config->num_ports; i++) {
+		port_hfr_config = &hfr_config->port_hfr_config[i];
+		res_id_out = port_hfr_config->resource_type & 0xFF;
+
+		CAM_DBG(CAM_ISP, "hfr config idx %d, type=%d", i,
+			res_id_out);
+
+		if (res_id_out >= CAM_TFE_HW_OUT_RES_MAX) {
+			CAM_ERR(CAM_ISP, "invalid out restype:%x",
+				port_hfr_config->resource_type);
+			return -EINVAL;
+		}
+
+		if ((kmd_buf_info->used_bytes
+			+ total_used_bytes) < kmd_buf_info->size) {
+			kmd_buf_remain_size = kmd_buf_info->size -
+				(kmd_buf_info->used_bytes +
+				total_used_bytes);
+		} else {
+			CAM_ERR(CAM_ISP,
+				"no free kmd memory for base %d",
+				blob_info->base_info->idx);
+			rc = -ENOMEM;
+			return rc;
+		}
+
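+		/* cpu_addr is a uint32_t pointer, hence byte offsets / 4 */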
+		cmd_buf_addr = kmd_buf_info->cpu_addr +
+			kmd_buf_info->used_bytes/4 +
+			total_used_bytes/4;
+		hw_mgr_res = &ctx->res_list_tfe_out[res_id_out];
+
+		rc = cam_isp_add_cmd_buf_update(
+			hw_mgr_res, blob_type, CAM_ISP_HW_CMD_GET_HFR_UPDATE,
+			blob_info->base_info->idx,
+			(void *)cmd_buf_addr,
+			kmd_buf_remain_size,
+			(void *)port_hfr_config,
+			&bytes_used);
+		if (rc < 0) {
+			CAM_ERR(CAM_ISP,
+				"Failed cmd_update, base_idx=%d, rc=%d",
+				blob_info->base_info->idx, rc);
+			return rc;
+		}
+
+		total_used_bytes += bytes_used;
+	}
+
+	if (total_used_bytes) {
+		/* Update the HW entries */
+		num_ent = prepare->num_hw_update_entries;
+		prepare->hw_update_entries[num_ent].handle =
+			kmd_buf_info->handle;
+		prepare->hw_update_entries[num_ent].len = total_used_bytes;
+		prepare->hw_update_entries[num_ent].offset =
+			kmd_buf_info->offset;
+		num_ent++;
+
+		kmd_buf_info->used_bytes += total_used_bytes;
+		kmd_buf_info->offset     += total_used_bytes;
+		prepare->num_hw_update_entries = num_ent;
+	}
+
+	return rc;
+}
+
+static int cam_isp_tfe_blob_csid_clock_update(
+	uint32_t                                   blob_type,
+	struct cam_isp_generic_blob_info          *blob_info,
+	struct cam_isp_tfe_csid_clock_config      *clock_config,
+	struct cam_hw_prepare_update_args         *prepare)
+{
+	struct cam_tfe_hw_mgr_ctx             *ctx = NULL;
+	struct cam_isp_hw_mgr_res             *hw_mgr_res;
+	struct cam_hw_intf                    *hw_intf;
+	struct cam_tfe_csid_clock_update_args  csid_clock_upd_args;
+	struct cam_top_tpg_clock_update_args   tpg_clock_upd_args;
+	uint64_t                               clk_rate = 0;
+	int                                    rc = -EINVAL;
+	uint32_t                               i;
+
+	ctx = prepare->ctxt_to_hw_map;
+
+	CAM_DBG(CAM_ISP, "csid clk=%llu", clock_config->csid_clock);
+
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_csid, list) {
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			clk_rate = 0;
+			if (!hw_mgr_res->hw_res[i])
+				continue;
+			clk_rate = clock_config->csid_clock;
+			hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+			if (hw_intf && hw_intf->hw_ops.process_cmd) {
+				csid_clock_upd_args.clk_rate = clk_rate;
+				CAM_DBG(CAM_ISP, "i=%d csid clk=%llu",
+					i, csid_clock_upd_args.clk_rate);
+
+				rc = hw_intf->hw_ops.process_cmd(
+					hw_intf->hw_priv,
+					CAM_ISP_HW_CMD_CSID_CLOCK_UPDATE,
+					&csid_clock_upd_args,
+					sizeof(
+					struct cam_tfe_csid_clock_update_args));
+				if (rc)
+					CAM_ERR(CAM_ISP, "Clock Update failed");
+			} else
+				CAM_ERR(CAM_ISP, "NULL hw_intf!");
+		}
+	}
+
+	if (ctx->res_list_tpg.res_type == CAM_ISP_RESOURCE_TPG) {
+		tpg_clock_upd_args.clk_rate = clock_config->phy_clock;
+		hw_intf = ctx->res_list_tpg.hw_res[0]->hw_intf;
+		if (hw_intf && hw_intf->hw_ops.process_cmd) {
+			CAM_DBG(CAM_ISP, "phy clk=%llu",
+				tpg_clock_upd_args.clk_rate);
+			rc = hw_intf->hw_ops.process_cmd(
+				hw_intf->hw_priv,
+				CAM_ISP_HW_CMD_TPG_PHY_CLOCK_UPDATE,
+				&tpg_clock_upd_args,
+				sizeof(struct cam_top_tpg_clock_update_args));
+			if (rc)
+				CAM_ERR(CAM_ISP, "Clock Update failed");
+		} else
+			CAM_ERR(CAM_ISP, "NULL hw_intf!");
+	}
+
+	return rc;
+}
+
+static int cam_isp_tfe_blob_clock_update(
+	uint32_t                               blob_type,
+	struct cam_isp_generic_blob_info      *blob_info,
+	struct cam_isp_tfe_clock_config       *clock_config,
+	struct cam_hw_prepare_update_args     *prepare)
+{
+	struct cam_tfe_hw_mgr_ctx             *ctx = NULL;
+	struct cam_isp_hw_mgr_res             *hw_mgr_res;
+	struct cam_hw_intf                    *hw_intf;
+	struct cam_tfe_clock_update_args       clock_upd_args;
+	uint64_t                               clk_rate = 0;
+	int                                    rc = -EINVAL;
+	uint32_t                               i;
+	uint32_t                               j;
+	bool                                   camif_l_clk_updated = false;
+	bool                                   camif_r_clk_updated = false;
+
+	ctx = prepare->ctxt_to_hw_map;
+
+	CAM_DBG(CAM_PERF,
+		"usage=%u left_clk=%llu right_clk=%llu",
+		clock_config->usage_type,
+		clock_config->left_pix_hz,
+		clock_config->right_pix_hz);
+
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in, list) {
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			clk_rate = 0;
+			if (!hw_mgr_res->hw_res[i])
+				continue;
+
+			if (hw_mgr_res->res_id == CAM_ISP_HW_TFE_IN_CAMIF) {
+				if (i == CAM_ISP_HW_SPLIT_LEFT) {
+					if (camif_l_clk_updated)
+						continue;
+
+					clk_rate =
+						clock_config->left_pix_hz;
+
+					camif_l_clk_updated = true;
+				} else {
+					if (camif_r_clk_updated)
+						continue;
+
+					clk_rate =
+						clock_config->right_pix_hz;
+
+					camif_r_clk_updated = true;
+				}
+			} else if ((hw_mgr_res->res_id >=
+				CAM_ISP_HW_TFE_IN_RDI0) && (hw_mgr_res->res_id
+				<= CAM_ISP_HW_TFE_IN_RDI2)) {
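+				/*
+				 * Each RDI path votes the highest rate
+				 * requested across all RDIs (the RDI paths
+				 * share the TFE clock).
+				 */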
+				for (j = 0; j < clock_config->num_rdi; j++)
+					clk_rate = max(clock_config->rdi_hz[j],
+						clk_rate);
+			} else {
+				CAM_ERR(CAM_ISP, "Invalid res_id %u",
+					hw_mgr_res->res_id);
+				rc = -EINVAL;
+				return rc;
+			}
+
+			hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+			if (hw_intf && hw_intf->hw_ops.process_cmd) {
+				clock_upd_args.node_res =
+					hw_mgr_res->hw_res[i];
+				CAM_DBG(CAM_ISP,
+					"res_id=%u i=%d clk=%llu",
+					hw_mgr_res->res_id, i, clk_rate);
+
+				clock_upd_args.clk_rate = clk_rate;
+
+				rc = hw_intf->hw_ops.process_cmd(
+					hw_intf->hw_priv,
+					CAM_ISP_HW_CMD_CLOCK_UPDATE,
+					&clock_upd_args,
+					sizeof(
+					struct cam_tfe_clock_update_args));
+				if (rc)
+					CAM_ERR(CAM_ISP, "Clock Update failed");
+			} else
+				CAM_WARN(CAM_ISP, "NULL hw_intf!");
+		}
+	}
+
+	return rc;
+}
+
+static int cam_isp_tfe_packet_generic_blob_handler(void *user_data,
+	uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data)
+{
+	int rc = 0;
+	struct cam_isp_generic_blob_info  *blob_info = user_data;
+	struct cam_hw_prepare_update_args *prepare = NULL;
+
+	if (!blob_data || (blob_size == 0) || !blob_info) {
+		CAM_ERR(CAM_ISP, "Invalid args data %pK size %d info %pK",
+			blob_data, blob_size, blob_info);
+		return -EINVAL;
+	}
+
+	prepare = blob_info->prepare;
+	if (!prepare) {
+		CAM_ERR(CAM_ISP, "Failed. prepare is NULL, blob_type %d",
+			blob_type);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ISP, "BLOB Type: %d", blob_type);
+	switch (blob_type) {
+	case CAM_ISP_TFE_GENERIC_BLOB_TYPE_HFR_CONFIG: {
+		struct cam_isp_tfe_resource_hfr_config    *hfr_config =
+			(struct cam_isp_tfe_resource_hfr_config *)blob_data;
+
+		if (blob_size <
+			sizeof(struct cam_isp_tfe_resource_hfr_config)) {
+			CAM_ERR(CAM_ISP, "Invalid blob size %u", blob_size);
+			return -EINVAL;
+		}
+
+		if (hfr_config->num_ports > CAM_ISP_TFE_OUT_RES_MAX) {
+			CAM_ERR(CAM_ISP, "Invalid num_ports %u in hfr config",
+				hfr_config->num_ports);
+			return -EINVAL;
+		}
+
+		/* Check for integer overflow */
+		if (hfr_config->num_ports > 1) {
+			if (sizeof(struct cam_isp_tfe_port_hfr_config) >
+				((UINT_MAX -
+				sizeof(struct cam_isp_tfe_resource_hfr_config))
+				/ (hfr_config->num_ports - 1))) {
+				CAM_ERR(CAM_ISP,
+					"Max size exceeded in hfr config num_ports:%u size per port:%lu",
+					hfr_config->num_ports,
+					sizeof(struct
+					cam_isp_tfe_port_hfr_config));
+				return -EINVAL;
+			}
+		}
+
+		if ((hfr_config->num_ports != 0) && (blob_size <
+			(sizeof(struct cam_isp_tfe_resource_hfr_config) +
+			(hfr_config->num_ports - 1) *
+			sizeof(struct cam_isp_tfe_port_hfr_config)))) {
+			CAM_ERR(CAM_ISP, "Invalid blob size %u expected %lu",
+				blob_size,
+				sizeof(struct cam_isp_tfe_resource_hfr_config) +
+				(hfr_config->num_ports - 1) *
+				sizeof(struct cam_isp_tfe_port_hfr_config));
+			return -EINVAL;
+		}
+
+		rc = cam_isp_tfe_blob_hfr_update(blob_type, blob_info,
+			hfr_config, prepare);
+		if (rc)
+			CAM_ERR(CAM_ISP, "HFR Update Failed");
+	}
+		break;
+	case CAM_ISP_TFE_GENERIC_BLOB_TYPE_CLOCK_CONFIG: {
+		struct cam_isp_tfe_clock_config    *clock_config =
+			(struct cam_isp_tfe_clock_config *)blob_data;
+
+		if (blob_size < sizeof(struct cam_isp_tfe_clock_config)) {
+			CAM_ERR(CAM_ISP, "Invalid blob size %u", blob_size);
+			return -EINVAL;
+		}
+
+		if (clock_config->num_rdi > CAM_TFE_RDI_NUM_MAX) {
+			CAM_ERR(CAM_ISP, "Invalid num_rdi %u in clock config",
+				clock_config->num_rdi);
+			return -EINVAL;
+		}
+		/* Check integer overflow */
+		if (clock_config->num_rdi > 1) {
+			if (sizeof(uint64_t) > ((UINT_MAX -
+				sizeof(struct cam_isp_tfe_clock_config))/
+				(clock_config->num_rdi - 1))) {
+				CAM_ERR(CAM_ISP,
+					"Max size exceeded in clock config num_rdi:%u size per port:%lu",
+					clock_config->num_rdi,
+					sizeof(uint64_t));
+				return -EINVAL;
+			}
+		}
+
+		if ((clock_config->num_rdi != 0) && (blob_size <
+			(sizeof(struct cam_isp_tfe_clock_config) +
+			sizeof(uint64_t) * (clock_config->num_rdi - 1)))) {
+			CAM_ERR(CAM_ISP, "Invalid blob size %u expected %lu",
+				blob_size,
+				sizeof(struct cam_isp_tfe_clock_config) +
+				sizeof(uint64_t) * (clock_config->num_rdi - 1));
+			return -EINVAL;
+		}
+
+		rc = cam_isp_tfe_blob_clock_update(blob_type, blob_info,
+			clock_config, prepare);
+		if (rc)
+			CAM_ERR(CAM_ISP, "Clock Update Failed");
+	}
+		break;
+	case CAM_ISP_TFE_GENERIC_BLOB_TYPE_BW_CONFIG_V2: {
+		size_t bw_config_size = 0;
+		struct cam_isp_tfe_bw_config_v2    *bw_config =
+			(struct cam_isp_tfe_bw_config_v2 *)blob_data;
+		struct cam_isp_prepare_hw_update_data   *prepare_hw_data;
+
+		if (blob_size < sizeof(struct cam_isp_tfe_bw_config_v2)) {
+			CAM_ERR(CAM_ISP, "Invalid blob size %u", blob_size);
+			return -EINVAL;
+		}
+
+		if (bw_config->num_paths > CAM_ISP_MAX_PER_PATH_VOTES) {
+			CAM_ERR(CAM_ISP, "Invalid num paths %d",
+				bw_config->num_paths);
+			return -EINVAL;
+		}
+
+		/* Check for integer overflow */
+		if (bw_config->num_paths > 1) {
+			if (sizeof(struct cam_axi_per_path_bw_vote) >
+				((UINT_MAX -
+				sizeof(struct cam_isp_tfe_bw_config_v2)) /
+				(bw_config->num_paths - 1))) {
+				CAM_ERR(CAM_ISP,
+					"Size exceeds limit paths:%u size per path:%lu",
+					bw_config->num_paths - 1,
+					sizeof(
+					struct cam_axi_per_path_bw_vote));
+				return -EINVAL;
+			}
+		}
+
+		if ((bw_config->num_paths != 0) && (blob_size <
+			(sizeof(struct cam_isp_tfe_bw_config_v2) +
+			((bw_config->num_paths - 1) *
+			sizeof(struct cam_axi_per_path_bw_vote))))) {
+			CAM_ERR(CAM_ISP,
+				"Invalid blob size: %u, num_paths: %u, bw_config size: %lu, per_path_vote size: %lu",
+				blob_size, bw_config->num_paths,
+				sizeof(struct cam_isp_tfe_bw_config_v2),
+				sizeof(struct cam_axi_per_path_bw_vote));
+			return -EINVAL;
+		}
+
+		if (!prepare || !prepare->priv ||
+			(bw_config->usage_type >= CAM_TFE_HW_NUM_MAX)) {
+			CAM_ERR(CAM_ISP, "Invalid inputs");
+			return -EINVAL;
+		}
+
+		prepare_hw_data = (struct cam_isp_prepare_hw_update_data  *)
+			prepare->priv;
+
+		memset(&prepare_hw_data->bw_config_v2[bw_config->usage_type],
+			0, sizeof(
+			prepare_hw_data->bw_config_v2[bw_config->usage_type]));
+		bw_config_size = sizeof(struct cam_isp_bw_config_internal_v2) +
+			((bw_config->num_paths - 1) *
+			sizeof(struct cam_axi_per_path_bw_vote));
+		memcpy(&prepare_hw_data->bw_config_v2[bw_config->usage_type],
+			bw_config, bw_config_size);
+
+		prepare_hw_data->bw_config_version = CAM_ISP_BW_CONFIG_V2;
+		prepare_hw_data->bw_config_valid[bw_config->usage_type] = true;
+	}
+		break;
+	case CAM_ISP_TFE_GENERIC_BLOB_TYPE_CSID_CLOCK_CONFIG: {
+		struct cam_isp_tfe_csid_clock_config    *clock_config =
+			(struct cam_isp_tfe_csid_clock_config *)blob_data;
+
+		if (blob_size < sizeof(struct cam_isp_tfe_csid_clock_config)) {
+			CAM_ERR(CAM_ISP, "Invalid blob size %u expected %lu",
+				blob_size,
+				sizeof(struct cam_isp_tfe_csid_clock_config));
+			return -EINVAL;
+		}
+		rc = cam_isp_tfe_blob_csid_clock_update(blob_type, blob_info,
+			clock_config, prepare);
+		if (rc)
+			CAM_ERR(CAM_ISP, "Clock Update Failed");
+	}
+		break;
+	default:
+		CAM_WARN(CAM_ISP, "Invalid blob type %d", blob_type);
+		break;
+	}
+
+	return rc;
+}
+
+static int cam_tfe_update_dual_config(
+	struct cam_hw_prepare_update_args  *prepare,
+	struct cam_cmd_buf_desc            *cmd_desc,
+	uint32_t                            split_id,
+	uint32_t                            base_idx,
+	struct cam_isp_hw_mgr_res          *res_list_isp_out,
+	uint32_t                            size_isp_out)
+{
+	int rc = -EINVAL;
+	struct cam_isp_tfe_dual_config             *dual_config;
+	struct cam_isp_hw_mgr_res                  *hw_mgr_res;
+	struct cam_isp_resource_node               *res;
+	struct cam_tfe_dual_update_args             dual_isp_update_args;
+	uint32_t                                    outport_id;
+	size_t                                      len = 0, remain_len = 0;
+	uint32_t                                   *cpu_addr;
+	uint32_t                                    i, j, stp_index;
+
+	CAM_DBG(CAM_ISP, "cmd desc size %d, length: %d",
+		cmd_desc->size, cmd_desc->length);
+
+	rc = cam_packet_util_get_cmd_mem_addr(
+		cmd_desc->mem_handle, &cpu_addr, &len);
+	if (rc) {
+		CAM_DBG(CAM_ISP, "unable to get cmd mem addr handle:0x%x",
+			cmd_desc->mem_handle);
+		return rc;
+	}
+
+	if ((len < sizeof(struct cam_isp_tfe_dual_config)) ||
+		(cmd_desc->offset >=
+			(len - sizeof(struct cam_isp_tfe_dual_config)))) {
+		CAM_ERR(CAM_ISP, "not enough buffer provided");
+		return -EINVAL;
+	}
+
+	remain_len = len - cmd_desc->offset;
+	cpu_addr += (cmd_desc->offset / 4);
+	dual_config = (struct cam_isp_tfe_dual_config *)cpu_addr;
+
+	if ((dual_config->num_ports *
+		sizeof(struct cam_isp_tfe_dual_stripe_config)) >
+		(remain_len -
+			offsetof(struct cam_isp_tfe_dual_config, stripes))) {
+		CAM_ERR(CAM_ISP, "not enough buffer for all the dual configs");
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ISP, "num_ports:%d", dual_config->num_ports);
+	if (dual_config->num_ports >= size_isp_out) {
+		CAM_ERR(CAM_UTIL,
+			"invalid num ports %d max num tfe ports:%d",
+			dual_config->num_ports, size_isp_out);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	for (i = 0; i < dual_config->num_ports; i++) {
+		for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) {
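+			/*
+			 * Stripe layout implied by the indexing below:
+			 * stripes[] holds CAM_PACKET_MAX_PLANES entries
+			 * per port, with the whole left-split block
+			 * first and the right-split block after it.
+			 * E.g. with 2 ports and 3 planes, port 1 of the
+			 * right split starts at 1 * 3 + 1 * (3 * 2) = 9.
+			 */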
+			stp_index = (i * CAM_PACKET_MAX_PLANES) +
+				(j * (CAM_PACKET_MAX_PLANES *
+				dual_config->num_ports));
+
+			if (!dual_config->stripes[stp_index].port_id)
+				continue;
+
+			outport_id = dual_config->stripes[stp_index].port_id;
+			if (outport_id >= size_isp_out) {
+				CAM_ERR(CAM_UTIL,
+					"invalid outport id:%d i:%d j:%d num ports:%d",
+					outport_id, i, j,
+					dual_config->num_ports);
+				rc = -EINVAL;
+				goto end;
+			}
+
+			hw_mgr_res = &res_list_isp_out[outport_id];
+			if (!hw_mgr_res->hw_res[j])
+				continue;
+
+			if (hw_mgr_res->hw_res[j]->hw_intf->hw_idx != base_idx)
+				continue;
+
+			res = hw_mgr_res->hw_res[j];
+
+			if (res->res_id < CAM_ISP_TFE_OUT_RES_BASE ||
+				res->res_id >= CAM_ISP_TFE_OUT_RES_MAX) {
+				CAM_DBG(CAM_ISP, "res id :%d", res->res_id);
+				continue;
+			}
+
+			dual_isp_update_args.split_id = j;
+			dual_isp_update_args.res      = res;
+			dual_isp_update_args.stripe_config =
+				&dual_config->stripes[stp_index];
+			rc = res->hw_intf->hw_ops.process_cmd(
+				res->hw_intf->hw_priv,
+				CAM_ISP_HW_CMD_STRIPE_UPDATE,
+				&dual_isp_update_args,
+				sizeof(struct cam_tfe_dual_update_args));
+			if (rc)
+				goto end;
+		}
+	}
+
+end:
+	return rc;
+}
+
+int cam_tfe_add_command_buffers(
+	struct cam_hw_prepare_update_args  *prepare,
+	struct cam_kmd_buf_info            *kmd_buf_info,
+	struct cam_isp_ctx_base_info       *base_info,
+	cam_packet_generic_blob_handler     blob_handler_cb,
+	struct cam_isp_hw_mgr_res          *res_list_isp_out,
+	uint32_t                            size_isp_out)
+{
+	int rc = 0;
+	uint32_t                           cmd_meta_data, num_ent, i;
+	uint32_t                           base_idx;
+	enum cam_isp_hw_split_id           split_id;
+	struct cam_cmd_buf_desc           *cmd_desc = NULL;
+	struct cam_hw_update_entry        *hw_entry;
+
+	hw_entry = prepare->hw_update_entries;
+	split_id = base_info->split_id;
+	base_idx = base_info->idx;
+
+	/*
+	 * set the cmd_desc to point the first command descriptor in the
+	 * packet
+	 */
+	cmd_desc = (struct cam_cmd_buf_desc *)
+			((uint8_t *)&prepare->packet->payload +
+			prepare->packet->cmd_buf_offset);
+
+	CAM_DBG(CAM_ISP, "split id = %d, number of command buffers:%d",
+		split_id, prepare->packet->num_cmd_buf);
+
+	for (i = 0; i < prepare->packet->num_cmd_buf; i++) {
+		num_ent = prepare->num_hw_update_entries;
+		if (!cmd_desc[i].length)
+			continue;
+
+		/* One hw entry space required for left or right or common */
+		if (num_ent + 1 >= prepare->max_hw_update_entries) {
+			CAM_ERR(CAM_ISP, "Insufficient HW entries: %d, max: %d",
+				num_ent, prepare->max_hw_update_entries);
+			return -EINVAL;
+		}
+
+		rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]);
+		if (rc)
+			return rc;
+
+		cmd_meta_data = cmd_desc[i].meta_data;
+
+		CAM_DBG(CAM_ISP, "meta type: %d, split_id: %d",
+			cmd_meta_data, split_id);
+
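+		/*
+		 * Route each command buffer by its meta type: LEFT and
+		 * RIGHT buffers are consumed only by the matching
+		 * split, COMMON buffers by every split, DUAL_CONFIG
+		 * buffers carry stripe info, and GENERIC_BLOB buffers
+		 * are unpacked through the blob handler callback.
+		 */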
+		switch (cmd_meta_data) {
+		case CAM_ISP_TFE_PACKET_META_BASE:
+		case CAM_ISP_TFE_PACKET_META_LEFT:
+			if (split_id == CAM_ISP_HW_SPLIT_LEFT) {
+				hw_entry[num_ent].len = cmd_desc[i].length;
+				hw_entry[num_ent].handle =
+					cmd_desc[i].mem_handle;
+				hw_entry[num_ent].offset = cmd_desc[i].offset;
+				hw_entry[num_ent].flags = CAM_ISP_IQ_BL;
+				CAM_DBG(CAM_ISP,
+					"Meta_Left num_ent=%d handle=0x%x, len=%u, offset=%u",
+					num_ent,
+					hw_entry[num_ent].handle,
+					hw_entry[num_ent].len,
+					hw_entry[num_ent].offset);
+
+				num_ent++;
+			}
+			break;
+		case CAM_ISP_TFE_PACKET_META_RIGHT:
+			if (split_id == CAM_ISP_HW_SPLIT_RIGHT) {
+				hw_entry[num_ent].len = cmd_desc[i].length;
+				hw_entry[num_ent].handle =
+					cmd_desc[i].mem_handle;
+				hw_entry[num_ent].offset = cmd_desc[i].offset;
+				hw_entry[num_ent].flags = CAM_ISP_IQ_BL;
+				CAM_DBG(CAM_ISP,
+					"Meta_Right num_ent=%d handle=0x%x, len=%u, offset=%u",
+					num_ent,
+					hw_entry[num_ent].handle,
+					hw_entry[num_ent].len,
+					hw_entry[num_ent].offset);
+
+				num_ent++;
+			}
+			break;
+		case CAM_ISP_TFE_PACKET_META_COMMON:
+			hw_entry[num_ent].len = cmd_desc[i].length;
+			hw_entry[num_ent].handle =
+				cmd_desc[i].mem_handle;
+			hw_entry[num_ent].offset = cmd_desc[i].offset;
+			hw_entry[num_ent].flags = CAM_ISP_IQ_BL;
+			CAM_DBG(CAM_ISP,
+				"Meta_Common num_ent=%d handle=0x%x, len=%u, offset=%u",
+				num_ent,
+				hw_entry[num_ent].handle,
+				hw_entry[num_ent].len,
+				hw_entry[num_ent].offset);
+			if (cmd_meta_data == CAM_ISP_PACKET_META_DMI_COMMON)
+				hw_entry[num_ent].flags = 0x1;
+
+			num_ent++;
+			break;
+		case CAM_ISP_TFE_PACKET_META_DUAL_CONFIG:
+
+			rc = cam_tfe_update_dual_config(prepare,
+				&cmd_desc[i], split_id, base_idx,
+				res_list_isp_out, size_isp_out);
+
+			if (rc)
+				return rc;
+			break;
+		case CAM_ISP_TFE_PACKET_META_GENERIC_BLOB_COMMON: {
+			struct cam_isp_generic_blob_info   blob_info;
+
+			prepare->num_hw_update_entries = num_ent;
+			blob_info.prepare = prepare;
+			blob_info.base_info = base_info;
+			blob_info.kmd_buf_info = kmd_buf_info;
+
+			rc = cam_packet_util_process_generic_cmd_buffer(
+				&cmd_desc[i],
+				blob_handler_cb,
+				&blob_info);
+			if (rc) {
+				CAM_ERR(CAM_ISP,
+					"Failed in processing blobs %d", rc);
+				return rc;
+			}
+			hw_entry[num_ent].flags = CAM_ISP_IQ_BL;
+			num_ent = prepare->num_hw_update_entries;
+		}
+			break;
+		case CAM_ISP_TFE_PACKET_META_REG_DUMP_ON_FLUSH:
+		case CAM_ISP_TFE_PACKET_META_REG_DUMP_ON_ERROR:
+			if (split_id == CAM_ISP_HW_SPLIT_LEFT) {
+				if (prepare->num_reg_dump_buf >=
+					CAM_REG_DUMP_MAX_BUF_ENTRIES) {
+					CAM_ERR(CAM_ISP,
+					"Descriptor count out of bounds: %d",
+					prepare->num_reg_dump_buf);
+					return -EINVAL;
+				}
+				prepare->reg_dump_buf_desc[
+					prepare->num_reg_dump_buf] =
+					cmd_desc[i];
+				prepare->num_reg_dump_buf++;
+				CAM_DBG(CAM_ISP,
+					"Added command buffer: %d desc_count: %d",
+					cmd_desc[i].meta_data,
+					prepare->num_reg_dump_buf);
+			}
+			break;
+		default:
+			CAM_ERR(CAM_ISP, "invalid cdm command meta data %d",
+				cmd_meta_data);
+			return -EINVAL;
+		}
+		prepare->num_hw_update_entries = num_ent;
+	}
+
+	return rc;
+}
+
+static int cam_tfe_mgr_prepare_hw_update(void *hw_mgr_priv,
+	void *prepare_hw_update_args)
+{
+	int rc = 0;
+	struct cam_hw_prepare_update_args *prepare =
+		(struct cam_hw_prepare_update_args *) prepare_hw_update_args;
+	struct cam_tfe_hw_mgr_ctx               *ctx;
+	struct cam_tfe_hw_mgr                   *hw_mgr;
+	struct cam_kmd_buf_info                  kmd_buf;
+	uint32_t                                 i;
+	bool                                     fill_fence = true;
+	struct cam_isp_prepare_hw_update_data   *prepare_hw_data;
+
+	if (!hw_mgr_priv || !prepare_hw_update_args) {
+		CAM_ERR(CAM_ISP, "Invalid args");
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_REQ, "Enter for req_id %lld",
+		prepare->packet->header.request_id);
+
+	prepare_hw_data = (struct cam_isp_prepare_hw_update_data  *)
+		prepare->priv;
+
+	ctx = (struct cam_tfe_hw_mgr_ctx *) prepare->ctxt_to_hw_map;
+	hw_mgr = (struct cam_tfe_hw_mgr *)hw_mgr_priv;
+
+	rc = cam_packet_util_validate_packet(prepare->packet,
+		prepare->remain_len);
+	if (rc)
+		return rc;
+
+	/* Pre parse the packet*/
+	rc = cam_packet_util_get_kmd_buffer(prepare->packet, &kmd_buf);
+	if (rc)
+		return rc;
+
+	rc = cam_packet_util_process_patches(prepare->packet,
+		hw_mgr->mgr_common.cmd_iommu_hdl,
+		hw_mgr->mgr_common.cmd_iommu_hdl_secure);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Patch ISP packet failed.");
+		return rc;
+	}
+
+	prepare->num_hw_update_entries = 0;
+	prepare->num_in_map_entries = 0;
+	prepare->num_out_map_entries = 0;
+	prepare->num_reg_dump_buf = 0;
+
+	memset(&prepare_hw_data->bw_config[0], 0x0,
+		sizeof(prepare_hw_data->bw_config[0]) *
+		CAM_TFE_HW_NUM_MAX);
+	memset(&prepare_hw_data->bw_config_valid[0], 0x0,
+		sizeof(prepare_hw_data->bw_config_valid[0]) *
+		CAM_TFE_HW_NUM_MAX);
+
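+	/*
+	 * First pass per TFE base: switch the CDM to this base
+	 * (change base), add the split specific command buffers and
+	 * the IO buffer configuration. Fence map entries are filled
+	 * only on the first iteration.
+	 */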
+	for (i = 0; i < ctx->num_base; i++) {
+		CAM_DBG(CAM_ISP, "process cmd buffer for device %d", i);
+
+		CAM_DBG(CAM_ISP,
+			"change base i=%d, idx=%d",
+			i, ctx->base[i].idx);
+
+		/* Add change base */
+		rc = cam_isp_add_change_base(prepare, &ctx->res_list_tfe_in,
+			ctx->base[i].idx, &kmd_buf);
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"Failed in change base i=%d, idx=%d, rc=%d",
+				i, ctx->base[i].idx, rc);
+			goto end;
+		}
+
+		/* get command buffers */
+		if (ctx->base[i].split_id != CAM_ISP_HW_SPLIT_MAX) {
+			rc = cam_tfe_add_command_buffers(prepare, &kmd_buf,
+				&ctx->base[i],
+				cam_isp_tfe_packet_generic_blob_handler,
+				ctx->res_list_tfe_out, CAM_TFE_HW_OUT_RES_MAX);
+			if (rc) {
+				CAM_ERR(CAM_ISP,
+					"Failed in add cmdbuf, i=%d, split_id=%d, rc=%d",
+					i, ctx->base[i].split_id, rc);
+				goto end;
+			}
+		}
+
+		/* get IO buffers */
+		rc = cam_isp_add_io_buffers(hw_mgr->mgr_common.img_iommu_hdl,
+			hw_mgr->mgr_common.img_iommu_hdl_secure,
+			prepare, ctx->base[i].idx,
+			&kmd_buf, ctx->res_list_tfe_out,
+			NULL,
+			CAM_TFE_HW_OUT_RES_MAX, fill_fence);
+
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"Failed in io buffers, i=%d, rc=%d",
+				i, rc);
+			goto end;
+		}
+
+		/* Fence map table entries need to be filled only once */
+		if (fill_fence)
+			fill_fence = false;
+	}
+
+	ctx->num_reg_dump_buf = prepare->num_reg_dump_buf;
+	if ((ctx->num_reg_dump_buf) && (ctx->num_reg_dump_buf <
+		CAM_REG_DUMP_MAX_BUF_ENTRIES)) {
+		memcpy(ctx->reg_dump_buf_desc,
+			prepare->reg_dump_buf_desc,
+			sizeof(struct cam_cmd_buf_desc) *
+			prepare->num_reg_dump_buf);
+	}
+
+	/* reg update will be done later for the initial configure */
+	if (((prepare->packet->header.op_code) & 0xF) ==
+		CAM_ISP_PACKET_INIT_DEV) {
+		prepare_hw_data->packet_opcode_type =
+			CAM_ISP_TFE_PACKET_INIT_DEV;
+		goto end;
+	} else
+		prepare_hw_data->packet_opcode_type =
+			CAM_ISP_TFE_PACKET_CONFIG_DEV;
+
+	/* add reg update commands */
+	for (i = 0; i < ctx->num_base; i++) {
+		/* Add change base */
+		rc = cam_isp_add_change_base(prepare, &ctx->res_list_tfe_in,
+			ctx->base[i].idx, &kmd_buf);
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"Failed in change base adding reg_update cmd i=%d, idx=%d, rc=%d",
+				i, ctx->base[i].idx, rc);
+			goto end;
+		}
+
+		/*Add reg update */
+		rc = cam_isp_add_reg_update(prepare, &ctx->res_list_tfe_in,
+			ctx->base[i].idx, &kmd_buf);
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"Add Reg_update cmd Failed i=%d, idx=%d, rc=%d",
+				i, ctx->base[i].idx, rc);
+			goto end;
+		}
+	}
+
+end:
+	return rc;
+}
+
+static int cam_tfe_mgr_resume_hw(struct cam_tfe_hw_mgr_ctx *ctx)
+{
+	return cam_tfe_mgr_bw_control(ctx, CAM_TFE_BW_CONTROL_INCLUDE);
+}
+
+static int cam_tfe_mgr_sof_irq_debug(
+	struct cam_tfe_hw_mgr_ctx *ctx,
+	uint32_t sof_irq_enable)
+{
+	int rc = 0;
+	uint32_t i = 0;
+	struct cam_isp_hw_mgr_res     *hw_mgr_res = NULL;
+	struct cam_hw_intf            *hw_intf = NULL;
+	struct cam_isp_resource_node  *rsrc_node = NULL;
+
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_csid, list) {
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!hw_mgr_res->hw_res[i])
+				continue;
+
+			hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+			if (hw_intf->hw_ops.process_cmd) {
+				rc |= hw_intf->hw_ops.process_cmd(
+					hw_intf->hw_priv,
+					CAM_TFE_CSID_SOF_IRQ_DEBUG,
+					&sof_irq_enable,
+					sizeof(sof_irq_enable));
+			}
+		}
+	}
+
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in, list) {
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!hw_mgr_res->hw_res[i])
+				continue;
+
+			rsrc_node = hw_mgr_res->hw_res[i];
+			if (rsrc_node->process_cmd && (rsrc_node->res_id ==
+				CAM_ISP_HW_TFE_IN_CAMIF)) {
+				rc |= hw_mgr_res->hw_res[i]->process_cmd(
+					hw_mgr_res->hw_res[i],
+					CAM_ISP_HW_CMD_SOF_IRQ_DEBUG,
+					&sof_irq_enable,
+					sizeof(sof_irq_enable));
+			}
+		}
+	}
+
+	return rc;
+}
+
+static void cam_tfe_mgr_print_io_bufs(struct cam_packet *packet,
+	int32_t iommu_hdl, int32_t sec_mmu_hdl, uint32_t pf_buf_info,
+	bool *mem_found)
+{
+	dma_addr_t  iova_addr;
+	size_t      src_buf_size;
+	int         i, j;
+	int         rc = 0;
+	int32_t     mmu_hdl;
+
+	struct cam_buf_io_cfg  *io_cfg = NULL;
+
+	if (mem_found)
+		*mem_found = false;
+
+	io_cfg = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload +
+		packet->io_configs_offset / 4);
+
+	for (i = 0; i < packet->num_io_configs; i++) {
+		for (j = 0; j < CAM_PACKET_MAX_PLANES; j++) {
+			if (!io_cfg[i].mem_handle[j])
+				break;
+
+			if (pf_buf_info &&
+				GET_FD_FROM_HANDLE(io_cfg[i].mem_handle[j]) ==
+				GET_FD_FROM_HANDLE(pf_buf_info)) {
+				CAM_INFO(CAM_ISP,
+					"Found PF at port: 0x%x mem 0x%x fd: 0x%x",
+					io_cfg[i].resource_type,
+					io_cfg[i].mem_handle[j],
+					pf_buf_info);
+				if (mem_found)
+					*mem_found = true;
+			}
+
+			CAM_INFO(CAM_ISP, "port: 0x%x f: %u format: %d dir %d",
+				io_cfg[i].resource_type,
+				io_cfg[i].fence,
+				io_cfg[i].format,
+				io_cfg[i].direction);
+
+			mmu_hdl = cam_mem_is_secure_buf(
+				io_cfg[i].mem_handle[j]) ? sec_mmu_hdl :
+				iommu_hdl;
+			rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[j],
+				mmu_hdl, &iova_addr, &src_buf_size);
+			if (rc < 0) {
+				CAM_ERR(CAM_ISP,
+					"get src buf address fail mem_handle 0x%x",
+					io_cfg[i].mem_handle[j]);
+				continue;
+			}
+			if (iova_addr >> 32) {
+				CAM_ERR(CAM_ISP, "Invalid mapped address");
+				rc = -EINVAL;
+				continue;
+			}
+
+			CAM_INFO(CAM_ISP,
+				"pln %d w %d h %d s %u size 0x%x addr 0x%x end_addr 0x%x offset %x memh %x",
+				j, io_cfg[i].planes[j].width,
+				io_cfg[i].planes[j].height,
+				io_cfg[i].planes[j].plane_stride,
+				(unsigned int)src_buf_size,
+				(unsigned int)iova_addr,
+				(unsigned int)iova_addr +
+				(unsigned int)src_buf_size,
+				io_cfg[i].offsets[j],
+				io_cfg[i].mem_handle[j]);
+		}
+	}
+}
+
+static void cam_tfe_mgr_ctx_irq_dump(struct cam_tfe_hw_mgr_ctx *ctx)
+{
+	struct cam_isp_hw_mgr_res        *hw_mgr_res;
+	struct cam_hw_intf               *hw_intf;
+	struct cam_isp_hw_get_cmd_update  cmd_update;
+	int i = 0;
+
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in, list) {
+		if (hw_mgr_res->res_type == CAM_ISP_RESOURCE_UNINT)
+			continue;
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!hw_mgr_res->hw_res[i])
+				continue;
+			switch (hw_mgr_res->hw_res[i]->res_id) {
+			case CAM_ISP_HW_TFE_IN_CAMIF:
+				hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+				cmd_update.res = hw_mgr_res->hw_res[i];
+				cmd_update.cmd_type =
+					CAM_ISP_HW_CMD_GET_IRQ_REGISTER_DUMP;
+				hw_intf->hw_ops.process_cmd(hw_intf->hw_priv,
+					CAM_ISP_HW_CMD_GET_IRQ_REGISTER_DUMP,
+					&cmd_update, sizeof(cmd_update));
+				break;
+			default:
+				break;
+			}
+		}
+	}
+}
+
+static int cam_tfe_mgr_cmd(void *hw_mgr_priv, void *cmd_args)
+{
+	int rc = 0;
+	struct cam_hw_cmd_args *hw_cmd_args = cmd_args;
+	struct cam_tfe_hw_mgr  *hw_mgr = hw_mgr_priv;
+	struct cam_tfe_hw_mgr_ctx *ctx = (struct cam_tfe_hw_mgr_ctx *)
+		hw_cmd_args->ctxt_to_hw_map;
+	struct cam_isp_hw_cmd_args *isp_hw_cmd_args = NULL;
+
+	if (!hw_mgr_priv || !cmd_args) {
+		CAM_ERR(CAM_ISP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	if (!ctx || !ctx->ctx_in_use) {
+		CAM_ERR(CAM_ISP, "Fatal: Invalid context is used");
+		return -EPERM;
+	}
+
+	switch (hw_cmd_args->cmd_type) {
+	case CAM_HW_MGR_CMD_INTERNAL:
+		if (!hw_cmd_args->u.internal_args) {
+			CAM_ERR(CAM_ISP, "Invalid cmd arguments");
+			return -EINVAL;
+		}
+
+		isp_hw_cmd_args = (struct cam_isp_hw_cmd_args *)
+			hw_cmd_args->u.internal_args;
+
+		switch (isp_hw_cmd_args->cmd_type) {
+		case CAM_ISP_HW_MGR_CMD_PAUSE_HW:
+			cam_tfe_mgr_pause_hw(ctx);
+			break;
+		case CAM_ISP_HW_MGR_CMD_RESUME_HW:
+			cam_tfe_mgr_resume_hw(ctx);
+			break;
+		case CAM_ISP_HW_MGR_CMD_SOF_DEBUG:
+			cam_tfe_mgr_sof_irq_debug(ctx,
+				isp_hw_cmd_args->u.sof_irq_enable);
+			break;
+		case CAM_ISP_HW_MGR_CMD_CTX_TYPE:
+			if (ctx->is_rdi_only_context)
+				isp_hw_cmd_args->u.ctx_type = CAM_ISP_CTX_RDI;
+			else
+				isp_hw_cmd_args->u.ctx_type = CAM_ISP_CTX_PIX;
+			break;
+		default:
+			CAM_ERR(CAM_ISP, "Invalid HW mgr command:0x%x",
+				isp_hw_cmd_args->cmd_type);
+			rc = -EINVAL;
+			break;
+		}
+		break;
+	case CAM_HW_MGR_CMD_DUMP_PF_INFO:
+		cam_tfe_mgr_print_io_bufs(
+			hw_cmd_args->u.pf_args.pf_data.packet,
+			hw_mgr->mgr_common.img_iommu_hdl,
+			hw_mgr->mgr_common.img_iommu_hdl_secure,
+			hw_cmd_args->u.pf_args.buf_info,
+			hw_cmd_args->u.pf_args.mem_found);
+		break;
+	case CAM_HW_MGR_CMD_REG_DUMP_ON_FLUSH:
+		if (ctx->last_dump_flush_req_id == ctx->applied_req_id)
+			return 0;
+
+		ctx->last_dump_flush_req_id = ctx->applied_req_id;
+
+		rc = cam_tfe_mgr_handle_reg_dump(ctx, ctx->reg_dump_buf_desc,
+			ctx->num_reg_dump_buf,
+			CAM_ISP_TFE_PACKET_META_REG_DUMP_ON_FLUSH);
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"Reg dump on flush failed req id: %llu rc: %d",
+				ctx->applied_req_id, rc);
+			return rc;
+		}
+
+		break;
+	case CAM_HW_MGR_CMD_REG_DUMP_ON_ERROR:
+		if (ctx->last_dump_err_req_id == ctx->applied_req_id)
+			return 0;
+
+		ctx->last_dump_err_req_id = ctx->applied_req_id;
+		rc = cam_tfe_mgr_handle_reg_dump(ctx, ctx->reg_dump_buf_desc,
+			ctx->num_reg_dump_buf,
+			CAM_ISP_TFE_PACKET_META_REG_DUMP_ON_ERROR);
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"Reg dump on error failed req id: %llu rc: %d",
+				ctx->applied_req_id, rc);
+			return rc;
+		}
+		break;
+
+	default:
+		CAM_ERR(CAM_ISP, "Invalid cmd");
+	}
+
+	return rc;
+}
+
+static int cam_tfe_mgr_cmd_get_sof_timestamp(
+	struct cam_tfe_hw_mgr_ctx            *tfe_ctx,
+	uint64_t                             *time_stamp,
+	uint64_t                             *boot_time_stamp)
+{
+	int                                        rc = -EINVAL;
+	uint32_t                                   i;
+	struct cam_isp_hw_mgr_res                 *hw_mgr_res;
+	struct cam_hw_intf                        *hw_intf;
+	struct cam_tfe_csid_get_time_stamp_args    csid_get_time;
+
+	list_for_each_entry(hw_mgr_res, &tfe_ctx->res_list_tfe_csid, list) {
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!hw_mgr_res->hw_res[i])
+				continue;
+
+			/*
+			 * Get the SOF time stamp from the left resource
+			 * only. The left resource is the master in the
+			 * dual TFE case, and in the RDI only context
+			 * case the left resource alone holds the RDI
+			 * resource.
+			 */
+
+			hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+			if (hw_intf->hw_ops.process_cmd) {
+				/*
+				 * In the single TFE case, get the time
+				 * stamp from the one available CSID HW
+				 * in the context. In the dual TFE case,
+				 * the time stamp from the master (left)
+				 * CSID is sufficient.
+				 */
+
+				csid_get_time.node_res =
+					hw_mgr_res->hw_res[i];
+				rc = hw_intf->hw_ops.process_cmd(
+					hw_intf->hw_priv,
+					CAM_TFE_CSID_CMD_GET_TIME_STAMP,
+					&csid_get_time,
+					sizeof(struct
+					cam_tfe_csid_get_time_stamp_args));
+				if (!rc && (i == CAM_ISP_HW_SPLIT_LEFT)) {
+					*time_stamp =
+						csid_get_time.time_stamp_val;
+					*boot_time_stamp =
+						csid_get_time.boot_timestamp;
+				}
+			}
+		}
+	}
+
+	if (rc)
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Getting sof time stamp failed");
+
+	return rc;
+}
+
+static void cam_tfe_mgr_ctx_reg_dump(struct cam_tfe_hw_mgr_ctx  *ctx)
+{
+	struct cam_isp_hw_mgr_res        *hw_mgr_res;
+	struct cam_hw_intf               *hw_intf;
+	struct cam_isp_hw_get_cmd_update  cmd_update;
+	int i = 0;
+
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in,
+		list) {
+		if (hw_mgr_res->res_type == CAM_ISP_RESOURCE_UNINT)
+			continue;
+
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!hw_mgr_res->hw_res[i])
+				continue;
+
+			switch (hw_mgr_res->hw_res[i]->res_id) {
+			case CAM_ISP_HW_TFE_IN_CAMIF:
+				hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+				cmd_update.res = hw_mgr_res->hw_res[i];
+				cmd_update.cmd_type =
+					CAM_ISP_HW_CMD_GET_REG_DUMP;
+				hw_intf->hw_ops.process_cmd(hw_intf->hw_priv,
+					CAM_ISP_HW_CMD_GET_REG_DUMP,
+					&cmd_update, sizeof(cmd_update));
+				break;
+			default:
+				break;
+			}
+		}
+	}
+
+	/* Dump the TFE CSID registers */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_csid,
+		list) {
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!hw_mgr_res->hw_res[i])
+				continue;
+
+			hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+			if (hw_intf->hw_ops.process_cmd) {
+				hw_intf->hw_ops.process_cmd(
+					hw_intf->hw_priv,
+					CAM_TFE_CSID_CMD_GET_REG_DUMP,
+					hw_mgr_res->hw_res[i],
+					sizeof(struct cam_isp_resource_node));
+			}
+		}
+	}
+}
+
+static int cam_tfe_mgr_process_recovery_cb(void *priv, void *data)
+{
+	int32_t rc = 0;
+	struct cam_tfe_hw_event_recovery_data   *recovery_data = data;
+	struct cam_hw_start_args             start_args;
+	struct cam_hw_stop_args              stop_args;
+	struct cam_tfe_hw_mgr               *tfe_hw_mgr = priv;
+	struct cam_isp_hw_mgr_res           *hw_mgr_res;
+	struct cam_tfe_hw_mgr_ctx           *tfe_hw_mgr_ctx;
+	uint32_t                             i = 0;
+
+	uint32_t error_type = recovery_data->error_type;
+	struct cam_tfe_hw_mgr_ctx        *ctx = NULL;
+
+	/* Here recovery is performed */
+	CAM_DBG(CAM_ISP, "ErrorType = %d", error_type);
+
+	switch (error_type) {
+	case CAM_ISP_HW_ERROR_OVERFLOW:
+	case CAM_ISP_HW_ERROR_BUSIF_OVERFLOW:
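+		/*
+		 * Overflow recovery sequence: dump registers if
+		 * enabled, stop the affected contexts, reset the CSID
+		 * paths, reset the affected TFE cores and finally
+		 * restart the contexts.
+		 */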
+		if (!recovery_data->affected_ctx[0]) {
+			CAM_ERR(CAM_ISP,
+				"No context is affected but recovery called");
+			kfree(recovery_data);
+			return 0;
+		}
+
+		/* stop resources here */
+		CAM_DBG(CAM_ISP, "STOP: Number of affected context: %d",
+			recovery_data->no_of_context);
+		for (i = 0; i < recovery_data->no_of_context; i++) {
+			stop_args.ctxt_to_hw_map =
+				recovery_data->affected_ctx[i];
+			tfe_hw_mgr_ctx = recovery_data->affected_ctx[i];
+
+			if (g_tfe_hw_mgr.debug_cfg.enable_reg_dump)
+				cam_tfe_mgr_ctx_reg_dump(tfe_hw_mgr_ctx);
+
+			if (g_tfe_hw_mgr.debug_cfg.enable_recovery) {
+				rc = cam_tfe_mgr_stop_hw_in_overflow(
+					&stop_args);
+				if (rc) {
+					CAM_ERR(CAM_ISP,
+						"CTX stop failed(%d)", rc);
+					return rc;
+				}
+			}
+		}
+
+		if (!g_tfe_hw_mgr.debug_cfg.enable_recovery) {
+			CAM_INFO(CAM_ISP, "reg dumping is done");
+			return 0;
+		}
+
+		CAM_DBG(CAM_ISP, "RESET: CSID PATH");
+		for (i = 0; i < recovery_data->no_of_context; i++) {
+			ctx = recovery_data->affected_ctx[i];
+			list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_csid,
+				list) {
+				rc = cam_tfe_hw_mgr_reset_csid_res(hw_mgr_res);
+				if (rc) {
+					CAM_ERR(CAM_ISP, "Failed RESET (%d)",
+						hw_mgr_res->res_id);
+					return rc;
+				}
+			}
+		}
+
+		CAM_DBG(CAM_ISP, "RESET: Calling TFE reset");
+
+		for (i = 0; i < CAM_TFE_HW_NUM_MAX; i++) {
+			if (recovery_data->affected_core[i])
+				cam_tfe_mgr_reset_tfe_hw(tfe_hw_mgr, i);
+		}
+
+		CAM_DBG(CAM_ISP, "START: Number of affected context: %d",
+			recovery_data->no_of_context);
+
+		for (i = 0; i < recovery_data->no_of_context; i++) {
+			ctx =  recovery_data->affected_ctx[i];
+			start_args.ctxt_to_hw_map = ctx;
+
+			atomic_set(&ctx->overflow_pending, 0);
+
+			rc = cam_tfe_mgr_restart_hw(&start_args);
+			if (rc) {
+				CAM_ERR(CAM_ISP, "CTX start failed(%d)", rc);
+				return rc;
+			}
+			CAM_DBG(CAM_ISP, "Started resources rc (%d)", rc);
+		}
+		CAM_DBG(CAM_ISP, "Recovery Done rc (%d)", rc);
+
+		break;
+
+	case CAM_ISP_HW_ERROR_P2I_ERROR:
+		break;
+
+	case CAM_ISP_HW_ERROR_VIOLATION:
+		break;
+
+	default:
+		CAM_ERR(CAM_ISP, "Invalid Error");
+	}
+	CAM_DBG(CAM_ISP, "Exit: ErrorType = %d", error_type);
+
+	kfree(recovery_data);
+	return rc;
+}
+
+static int cam_tfe_hw_mgr_do_error_recovery(
+	struct cam_tfe_hw_event_recovery_data  *tfe_mgr_recovery_data)
+{
+	int32_t                             rc = 0;
+	struct crm_workq_task              *task = NULL;
+	struct cam_tfe_hw_event_recovery_data  *recovery_data = NULL;
+
+	recovery_data = kmemdup(tfe_mgr_recovery_data,
+		sizeof(struct cam_tfe_hw_event_recovery_data), GFP_ATOMIC);
+
+	if (!recovery_data)
+		return -ENOMEM;
+
+	CAM_DBG(CAM_ISP, "Enter: error_type (%d)", recovery_data->error_type);
+
+	task = cam_req_mgr_workq_get_task(g_tfe_hw_mgr.workq);
+	if (!task) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No empty task frame");
+		kfree(recovery_data);
+		return -ENOMEM;
+	}
+
+	task->process_cb = &cam_tfe_mgr_process_recovery_cb;
+	task->payload = recovery_data;
+	rc = cam_req_mgr_workq_enqueue_task(task,
+		recovery_data->affected_ctx[0]->hw_mgr,
+		CRM_TASK_PRIORITY_0);
+
+	return rc;
+}
+
+/*
+ * This function checks if any of the valid entries in affected_core[]
+ * is associated with this context. If yes:
+ *  a. It fills the other cores associated with this context into
+ *     affected_core[]
+ *  b. It returns true
+ */
+static bool cam_tfe_hw_mgr_is_ctx_affected(
+	struct cam_tfe_hw_mgr_ctx   *tfe_hwr_mgr_ctx,
+	uint32_t                    *affected_core,
+	uint32_t                     size)
+{
+
+	bool                  rc = false;
+	uint32_t              i = 0, j = 0;
+	uint32_t              max_idx =  tfe_hwr_mgr_ctx->num_base;
+	uint32_t              ctx_affected_core_idx[CAM_TFE_HW_NUM_MAX] = {0};
+
+	CAM_DBG(CAM_ISP, "Enter:max_idx = %d", max_idx);
+
+	if ((max_idx >= CAM_TFE_HW_NUM_MAX) || (size > CAM_TFE_HW_NUM_MAX)) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "invalid parameter = %d", max_idx);
+		return rc;
+	}
+
+	for (i = 0; i < max_idx; i++) {
+		if (affected_core[tfe_hwr_mgr_ctx->base[i].idx])
+			rc = true;
+		else {
+			ctx_affected_core_idx[j] = tfe_hwr_mgr_ctx->base[i].idx;
+			j = j + 1;
+		}
+	}
+
+	if (rc) {
+		while (j) {
+			if (affected_core[ctx_affected_core_idx[j-1]] != 1)
+				affected_core[ctx_affected_core_idx[j-1]] = 1;
+			j = j - 1;
+		}
+	}
+	CAM_DBG(CAM_ISP, "Exit");
+	return rc;
+}
+
+/*
+ * For any dual TFE context, if the non-affected TFE is also serving
+ * another context, then that context should also be notified of the
+ * fatal error. So loop through each context and:
+ *   a. Match the core_idx
+ *   b. Notify the CTX of the fatal error
+ */
+static int  cam_tfe_hw_mgr_find_affected_ctx(
+	struct cam_isp_hw_error_event_data    *error_event_data,
+	uint32_t                               curr_core_idx,
+	struct cam_tfe_hw_event_recovery_data     *recovery_data)
+{
+	uint32_t affected_core[CAM_TFE_HW_NUM_MAX] = {0};
+	struct cam_tfe_hw_mgr_ctx   *tfe_hwr_mgr_ctx = NULL;
+	cam_hw_event_cb_func         notify_err_cb;
+	struct cam_tfe_hw_mgr       *tfe_hwr_mgr = NULL;
+	enum cam_isp_hw_event_type   event_type = CAM_ISP_HW_EVENT_ERROR;
+	uint32_t i = 0;
+
+	if (!recovery_data) {
+		CAM_ERR(CAM_ISP, "recovery_data parameter is NULL");
+		return -EINVAL;
+	}
+
+	recovery_data->no_of_context = 0;
+	affected_core[curr_core_idx] = 1;
+	tfe_hwr_mgr = &g_tfe_hw_mgr;
+
+	list_for_each_entry(tfe_hwr_mgr_ctx,
+		&tfe_hwr_mgr->used_ctx_list, list) {
+		/*
+		 * Check if current core_idx matches the HW associated
+		 * with this context
+		 */
+		if (!cam_tfe_hw_mgr_is_ctx_affected(tfe_hwr_mgr_ctx,
+			affected_core, CAM_TFE_HW_NUM_MAX))
+			continue;
+
+		atomic_set(&tfe_hwr_mgr_ctx->overflow_pending, 1);
+		notify_err_cb = tfe_hwr_mgr_ctx->common.event_cb[event_type];
+
+		/* Add affected_context in list of recovery data */
+		CAM_DBG(CAM_ISP, "Add affected ctx %d to list",
+			tfe_hwr_mgr_ctx->ctx_index);
+		if (recovery_data->no_of_context < CAM_CTX_MAX)
+			recovery_data->affected_ctx[
+				recovery_data->no_of_context++] =
+				tfe_hwr_mgr_ctx;
+
+		/*
+		 * In the call back function corresponding ISP context
+		 * will update CRM about fatal Error
+		 */
+		notify_err_cb(tfe_hwr_mgr_ctx->common.cb_priv,
+			CAM_ISP_HW_EVENT_ERROR, error_event_data);
+	}
+
+	/* fill the affected_core in recovery data */
+	for (i = 0; i < CAM_TFE_HW_NUM_MAX; i++) {
+		recovery_data->affected_core[i] = affected_core[i];
+		CAM_DBG(CAM_ISP, "tfe core %d is affected (%d)",
+			 i, recovery_data->affected_core[i]);
+	}
+
+	return 0;
+}
+
+static int cam_tfe_hw_mgr_handle_hw_err(
+	void                                *evt_info)
+{
+	struct cam_isp_hw_event_info            *event_info = evt_info;
+	struct cam_isp_hw_error_event_data       error_event_data = {0};
+	struct cam_tfe_hw_event_recovery_data    recovery_data = {0};
+	int    rc = -EINVAL;
+	uint32_t core_idx;
+
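+
+	/*
+	 * Classify the failure by its source: a violation reported by
+	 * any block, an overflow on a TFE input or CSID path, or a
+	 * bus write master overflow on a TFE output resource.
+	 */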
+	if (event_info->err_type == CAM_TFE_IRQ_STATUS_VIOLATION)
+		error_event_data.error_type = CAM_ISP_HW_ERROR_VIOLATION;
+	else if (event_info->res_type == CAM_ISP_RESOURCE_TFE_IN ||
+		event_info->res_type == CAM_ISP_RESOURCE_PIX_PATH)
+		error_event_data.error_type = CAM_ISP_HW_ERROR_OVERFLOW;
+	else if (event_info->res_type == CAM_ISP_RESOURCE_TFE_OUT)
+		error_event_data.error_type = CAM_ISP_HW_ERROR_BUSIF_OVERFLOW;
+
+	core_idx = event_info->hw_idx;
+
+	if (g_tfe_hw_mgr.debug_cfg.enable_recovery)
+		error_event_data.recovery_enabled = true;
+	else
+		error_event_data.recovery_enabled = false;
+
+	rc = cam_tfe_hw_mgr_find_affected_ctx(&error_event_data,
+		core_idx, &recovery_data);
+
+	if (event_info->res_type == CAM_ISP_RESOURCE_TFE_OUT)
+		return rc;
+
+	if (g_tfe_hw_mgr.debug_cfg.enable_recovery) {
+		/* Trigger for recovery */
+		if (event_info->err_type == CAM_TFE_IRQ_STATUS_VIOLATION)
+			recovery_data.error_type = CAM_ISP_HW_ERROR_VIOLATION;
+		else
+			recovery_data.error_type = CAM_ISP_HW_ERROR_OVERFLOW;
+		cam_tfe_hw_mgr_do_error_recovery(&recovery_data);
+	} else {
+		CAM_DBG(CAM_ISP, "recovery is not enabled");
+		rc = 0;
+	}
+
+	return rc;
+}
+
+static int cam_tfe_hw_mgr_handle_hw_rup(
+	void                                    *ctx,
+	void                                    *evt_info)
+{
+	struct cam_isp_hw_event_info            *event_info = evt_info;
+	struct cam_tfe_hw_mgr_ctx               *tfe_hw_mgr_ctx = ctx;
+	cam_hw_event_cb_func                     tfe_hwr_irq_rup_cb;
+	struct cam_isp_hw_reg_update_event_data  rup_event_data;
+
+	tfe_hwr_irq_rup_cb =
+		tfe_hw_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_REG_UPDATE];
+
+	switch (event_info->res_id) {
+	case CAM_ISP_HW_TFE_IN_CAMIF:
+		if (tfe_hw_mgr_ctx->is_dual)
+			if (event_info->hw_idx != tfe_hw_mgr_ctx->master_hw_idx)
+				break;
+
+		if (atomic_read(&tfe_hw_mgr_ctx->overflow_pending))
+			break;
+
+		tfe_hwr_irq_rup_cb(tfe_hw_mgr_ctx->common.cb_priv,
+			CAM_ISP_HW_EVENT_REG_UPDATE, &rup_event_data);
+		break;
+
+	case CAM_ISP_HW_TFE_IN_RDI0:
+	case CAM_ISP_HW_TFE_IN_RDI1:
+	case CAM_ISP_HW_TFE_IN_RDI2:
+		if (!tfe_hw_mgr_ctx->is_rdi_only_context)
+			break;
+		if (atomic_read(&tfe_hw_mgr_ctx->overflow_pending))
+			break;
+		tfe_hwr_irq_rup_cb(tfe_hw_mgr_ctx->common.cb_priv,
+			CAM_ISP_HW_EVENT_REG_UPDATE, &rup_event_data);
+		break;
+
+	default:
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid res_id: %d",
+			event_info->res_id);
+		break;
+	}
+
+	CAM_DBG(CAM_ISP, "RUP done for TFE source %d",
+		event_info->res_id);
+
+	return 0;
+}
+
+static int cam_tfe_hw_mgr_check_irq_for_dual_tfe(
+	struct cam_tfe_hw_mgr_ctx            *tfe_hw_mgr_ctx,
+	uint32_t                              hw_event_type)
+{
+	int32_t                               rc = -EINVAL;
+	uint32_t                             *event_cnt = NULL;
+	uint32_t                              core_idx0 = 0;
+	uint32_t                              core_idx1 = 1;
+
+	if (!tfe_hw_mgr_ctx->is_dual)
+		return 0;
+
+	switch (hw_event_type) {
+	case CAM_ISP_HW_EVENT_SOF:
+		event_cnt = tfe_hw_mgr_ctx->sof_cnt;
+		break;
+	case CAM_ISP_HW_EVENT_EPOCH:
+		event_cnt = tfe_hw_mgr_ctx->epoch_cnt;
+		break;
+	case CAM_ISP_HW_EVENT_EOF:
+		event_cnt = tfe_hw_mgr_ctx->eof_cnt;
+		break;
+	default:
+		return 0;
+	}
+
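+	/*
+	 * Both cores of a healthy dual TFE pair post every event, so
+	 * the per-core counters stay equal; equal counts are consumed
+	 * and the event is reported once. A skew of exactly one frame
+	 * defers reporting until the lagging core catches up, while a
+	 * larger skew is rebalanced below and counted as a mismatch;
+	 * after more than 10 mismatches the event is dropped.
+	 */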
+	if (event_cnt[core_idx0] == event_cnt[core_idx1]) {
+
+		event_cnt[core_idx0] = 0;
+		event_cnt[core_idx1] = 0;
+
+		rc = 0;
+		return rc;
+	}
+
+	if ((event_cnt[core_idx0] &&
+		(event_cnt[core_idx0] - event_cnt[core_idx1] > 1)) ||
+		(event_cnt[core_idx1] &&
+		(event_cnt[core_idx1] - event_cnt[core_idx0] > 1))) {
+
+		if (tfe_hw_mgr_ctx->dual_tfe_irq_mismatch_cnt > 10) {
+			rc = -1;
+			return rc;
+		}
+
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"One TFE could not generate hw event %d id0:%d id1:%d",
+			hw_event_type, event_cnt[core_idx0],
+			event_cnt[core_idx1]);
+		if (event_cnt[core_idx0] >= 2) {
+			event_cnt[core_idx0]--;
+			tfe_hw_mgr_ctx->dual_tfe_irq_mismatch_cnt++;
+		}
+		if (event_cnt[core_idx1] >= 2) {
+			event_cnt[core_idx1]--;
+			tfe_hw_mgr_ctx->dual_tfe_irq_mismatch_cnt++;
+		}
+
+		if (tfe_hw_mgr_ctx->dual_tfe_irq_mismatch_cnt == 1)
+			cam_tfe_mgr_ctx_irq_dump(tfe_hw_mgr_ctx);
+		rc = 0;
+	}
+
+	CAM_DBG(CAM_ISP, "Only one core_index has given hw event %d",
+			hw_event_type);
+
+	return rc;
+}
+
+static int cam_tfe_hw_mgr_handle_hw_epoch(
+	void                                 *ctx,
+	void                                 *evt_info)
+{
+	struct cam_isp_hw_event_info         *event_info = evt_info;
+	struct cam_tfe_hw_mgr_ctx            *tfe_hw_mgr_ctx = ctx;
+	cam_hw_event_cb_func                  tfe_hw_irq_epoch_cb;
+	struct cam_isp_hw_epoch_event_data    epoch_done_event_data;
+	int                                   rc = 0;
+
+	tfe_hw_irq_epoch_cb =
+		tfe_hw_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_EPOCH];
+
+	switch (event_info->res_id) {
+	case CAM_ISP_HW_TFE_IN_CAMIF:
+		tfe_hw_mgr_ctx->epoch_cnt[event_info->hw_idx]++;
+		rc = cam_tfe_hw_mgr_check_irq_for_dual_tfe(tfe_hw_mgr_ctx,
+			CAM_ISP_HW_EVENT_EPOCH);
+		if (!rc) {
+			if (atomic_read(&tfe_hw_mgr_ctx->overflow_pending))
+				break;
+			tfe_hw_irq_epoch_cb(tfe_hw_mgr_ctx->common.cb_priv,
+				CAM_ISP_HW_EVENT_EPOCH, &epoch_done_event_data);
+		}
+		break;
+
+	case CAM_ISP_HW_TFE_IN_RDI0:
+	case CAM_ISP_HW_TFE_IN_RDI1:
+	case CAM_ISP_HW_TFE_IN_RDI2:
+		break;
+
+	default:
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid res_id: %d",
+			event_info->res_id);
+		break;
+	}
+
+	CAM_DBG(CAM_ISP, "Epoch for TFE source %d", event_info->res_id);
+
+	return 0;
+}
+
+static int cam_tfe_hw_mgr_handle_hw_sof(
+	void                                 *ctx,
+	void                                 *evt_info)
+{
+	struct cam_isp_hw_event_info         *event_info = evt_info;
+	struct cam_tfe_hw_mgr_ctx            *tfe_hw_mgr_ctx = ctx;
+	cam_hw_event_cb_func                  tfe_hw_irq_sof_cb;
+	struct cam_isp_hw_sof_event_data      sof_done_event_data;
+	int                                   rc = 0;
+
+	tfe_hw_irq_sof_cb =
+		tfe_hw_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_SOF];
+
+	switch (event_info->res_id) {
+	case CAM_ISP_HW_TFE_IN_CAMIF:
+		tfe_hw_mgr_ctx->sof_cnt[event_info->hw_idx]++;
+		rc = cam_tfe_hw_mgr_check_irq_for_dual_tfe(tfe_hw_mgr_ctx,
+			CAM_ISP_HW_EVENT_SOF);
+		if (!rc) {
+			cam_tfe_mgr_cmd_get_sof_timestamp(tfe_hw_mgr_ctx,
+				&sof_done_event_data.timestamp,
+				&sof_done_event_data.boot_time);
+
+			if (atomic_read(&tfe_hw_mgr_ctx->overflow_pending))
+				break;
+
+			tfe_hw_irq_sof_cb(tfe_hw_mgr_ctx->common.cb_priv,
+				CAM_ISP_HW_EVENT_SOF, &sof_done_event_data);
+		}
+		break;
+
+	case CAM_ISP_HW_TFE_IN_RDI0:
+	case CAM_ISP_HW_TFE_IN_RDI1:
+	case CAM_ISP_HW_TFE_IN_RDI2:
+		if (!tfe_hw_mgr_ctx->is_rdi_only_context)
+			break;
+		cam_tfe_mgr_cmd_get_sof_timestamp(tfe_hw_mgr_ctx,
+			&sof_done_event_data.timestamp,
+			&sof_done_event_data.boot_time);
+		if (atomic_read(&tfe_hw_mgr_ctx->overflow_pending))
+			break;
+		tfe_hw_irq_sof_cb(tfe_hw_mgr_ctx->common.cb_priv,
+			CAM_ISP_HW_EVENT_SOF, &sof_done_event_data);
+		break;
+
+	default:
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid res_id: %d",
+			event_info->res_id);
+		break;
+	}
+
+	CAM_DBG(CAM_ISP, "SOF for TFE source %d", event_info->res_id);
+
+	return 0;
+}
+
+static int cam_tfe_hw_mgr_handle_hw_eof(
+	void                                 *ctx,
+	void                                 *evt_info)
+{
+	struct cam_isp_hw_event_info         *event_info = evt_info;
+	struct cam_tfe_hw_mgr_ctx            *tfe_hw_mgr_ctx = ctx;
+	cam_hw_event_cb_func                  tfe_hw_irq_eof_cb;
+	struct cam_isp_hw_eof_event_data      eof_done_event_data;
+	int                                   rc = 0;
+
+	tfe_hw_irq_eof_cb =
+		tfe_hw_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_EOF];
+
+	switch (event_info->res_id) {
+	case CAM_ISP_HW_TFE_IN_CAMIF:
+		tfe_hw_mgr_ctx->eof_cnt[event_info->hw_idx]++;
+		rc = cam_tfe_hw_mgr_check_irq_for_dual_tfe(tfe_hw_mgr_ctx,
+			CAM_ISP_HW_EVENT_EOF);
+		if (!rc) {
+			if (atomic_read(&tfe_hw_mgr_ctx->overflow_pending))
+				break;
+			tfe_hw_irq_eof_cb(tfe_hw_mgr_ctx->common.cb_priv,
+				CAM_ISP_HW_EVENT_EOF, &eof_done_event_data);
+		}
+		break;
+
+	case CAM_ISP_HW_TFE_IN_RDI0:
+	case CAM_ISP_HW_TFE_IN_RDI1:
+	case CAM_ISP_HW_TFE_IN_RDI2:
+		break;
+
+	default:
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid res_id: %d",
+			event_info->res_id);
+		break;
+	}
+
+	CAM_DBG(CAM_ISP, "EOF for TFE source %d",
+		event_info->res_id);
+
+	return 0;
+}
+
+static int cam_tfe_hw_mgr_handle_hw_buf_done(
+	void                                *ctx,
+	void                                *evt_info)
+{
+	cam_hw_event_cb_func                 tfe_hwr_irq_wm_done_cb;
+	struct cam_tfe_hw_mgr_ctx           *tfe_hw_mgr_ctx = ctx;
+	struct cam_isp_hw_done_event_data    buf_done_event_data = {0};
+	struct cam_isp_hw_event_info        *event_info = evt_info;
+
+	tfe_hwr_irq_wm_done_cb =
+		tfe_hw_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_DONE];
+
+	buf_done_event_data.num_handles = 1;
+	buf_done_event_data.resource_handle[0] = event_info->res_id;
+
+	if (atomic_read(&tfe_hw_mgr_ctx->overflow_pending))
+		return 0;
+
+	if (buf_done_event_data.num_handles > 0 && tfe_hwr_irq_wm_done_cb) {
+		CAM_DBG(CAM_ISP, "Notify ISP context");
+		tfe_hwr_irq_wm_done_cb(tfe_hw_mgr_ctx->common.cb_priv,
+			CAM_ISP_HW_EVENT_DONE, &buf_done_event_data);
+	}
+
+	CAM_DBG(CAM_ISP, "Buf done for out_res->res_id: 0x%x",
+		event_info->res_id);
+
+	return 0;
+}
+
+static int cam_tfe_hw_mgr_event_handler(
+	void                                *priv,
+	uint32_t                             evt_id,
+	void                                *evt_info)
+{
+	int                                  rc = 0;
+
+	if (!evt_info)
+		return -EINVAL;
+
+	if (!priv)
+		if (evt_id != CAM_ISP_HW_EVENT_ERROR)
+			return -EINVAL;
+
+	CAM_DBG(CAM_ISP, "Event ID 0x%x", evt_id);
+
+	switch (evt_id) {
+	case CAM_ISP_HW_EVENT_SOF:
+		rc = cam_tfe_hw_mgr_handle_hw_sof(priv, evt_info);
+		break;
+
+	case CAM_ISP_HW_EVENT_REG_UPDATE:
+		rc = cam_tfe_hw_mgr_handle_hw_rup(priv, evt_info);
+		break;
+
+	case CAM_ISP_HW_EVENT_EPOCH:
+		rc = cam_tfe_hw_mgr_handle_hw_epoch(priv, evt_info);
+		break;
+
+	case CAM_ISP_HW_EVENT_EOF:
+		rc = cam_tfe_hw_mgr_handle_hw_eof(priv, evt_info);
+		break;
+
+	case CAM_ISP_HW_EVENT_DONE:
+		rc = cam_tfe_hw_mgr_handle_hw_buf_done(priv, evt_info);
+		break;
+
+	case CAM_ISP_HW_EVENT_ERROR:
+		rc = cam_tfe_hw_mgr_handle_hw_err(evt_info);
+		break;
+
+	default:
+		CAM_ERR(CAM_ISP, "Invalid event ID %d", evt_id);
+		break;
+	}
+
+	return rc;
+}
+
+static int cam_tfe_hw_mgr_sort_dev_with_caps(
+	struct cam_tfe_hw_mgr *tfe_hw_mgr)
+{
+	int i;
+
+	/* get caps for csid devices */
+	for (i = 0; i < CAM_TFE_CSID_HW_NUM_MAX; i++) {
+		if (!tfe_hw_mgr->csid_devices[i])
+			continue;
+		if (tfe_hw_mgr->csid_devices[i]->hw_ops.get_hw_caps) {
+			tfe_hw_mgr->csid_devices[i]->hw_ops.get_hw_caps(
+				tfe_hw_mgr->csid_devices[i]->hw_priv,
+				&tfe_hw_mgr->tfe_csid_dev_caps[i],
+				sizeof(tfe_hw_mgr->tfe_csid_dev_caps[i]));
+		}
+	}
+
+	/* get caps for tfe devices */
+	for (i = 0; i < CAM_TFE_HW_NUM_MAX; i++) {
+		if (!tfe_hw_mgr->tfe_devices[i])
+			continue;
+		if (tfe_hw_mgr->tfe_devices[i]->hw_ops.get_hw_caps) {
+			tfe_hw_mgr->tfe_devices[i]->hw_ops.get_hw_caps(
+				tfe_hw_mgr->tfe_devices[i]->hw_priv,
+				&tfe_hw_mgr->tfe_dev_caps[i],
+				sizeof(tfe_hw_mgr->tfe_dev_caps[i]));
+		}
+	}
+
+	return 0;
+}
+
+static int cam_tfe_set_csid_debug(void *data, u64 val)
+{
+	g_tfe_hw_mgr.debug_cfg.csid_debug = val;
+	CAM_DBG(CAM_ISP, "Set CSID Debug value :%lld", val);
+	return 0;
+}
+
+static int cam_tfe_get_csid_debug(void *data, u64 *val)
+{
+	*val = g_tfe_hw_mgr.debug_cfg.csid_debug;
+	CAM_DBG(CAM_ISP, "Get CSID Debug value :%lld",
+		g_tfe_hw_mgr.debug_cfg.csid_debug);
+
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(cam_tfe_csid_debug,
+	cam_tfe_get_csid_debug,
+	cam_tfe_set_csid_debug, "%16llu");
+
+static int cam_tfe_set_camif_debug(void *data, u64 val)
+{
+	g_tfe_hw_mgr.debug_cfg.camif_debug = val;
+	CAM_DBG(CAM_ISP,
+		"Set camif enable_diag_sensor_status value :%lld", val);
+	return 0;
+}
+
+static int cam_tfe_get_camif_debug(void *data, u64 *val)
+{
+	*val = g_tfe_hw_mgr.debug_cfg.camif_debug;
+	CAM_DBG(CAM_ISP,
+		"Get camif enable_diag_sensor_status value :%lld",
+		g_tfe_hw_mgr.debug_cfg.camif_debug);
+
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(cam_tfe_camif_debug,
+	cam_tfe_get_camif_debug,
+	cam_tfe_set_camif_debug, "%16llu");
+
+static int cam_tfe_hw_mgr_debug_register(void)
+{
+	g_tfe_hw_mgr.debug_cfg.dentry = debugfs_create_dir("camera_tfe",
+		NULL);
+
+	if (!g_tfe_hw_mgr.debug_cfg.dentry) {
+		CAM_ERR(CAM_ISP, "failed to create dentry");
+		return -ENOMEM;
+	}
+
+	if (!debugfs_create_file("tfe_csid_debug",
+		0644,
+		g_tfe_hw_mgr.debug_cfg.dentry, NULL,
+		&cam_tfe_csid_debug)) {
+		CAM_ERR(CAM_ISP, "failed to create cam_tfe_csid_debug");
+		goto err;
+	}
+
+	if (!debugfs_create_u32("enable_recovery",
+		0644,
+		g_tfe_hw_mgr.debug_cfg.dentry,
+		&g_tfe_hw_mgr.debug_cfg.enable_recovery)) {
+		CAM_ERR(CAM_ISP, "failed to create enable_recovery");
+		goto err;
+	}
+
+	if (!debugfs_create_bool("enable_reg_dump",
+		0644,
+		g_tfe_hw_mgr.debug_cfg.dentry,
+		&g_tfe_hw_mgr.debug_cfg.enable_reg_dump)) {
+		CAM_ERR(CAM_ISP, "failed to create enable_reg_dump");
+		goto err;
+	}
+
+	if (!debugfs_create_file("tfe_camif_debug",
+		0644,
+		g_tfe_hw_mgr.debug_cfg.dentry, NULL,
+		&cam_tfe_camif_debug)) {
+		CAM_ERR(CAM_ISP, "failed to create cam_tfe_camif_debug");
+		goto err;
+	}
+
+	if (!debugfs_create_bool("per_req_reg_dump",
+		0644,
+		g_tfe_hw_mgr.debug_cfg.dentry,
+		&g_tfe_hw_mgr.debug_cfg.per_req_reg_dump)) {
+		CAM_ERR(CAM_ISP, "failed to create per_req_reg_dump entry");
+		goto err;
+	}
+
+	g_tfe_hw_mgr.debug_cfg.enable_recovery = 0;
+
+	return 0;
+
+err:
+	debugfs_remove_recursive(g_tfe_hw_mgr.debug_cfg.dentry);
+	return -ENOMEM;
+}
+
+int cam_tfe_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, int *iommu_hdl)
+{
+	int rc = -EFAULT;
+	int i, j;
+	struct cam_iommu_handle cdm_handles;
+	struct cam_tfe_hw_mgr_ctx *ctx_pool;
+	struct cam_isp_hw_mgr_res *res_list_tfe_out;
+
+	CAM_DBG(CAM_ISP, "Enter");
+
+	memset(&g_tfe_hw_mgr, 0, sizeof(g_tfe_hw_mgr));
+
+	mutex_init(&g_tfe_hw_mgr.ctx_mutex);
+
+	if (CAM_TFE_HW_NUM_MAX != CAM_TFE_CSID_HW_NUM_MAX) {
+		CAM_ERR(CAM_ISP, "CSID num is different from TFE num");
+		return -EINVAL;
+	}
+
+	/* fill tfe hw intf information */
+	for (i = 0, j = 0; i < CAM_TFE_HW_NUM_MAX; i++) {
+		rc = cam_tfe_hw_init(&g_tfe_hw_mgr.tfe_devices[i], i);
+		if (!rc) {
+			struct cam_hw_info *tfe_hw = (struct cam_hw_info *)
+				g_tfe_hw_mgr.tfe_devices[i]->hw_priv;
+			struct cam_hw_soc_info *soc_info = &tfe_hw->soc_info;
+
+			j++;
+
+			g_tfe_hw_mgr.cdm_reg_map[i] = &soc_info->reg_map[0];
+			CAM_DBG(CAM_ISP,
+				"reg_map: mem base = %pK cam_base = 0x%llx",
+				(void __iomem *)soc_info->reg_map[0].mem_base,
+				(uint64_t) soc_info->reg_map[0].mem_cam_base);
+		} else {
+			g_tfe_hw_mgr.cdm_reg_map[i] = NULL;
+		}
+	}
+	if (j == 0) {
+		CAM_ERR(CAM_ISP, "no valid TFE HW");
+		return -EINVAL;
+	}
+
+	/* fill csid hw intf information */
+	for (i = 0, j = 0; i < CAM_TFE_CSID_HW_NUM_MAX; i++) {
+		rc = cam_tfe_csid_hw_init(&g_tfe_hw_mgr.csid_devices[i], i);
+		if (!rc)
+			j++;
+	}
+	if (!j) {
+		CAM_ERR(CAM_ISP, "no valid TFE CSID HW");
+		return -EINVAL;
+	}
+
+	/* fill tpg hw intf information */
+	for (i = 0, j = 0; i < CAM_TOP_TPG_HW_NUM_MAX; i++) {
+		rc = cam_top_tpg_hw_init(&g_tfe_hw_mgr.tpg_devices[i], i);
+		if (!rc)
+			j++;
+	}
+	if (!j) {
+		CAM_ERR(CAM_ISP, "no valid TFE TPG HW");
+		return -EINVAL;
+	}
+
+	cam_tfe_hw_mgr_sort_dev_with_caps(&g_tfe_hw_mgr);
+
+	/* setup tfe context list */
+	INIT_LIST_HEAD(&g_tfe_hw_mgr.free_ctx_list);
+	INIT_LIST_HEAD(&g_tfe_hw_mgr.used_ctx_list);
+
+	/*
+	 *  For now, we only support one iommu handle. Later
+	 *  we will need to set up more iommu handles for other
+	 *  use cases.
+	 *  Also, we have to release them once we have the
+	 *  deinit support.
+	 */
+	if (cam_smmu_get_handle("tfe",
+		&g_tfe_hw_mgr.mgr_common.img_iommu_hdl)) {
+		CAM_ERR(CAM_ISP, "Can not get iommu handle");
+		return -EINVAL;
+	}
+
+	if (cam_smmu_get_handle("cam-secure",
+		&g_tfe_hw_mgr.mgr_common.img_iommu_hdl_secure)) {
+		CAM_ERR(CAM_ISP, "Failed to get secure iommu handle");
+		goto secure_fail;
+	}
+
+	CAM_DBG(CAM_ISP, "iommu_handles: non-secure[0x%x], secure[0x%x]",
+		g_tfe_hw_mgr.mgr_common.img_iommu_hdl,
+		g_tfe_hw_mgr.mgr_common.img_iommu_hdl_secure);
+
+	if (!cam_cdm_get_iommu_handle("tfe0", &cdm_handles)) {
+		CAM_DBG(CAM_ISP, "Successfully acquired the CDM iommu handles");
+		g_tfe_hw_mgr.mgr_common.cmd_iommu_hdl = cdm_handles.non_secure;
+		g_tfe_hw_mgr.mgr_common.cmd_iommu_hdl_secure =
+			cdm_handles.secure;
+	} else {
+		CAM_DBG(CAM_ISP, "Failed to acquire the CDM iommu handles");
+		g_tfe_hw_mgr.mgr_common.cmd_iommu_hdl = -1;
+		g_tfe_hw_mgr.mgr_common.cmd_iommu_hdl_secure = -1;
+	}
+
+	atomic_set(&g_tfe_hw_mgr.active_ctx_cnt, 0);
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		memset(&g_tfe_hw_mgr.ctx_pool[i], 0,
+			sizeof(g_tfe_hw_mgr.ctx_pool[i]));
+		INIT_LIST_HEAD(&g_tfe_hw_mgr.ctx_pool[i].list);
+		INIT_LIST_HEAD(&g_tfe_hw_mgr.ctx_pool[i].res_list_tfe_csid);
+		INIT_LIST_HEAD(&g_tfe_hw_mgr.ctx_pool[i].res_list_tfe_in);
+		ctx_pool = &g_tfe_hw_mgr.ctx_pool[i];
+		for (j = 0; j < CAM_TFE_HW_OUT_RES_MAX; j++) {
+			res_list_tfe_out = &ctx_pool->res_list_tfe_out[j];
+			INIT_LIST_HEAD(&res_list_tfe_out->list);
+		}
+
+		/* init context pool */
+		INIT_LIST_HEAD(&g_tfe_hw_mgr.ctx_pool[i].free_res_list);
+		for (j = 0; j < CAM_TFE_HW_RES_POOL_MAX; j++) {
+			INIT_LIST_HEAD(
+				&g_tfe_hw_mgr.ctx_pool[i].res_pool[j].list);
+			list_add_tail(
+				&g_tfe_hw_mgr.ctx_pool[i].res_pool[j].list,
+				&g_tfe_hw_mgr.ctx_pool[i].free_res_list);
+		}
+
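+		/*
+		 * struct cam_cdm_bl_request appears to end in a
+		 * one-element command array, so one cam_cdm_bl_cmd is
+		 * already part of its size; allocate room for the
+		 * remaining CAM_TFE_HW_ENTRIES_MAX - 1 commands.
+		 */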
+		g_tfe_hw_mgr.ctx_pool[i].cdm_cmd =
+			kzalloc(((sizeof(struct cam_cdm_bl_request)) +
+				((CAM_TFE_HW_ENTRIES_MAX - 1) *
+				 sizeof(struct cam_cdm_bl_cmd))), GFP_KERNEL);
+		if (!g_tfe_hw_mgr.ctx_pool[i].cdm_cmd) {
+			rc = -ENOMEM;
+			CAM_ERR(CAM_ISP, "Allocation Failed for cdm command");
+			goto end;
+		}
+
+		g_tfe_hw_mgr.ctx_pool[i].ctx_index = i;
+		g_tfe_hw_mgr.ctx_pool[i].hw_mgr = &g_tfe_hw_mgr;
+
+		cam_tasklet_init(&g_tfe_hw_mgr.mgr_common.tasklet_pool[i],
+			&g_tfe_hw_mgr.ctx_pool[i], i);
+		g_tfe_hw_mgr.ctx_pool[i].common.tasklet_info =
+			g_tfe_hw_mgr.mgr_common.tasklet_pool[i];
+
+		init_completion(&g_tfe_hw_mgr.ctx_pool[i].config_done_complete);
+		list_add_tail(&g_tfe_hw_mgr.ctx_pool[i].list,
+			&g_tfe_hw_mgr.free_ctx_list);
+	}
+
+	/* Create Worker for tfe_hw_mgr with 10 tasks */
+	rc = cam_req_mgr_workq_create("cam_tfe_worker", 10,
+			&g_tfe_hw_mgr.workq, CRM_WORKQ_USAGE_NON_IRQ, 0);
+	if (rc < 0) {
+		CAM_ERR(CAM_ISP, "Unable to create worker");
+		goto end;
+	}
+
+	/* fill return structure */
+	hw_mgr_intf->hw_mgr_priv = &g_tfe_hw_mgr;
+	hw_mgr_intf->hw_get_caps = cam_tfe_mgr_get_hw_caps;
+	hw_mgr_intf->hw_acquire = cam_tfe_mgr_acquire;
+	hw_mgr_intf->hw_start = cam_tfe_mgr_start_hw;
+	hw_mgr_intf->hw_stop = cam_tfe_mgr_stop_hw;
+	hw_mgr_intf->hw_read = cam_tfe_mgr_read;
+	hw_mgr_intf->hw_write = cam_tfe_mgr_write;
+	hw_mgr_intf->hw_release = cam_tfe_mgr_release_hw;
+	hw_mgr_intf->hw_prepare_update = cam_tfe_mgr_prepare_hw_update;
+	hw_mgr_intf->hw_config = cam_tfe_mgr_config_hw;
+	hw_mgr_intf->hw_cmd = cam_tfe_mgr_cmd;
+	hw_mgr_intf->hw_reset = cam_tfe_mgr_reset;
+
+	if (iommu_hdl)
+		*iommu_hdl = g_tfe_hw_mgr.mgr_common.img_iommu_hdl;
+
+	cam_tfe_hw_mgr_debug_register();
+	CAM_DBG(CAM_ISP, "Exit");
+
+	return 0;
+end:
+	if (rc) {
+		for (i = 0; i < CAM_CTX_MAX; i++) {
+			cam_tasklet_deinit(
+				&g_tfe_hw_mgr.mgr_common.tasklet_pool[i]);
+			kfree(g_tfe_hw_mgr.ctx_pool[i].cdm_cmd);
+			g_tfe_hw_mgr.ctx_pool[i].cdm_cmd = NULL;
+			g_tfe_hw_mgr.ctx_pool[i].common.tasklet_info = NULL;
+		}
+	}
+	cam_smmu_destroy_handle(
+		g_tfe_hw_mgr.mgr_common.img_iommu_hdl_secure);
+	g_tfe_hw_mgr.mgr_common.img_iommu_hdl_secure = -1;
+secure_fail:
+	cam_smmu_destroy_handle(g_tfe_hw_mgr.mgr_common.img_iommu_hdl);
+	g_tfe_hw_mgr.mgr_common.img_iommu_hdl = -1;
+	return rc;
+}

+ 196 - 0
drivers/cam_isp/isp_hw_mgr/cam_tfe_hw_mgr.h

@@ -0,0 +1,196 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_TFE_HW_MGR_H_
+#define _CAM_TFE_HW_MGR_H_
+
+#include <linux/completion.h>
+#include <media/cam_isp_tfe.h>
+#include "cam_isp_hw_mgr.h"
+#include "cam_tfe_hw_intf.h"
+#include "cam_tfe_csid_hw_intf.h"
+#include "cam_top_tpg_hw_intf.h"
+#include "cam_tasklet_util.h"
+
+
+
+/* TFE resource constants */
+#define CAM_TFE_HW_IN_RES_MAX            (CAM_ISP_TFE_IN_RES_MAX & 0xFF)
+#define CAM_TFE_HW_OUT_RES_MAX           (CAM_ISP_TFE_OUT_RES_MAX & 0xFF)
+#define CAM_TFE_HW_RES_POOL_MAX          64
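+/*
+ * The UAPI TFE resource IDs appear to encode the resource group in
+ * their upper bits, so masking the *_RES_MAX values with 0xFF keeps
+ * only the per-group index count used to size the arrays below
+ * (inferred from usage, not from the UAPI header).
+ */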
+
+/**
+ * struct cam_tfe_hw_mgr_debug - contain the debug information
+ *
+ * @dentry:                    Debugfs entry
+ * @csid_debug:                csid debug information
+ * @enable_recovery:           enable recovery
+ * @camif_debug:               enable sensor diagnosis status
+ * @enable_reg_dump:           enable reg dump on error
+ * @per_req_reg_dump:          Enable per request reg dump
+ *
+ */
+struct cam_tfe_hw_mgr_debug {
+	struct dentry  *dentry;
+	uint64_t       csid_debug;
+	uint32_t       enable_recovery;
+	uint32_t       camif_debug;
+	bool           enable_reg_dump;
+	bool           per_req_reg_dump;
+};
+
+/**
+ * struct cam_tfe_hw_mgr_ctx - TFE HW manager Context object
+ *
+ * @list:                     used by the ctx list.
+ * @common:                   common acquired context data
+ * @ctx_index:                acquired context id.
+ * @hw_mgr:                   tfe hw mgr which owns this context
+ * @ctx_in_use:               flag to tell whether context is active
+ * @res_list_tpg:             tpg resource
+ * @res_list_tfe_csid:        tfe csid resource list
+ * @res_list_tfe_in:          tfe input resource list
+ * @res_list_tfe_out:         tfe output resources array
+ * @free_res_list:            free resources list for the branch node
+ * @res_pool:                 memory storage for the free resource list
+ * @base                      device base index array containing all the TFE
+ *                            HW instances associated with this context.
+ * @num_base                  number of valid base data in the base array
+ * @cdm_handle                cdm hw acquire handle
+ * @cdm_ops                   cdm util operation pointer for building
+ *                            cdm commands
+ * @cdm_cmd                   cdm base and length request pointer
+ * @config_done_complete      indicator for configuration complete
+ * @sof_cnt                   sof count value per core, used for dual TFE
+ * @epoch_cnt                 epoch count value per core, used for dual TFE
+ * @eof_cnt                   eof count value per core, used for dual TFE
+ * @overflow_pending          flag to specify that an overflow is pending for the
+ *                            context
+ * @cdm_done                  flag to indicate cdm has finished writing shadow
+ *                            registers
+ * @is_rdi_only_context       flag to specify the context has only rdi resource
+ * @reg_dump_buf_desc:        cmd buffer descriptors for reg dump
+ * @num_reg_dump_buf:         count of descriptors in reg_dump_buf_desc
+ * @applied_req_id:           last request id to be applied
+ * @last_dump_flush_req_id    last req id for which reg dump on flush was called
+ * @last_dump_err_req_id      last req id for which reg dump on error was called
+ * @init_done                 indicate whether init hw is done
+ * @is_dual                   indicate whether context is in dual TFE mode
+ * @is_tpg                    indicate whether context use tpg
+ * @master_hw_idx             master hardware index in dual tfe case
+ * @dual_tfe_irq_mismatch_cnt irq mismatch count value per core, used for
+ *                              dual TFE
+ */
+struct cam_tfe_hw_mgr_ctx {
+	struct list_head                list;
+	struct cam_isp_hw_mgr_ctx       common;
+
+	uint32_t                        ctx_index;
+	struct cam_tfe_hw_mgr          *hw_mgr;
+	uint32_t                        ctx_in_use;
+
+	struct cam_isp_hw_mgr_res       res_list_tpg;
+	struct list_head                res_list_tfe_csid;
+	struct list_head                res_list_tfe_in;
+	struct cam_isp_hw_mgr_res
+			res_list_tfe_out[CAM_TFE_HW_OUT_RES_MAX];
+
+	struct list_head                free_res_list;
+	struct cam_isp_hw_mgr_res       res_pool[CAM_TFE_HW_RES_POOL_MAX];
+
+	struct cam_isp_ctx_base_info    base[CAM_TFE_HW_NUM_MAX];
+	uint32_t                        num_base;
+	uint32_t                        cdm_handle;
+	struct cam_cdm_utils_ops       *cdm_ops;
+	struct cam_cdm_bl_request      *cdm_cmd;
+	struct completion               config_done_complete;
+
+	uint32_t                        sof_cnt[CAM_TFE_HW_NUM_MAX];
+	uint32_t                        epoch_cnt[CAM_TFE_HW_NUM_MAX];
+	uint32_t                        eof_cnt[CAM_TFE_HW_NUM_MAX];
+	atomic_t                        overflow_pending;
+	atomic_t                        cdm_done;
+	uint32_t                        is_rdi_only_context;
+	struct cam_cmd_buf_desc         reg_dump_buf_desc[
+						CAM_REG_DUMP_MAX_BUF_ENTRIES];
+	uint32_t                        num_reg_dump_buf;
+	uint64_t                        applied_req_id;
+	uint64_t                        last_dump_flush_req_id;
+	uint64_t                        last_dump_err_req_id;
+	bool                            init_done;
+	bool                            is_dual;
+	bool                            is_tpg;
+	uint32_t                        master_hw_idx;
+	uint32_t                        dual_tfe_irq_mismatch_cnt;
+};
+
+/**
+ * struct cam_tfe_hw_mgr - TFE HW Manager
+ *
+ * @mgr_common:            common data for all HW managers
+ * @tpg_devices:           tpg device instance array. This will be filled by
+ *                         HW manager during the initialization.
+ * @csid_devices:          csid device instances array. This will be filled by
+ *                         HW manager during the initialization.
+ * @tfe_devices:           TFE device instances array. This will be filled by
+ *                         HW layer during initialization
+ * @cdm_reg_map            commands for register dump
+ * @ctx_mutex:             mutex for the hw context pool
+ * @active_ctx_cnt         number of currently active contexts
+ * @free_ctx_list:         free hw context list
+ * @used_ctx_list:         used hw context list
+ * @ctx_pool:              context storage
+ * @tfe_csid_dev_caps      csid device capability stored per core
+ * @tfe_dev_caps           tfe device capability per core
+ * @workq                  work queue for TFE hw manager
+ * @debug_cfg              debug configuration
+ */
+struct cam_tfe_hw_mgr {
+	struct cam_isp_hw_mgr          mgr_common;
+	struct cam_hw_intf            *tpg_devices[CAM_TOP_TPG_HW_NUM_MAX];
+	struct cam_hw_intf            *csid_devices[CAM_TFE_CSID_HW_NUM_MAX];
+	struct cam_hw_intf            *tfe_devices[CAM_TFE_HW_NUM_MAX];
+	struct cam_soc_reg_map        *cdm_reg_map[CAM_TFE_HW_NUM_MAX];
+	struct mutex                   ctx_mutex;
+	atomic_t                       active_ctx_cnt;
+	struct list_head               free_ctx_list;
+	struct list_head               used_ctx_list;
+	struct cam_tfe_hw_mgr_ctx      ctx_pool[CAM_CTX_MAX];
+
+	struct cam_tfe_csid_hw_caps    tfe_csid_dev_caps[
+						CAM_TFE_CSID_HW_NUM_MAX];
+	struct cam_tfe_hw_get_hw_cap   tfe_dev_caps[CAM_TFE_HW_NUM_MAX];
+	struct cam_req_mgr_core_workq *workq;
+	struct cam_tfe_hw_mgr_debug    debug_cfg;
+};
+
+/**
+ * struct cam_tfe_hw_event_recovery_data - Payload for the recovery procedure
+ *
+ * @error_type:               Error type that causes the recovery
+ * @affected_core:            Array of the hardware cores that are affected
+ * @affected_ctx:             Array of the hardware contexts that are affected
+ * @no_of_context:            Actual number of affected contexts
+ *
+ */
+struct cam_tfe_hw_event_recovery_data {
+	uint32_t                   error_type;
+	uint32_t                   affected_core[CAM_TFE_HW_NUM_MAX];
+	struct cam_tfe_hw_mgr_ctx *affected_ctx[CAM_CTX_MAX];
+	uint32_t                   no_of_context;
+};
+
+/**
+ * cam_tfe_hw_mgr_init()
+ *
+ * @brief:              Initialize the TFE hardware manager. This is the
+ *                      entry function for the TFE HW manager.
+ *
+ * @hw_mgr_intf:        TFE hardware manager object returned
+ * @iommu_hdl:          Iommu handle to be returned
+ *
+ */
+int cam_tfe_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, int *iommu_hdl);
+
+#endif /* _CAM_TFE_HW_MGR_H_ */
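For orientation, here is a minimal sketch of how a context could be checked out of the pool declared above; the locking and list wiring are assumptions inferred from the fields, not code from this patch (needs <linux/list.h>, <linux/mutex.h>, <linux/atomic.h>):

static struct cam_tfe_hw_mgr_ctx *cam_tfe_hw_mgr_get_ctx(
	struct cam_tfe_hw_mgr *hw_mgr)       /* hypothetical helper */
{
	struct cam_tfe_hw_mgr_ctx *ctx = NULL;

	/* ctx_mutex guards the free/used context lists */
	mutex_lock(&hw_mgr->ctx_mutex);
	if (!list_empty(&hw_mgr->free_ctx_list)) {
		ctx = list_first_entry(&hw_mgr->free_ctx_list,
			struct cam_tfe_hw_mgr_ctx, list);
		list_del_init(&ctx->list);
		ctx->ctx_in_use = 1;
		list_add_tail(&ctx->list, &hw_mgr->used_ctx_list);
		atomic_inc(&hw_mgr->active_ctx_cnt);
	}
	mutex_unlock(&hw_mgr->ctx_mutex);

	return ctx;
}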

+ 4 - 2
drivers/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h

@@ -17,6 +17,8 @@
 #define CAM_IFE_RDI_NUM_MAX  4
 #define CAM_ISP_BW_CONFIG_V1 1
 #define CAM_ISP_BW_CONFIG_V2 2
+#define CAM_TFE_HW_NUM_MAX   3
+#define CAM_TFE_RDI_NUM_MAX  3
 
 /* Applicable vote paths for dual ife, based on no. of UAPI definitions */
 #define CAM_ISP_MAX_PER_PATH_VOTES 30
@@ -252,12 +254,12 @@ struct cam_isp_hw_cmd_args {
  *
  * @brief:              Initialization function for the ISP hardware manager
  *
- * @of_node:            Device node input
+ * @device_name_str:    Device name string
  * @hw_mgr:             Input/output structure for the ISP hardware manager
  *                          initialization
  * @iommu_hdl:          Iommu handle to be returned
  */
-int cam_isp_hw_mgr_init(struct device_node *of_node,
+int cam_isp_hw_mgr_init(const char    *device_name_str,
 	struct cam_hw_mgr_intf *hw_mgr, int *iommu_hdl);
 
 #endif /* __CAM_ISP_HW_MGR_INTF_H__ */
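The signature change above lets a single ISP device probe select the HW manager by name rather than by device node. A hedged sketch of the dispatch (the name strings and the IFE entry point are assumptions for illustration, not part of this hunk):

int cam_isp_hw_mgr_init(const char *device_name_str,
	struct cam_hw_mgr_intf *hw_mgr, int *iommu_hdl)
{
	/* route to the TFE or IFE hw manager based on the device name */
	if (strnstr(device_name_str, "tfe", strlen(device_name_str)))
		return cam_tfe_hw_mgr_init(hw_mgr, iommu_hdl);
	if (strnstr(device_name_str, "ife", strlen(device_name_str)))
		return cam_ife_hw_mgr_init(hw_mgr, iommu_hdl);

	return -EINVAL;
}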

+ 14 - 5
drivers/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h

@@ -38,11 +38,14 @@ struct cam_isp_timestamp {
 void cam_isp_hw_get_timestamp(struct cam_isp_timestamp *time_stamp);
 
 enum cam_isp_hw_type {
-	CAM_ISP_HW_TYPE_CSID        = 0,
-	CAM_ISP_HW_TYPE_ISPIF       = 1,
-	CAM_ISP_HW_TYPE_VFE         = 2,
-	CAM_ISP_HW_TYPE_IFE_CSID    = 3,
-	CAM_ISP_HW_TYPE_MAX         = 4,
+	CAM_ISP_HW_TYPE_CSID,
+	CAM_ISP_HW_TYPE_ISPIF,
+	CAM_ISP_HW_TYPE_VFE,
+	CAM_ISP_HW_TYPE_IFE_CSID,
+	CAM_ISP_HW_TYPE_TFE,
+	CAM_ISP_HW_TYPE_TFE_CSID,
+	CAM_ISP_HW_TYPE_TPG,
+	CAM_ISP_HW_TYPE_MAX,
 };
 
 enum cam_isp_hw_split_id {
@@ -74,6 +77,9 @@ enum cam_isp_resource_type {
 	CAM_ISP_RESOURCE_VFE_IN,
 	CAM_ISP_RESOURCE_VFE_OUT,
 	CAM_ISP_RESOURCE_VFE_BUS_RD,
+	CAM_ISP_RESOURCE_TPG,
+	CAM_ISP_RESOURCE_TFE_IN,
+	CAM_ISP_RESOURCE_TFE_OUT,
 	CAM_ISP_RESOURCE_MAX,
 };
 
@@ -91,6 +97,7 @@ enum cam_isp_hw_cmd_type {
 	CAM_ISP_HW_CMD_BW_UPDATE_V2,
 	CAM_ISP_HW_CMD_BW_CONTROL,
 	CAM_ISP_HW_CMD_STOP_BUS_ERR_IRQ,
+	CAM_ISP_HW_CMD_GET_REG_DUMP,
 	CAM_ISP_HW_CMD_UBWC_UPDATE,
 	CAM_ISP_HW_CMD_SOF_IRQ_DEBUG,
 	CAM_ISP_HW_CMD_SET_CAMIF_DEBUG,
@@ -102,6 +109,8 @@ enum cam_isp_hw_cmd_type {
 	CAM_ISP_HW_CMD_WM_CONFIG_UPDATE,
 	CAM_ISP_HW_CMD_CSID_QCFA_SUPPORTED,
 	CAM_ISP_HW_CMD_QUERY_REGSPACE_DATA,
+	CAM_ISP_HW_CMD_TPG_PHY_CLOCK_UPDATE,
+	CAM_ISP_HW_CMD_GET_IRQ_REGISTER_DUMP,
 	CAM_ISP_HW_CMD_MAX,
 };
 

+ 179 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/include/cam_tfe_csid_hw_intf.h

@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_TFE_CSID_HW_INTF_H_
+#define _CAM_TFE_CSID_HW_INTF_H_
+
+#include "cam_isp_hw.h"
+#include "cam_hw_intf.h"
+
+/* MAX TFE CSID instance */
+#define CAM_TFE_CSID_HW_NUM_MAX                        3
+#define CAM_TFE_CSID_RDI_MAX                           3
+
+/**
+ * enum cam_tfe_csid_path_res_id - Specify the CSID path resource
+ */
+enum cam_tfe_csid_path_res_id {
+	CAM_TFE_CSID_PATH_RES_RDI_0,
+	CAM_TFE_CSID_PATH_RES_RDI_1,
+	CAM_TFE_CSID_PATH_RES_RDI_2,
+	CAM_TFE_CSID_PATH_RES_IPP,
+	CAM_TFE_CSID_PATH_RES_MAX,
+};
+
+/**
+ * enum cam_tfe_csid_irq_reg
+ */
+enum cam_tfe_csid_irq_reg {
+	TFE_CSID_IRQ_REG_RDI0,
+	TFE_CSID_IRQ_REG_RDI1,
+	TFE_CSID_IRQ_REG_RDI2,
+	TFE_CSID_IRQ_REG_TOP,
+	TFE_CSID_IRQ_REG_RX,
+	TFE_CSID_IRQ_REG_IPP,
+	TFE_CSID_IRQ_REG_MAX,
+};
+
+
+/**
+ * struct cam_tfe_csid_hw_caps- get the CSID hw capability
+ * @num_rdis:       number of rdis supported by CSID HW device
+ * @num_pix:        number of pxl paths supported by CSID HW device
+ * @major_version : major version
+ * @minor_version:  minor version
+ * @version_incr:   version increment
+ *
+ */
+struct cam_tfe_csid_hw_caps {
+	uint32_t      num_rdis;
+	uint32_t      num_pix;
+	uint32_t      major_version;
+	uint32_t      minor_version;
+	uint32_t      version_incr;
+};
+
+/**
+ * struct cam_tfe_csid_hw_reserve_resource_args- hw reserve
+ * @res_type :    Resource type, i.e. PATH
+ * @res_id  :     Resource id to be reserved
+ * @in_port :     Input port resource info
+ * @out_port:     Output port resource info, used for RDI path only
+ * @sync_mode:    Sync mode
+ *                Sync mode could be master, slave or none
+ * @master_idx:   Master device index to be configured in the slave path.
+ *                For the master path this value is not required;
+ *                only the slave needs to configure the master index value
+ * @phy_sel:      Phy selection number if tpg is enabled from userspace
+ * @event_cb_prv: Context data
+ * @event_cb:     Callback function to hw mgr in case of hw events
+ * @node_res :    Reserved resource structure pointer
+ *
+ */
+struct cam_tfe_csid_hw_reserve_resource_args {
+	enum cam_isp_resource_type                res_type;
+	uint32_t                                  res_id;
+	struct cam_isp_tfe_in_port_info          *in_port;
+	struct cam_isp_tfe_out_port_info         *out_port;
+	enum cam_isp_hw_sync_mode                 sync_mode;
+	uint32_t                                  master_idx;
+	uint32_t                                  phy_sel;
+	void                                     *event_cb_prv;
+	cam_hw_mgr_event_cb_func                  event_cb;
+	struct cam_isp_resource_node             *node_res;
+};
+
+/**
+ *  enum cam_tfe_csid_halt_cmd - Specify the halt command type
+ */
+enum cam_tfe_csid_halt_cmd {
+	CAM_TFE_CSID_HALT_AT_FRAME_BOUNDARY,
+	CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY,
+	CAM_TFE_CSID_HALT_IMMEDIATELY,
+	CAM_TFE_CSID_HALT_MAX,
+};
+
+/**
+ * struct cam_tfe_csid_hw_stop_args - stop all resources
+ * @stop_cmd : Applicable only for PATH resources.
+ *             If the stop command is set to halt immediately, the driver
+ *             stops the path immediately and the manager must reset the
+ *             path afterwards. If it is set to halt at frame boundary,
+ *             the driver programs a halt at the frame boundary and waits
+ *             for the frame boundary.
+ * @num_res :  Number of resources to be stopped
+ * @node_res : Resource pointer array (i.e. CID or CSID)
+ *
+ */
+struct cam_tfe_csid_hw_stop_args {
+	enum cam_tfe_csid_halt_cmd                stop_cmd;
+	uint32_t                                  num_res;
+	struct cam_isp_resource_node            **node_res;
+};
+
+/**
+ * enum cam_tfe_csid_reset_type - Specify the reset type
+ */
+enum cam_tfe_csid_reset_type {
+	CAM_TFE_CSID_RESET_GLOBAL,
+	CAM_TFE_CSID_RESET_PATH,
+	CAM_TFE_CSID_RESET_MAX,
+};
+
+/**
+ * struct cam_tfe_csid_reset_cfg_args - CSID reset configuration
+ * @reset_type :                   Global reset or path reset
+ * @node_res :                     Resource to be reset
+ *
+ */
+struct cam_tfe_csid_reset_cfg_args {
+	enum cam_tfe_csid_reset_type   reset_type;
+	struct cam_isp_resource_node  *node_res;
+};
+
+/**
+ * struct cam_tfe_csid_get_time_stamp_args - time stamp capture arguments
+ * @node_res :       Resource to get the time stamp
+ * @time_stamp_val : Captured time stamp
+ * @boot_timestamp : Boot time stamp
+ */
+struct cam_tfe_csid_get_time_stamp_args {
+	struct cam_isp_resource_node      *node_res;
+	uint64_t                           time_stamp_val;
+	uint64_t                           boot_timestamp;
+};
+
+/**
+ * enum cam_tfe_csid_cmd_type - Specify the csid command
+ */
+enum cam_tfe_csid_cmd_type {
+	CAM_TFE_CSID_CMD_GET_TIME_STAMP,
+	CAM_TFE_CSID_SET_CSID_DEBUG,
+	CAM_TFE_CSID_SOF_IRQ_DEBUG,
+	CAM_TFE_CSID_CMD_GET_REG_DUMP,
+	CAM_TFE_CSID_CMD_MAX,
+};
+
+/**
+ * cam_tfe_csid_hw_init()
+ *
+ * @brief:               Initialize function for the CSID hardware
+ *
+ * @tfe_csid_hw:         CSID hardware instance returned
+ * @hw_idx:              CSID hardware instance id
+ */
+int cam_tfe_csid_hw_init(struct cam_hw_intf **tfe_csid_hw,
+	uint32_t hw_idx);
+
+/*
+ * struct cam_tfe_csid_clock_update_args:
+ *
+ * @clk_rate:                Clock rate requested
+ */
+struct cam_tfe_csid_clock_update_args {
+	uint64_t                           clk_rate;
+};
+
+
+#endif /* _CAM_TFE_CSID_HW_INTF_H_ */
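To show how the reserve arguments above fit together, here is a hedged sketch of acquiring an RDI0 path through the generic cam_hw_intf ops; the res_type value, the callback, and the surrounding variables are illustrative assumptions, not the hw manager code:

struct cam_tfe_csid_hw_reserve_resource_args csid_acquire = {0};
struct cam_hw_intf *csid_hw_intf = tfe_hw_mgr->csid_devices[0];
int rc;

csid_acquire.res_type     = CAM_ISP_RESOURCE_PIX_PATH; /* assumed */
csid_acquire.res_id       = CAM_TFE_CSID_PATH_RES_RDI_0;
csid_acquire.in_port      = in_port;   /* from the acquire payload */
csid_acquire.out_port     = out_port;  /* RDI paths take an out port */
csid_acquire.sync_mode    = CAM_ISP_HW_SYNC_NONE;
csid_acquire.event_cb     = tfe_hw_mgr_event_cb;  /* hypothetical */
csid_acquire.event_cb_prv = tfe_ctx;

rc = csid_hw_intf->hw_ops.reserve(csid_hw_intf->hw_priv,
		&csid_acquire, sizeof(csid_acquire));
if (!rc)
	csid_res->hw_res[0] = csid_acquire.node_res;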

+ 253 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/include/cam_tfe_hw_intf.h

@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_TFE_HW_INTF_H_
+#define _CAM_TFE_HW_INTF_H_
+
+#include "cam_isp_hw.h"
+#include "cam_cpas_api.h"
+
+#define CAM_TFE_HW_NUM_MAX            3
+#define TFE_CORE_BASE_IDX             0
+
+
+enum cam_isp_hw_tfe_in {
+	CAM_ISP_HW_TFE_IN_CAMIF       = 0,
+	CAM_ISP_HW_TFE_IN_RDI0        = 1,
+	CAM_ISP_HW_TFE_IN_RDI1        = 2,
+	CAM_ISP_HW_TFE_IN_RDI2        = 3,
+	CAM_ISP_HW_TFE_IN_MAX,
+};
+
+enum cam_isp_hw_tfe_core {
+	CAM_ISP_HW_TFE_CORE_0,
+	CAM_ISP_HW_TFE_CORE_1,
+	CAM_ISP_HW_TFE_CORE_2,
+	CAM_ISP_HW_TFE_CORE_MAX,
+};
+
+enum cam_tfe_hw_irq_status {
+	CAM_TFE_IRQ_STATUS_SUCCESS,
+	CAM_TFE_IRQ_STATUS_ERR,
+	CAM_TFE_IRQ_STATUS_OVERFLOW,
+	CAM_TFE_IRQ_STATUS_P2I_ERROR,
+	CAM_TFE_IRQ_STATUS_VIOLATION,
+	CAM_TFE_IRQ_STATUS_MAX,
+};
+
+enum cam_tfe_hw_irq_regs {
+	CAM_TFE_IRQ_CAMIF_REG_STATUS0           = 0,
+	CAM_TFE_IRQ_CAMIF_REG_STATUS1           = 1,
+	CAM_TFE_IRQ_CAMIF_REG_STATUS2           = 2,
+	CAM_TFE_IRQ_REGISTERS_MAX,
+};
+
+enum cam_tfe_bus_irq_regs {
+	CAM_TFE_IRQ_BUS_REG_STATUS0             = 0,
+	CAM_TFE_IRQ_BUS_REG_STATUS1             = 1,
+	CAM_TFE_BUS_IRQ_REGISTERS_MAX,
+};
+
+enum cam_tfe_reset_type {
+	CAM_TFE_HW_RESET_HW_AND_REG,
+	CAM_TFE_HW_RESET_HW,
+	CAM_TFE_HW_RESET_MAX,
+};
+
+enum cam_tfe_bw_control_action {
+	CAM_TFE_BW_CONTROL_EXCLUDE       = 0,
+	CAM_TFE_BW_CONTROL_INCLUDE       = 1
+};
+
+/*
+ * struct cam_tfe_hw_get_hw_cap:
+ *
+ * @max_width:               Max width supported by HW
+ * @max_height:              Max height supported by HW
+ * @max_pixel_num:           Max Pixel channels available
+ * @max_rdi_num:             Max Raw channels available
+ */
+struct cam_tfe_hw_get_hw_cap {
+	uint32_t                max_width;
+	uint32_t                max_height;
+	uint32_t                max_pixel_num;
+	uint32_t                max_rdi_num;
+};
+
+/*
+ * struct cam_tfe_hw_tfe_out_acquire_args:
+ *
+ * @rsrc_node:               Pointer to Resource Node object, filled if acquire
+ *                           is successful
+ * @out_port_info:           Output Port details to acquire
+ * @unique_id:               Unique Identity of Context to associate with this
+ *                           resource. Used for composite grouping of multiple
+ *                           resources in the same context
+ * @is_dual:                 Dual TFE or not
+ * @split_id:                In case of Dual TFE, this is Left or Right.
+ *                           (Default is Left if Single TFE)
+ * @is_master:               In case of Dual TFE, this is Master or Slave.
+ *                           (Default is Master in case of Single TFE)
+ * @cdm_ops:                 CDM operations
+ * @ctx:                     Context data
+ */
+struct cam_tfe_hw_tfe_out_acquire_args {
+	struct cam_isp_resource_node      *rsrc_node;
+	struct cam_isp_tfe_out_port_info  *out_port_info;
+	uint32_t                           unique_id;
+	uint32_t                           is_dual;
+	enum cam_isp_hw_split_id           split_id;
+	uint32_t                           is_master;
+	struct cam_cdm_utils_ops          *cdm_ops;
+	void                              *ctx;
+};
+
+/*
+ * struct cam_tfe_hw_tfe_in_acquire_args:
+ *
+ * @rsrc_node:               Pointer to Resource Node object, filled if acquire
+ *                           is successful
+ * @res_id:                  Resource ID of resource to acquire if specific,
+ *                           else CAM_ISP_HW_TFE_IN_MAX
+ * @cdm_ops:                 CDM operations
+ * @sync_mode:               In case of Dual TFE, this is Master or Slave.
+ *                           (Default is Master in case of Single TFE)
+ * @in_port:                 Input port details to acquire
+ * @camif_pd_enable:         CAMIF PD enable or disable
+ * @dual_tfe_sync_sel_idx:   Dual TFE master hardware index
+ */
+struct cam_tfe_hw_tfe_in_acquire_args {
+	struct cam_isp_resource_node         *rsrc_node;
+	struct cam_isp_tfe_in_port_info      *in_port;
+	uint32_t                              res_id;
+	void                                 *cdm_ops;
+	enum cam_isp_hw_sync_mode             sync_mode;
+	bool                                  camif_pd_enable;
+	uint32_t                              dual_tfe_sync_sel_idx;
+};
+
+/*
+ * struct cam_tfe_acquire_args:
+ *
+ * @rsrc_type:               Type of Resource (OUT/IN) to acquire
+ * @tasklet:                 Tasklet to associate with this resource. This is
+ *                           used to schedule bottom of IRQ events associated
+ *                           with this resource.
+ * @priv:                    Context data
+ * @event_cb:                Callback function to hw mgr in case of hw events
+ * @tfe_out:                 Acquire args for TFE_OUT
+ * @tfe_in:                  Acquire args for TFE_IN
+ */
+struct cam_tfe_acquire_args {
+	enum cam_isp_resource_type           rsrc_type;
+	void                                *tasklet;
+	void                                *priv;
+	cam_hw_mgr_event_cb_func             event_cb;
+	union {
+		struct cam_tfe_hw_tfe_out_acquire_args  tfe_out;
+		struct cam_tfe_hw_tfe_in_acquire_args   tfe_in;
+	};
+};
+
+/*
+ * struct cam_tfe_clock_update_args:
+ *
+ * @node_res:                Resource to get the time stamp
+ * @clk_rate:                Clock rate requested
+ */
+struct cam_tfe_clock_update_args {
+	struct cam_isp_resource_node      *node_res;
+	uint64_t                           clk_rate;
+};
+
+/*
+ * struct cam_tfe_bw_update_args:
+ *
+ * @node_res:             Resource to get the BW
+ * @isp_vote:             Vote info according to usage data (left/right/rdi)
+ */
+struct cam_tfe_bw_update_args {
+	struct cam_isp_resource_node      *node_res;
+	struct cam_axi_vote                isp_vote;
+};
+
+/*
+ * struct cam_tfe_dual_update_args:
+ *
+ * @Brief:        update the dual isp striping configuration.
+ *
+ * @split_id:         split id to inform left or right
+ * @res:              resource node
+ * @stripe_config:    stripe configuration for the port
+ *
+ */
+struct cam_tfe_dual_update_args {
+	enum cam_isp_hw_split_id                  split_id;
+	struct cam_isp_resource_node             *res;
+	struct cam_isp_tfe_dual_stripe_config    *stripe_config;
+};
+
+/*
+ * struct cam_tfe_bw_control_args:
+ *
+ * @node_res:             Resource to get the time stamp
+ * @action:               Bandwidth control action
+ */
+struct cam_tfe_bw_control_args {
+	struct cam_isp_resource_node      *node_res;
+	enum cam_tfe_bw_control_action     action;
+};
+
+/*
+ * struct cam_tfe_irq_evt_payload:
+ *
+ * @Brief:                   This structure is used to save payload for IRQ
+ *                           related to TFE_TOP resources
+ *
+ * @list:                    list_head node for the payload
+ * @core_index:              Index of TFE HW that generated this IRQ event
+ * @core_info:               Private data of handler in bottom half context
+ * @evt_id:                  IRQ event
+ * @irq_reg_val:             IRQ and Error register values, read when IRQ was
+ *                           handled
+ * @bus_irq_val:             Bus IRQ register status
+ * @debug_status_0:          Value of debug status_0 register at time of IRQ
+ * @ccif_violation_status:   CCIF violation status
+ * @overflow_status:         Bus overflow status
+ * @image_size_violation_status: Image size violation status
+ *
+ * @error_type:              Identify different errors
+ * @enable_reg_dump:         enable register dump on error
+ * @ts:                      Timestamp
+ */
+struct cam_tfe_irq_evt_payload {
+	struct list_head           list;
+	uint32_t                   core_index;
+	void                      *core_info;
+	uint32_t                   evt_id;
+	uint32_t                   irq_reg_val[CAM_TFE_IRQ_REGISTERS_MAX];
+	uint32_t                   bus_irq_val[CAM_TFE_BUS_IRQ_REGISTERS_MAX];
+	uint32_t                   ccif_violation_status;
+	uint32_t                   overflow_status;
+	uint32_t                   image_size_violation_status;
+	uint32_t                   debug_status_0;
+
+	uint32_t                   error_type;
+	bool                       enable_reg_dump;
+	struct cam_isp_timestamp   ts;
+};
+
+/*
+ * cam_tfe_hw_init()
+ *
+ * @Brief:                  Initialize TFE HW device
+ *
+ * @tfe_hw:                 tfe_hw interface to fill in and return on
+ *                          successful initialization
+ * @hw_idx:                 Index of TFE HW
+ */
+int cam_tfe_hw_init(struct cam_hw_intf **tfe_hw, uint32_t hw_idx);
+
+#endif /* _CAM_TFE_HW_INTF_H_ */
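Since cam_tfe_acquire_args carries a union, only the member matching rsrc_type is read by the driver. A hedged sketch of acquiring the CAMIF input of one TFE core (callback and payload variables are placeholders):

struct cam_tfe_acquire_args tfe_acquire = {0};
struct cam_hw_intf *tfe_hw_intf = tfe_hw_mgr->tfe_devices[0];
int rc;

tfe_acquire.rsrc_type = CAM_ISP_RESOURCE_TFE_IN;
tfe_acquire.event_cb  = tfe_hw_mgr_event_cb;   /* hypothetical */
tfe_acquire.priv      = tfe_ctx;
tfe_acquire.tfe_in.res_id          = CAM_ISP_HW_TFE_IN_CAMIF;
tfe_acquire.tfe_in.in_port         = in_port;
tfe_acquire.tfe_in.sync_mode       = CAM_ISP_HW_SYNC_NONE;
tfe_acquire.tfe_in.camif_pd_enable = false;

rc = tfe_hw_intf->hw_ops.reserve(tfe_hw_intf->hw_priv,
		&tfe_acquire, sizeof(tfe_acquire));
if (!rc)
	in_res->hw_res[0] = tfe_acquire.tfe_in.rsrc_node;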

+ 74 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/include/cam_top_tpg_hw_intf.h

@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_TOP_TPG_HW_INTF_H_
+#define _CAM_TOP_TPG_HW_INTF_H_
+
+#include "cam_isp_hw.h"
+#include "cam_hw_intf.h"
+
+/* Max top tpg instance */
+#define CAM_TOP_TPG_HW_NUM_MAX                        2
+/* Max supported number of DT for TPG */
+#define CAM_TOP_TPG_MAX_SUPPORTED_DT                  4
+
+/**
+ * enum cam_top_tpg_id - top tpg hw instance id
+ */
+enum cam_top_tpg_id {
+	CAM_TOP_TPG_ID_0,
+	CAM_TOP_TPG_ID_1,
+	CAM_TFE_TPG_ID_MAX,
+};
+
+/**
+ * struct cam_top_tpg_hw_caps- Get the top tpg hw capability
+ * @major_version : Major version
+ * @minor_version:  Minor version
+ * @version_incr:   Version increment
+ *
+ */
+struct cam_top_tpg_hw_caps {
+	uint32_t      major_version;
+	uint32_t      minor_version;
+	uint32_t      version_incr;
+};
+
+/**
+ * struct cam_top_tpg_hw_reserve_resource_args - hw reserve
+ * @num_inport:   number of in ports.
+ *                TPG supports 4 dt types; each different dt comes in on a
+ *                different in port.
+ * @in_port :     Input port resource info structure pointer
+ * @node_res :    Reserved resource structure pointer
+ *
+ */
+struct cam_top_tpg_hw_reserve_resource_args {
+	uint32_t                          num_inport;
+	struct cam_isp_tfe_in_port_info  *in_port[CAM_TOP_TPG_MAX_SUPPORTED_DT];
+	struct cam_isp_resource_node     *node_res;
+};
+
+/**
+ * cam_top_tpg_hw_init()
+ *
+ * @brief:               Initialize function for the tpg hardware
+ *
+ * @top_tpg_hw:          TPG hardware instance returned
+ * @hw_idx:              TPG hardware instance id
+ */
+int cam_top_tpg_hw_init(struct cam_hw_intf **top_tpg_hw,
+	uint32_t hw_idx);
+
+/*
+ * struct cam_top_tpg_clock_update_args:
+ *
+ * @clk_rate:                Phy clock rate requested
+ */
+struct cam_top_tpg_clock_update_args {
+	uint64_t                           clk_rate;
+};
+
+#endif /* _CAM_TOP_TPG_HW_INTF_H_ */

+ 15 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/Makefile

@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_utils
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_core
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_cdm/
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_cpas/include
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_isp/isp_hw_mgr/include
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_isp/isp_hw_mgr/hw_utils/include
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_isp/isp_hw_mgr/hw_utils/irq_controller
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_isp/isp_hw_mgr/isp_hw/include
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_smmu/
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_req_mgr/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_tfe_csid_dev.o cam_tfe_csid_soc.o cam_tfe_csid_core.o
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_tfe_csid530.o

+ 51 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid530.c

@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+
+#include <linux/module.h>
+#include "cam_tfe_csid_core.h"
+#include "cam_tfe_csid530.h"
+#include "cam_tfe_csid_dev.h"
+
+#define CAM_TFE_CSID_DRV_NAME                    "csid_530"
+#define CAM_TFE_CSID_VERSION_V530                 0x50030000
+
+static struct cam_tfe_csid_hw_info cam_tfe_csid530_hw_info = {
+	.csid_reg = &cam_tfe_csid_530_reg_offset,
+	.hw_dts_version = CAM_TFE_CSID_VERSION_V530,
+};
+
+static const struct of_device_id cam_tfe_csid530_dt_match[] = {
+	{
+		.compatible = "qcom,csid530",
+		.data = &cam_tfe_csid530_hw_info,
+	},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, cam_tfe_csid530_dt_match);
+
+static struct platform_driver cam_tfe_csid530_driver = {
+	.probe = cam_tfe_csid_probe,
+	.remove = cam_tfe_csid_remove,
+	.driver = {
+		.name = CAM_TFE_CSID_DRV_NAME,
+		.of_match_table = cam_tfe_csid530_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+int cam_tfe_csid530_init_module(void)
+{
+	return platform_driver_register(&cam_tfe_csid530_driver);
+}
+
+void cam_tfe_csid530_exit_module(void)
+{
+	platform_driver_unregister(&cam_tfe_csid530_driver);
+}
+
+MODULE_DESCRIPTION("CAM TFE_CSID530 driver");
+MODULE_LICENSE("GPL v2");

+ 225 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid530.h

@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_TFE_CSID_530_H_
+#define _CAM_TFE_CSID_530_H_
+
+#include "cam_tfe_csid_core.h"
+
+static struct cam_tfe_csid_pxl_reg_offset  cam_tfe_csid_530_ipp_reg_offset = {
+	.csid_pxl_irq_status_addr            = 0x30,
+	.csid_pxl_irq_mask_addr              = 0x34,
+	.csid_pxl_irq_clear_addr             = 0x38,
+	.csid_pxl_irq_set_addr               = 0x3c,
+
+	.csid_pxl_cfg0_addr                  = 0x200,
+	.csid_pxl_cfg1_addr                  = 0x204,
+	.csid_pxl_ctrl_addr                  = 0x208,
+	.csid_pxl_hcrop_addr                 = 0x21c,
+	.csid_pxl_vcrop_addr                 = 0x220,
+	.csid_pxl_rst_strobes_addr           = 0x240,
+	.csid_pxl_status_addr                = 0x254,
+	.csid_pxl_misr_val_addr              = 0x258,
+	.csid_pxl_timestamp_curr0_sof_addr   = 0x290,
+	.csid_pxl_timestamp_curr1_sof_addr   = 0x294,
+	.csid_pxl_timestamp_perv0_sof_addr   = 0x298,
+	.csid_pxl_timestamp_perv1_sof_addr   = 0x29c,
+	.csid_pxl_timestamp_curr0_eof_addr   = 0x2a0,
+	.csid_pxl_timestamp_curr1_eof_addr   = 0x2a4,
+	.csid_pxl_timestamp_perv0_eof_addr   = 0x2a8,
+	.csid_pxl_timestamp_perv1_eof_addr   = 0x2ac,
+	.csid_pxl_err_recovery_cfg0_addr     = 0x2d0,
+	.csid_pxl_err_recovery_cfg1_addr     = 0x2d4,
+	.csid_pxl_err_recovery_cfg2_addr     = 0x2d8,
+	/* configurations */
+	.pix_store_en_shift_val              = 7,
+	.early_eof_en_shift_val              = 29,
+	.halt_master_sel_shift               = 4,
+	.halt_mode_shift                     = 2,
+	.halt_master_sel_master_val          = 3,
+	.halt_master_sel_slave_val           = 0,
+};
+
+static struct cam_tfe_csid_rdi_reg_offset cam_tfe_csid_530_rdi_0_reg_offset = {
+	.csid_rdi_irq_status_addr                 = 0x40,
+	.csid_rdi_irq_mask_addr                   = 0x44,
+	.csid_rdi_irq_clear_addr                  = 0x48,
+	.csid_rdi_irq_set_addr                    = 0x4c,
+
+	.csid_rdi_cfg0_addr                       = 0x300,
+	.csid_rdi_cfg1_addr                       = 0x304,
+	.csid_rdi_ctrl_addr                       = 0x308,
+	.csid_rdi_rst_strobes_addr                = 0x340,
+	.csid_rdi_status_addr                     = 0x350,
+	.csid_rdi_misr_val0_addr                  = 0x354,
+	.csid_rdi_misr_val1_addr                  = 0x358,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x390,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x394,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x398,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x39c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x3a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x3a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x3a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x3ac,
+	.csid_rdi_err_recovery_cfg0_addr          = 0x3b0,
+	.csid_rdi_err_recovery_cfg1_addr          = 0x3b4,
+	.csid_rdi_err_recovery_cfg2_addr          = 0x3b8,
+	.csid_rdi_byte_cntr_ping_addr             = 0x3e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x3e4,
+};
+
+static struct cam_tfe_csid_rdi_reg_offset cam_tfe_csid_530_rdi_1_reg_offset = {
+	.csid_rdi_irq_status_addr                 = 0x50,
+	.csid_rdi_irq_mask_addr                   = 0x54,
+	.csid_rdi_irq_clear_addr                  = 0x58,
+	.csid_rdi_irq_set_addr                    = 0x5c,
+
+	.csid_rdi_cfg0_addr                       = 0x400,
+	.csid_rdi_cfg1_addr                       = 0x404,
+	.csid_rdi_ctrl_addr                       = 0x408,
+	.csid_rdi_rst_strobes_addr                = 0x440,
+	.csid_rdi_status_addr                     = 0x450,
+	.csid_rdi_misr_val0_addr                  = 0x454,
+	.csid_rdi_misr_val1_addr                  = 0x458,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x490,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x494,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x498,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x49c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x4a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x4a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x4a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x4ac,
+	.csid_rdi_err_recovery_cfg0_addr          = 0x4b0,
+	.csid_rdi_err_recovery_cfg1_addr          = 0x4b4,
+	.csid_rdi_err_recovery_cfg2_addr          = 0x4b8,
+	.csid_rdi_byte_cntr_ping_addr             = 0x4e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x4e4,
+};
+
+static struct cam_tfe_csid_rdi_reg_offset cam_tfe_csid_530_rdi_2_reg_offset = {
+	.csid_rdi_irq_status_addr                 = 0x60,
+	.csid_rdi_irq_mask_addr                   = 0x64,
+	.csid_rdi_irq_clear_addr                  = 0x68,
+	.csid_rdi_irq_set_addr                    = 0x6c,
+
+	.csid_rdi_cfg0_addr                       = 0x500,
+	.csid_rdi_cfg1_addr                       = 0x504,
+	.csid_rdi_ctrl_addr                       = 0x508,
+	.csid_rdi_rst_strobes_addr                = 0x540,
+	.csid_rdi_status_addr                     = 0x550,
+	.csid_rdi_misr_val0_addr                  = 0x554,
+	.csid_rdi_misr_val1_addr                  = 0x558,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x590,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x594,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x598,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x59c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x5a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x5a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x5a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x5ac,
+	.csid_rdi_err_recovery_cfg0_addr          = 0x5b0,
+	.csid_rdi_err_recovery_cfg1_addr          = 0x5b4,
+	.csid_rdi_err_recovery_cfg2_addr          = 0x5b8,
+	.csid_rdi_byte_cntr_ping_addr             = 0x5e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x5e4,
+};
+
+static struct cam_tfe_csid_csi2_rx_reg_offset
+	cam_tfe_csid_530_csi2_reg_offset = {
+	.csid_csi2_rx_irq_status_addr                 = 0x20,
+	.csid_csi2_rx_irq_mask_addr                   = 0x24,
+	.csid_csi2_rx_irq_clear_addr                  = 0x28,
+	.csid_csi2_rx_irq_set_addr                    = 0x2c,
+
+	/*CSI2 rx control */
+	.csid_csi2_rx_cfg0_addr                       = 0x100,
+	.csid_csi2_rx_cfg1_addr                       = 0x104,
+	.csid_csi2_rx_capture_ctrl_addr               = 0x108,
+	.csid_csi2_rx_rst_strobes_addr                = 0x110,
+	.csid_csi2_rx_cap_unmap_long_pkt_hdr_0_addr   = 0x120,
+	.csid_csi2_rx_cap_unmap_long_pkt_hdr_1_addr   = 0x124,
+	.csid_csi2_rx_captured_short_pkt_0_addr       = 0x128,
+	.csid_csi2_rx_captured_short_pkt_1_addr       = 0x12c,
+	.csid_csi2_rx_captured_long_pkt_0_addr        = 0x130,
+	.csid_csi2_rx_captured_long_pkt_1_addr        = 0x134,
+	.csid_csi2_rx_captured_long_pkt_ftr_addr      = 0x138,
+	.csid_csi2_rx_captured_cphy_pkt_hdr_addr      = 0x13c,
+	.csid_csi2_rx_total_pkts_rcvd_addr            = 0x160,
+	.csid_csi2_rx_stats_ecc_addr                  = 0x164,
+	.csid_csi2_rx_total_crc_err_addr              = 0x168,
+
+	.csi2_rst_srb_all                             = 0x3FFF,
+	.csi2_rst_done_shift_val                      = 27,
+	.csi2_irq_mask_all                            = 0xFFFFFFF,
+	.csi2_misr_enable_shift_val                   = 6,
+	.csi2_capture_long_pkt_en_shift               = 0,
+	.csi2_capture_short_pkt_en_shift              = 1,
+	.csi2_capture_cphy_pkt_en_shift               = 2,
+	.csi2_capture_long_pkt_dt_shift               = 4,
+	.csi2_capture_long_pkt_vc_shift               = 10,
+	.csi2_capture_short_pkt_vc_shift              = 12,
+	.csi2_capture_cphy_pkt_dt_shift               = 14,
+	.csi2_capture_cphy_pkt_vc_shift               = 20,
+	.csi2_rx_phy_num_mask                         = 0x3,
+	.csi2_rx_long_pkt_hdr_rst_stb_shift           = 0x1,
+	.csi2_rx_short_pkt_hdr_rst_stb_shift          = 0x2,
+};
+
+static struct cam_tfe_csid_common_reg_offset
+	cam_tfe_csid_530_cmn_reg_offset = {
+	.csid_hw_version_addr                         = 0x0,
+	.csid_cfg0_addr                               = 0x4,
+	.csid_ctrl_addr                               = 0x8,
+	.csid_rst_strobes_addr                        = 0x10,
+
+	.csid_test_bus_ctrl_addr                      = 0x14,
+	.csid_top_irq_status_addr                     = 0x70,
+	.csid_top_irq_mask_addr                       = 0x74,
+	.csid_top_irq_clear_addr                      = 0x78,
+	.csid_top_irq_set_addr                        = 0x7c,
+	.csid_irq_cmd_addr                            = 0x80,
+
+	/*configurations */
+	.major_version                                = 5,
+	.minor_version                                = 3,
+	.version_incr                                 = 0,
+	.num_rdis                                     = 3,
+	.num_pix                                      = 1,
+	.csid_reg_rst_stb                             = 1,
+	.csid_rst_stb                                 = 0x1e,
+	.csid_rst_stb_sw_all                          = 0x1f,
+	.ipp_path_rst_stb_all                         = 0x17,
+	.rdi_path_rst_stb_all                         = 0x97,
+	.path_rst_done_shift_val                      = 1,
+	.path_en_shift_val                            = 31,
+	.dt_id_shift_val                              = 27,
+	.vc_shift_val                                 = 22,
+	.dt_shift_val                                 = 16,
+	.fmt_shift_val                                = 12,
+	.plain_fmt_shit_val                           = 10,
+	.crop_v_en_shift_val                          = 6,
+	.crop_h_en_shift_val                          = 5,
+	.crop_shift                                   = 16,
+	.ipp_irq_mask_all                             = 0x3FFFF,
+	.rdi_irq_mask_all                             = 0x3FFFF,
+	.top_tfe2_pix_pipe_fuse_reg                   = 0xFE4,
+	.top_tfe2_fuse_reg                            = 0xFE8,
+};
+
+static struct cam_tfe_csid_reg_offset cam_tfe_csid_530_reg_offset = {
+	.cmn_reg          = &cam_tfe_csid_530_cmn_reg_offset,
+	.csi2_reg         = &cam_tfe_csid_530_csi2_reg_offset,
+	.ipp_reg          = &cam_tfe_csid_530_ipp_reg_offset,
+	.rdi_reg = {
+		&cam_tfe_csid_530_rdi_0_reg_offset,
+		&cam_tfe_csid_530_rdi_1_reg_offset,
+		&cam_tfe_csid_530_rdi_2_reg_offset,
+		},
+};
+
+int cam_tfe_csid530_init_module(void);
+void cam_tfe_csid530_exit_module(void);
+
+#endif /*_CAM_TFE_CSID_530_H_ */

+ 2824 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_core.c

@@ -0,0 +1,2824 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/iopoll.h>
+#include <linux/slab.h>
+#include <media/cam_tfe.h>
+#include <media/cam_defs.h>
+
+#include "cam_tfe_csid_core.h"
+#include "cam_isp_hw.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+#include "cam_debug_util.h"
+#include "cam_cpas_api.h"
+#include "cam_isp_hw_mgr_intf.h"
+
+/* Timeout value in msec */
+#define TFE_CSID_TIMEOUT                               1000
+
+/* Timeout values in usec */
+#define CAM_TFE_CSID_TIMEOUT_SLEEP_US                  1000
+#define CAM_TFE_CSID_TIMEOUT_ALL_US                    100000
+
+/*
+ * Constant Factors needed to change QTimer ticks to nanoseconds
+ * QTimer Freq = 19.2 MHz
+ * Time(us) = ticks/19.2
+ * Time(ns) = ticks/19.2 * 1000
+ */
+#define CAM_TFE_CSID_QTIMER_MUL_FACTOR                 10000
+#define CAM_TFE_CSID_QTIMER_DIV_FACTOR                 192
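/*
 * Editorial sketch, not part of this patch: the two factors above encode
 * ns = ticks / 19.2MHz * 1e9 = ticks * 10000 / 192. Using the 64-bit
 * helper from <linux/math64.h>, the conversion could be written as:
 */
static inline uint64_t cam_tfe_csid_qtimer_to_ns(uint64_t ticks) /* hypothetical */
{
	return mul_u64_u32_div(ticks, CAM_TFE_CSID_QTIMER_MUL_FACTOR,
		CAM_TFE_CSID_QTIMER_DIV_FACTOR);
}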
+
+/* Max number of SOF IRQs triggered in case of SOF freeze */
+#define CAM_TFE_CSID_IRQ_SOF_DEBUG_CNT_MAX 12
+
+/* Max CSI Rx irq error count threshold value */
+#define CAM_TFE_CSID_MAX_IRQ_ERROR_COUNT               5
+
+static int cam_tfe_csid_is_ipp_format_supported(
+	uint32_t in_format)
+{
+	int rc = -EINVAL;
+
+	switch (in_format) {
+	case CAM_FORMAT_MIPI_RAW_6:
+	case CAM_FORMAT_MIPI_RAW_8:
+	case CAM_FORMAT_MIPI_RAW_10:
+	case CAM_FORMAT_MIPI_RAW_12:
+		rc = 0;
+		break;
+	default:
+		break;
+	}
+	return rc;
+}
+
+static int cam_tfe_csid_get_format_rdi(
+	uint32_t in_format, uint32_t out_format,
+	uint32_t *decode_fmt, uint32_t *plain_fmt)
+{
+	int rc = 0;
+
+	switch (in_format) {
+	case CAM_FORMAT_MIPI_RAW_6:
+		switch (out_format) {
+		case CAM_FORMAT_MIPI_RAW_6:
+			*decode_fmt = 0xf;
+			break;
+		case CAM_FORMAT_PLAIN8:
+			*decode_fmt = 0x0;
+			*plain_fmt = 0x0;
+			break;
+		default:
+			rc = -EINVAL;
+			break;
+		}
+		break;
+	case CAM_FORMAT_MIPI_RAW_8:
+		switch (out_format) {
+		case CAM_FORMAT_MIPI_RAW_8:
+		case CAM_FORMAT_PLAIN128:
+			*decode_fmt = 0xf;
+			break;
+		case CAM_FORMAT_PLAIN8:
+			*decode_fmt = 0x1;
+			*plain_fmt = 0x0;
+			break;
+		default:
+			rc = -EINVAL;
+			break;
+		}
+		break;
+	case CAM_FORMAT_MIPI_RAW_10:
+		switch (out_format) {
+		case CAM_FORMAT_MIPI_RAW_10:
+		case CAM_FORMAT_PLAIN128:
+			*decode_fmt = 0xf;
+			break;
+		case CAM_FORMAT_PLAIN16_10:
+			*decode_fmt = 0x2;
+			*plain_fmt = 0x1;
+			break;
+		default:
+			rc = -EINVAL;
+			break;
+		}
+		break;
+	case CAM_FORMAT_MIPI_RAW_12:
+		switch (out_format) {
+		case CAM_FORMAT_MIPI_RAW_12:
+			*decode_fmt = 0xf;
+			break;
+		case CAM_FORMAT_PLAIN16_12:
+			*decode_fmt = 0x3;
+			*plain_fmt = 0x1;
+			break;
+		default:
+			rc = -EINVAL;
+			break;
+		}
+		break;
+	case CAM_FORMAT_MIPI_RAW_14:
+		switch (out_format) {
+		case CAM_FORMAT_MIPI_RAW_14:
+			*decode_fmt = 0xf;
+			break;
+		case CAM_FORMAT_PLAIN16_14:
+			*decode_fmt = 0x4;
+			*plain_fmt = 0x1;
+			break;
+		default:
+			rc = -EINVAL;
+			break;
+		}
+		break;
+	case CAM_FORMAT_MIPI_RAW_16:
+		switch (out_format) {
+		case CAM_FORMAT_MIPI_RAW_16:
+			*decode_fmt = 0xf;
+			break;
+		case CAM_FORMAT_PLAIN16_16:
+			*decode_fmt = 0x5;
+			*plain_fmt = 0x1;
+			break;
+		default:
+			rc = -EINVAL;
+			break;
+		}
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	if (rc)
+		CAM_ERR(CAM_ISP, "Unsupported format pair in %d out %d",
+			in_format, out_format);
+
+	return rc;
+}
+
+static int cam_tfe_csid_get_format_ipp(
+	uint32_t in_format,
+	uint32_t *decode_fmt, uint32_t *plain_fmt)
+{
+	int rc = 0;
+
+	CAM_DBG(CAM_ISP, "input format:%d",
+		 in_format);
+
+	switch (in_format) {
+	case CAM_FORMAT_MIPI_RAW_6:
+		*decode_fmt  = 0;
+		*plain_fmt = 0;
+		break;
+	case CAM_FORMAT_MIPI_RAW_8:
+		*decode_fmt  = 0x1;
+		*plain_fmt = 0;
+		break;
+	case CAM_FORMAT_MIPI_RAW_10:
+		*decode_fmt  = 0x2;
+		*plain_fmt = 0x1;
+		break;
+	case CAM_FORMAT_MIPI_RAW_12:
+		*decode_fmt  = 0x3;
+		*plain_fmt = 0x1;
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "Unsupported format %d",
+			in_format);
+		rc = -EINVAL;
+	}
+
+	CAM_DBG(CAM_ISP, "decode_fmt:%d plain_fmt:%d",
+		 *decode_fmt, *plain_fmt);
+
+	return rc;
+}
+
+static int cam_tfe_csid_cid_get(struct cam_tfe_csid_hw *csid_hw,
+	int32_t vc, uint32_t dt, uint32_t *cid)
+{
+	uint32_t  i = 0;
+
+	/* Return already reserved CID if the VC/DT matches */
+	for (i = 0; i < CAM_TFE_CSID_CID_MAX; i++) {
+		if (csid_hw->cid_res[i].cnt >= 1) {
+			if (csid_hw->cid_res[i].vc == vc &&
+				csid_hw->cid_res[i].dt == dt) {
+				csid_hw->cid_res[i].cnt++;
+				*cid = i;
+				CAM_DBG(CAM_ISP, "CSID:%d CID %d allocated",
+					csid_hw->hw_intf->hw_idx, i);
+				return 0;
+			}
+		}
+	}
+
+	for (i = 0; i < CAM_TFE_CSID_CID_MAX; i++) {
+		if (!csid_hw->cid_res[i].cnt) {
+			csid_hw->cid_res[i].vc  = vc;
+			csid_hw->cid_res[i].dt  = dt;
+			csid_hw->cid_res[i].cnt = 1;
+			*cid = i;
+			CAM_DBG(CAM_ISP, "CSID:%d CID %d allocated",
+				csid_hw->hw_intf->hw_idx, i);
+			return 0;
+		}
+	}
+
+	CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d Free cid is not available",
+		 csid_hw->hw_intf->hw_idx);
+	/* Dump CID values */
+	for (i = 0; i < CAM_TFE_CSID_CID_MAX; i++) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d CID:%d vc:%d dt:%d cnt:%d",
+			csid_hw->hw_intf->hw_idx, i, csid_hw->cid_res[i].vc,
+			csid_hw->cid_res[i].dt, csid_hw->cid_res[i].cnt);
+	}
+	return -EINVAL;
+}
+
+static int cam_tfe_csid_global_reset(struct cam_tfe_csid_hw *csid_hw)
+{
+	struct cam_hw_soc_info                *soc_info;
+	const struct cam_tfe_csid_reg_offset  *csid_reg;
+	int rc = 0;
+	uint32_t val = 0, i;
+	uint32_t status;
+
+	soc_info = &csid_hw->hw_info->soc_info;
+	csid_reg = csid_hw->csid_info->csid_reg;
+
+	if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid HW State:%d",
+			csid_hw->hw_intf->hw_idx,
+			csid_hw->hw_info->hw_state);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ISP, "CSID:%d Csid reset", csid_hw->hw_intf->hw_idx);
+
+	/* Mask all interrupts */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_mask_addr);
+
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
+
+	if (csid_hw->pxl_pipe_enable)
+		cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_pxl_irq_mask_addr);
+
+	for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++)
+		cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[i]->csid_rdi_irq_mask_addr);
+
+	/* clear all interrupts */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_clear_addr);
+
+	cam_io_w_mb(csid_reg->csi2_reg->csi2_irq_mask_all,
+		soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_clear_addr);
+
+	if (csid_hw->pxl_pipe_enable)
+		cam_io_w_mb(csid_reg->cmn_reg->ipp_irq_mask_all,
+			soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_pxl_irq_clear_addr);
+
+	for (i = 0 ; i < csid_reg->cmn_reg->num_rdis; i++)
+		cam_io_w_mb(csid_reg->cmn_reg->rdi_irq_mask_all,
+			soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[i]->csid_rdi_irq_clear_addr);
+
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_irq_cmd_addr);
+
+	cam_io_w_mb(0x80, soc_info->reg_map[0].mem_base +
+		csid_hw->csid_info->csid_reg->csi2_reg->csid_csi2_rx_cfg1_addr);
+
+	/* perform the top CSID HW registers reset */
+	cam_io_w_mb(csid_reg->cmn_reg->csid_rst_stb,
+		soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_rst_strobes_addr);
+
+	rc = readl_poll_timeout(soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_status_addr,
+			status, (status & 0x1) == 0x1,
+		CAM_TFE_CSID_TIMEOUT_SLEEP_US, CAM_TFE_CSID_TIMEOUT_ALL_US);
+	if (rc < 0) {
+		CAM_ERR(CAM_ISP, "CSID:%d csid_reset fail rc = %d",
+			  csid_hw->hw_intf->hw_idx, rc);
+		rc = -ETIMEDOUT;
+	}
+
+	/* perform the SW registers reset */
+	reinit_completion(&csid_hw->csid_top_complete);
+	cam_io_w_mb(csid_reg->cmn_reg->csid_reg_rst_stb,
+		soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_rst_strobes_addr);
+
+	rc = wait_for_completion_timeout(&csid_hw->csid_top_complete,
+		msecs_to_jiffies(TFE_CSID_TIMEOUT));
+	if (rc <= 0) {
+		CAM_ERR(CAM_ISP, "CSID:%d soft reg reset fail rc = %d",
+			 csid_hw->hw_intf->hw_idx, rc);
+		if (rc == 0)
+			rc = -ETIMEDOUT;
+	} else
+		rc = 0;
+
+	usleep_range(3000, 3010);
+	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
+	if (val != 0)
+		CAM_ERR(CAM_ISP, "CSID:%d IRQ value after reset rc = %d",
+			csid_hw->hw_intf->hw_idx, val);
+	csid_hw->error_irq_count = 0;
+
+	return rc;
+}
+
+static int cam_tfe_csid_path_reset(struct cam_tfe_csid_hw *csid_hw,
+	struct cam_tfe_csid_reset_cfg_args  *reset)
+{
+	int rc = 0;
+	struct cam_hw_soc_info                    *soc_info;
+	struct cam_isp_resource_node              *res;
+	const struct cam_tfe_csid_reg_offset      *csid_reg;
+	uint32_t  reset_strb_addr, reset_strb_val, val, id;
+	struct completion  *complete;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	res      = reset->node_res;
+
+	if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid hw state :%d",
+			csid_hw->hw_intf->hw_idx,
+			csid_hw->hw_info->hw_state);
+		return -EINVAL;
+	}
+
+	if (res->res_id >= CAM_TFE_CSID_PATH_RES_MAX) {
+		CAM_DBG(CAM_ISP, "CSID:%d Invalid res id%d",
+			csid_hw->hw_intf->hw_idx, res->res_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	CAM_DBG(CAM_ISP, "CSID:%d resource:%d",
+		csid_hw->hw_intf->hw_idx, res->res_id);
+
+	if (res->res_id == CAM_TFE_CSID_PATH_RES_IPP) {
+		if (!csid_reg->ipp_reg) {
+			CAM_ERR(CAM_ISP, "CSID:%d IPP not supported :%d",
+				 csid_hw->hw_intf->hw_idx,
+				res->res_id);
+			return -EINVAL;
+		}
+
+		reset_strb_addr = csid_reg->ipp_reg->csid_pxl_rst_strobes_addr;
+		complete = &csid_hw->csid_ipp_complete;
+		reset_strb_val = csid_reg->cmn_reg->ipp_path_rst_stb_all;
+
+		/* Enable path reset done interrupt */
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_pxl_irq_mask_addr);
+		val |= TFE_CSID_PATH_INFO_RST_DONE;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			 csid_reg->ipp_reg->csid_pxl_irq_mask_addr);
+	} else {
+		id = res->res_id;
+		if (!csid_reg->rdi_reg[id]) {
+			CAM_ERR(CAM_ISP, "CSID:%d RDI res not supported :%d",
+				 csid_hw->hw_intf->hw_idx,
+				res->res_id);
+			return -EINVAL;
+		}
+
+		reset_strb_addr =
+			csid_reg->rdi_reg[id]->csid_rdi_rst_strobes_addr;
+		complete =
+			&csid_hw->csid_rdin_complete[id];
+		reset_strb_val = csid_reg->cmn_reg->rdi_path_rst_stb_all;
+
+		/* Enable path reset done interrupt */
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+		val |= TFE_CSID_PATH_INFO_RST_DONE;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+	}
+
+	reinit_completion(complete);
+
+	/* Reset the corresponding tfe csid path */
+	cam_io_w_mb(reset_strb_val, soc_info->reg_map[0].mem_base +
+				reset_strb_addr);
+
+	rc = wait_for_completion_timeout(complete,
+		msecs_to_jiffies(TFE_CSID_TIMEOUT));
+	if (rc <= 0) {
+		CAM_ERR(CAM_ISP, "CSID:%d Res id %d fail rc = %d",
+			 csid_hw->hw_intf->hw_idx,
+			res->res_id,  rc);
+		if (rc == 0)
+			rc = -ETIMEDOUT;
+	}
+
+end:
+	return rc;
+}
+
+static int cam_tfe_csid_cid_reserve(struct cam_tfe_csid_hw *csid_hw,
+	struct cam_tfe_csid_hw_reserve_resource_args  *cid_reserv,
+	uint32_t  *cid_value)
+{
+	int rc = 0;
+
+	CAM_DBG(CAM_ISP,
+		"CSID:%d res_id:0x%x Lane type:%d lane_num:%d dt:%d vc:%d",
+		csid_hw->hw_intf->hw_idx,
+		cid_reserv->in_port->res_id,
+		cid_reserv->in_port->lane_type,
+		cid_reserv->in_port->lane_num,
+		cid_reserv->in_port->dt,
+		cid_reserv->in_port->vc);
+
+	if (cid_reserv->in_port->res_id >= CAM_ISP_TFE_IN_RES_MAX) {
+		CAM_ERR(CAM_ISP, "CSID:%d  Invalid phy sel %d",
+			csid_hw->hw_intf->hw_idx,
+			cid_reserv->in_port->res_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (cid_reserv->in_port->lane_type >= CAM_ISP_LANE_TYPE_MAX) {
+		CAM_ERR(CAM_ISP, "CSID:%d  Invalid lane type %d",
+			csid_hw->hw_intf->hw_idx,
+			cid_reserv->in_port->lane_type);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if ((cid_reserv->in_port->lane_type ==  CAM_ISP_LANE_TYPE_DPHY &&
+		cid_reserv->in_port->lane_num > 4)) {
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid lane num %d",
+			csid_hw->hw_intf->hw_idx,
+			cid_reserv->in_port->lane_num);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (cid_reserv->in_port->lane_type == CAM_ISP_LANE_TYPE_CPHY &&
+		cid_reserv->in_port->lane_num > 3) {
+		CAM_ERR(CAM_ISP, " CSID:%d Invalid lane type %d & num %d",
+			 csid_hw->hw_intf->hw_idx,
+			cid_reserv->in_port->lane_type,
+			cid_reserv->in_port->lane_num);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* CSID  CSI2 v1.1 supports 4 vc  */
+	if (cid_reserv->in_port->dt > 0x3f ||
+		cid_reserv->in_port->vc > 0x3) {
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid vc:%d dt %d",
+			csid_hw->hw_intf->hw_idx,
+			cid_reserv->in_port->vc, cid_reserv->in_port->dt);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (csid_hw->csi2_reserve_cnt == UINT_MAX) {
+		CAM_ERR(CAM_ISP,
+			"CSID%d reserve cnt reached max",
+			csid_hw->hw_intf->hw_idx);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	CAM_DBG(CAM_ISP, "Reserve_cnt %u", csid_hw->csi2_reserve_cnt);
+
+	if (csid_hw->csi2_reserve_cnt) {
+		/* current configure res type should match requested res type */
+		if (csid_hw->in_res_id != cid_reserv->in_port->res_id) {
+			rc = -EINVAL;
+			goto end;
+		}
+
+		if (csid_hw->csi2_rx_cfg.lane_cfg !=
+			cid_reserv->in_port->lane_cfg  ||
+			csid_hw->csi2_rx_cfg.lane_type !=
+			cid_reserv->in_port->lane_type ||
+			csid_hw->csi2_rx_cfg.lane_num !=
+			cid_reserv->in_port->lane_num) {
+			rc = -EINVAL;
+			goto end;
+		}
+	}
+
+	rc = cam_tfe_csid_cid_get(csid_hw,
+		cid_reserv->in_port->vc,
+		cid_reserv->in_port->dt,
+		cid_value);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "CSID:%d CID Reserve failed res_id %d",
+			csid_hw->hw_intf->hw_idx,
+			cid_reserv->in_port->res_id);
+		goto end;
+	}
+
+	if (!csid_hw->csi2_reserve_cnt) {
+		csid_hw->in_res_id = cid_reserv->in_port->res_id;
+
+		csid_hw->csi2_rx_cfg.lane_cfg =
+			cid_reserv->in_port->lane_cfg;
+		csid_hw->csi2_rx_cfg.lane_type =
+			cid_reserv->in_port->lane_type;
+		csid_hw->csi2_rx_cfg.lane_num =
+			cid_reserv->in_port->lane_num;
+
+		if (cid_reserv->in_port->res_id != CAM_ISP_TFE_IN_RES_TPG)
+			csid_hw->csi2_rx_cfg.phy_sel =
+				(cid_reserv->in_port->res_id & 0xFF) - 1;
+		else
+			csid_hw->csi2_rx_cfg.phy_sel =
+				(cid_reserv->phy_sel & 0xFF) - 1;
+	}
+
+	csid_hw->csi2_reserve_cnt++;
+	CAM_DBG(CAM_ISP, "CSID:%d CID:%d acquired reserv cnt:%d",
+		csid_hw->hw_intf->hw_idx, *cid_value,
+		csid_hw->csi2_reserve_cnt);
+
+end:
+	return rc;
+}
+
+static int cam_tfe_csid_path_reserve(struct cam_tfe_csid_hw *csid_hw,
+	struct cam_tfe_csid_hw_reserve_resource_args  *reserve)
+{
+	int rc = 0;
+	struct cam_tfe_csid_path_cfg    *path_data;
+	struct cam_isp_resource_node    *res;
+	uint32_t          cid_value;
+
+	/* CSID  CSI2 v2.0 supports 4 vc */
+	if (reserve->in_port->dt > 0x3f || reserve->in_port->vc > 0x3 ||
+		(reserve->sync_mode >= CAM_ISP_HW_SYNC_MAX)) {
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid vc:%d dt %d mode:%d",
+			 csid_hw->hw_intf->hw_idx,
+			reserve->in_port->vc, reserve->in_port->dt,
+			reserve->sync_mode);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	switch (reserve->res_id) {
+	case CAM_TFE_CSID_PATH_RES_IPP:
+		if (csid_hw->ipp_res.res_state !=
+			CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+			CAM_DBG(CAM_ISP,
+				"CSID:%d IPP resource not available %d",
+				csid_hw->hw_intf->hw_idx,
+				csid_hw->ipp_res.res_state);
+			rc = -EINVAL;
+			goto end;
+		}
+
+		if (cam_tfe_csid_is_ipp_format_supported(
+				reserve->in_port->format)) {
+			CAM_ERR(CAM_ISP,
+				"CSID:%d res id:%d un support format %d",
+				csid_hw->hw_intf->hw_idx, reserve->res_id,
+				reserve->in_port->format);
+			rc = -EINVAL;
+			goto end;
+		}
+		rc = cam_tfe_csid_cid_reserve(csid_hw, reserve, &cid_value);
+		if (rc)
+			goto end;
+
+		/* assign the IPP resource */
+		res = &csid_hw->ipp_res;
+		CAM_DBG(CAM_ISP,
+			"CSID:%d IPP resource:%d acquired successfully",
+			csid_hw->hw_intf->hw_idx, res->res_id);
+
+		break;
+
+	case CAM_TFE_CSID_PATH_RES_RDI_0:
+	case CAM_TFE_CSID_PATH_RES_RDI_1:
+	case CAM_TFE_CSID_PATH_RES_RDI_2:
+		if (csid_hw->rdi_res[reserve->res_id].res_state !=
+			CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+			CAM_ERR(CAM_ISP,
+				"CSID:%d RDI:%d resource not available %d",
+				csid_hw->hw_intf->hw_idx,
+				reserve->res_id,
+				csid_hw->rdi_res[reserve->res_id].res_state);
+			rc = -EINVAL;
+			goto end;
+		}
+
+		rc = cam_tfe_csid_cid_reserve(csid_hw, reserve, &cid_value);
+		if (rc)
+			goto end;
+
+		res = &csid_hw->rdi_res[reserve->res_id];
+		CAM_DBG(CAM_ISP,
+			"CSID:%d RDI resource:%d acquire success",
+			csid_hw->hw_intf->hw_idx,
+			res->res_id);
+
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid res id:%d",
+			csid_hw->hw_intf->hw_idx, reserve->res_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+	path_data = (struct cam_tfe_csid_path_cfg   *)res->res_priv;
+
+	CAM_DBG(CAM_ISP, "sensor width:%d height:%d fps:%d vbi:%d hbi:%d",
+		reserve->in_port->sensor_width,
+		reserve->in_port->sensor_height,
+		reserve->in_port->sensor_fps,
+		reserve->in_port->sensor_vbi,
+		reserve->in_port->sensor_hbi);
+	path_data->sensor_width = reserve->in_port->sensor_width;
+	path_data->sensor_height = reserve->in_port->sensor_height;
+	path_data->sensor_fps  = reserve->in_port->sensor_fps;
+	path_data->sensor_hbi = reserve->in_port->sensor_hbi;
+	path_data->sensor_vbi = reserve->in_port->sensor_vbi;
+
+	path_data->cid = cid_value;
+	path_data->in_format = reserve->in_port->format;
+	path_data->out_format = reserve->out_port->format;
+	path_data->sync_mode = reserve->sync_mode;
+	path_data->height  = reserve->in_port->height;
+	path_data->start_line = reserve->in_port->line_start;
+	path_data->end_line = reserve->in_port->line_end;
+
+	csid_hw->event_cb = reserve->event_cb;
+	csid_hw->event_cb_priv = reserve->event_cb_prv;
+
+	/* Enable crop only for ipp */
+	if (reserve->res_id == CAM_TFE_CSID_PATH_RES_IPP)
+		path_data->crop_enable = true;
+
+	CAM_DBG(CAM_ISP,
+		"Res id: %d height:%d line_start %d line_end %d crop_en %d",
+		reserve->res_id, reserve->in_port->height,
+		reserve->in_port->line_start, reserve->in_port->line_end,
+		path_data->crop_enable);
+
+	path_data->dt = reserve->in_port->dt;
+	path_data->vc = reserve->in_port->vc;
+
+	if (reserve->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
+		path_data->start_pixel = reserve->in_port->left_start;
+		path_data->end_pixel = reserve->in_port->left_end;
+		path_data->width  = reserve->in_port->left_width;
+		CAM_DBG(CAM_ISP, "CSID:%d master:startpixel 0x%x endpixel:0x%x",
+			csid_hw->hw_intf->hw_idx, path_data->start_pixel,
+			path_data->end_pixel);
+		CAM_DBG(CAM_ISP, "CSID:%d master:line start:0x%x line end:0x%x",
+			csid_hw->hw_intf->hw_idx, path_data->start_line,
+			path_data->end_line);
+	} else if (reserve->sync_mode == CAM_ISP_HW_SYNC_SLAVE) {
+		path_data->master_idx = reserve->master_idx;
+		CAM_DBG(CAM_ISP, "CSID:%d master_idx=%d",
+			csid_hw->hw_intf->hw_idx, path_data->master_idx);
+		path_data->start_pixel = reserve->in_port->right_start;
+		path_data->end_pixel = reserve->in_port->right_end;
+		path_data->width  = reserve->in_port->right_width;
+		CAM_DBG(CAM_ISP, "CSID:%d slave:start:0x%x end:0x%x width 0x%x",
+			csid_hw->hw_intf->hw_idx, path_data->start_pixel,
+			path_data->end_pixel, path_data->width);
+		CAM_DBG(CAM_ISP, "CSID:%d slave:line start:0x%x line end:0x%x",
+			csid_hw->hw_intf->hw_idx, path_data->start_line,
+			path_data->end_line);
+	} else {
+		path_data->width  = reserve->in_port->left_width;
+		path_data->start_pixel = reserve->in_port->left_start;
+		path_data->end_pixel = reserve->in_port->left_end;
+		CAM_DBG(CAM_ISP, "Res id: %d left width %d start: %d stop:%d",
+			reserve->res_id, reserve->in_port->left_width,
+			reserve->in_port->left_start,
+			reserve->in_port->left_end);
+	}
+
+	CAM_DBG(CAM_ISP, "Res %d width %d height %d", reserve->res_id,
+		path_data->width, path_data->height);
+	reserve->node_res = res;
+
+end:
+	return rc;
+}
+
+static int cam_tfe_csid_enable_csi2(
+	struct cam_tfe_csid_hw          *csid_hw)
+{
+	const struct cam_tfe_csid_reg_offset       *csid_reg;
+	struct cam_hw_soc_info                     *soc_info;
+	uint32_t val = 0;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	CAM_DBG(CAM_ISP, "CSID:%d config csi2 rx",
+		csid_hw->hw_intf->hw_idx);
+
+	/* rx cfg0 */
+	val = 0;
+	val = (csid_hw->csi2_rx_cfg.lane_num - 1)  |
+		(csid_hw->csi2_rx_cfg.lane_cfg << 4) |
+		(csid_hw->csi2_rx_cfg.lane_type << 24);
+	val |= (csid_hw->csi2_rx_cfg.phy_sel &
+		csid_reg->csi2_reg->csi2_rx_phy_num_mask) << 20;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_cfg0_addr);
+
+	/* rx cfg1 */
+	val = (1 << csid_reg->csi2_reg->csi2_misr_enable_shift_val);
+
+	/* enable packet ecc correction */
+	val |= 1;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_cfg1_addr);
+
+	/* Enable the CSI2 rx interrupts */
+	val = TFE_CSID_CSI2_RX_INFO_RST_DONE |
+		TFE_CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW |
+		TFE_CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW |
+		TFE_CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW |
+		TFE_CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW |
+		TFE_CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION |
+		TFE_CSID_CSI2_RX_ERROR_CPHY_SOT_RECEPTION |
+		TFE_CSID_CSI2_RX_ERROR_CRC |
+		TFE_CSID_CSI2_RX_ERROR_ECC |
+		TFE_CSID_CSI2_RX_ERROR_MMAPPED_VC_DT |
+		TFE_CSID_CSI2_RX_ERROR_STREAM_UNDERFLOW |
+		TFE_CSID_CSI2_RX_ERROR_UNBOUNDED_FRAME |
+		TFE_CSID_CSI2_RX_ERROR_CPHY_PH_CRC;
+
+	/* Enable the interrupt based on csid debug info set */
+	if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_SOT_IRQ)
+		val |= TFE_CSID_CSI2_RX_INFO_PHY_DL0_SOT_CAPTURED |
+			TFE_CSID_CSI2_RX_INFO_PHY_DL1_SOT_CAPTURED |
+			TFE_CSID_CSI2_RX_INFO_PHY_DL2_SOT_CAPTURED |
+			TFE_CSID_CSI2_RX_INFO_PHY_DL3_SOT_CAPTURED;
+
+	if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_EOT_IRQ)
+		val |= TFE_CSID_CSI2_RX_INFO_PHY_DL0_EOT_CAPTURED |
+			TFE_CSID_CSI2_RX_INFO_PHY_DL1_EOT_CAPTURED |
+			TFE_CSID_CSI2_RX_INFO_PHY_DL2_EOT_CAPTURED |
+			TFE_CSID_CSI2_RX_INFO_PHY_DL3_EOT_CAPTURED;
+
+	if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE)
+		val |= TFE_CSID_CSI2_RX_INFO_SHORT_PKT_CAPTURED;
+
+	if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE)
+		val |= TFE_CSID_CSI2_RX_INFO_LONG_PKT_CAPTURED;
+	if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE)
+		val |= TFE_CSID_CSI2_RX_INFO_CPHY_PKT_HDR_CAPTURED;
+
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
+
+	return 0;
+}
+
+static int cam_tfe_csid_disable_csi2(
+	struct cam_tfe_csid_hw          *csid_hw)
+{
+	const struct cam_tfe_csid_reg_offset      *csid_reg;
+	struct cam_hw_soc_info                    *soc_info;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	CAM_DBG(CAM_ISP, "CSID:%d Disable csi2 rx",
+		csid_hw->hw_intf->hw_idx);
+
+	/* Disable the CSI2 rx interrupts */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
+
+	/* Reset the Rx CFG registers */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_cfg0_addr);
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_cfg1_addr);
+
+	return 0;
+}
+
+static int cam_tfe_csid_enable_hw(struct cam_tfe_csid_hw  *csid_hw)
+{
+	int rc = 0;
+	const struct cam_tfe_csid_reg_offset      *csid_reg;
+	struct cam_hw_soc_info              *soc_info;
+	uint32_t i, val, clk_lvl;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	/* overflow check before increment */
+	if (csid_hw->hw_info->open_count == UINT_MAX) {
+		CAM_ERR(CAM_ISP, "CSID:%d Open count reached max",
+			csid_hw->hw_intf->hw_idx);
+		return -EINVAL;
+	}
+
+	/* Increment ref Count */
+	csid_hw->hw_info->open_count++;
+	if (csid_hw->hw_info->open_count > 1) {
+		CAM_DBG(CAM_ISP, "CSID hw has already been enabled");
+		return rc;
+	}
+
+	CAM_DBG(CAM_ISP, "CSID:%d init CSID HW",
+		csid_hw->hw_intf->hw_idx);
+
+	rc = cam_soc_util_get_clk_level(soc_info, csid_hw->clk_rate,
+		soc_info->src_clk_idx, &clk_lvl);
+	CAM_DBG(CAM_ISP, "CSID clock lvl %u", clk_lvl);
+
+	rc = cam_tfe_csid_enable_soc_resources(soc_info, clk_lvl);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "CSID:%d Enable SOC failed",
+			csid_hw->hw_intf->hw_idx);
+		goto err;
+	}
+
+	csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_UP;
+	/* Disable the top IRQ interrupt */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_mask_addr);
+	/* Reset CSID top */
+	rc = cam_tfe_csid_global_reset(csid_hw);
+	if (rc)
+		goto disable_soc;
+
+	/* clear all interrupts */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_clear_addr);
+
+	cam_io_w_mb(csid_reg->csi2_reg->csi2_irq_mask_all,
+		soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_clear_addr);
+
+	if (csid_hw->pxl_pipe_enable)
+		cam_io_w_mb(csid_reg->cmn_reg->ipp_irq_mask_all,
+			soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_pxl_irq_clear_addr);
+
+	for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++)
+		cam_io_w_mb(csid_reg->cmn_reg->rdi_irq_mask_all,
+			soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[i]->csid_rdi_irq_clear_addr);
+
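+	/* the irq cmd write below applies the clear values programmed above */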
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_irq_cmd_addr);
+
+	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->cmn_reg->csid_hw_version_addr);
+	CAM_DBG(CAM_ISP, "CSID:%d CSID HW version: 0x%x",
+		csid_hw->hw_intf->hw_idx, val);
+
+	/* enable the csi2 rx */
+	rc = cam_tfe_csid_enable_csi2(csid_hw);
+	if (rc)
+		goto disable_soc;
+
+	return rc;
+
+disable_soc:
+	cam_tfe_csid_disable_soc_resources(soc_info);
+	csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
+err:
+	csid_hw->hw_info->open_count--;
+	return rc;
+}
+
+static int cam_tfe_csid_disable_hw(struct cam_tfe_csid_hw *csid_hw)
+{
+	int rc = -EINVAL;
+	struct cam_hw_soc_info                   *soc_info;
+	const struct cam_tfe_csid_reg_offset     *csid_reg;
+	unsigned long                             flags;
+
+	/* Check for refcount */
+	if (!csid_hw->hw_info->open_count) {
+		CAM_WARN(CAM_ISP, "Unbalanced disable_hw");
+		return rc;
+	}
+
+	/* Decrement ref Count */
+	csid_hw->hw_info->open_count--;
+
+	if (csid_hw->hw_info->open_count) {
+		rc = 0;
+		return rc;
+	}
+
+	soc_info = &csid_hw->hw_info->soc_info;
+	csid_reg = csid_hw->csid_info->csid_reg;
+
+	/* Disable the csi2 */
+	cam_tfe_csid_disable_csi2(csid_hw);
+
+	CAM_DBG(CAM_ISP, "%s:Calling Global Reset", __func__);
+	cam_tfe_csid_global_reset(csid_hw);
+	CAM_DBG(CAM_ISP, "%s:Global Reset Done", __func__);
+
+	CAM_DBG(CAM_ISP, "CSID:%d De-init CSID HW",
+		csid_hw->hw_intf->hw_idx);
+
+	/* Disable the top IRQ interrupt */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_mask_addr);
+
+	rc = cam_tfe_csid_disable_soc_resources(soc_info);
+	if (rc)
+		CAM_ERR(CAM_ISP, "CSID:%d Disable CSID SOC failed",
+			csid_hw->hw_intf->hw_idx);
+
+	spin_lock_irqsave(&csid_hw->spin_lock, flags);
+	csid_hw->device_enabled = 0;
+	spin_unlock_irqrestore(&csid_hw->spin_lock, flags);
+	csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
+	csid_hw->error_irq_count = 0;
+
+	return rc;
+}
+
+static int cam_tfe_csid_init_config_pxl_path(
+	struct cam_tfe_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	int rc = 0;
+	struct cam_tfe_csid_path_cfg             *path_data;
+	const struct cam_tfe_csid_reg_offset     *csid_reg;
+	struct cam_hw_soc_info                   *soc_info;
+	const struct cam_tfe_csid_pxl_reg_offset *pxl_reg = NULL;
+	uint32_t decode_format = 0, plain_format = 0, val = 0;
+
+	path_data = (struct cam_tfe_csid_path_cfg  *) res->res_priv;
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	pxl_reg = csid_reg->ipp_reg;
+	if (!pxl_reg) {
+		CAM_ERR(CAM_ISP, "CSID:%d IPP :%d is not supported on HW",
+			csid_hw->hw_intf->hw_idx, res->res_id);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ISP, "Config IPP Path");
+	rc = cam_tfe_csid_get_format_ipp(path_data->in_format,
+		&decode_format, &plain_format);
+	if (rc)
+		return rc;
+
+	/*
+	 * Configure the pxl path, enable the time stamp capture and
+	 * enable the HW measurement blocks.
+	 */
+	val = (path_data->vc << csid_reg->cmn_reg->vc_shift_val) |
+		(path_data->dt << csid_reg->cmn_reg->dt_shift_val) |
+		(path_data->cid << csid_reg->cmn_reg->dt_id_shift_val) |
+		(decode_format << csid_reg->cmn_reg->fmt_shift_val) |
+		(path_data->crop_enable <<
+		csid_reg->cmn_reg->crop_h_en_shift_val) |
+		(path_data->crop_enable <<
+		csid_reg->cmn_reg->crop_v_en_shift_val) |
+		(1 << 1);
+
+	val |= (1 << pxl_reg->pix_store_en_shift_val);
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		pxl_reg->csid_pxl_cfg0_addr);
+
+	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		pxl_reg->csid_pxl_cfg1_addr);
+
+	/* select the post irq sub sample strobe for time stamp capture */
+	val |= TFE_CSID_TIMESTAMP_STB_POST_IRQ;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		pxl_reg->csid_pxl_cfg1_addr);
+
+	if (path_data->crop_enable) {
+		val = (((path_data->end_pixel & 0xFFFF) <<
+			csid_reg->cmn_reg->crop_shift) |
+			(path_data->start_pixel & 0xFFFF));
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			pxl_reg->csid_pxl_hcrop_addr);
+		CAM_DBG(CAM_ISP, "CSID:%d Horizontal crop config val: 0x%x",
+			csid_hw->hw_intf->hw_idx, val);
+
+		val = (((path_data->end_line & 0xFFFF) <<
+			csid_reg->cmn_reg->crop_shift) |
+			(path_data->start_line & 0xFFFF));
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			pxl_reg->csid_pxl_vcrop_addr);
+		CAM_DBG(CAM_ISP, "CSID:%d Vertical Crop config val: 0x%x",
+			csid_hw->hw_intf->hw_idx, val);
+
+		/* Enable generating early eof strobe based on crop config */
+		if (!(csid_hw->csid_debug & TFE_CSID_DEBUG_DISABLE_EARLY_EOF)) {
+			val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+				pxl_reg->csid_pxl_cfg0_addr);
+			val |= (1 << pxl_reg->early_eof_en_shift_val);
+			cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+				pxl_reg->csid_pxl_cfg0_addr);
+		}
+	}
+
+	/* Enable the Pxl path */
+	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		pxl_reg->csid_pxl_cfg0_addr);
+	val |= (1 << csid_reg->cmn_reg->path_en_shift_val);
+
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		pxl_reg->csid_pxl_cfg0_addr);
+
+	/* Enable Error Detection, Overflow ctrl mode: 2 -> Detect overflow */
+	val = 0x9;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		pxl_reg->csid_pxl_err_recovery_cfg0_addr);
+
+	/* configure the rx packet capture based on csid debug set */
+	val = 0;
+	if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE)
+		val = ((1 <<
+			csid_reg->csi2_reg->csi2_capture_short_pkt_en_shift) |
+			(path_data->vc <<
+			csid_reg->csi2_reg->csi2_capture_short_pkt_vc_shift));
+
+	if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE)
+		val |= ((1 <<
+			csid_reg->csi2_reg->csi2_capture_long_pkt_en_shift) |
+			(path_data->dt <<
+			csid_reg->csi2_reg->csi2_capture_long_pkt_dt_shift) |
+			(path_data->vc <<
+			csid_reg->csi2_reg->csi2_capture_long_pkt_vc_shift));
+
+	if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE)
+		val |= ((1 <<
+			csid_reg->csi2_reg->csi2_capture_cphy_pkt_en_shift) |
+			(path_data->dt <<
+			csid_reg->csi2_reg->csi2_capture_cphy_pkt_dt_shift) |
+			(path_data->vc <<
+			csid_reg->csi2_reg->csi2_capture_cphy_pkt_vc_shift));
+
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_capture_ctrl_addr);
+	CAM_DBG(CAM_ISP, "rx capture control value 0x%x", val);
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
+
+	return rc;
+}
+
+static int cam_tfe_csid_deinit_pxl_path(
+	struct cam_tfe_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	int rc = 0;
+	const struct cam_tfe_csid_reg_offset      *csid_reg;
+	struct cam_hw_soc_info                    *soc_info;
+	const struct cam_tfe_csid_pxl_reg_offset  *pxl_reg = NULL;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	pxl_reg = csid_reg->ipp_reg;
+	if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW) {
+		CAM_ERR(CAM_ISP,
+			"CSID:%d IPP Res type %d res_id:%d in wrong state %d",
+			csid_hw->hw_intf->hw_idx,
+			res->res_type, res->res_id, res->res_state);
+		rc = -EINVAL;
+	}
+
+	if (!pxl_reg) {
+		CAM_ERR(CAM_ISP, "CSID:%d IPP %d is not supported on HW",
+			csid_hw->hw_intf->hw_idx, res->res_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* Disable Error Recovery */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		pxl_reg->csid_pxl_err_recovery_cfg0_addr);
+
+end:
+	res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+	return rc;
+}
+
+static int cam_tfe_csid_enable_pxl_path(
+	struct cam_tfe_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	const struct cam_tfe_csid_reg_offset     *csid_reg;
+	struct cam_hw_soc_info                   *soc_info;
+	struct cam_tfe_csid_path_cfg             *path_data;
+	const struct cam_tfe_csid_pxl_reg_offset *pxl_reg = NULL;
+	uint32_t                                  val = 0;
+
+	path_data = (struct cam_tfe_csid_path_cfg   *) res->res_priv;
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	pxl_reg = csid_reg->ipp_reg;
+
+	if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW) {
+		CAM_ERR(CAM_ISP,
+			"CSID:%d IPP path res type:%d res_id:%d Invalid state%d",
+			csid_hw->hw_intf->hw_idx,
+			res->res_type, res->res_id, res->res_state);
+		return -EINVAL;
+	}
+
+	if (!pxl_reg) {
+		CAM_ERR(CAM_ISP, "CSID:%d IPP resid: %d not supported on HW",
+			csid_hw->hw_intf->hw_idx, res->res_id);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ISP, "Enable IPP path");
+
+	/* Set master or slave path */
+	if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER)
+		/* Set halt mode as master */
+		val = (TFE_CSID_HALT_MODE_MASTER  <<
+			pxl_reg->halt_mode_shift) |
+			(pxl_reg->halt_master_sel_master_val <<
+			pxl_reg->halt_master_sel_shift);
+	else if (path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
+		/* Set halt mode as slave and set master idx */
+		val = (TFE_CSID_HALT_MODE_SLAVE << pxl_reg->halt_mode_shift);
+	else
+		/* Default is internal halt mode */
+		val = 0;
+
+	/*
+	 * Resume at frame boundary if Master or No Sync.
+	 * Slave will get resume command from Master.
+	 */
+	if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER ||
+		path_data->sync_mode == CAM_ISP_HW_SYNC_NONE)
+		val |= CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY;
+
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		pxl_reg->csid_pxl_ctrl_addr);
+
+	CAM_DBG(CAM_ISP, "CSID:%d IPP Ctrl val: 0x%x",
+			csid_hw->hw_intf->hw_idx, val);
+
+	/* Enable the required pxl path interrupts */
+	val = TFE_CSID_PATH_INFO_RST_DONE |
+		TFE_CSID_PATH_ERROR_FIFO_OVERFLOW |
+		TFE_CSID_PATH_IPP_ERROR_CCIF_VIOLATION |
+		TFE_CSID_PATH_IPP_OVERFLOW_IRQ;
+
+	if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_SOF_IRQ)
+		val |= TFE_CSID_PATH_INFO_INPUT_SOF;
+	if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_EOF_IRQ)
+		val |= TFE_CSID_PATH_INFO_INPUT_EOF;
+
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		pxl_reg->csid_pxl_irq_mask_addr);
+
+	CAM_DBG(CAM_ISP, "Enable IPP IRQ mask 0x%x", val);
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
+	return 0;
+}
+
+static int cam_tfe_csid_disable_pxl_path(
+	struct cam_tfe_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res,
+	enum cam_tfe_csid_halt_cmd       stop_cmd)
+{
+	int rc = 0;
+	uint32_t val = 0;
+	const struct cam_tfe_csid_reg_offset       *csid_reg;
+	struct cam_hw_soc_info                     *soc_info;
+	struct cam_tfe_csid_path_cfg               *path_data;
+	const struct cam_tfe_csid_pxl_reg_offset   *pxl_reg;
+
+	path_data = (struct cam_tfe_csid_path_cfg   *) res->res_priv;
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	if (res->res_id >= CAM_TFE_CSID_PATH_RES_MAX) {
+		CAM_DBG(CAM_ISP, "CSID:%d Invalid res id%d",
+			csid_hw->hw_intf->hw_idx, res->res_id);
+		return -EINVAL;
+	}
+
+	if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
+		res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CAM_DBG(CAM_ISP, "CSID:%d Res:%d already in stopped state:%d",
+			csid_hw->hw_intf->hw_idx, res->res_id, res->res_state);
+		return rc;
+	}
+
+	pxl_reg = csid_reg->ipp_reg;
+	if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
+		CAM_DBG(CAM_ISP, "CSID:%d IPP path Res:%d Invalid state%d",
+			csid_hw->hw_intf->hw_idx, res->res_id, res->res_state);
+		return -EINVAL;
+	}
+
+	if (!pxl_reg) {
+		CAM_ERR(CAM_ISP, "CSID:%d IPP %d is not supported on HW",
+			csid_hw->hw_intf->hw_idx, res->res_id);
+		return -EINVAL;
+	}
+
+	if (stop_cmd != CAM_TFE_CSID_HALT_AT_FRAME_BOUNDARY &&
+		stop_cmd != CAM_TFE_CSID_HALT_IMMEDIATELY) {
+		CAM_ERR(CAM_ISP,
+			"CSID:%d IPP path un supported stop command:%d",
+			csid_hw->hw_intf->hw_idx, stop_cmd);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ISP, "CSID:%d res_id:%d IPP path",
+		csid_hw->hw_intf->hw_idx, res->res_id);
+
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		pxl_reg->csid_pxl_irq_mask_addr);
+
+	if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER ||
+		path_data->sync_mode == CAM_ISP_HW_SYNC_NONE) {
+		/* configure Halt */
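+		/* the halt command lives in the two LSBs of the ctrl register;
+		 * read-modify-write so the remaining bits are preserved
+		 */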
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		pxl_reg->csid_pxl_ctrl_addr);
+		val &= ~0x3;
+		val |= stop_cmd;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			pxl_reg->csid_pxl_ctrl_addr);
+	}
+
+	return rc;
+}
+
+static int cam_tfe_csid_init_config_rdi_path(
+	struct cam_tfe_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	int rc = 0;
+	struct cam_tfe_csid_path_cfg           *path_data;
+	const struct cam_tfe_csid_reg_offset   *csid_reg;
+	struct cam_hw_soc_info                 *soc_info;
+	uint32_t path_format = 0, plain_fmt = 0, val = 0, id;
+
+	path_data = (struct cam_tfe_csid_path_cfg   *) res->res_priv;
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	id = res->res_id;
+	if (!csid_reg->rdi_reg[id]) {
+		CAM_ERR(CAM_ISP, "CSID:%d RDI:%d is not supported on HW",
+			 csid_hw->hw_intf->hw_idx, id);
+		return -EINVAL;
+	}
+
+	rc = cam_tfe_csid_get_format_rdi(path_data->in_format,
+		path_data->out_format, &path_format, &plain_fmt);
+	if (rc)
+		return rc;
+
+	/*
+	 * RDI path config and enable the time stamp capture
+	 * Enable the measurement blocks
+	 */
+	val = (path_data->vc << csid_reg->cmn_reg->vc_shift_val) |
+		(path_data->dt << csid_reg->cmn_reg->dt_shift_val) |
+		(path_data->cid << csid_reg->cmn_reg->dt_id_shift_val) |
+		(path_format << csid_reg->cmn_reg->fmt_shift_val) |
+		(plain_fmt << csid_reg->cmn_reg->plain_fmt_shit_val) |
+		(1 << 2) | 1;
+
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+
+	/* select the post irq sub sample strobe for time stamp capture */
+	cam_io_w_mb(TFE_CSID_TIMESTAMP_STB_POST_IRQ,
+		soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_cfg1_addr);
+
+	/* Enable Error Detection, Overflow ctrl mode: 2 -> Detect overflow */
+	val = 0x9;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_err_recovery_cfg0_addr);
+
+	/* Configure the halt mode */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
+
+	/* Enable the RDI path */
+	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+	val |= (1 << csid_reg->cmn_reg->path_en_shift_val);
+
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+
+	/* configure the rx packet capture based on csid debug set */
+	if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE)
+		val = ((1 <<
+			csid_reg->csi2_reg->csi2_capture_short_pkt_en_shift) |
+			(path_data->vc <<
+			csid_reg->csi2_reg->csi2_capture_short_pkt_vc_shift));
+
+	if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE)
+		val |= ((1 <<
+			csid_reg->csi2_reg->csi2_capture_long_pkt_en_shift) |
+			(path_data->dt <<
+			csid_reg->csi2_reg->csi2_capture_long_pkt_dt_shift) |
+			(path_data->vc <<
+			csid_reg->csi2_reg->csi2_capture_long_pkt_vc_shift));
+
+	if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE)
+		val |= ((1 <<
+			csid_reg->csi2_reg->csi2_capture_cphy_pkt_en_shift) |
+			(path_data->dt <<
+			csid_reg->csi2_reg->csi2_capture_cphy_pkt_dt_shift) |
+			(path_data->vc <<
+			csid_reg->csi2_reg->csi2_capture_cphy_pkt_vc_shift));
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_capture_ctrl_addr);
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
+
+	return rc;
+}
+
+static int cam_tfe_csid_deinit_rdi_path(
+	struct cam_tfe_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	int rc = 0;
+	uint32_t id;
+	const struct cam_tfe_csid_reg_offset      *csid_reg;
+	struct cam_hw_soc_info                    *soc_info;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	id = res->res_id;
+
+	if (res->res_id > CAM_TFE_CSID_PATH_RES_RDI_2 ||
+		res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW ||
+		!csid_reg->rdi_reg[id]) {
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid res id%d state:%d",
+			csid_hw->hw_intf->hw_idx, res->res_id,
+			res->res_state);
+		return -EINVAL;
+	}
+
+	/* Disable Error Recovery */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_err_recovery_cfg0_addr);
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+	return rc;
+}
+
+static int cam_tfe_csid_enable_rdi_path(
+	struct cam_tfe_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	const struct cam_tfe_csid_reg_offset      *csid_reg;
+	struct cam_hw_soc_info                    *soc_info;
+	uint32_t id, val;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	id = res->res_id;
+
+	if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW ||
+		res->res_id > CAM_TFE_CSID_PATH_RES_RDI_2 ||
+		!csid_reg->rdi_reg[id]) {
+		CAM_ERR(CAM_ISP,
+			"CSID:%d invalid res type:%d res_id:%d state%d",
+			csid_hw->hw_intf->hw_idx,
+			res->res_type, res->res_id, res->res_state);
+		return -EINVAL;
+	}
+
+	/* resume at frame boundary */
+	cam_io_w_mb(CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY,
+			soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
+
+	/* Enable the required RDI interrupts */
+	val = TFE_CSID_PATH_INFO_RST_DONE | TFE_CSID_PATH_ERROR_FIFO_OVERFLOW |
+		TFE_CSID_PATH_RDI_ERROR_CCIF_VIOLATION |
+		TFE_CSID_PATH_RDI_OVERFLOW_IRQ;
+
+	if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_SOF_IRQ)
+		val |= TFE_CSID_PATH_INFO_INPUT_SOF;
+	if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_EOF_IRQ)
+		val |= TFE_CSID_PATH_INFO_INPUT_EOF;
+
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
+	return 0;
+}
+
+static int cam_tfe_csid_disable_rdi_path(
+	struct cam_tfe_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res,
+	enum cam_tfe_csid_halt_cmd                stop_cmd)
+{
+	int rc = 0;
+	uint32_t id, val = 0;
+	const struct cam_tfe_csid_reg_offset       *csid_reg;
+	struct cam_hw_soc_info                     *soc_info;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	id = res->res_id;
+
+	if ((res->res_id > CAM_TFE_CSID_PATH_RES_RDI_2) ||
+		(!csid_reg->rdi_reg[res->res_id])) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d Invalid res id%d",
+			csid_hw->hw_intf->hw_idx, res->res_id);
+		return -EINVAL;
+	}
+
+	if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
+		res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"CSID:%d Res:%d already in stopped state:%d",
+			csid_hw->hw_intf->hw_idx,
+			res->res_id, res->res_state);
+		return rc;
+	}
+
+	if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"CSID:%d Res:%d Invalid res_state%d",
+			csid_hw->hw_intf->hw_idx, res->res_id,
+			res->res_state);
+		return -EINVAL;
+	}
+
+	if (stop_cmd != CAM_TFE_CSID_HALT_AT_FRAME_BOUNDARY &&
+		stop_cmd != CAM_TFE_CSID_HALT_IMMEDIATELY) {
+		CAM_ERR(CAM_ISP, "CSID:%d un supported stop command:%d",
+			csid_hw->hw_intf->hw_idx, stop_cmd);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ISP, "CSID:%d res_id:%d",
+		csid_hw->hw_intf->hw_idx, res->res_id);
+
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+
+	/* Halt the RDI path */
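+	/* write the halt command into the two LSBs of the rdi ctrl register */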
+	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
+	val &= ~0x3;
+	val |= stop_cmd;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
+
+	return rc;
+}
+
+static int cam_tfe_csid_poll_stop_status(
+	struct cam_tfe_csid_hw          *csid_hw,
+	uint32_t                         res_mask)
+{
+	int rc = 0;
+	uint32_t csid_status_addr = 0, val = 0, res_id = 0;
+	const struct cam_tfe_csid_reg_offset       *csid_reg;
+	struct cam_hw_soc_info                     *soc_info;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
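+	/*
+	 * Walk the stopped-resource mask and poll each streaming path's
+	 * status register until the halt-ack bit (bit 0) is set.
+	 */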
+	for (; res_id < CAM_TFE_CSID_PATH_RES_MAX; res_id++, res_mask >>= 1) {
+		if ((res_mask & 0x1) == 0)
+			continue;
+		val = 0;
+
+		if (res_id == CAM_TFE_CSID_PATH_RES_IPP) {
+			csid_status_addr =
+			csid_reg->ipp_reg->csid_pxl_status_addr;
+
+			if (csid_hw->ipp_res.res_state !=
+				CAM_ISP_RESOURCE_STATE_STREAMING)
+				continue;
+
+		} else {
+			csid_status_addr =
+				csid_reg->rdi_reg[res_id]->csid_rdi_status_addr;
+
+			if (csid_hw->rdi_res[res_id].res_state !=
+				CAM_ISP_RESOURCE_STATE_STREAMING)
+				continue;
+
+		}
+
+		CAM_DBG(CAM_ISP, "start polling CSID:%d res_id:%d",
+			csid_hw->hw_intf->hw_idx, res_id);
+
+		rc = readl_poll_timeout(soc_info->reg_map[0].mem_base +
+			csid_status_addr, val, (val & 0x1) == 0x1,
+				CAM_TFE_CSID_TIMEOUT_SLEEP_US,
+				CAM_TFE_CSID_TIMEOUT_ALL_US);
+		if (rc < 0) {
+			CAM_ERR(CAM_ISP, "CSID:%d res:%d halt failed rc %d",
+				csid_hw->hw_intf->hw_idx, res_id, rc);
+			rc = -ETIMEDOUT;
+			break;
+		}
+		CAM_DBG(CAM_ISP, "End polling CSID:%d res_id:%d",
+			csid_hw->hw_intf->hw_idx, res_id);
+	}
+
+	return rc;
+}
+
+static int cam_tfe_csid_get_time_stamp(
+		struct cam_tfe_csid_hw   *csid_hw, void *cmd_args)
+{
+	struct cam_tfe_csid_get_time_stamp_args        *time_stamp;
+	struct cam_isp_resource_node               *res;
+	const struct cam_tfe_csid_reg_offset       *csid_reg;
+	struct cam_hw_soc_info                     *soc_info;
+	const struct cam_tfe_csid_rdi_reg_offset   *rdi_reg;
+	struct timespec64 ts;
+	uint32_t  time_32, id;
+
+	time_stamp = (struct cam_tfe_csid_get_time_stamp_args  *)cmd_args;
+	res = time_stamp->node_res;
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH ||
+		res->res_id >= CAM_TFE_CSID_PATH_RES_MAX) {
+		CAM_DBG(CAM_ISP, "CSID:%d Invalid res_type:%d res id%d",
+			csid_hw->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		return -EINVAL;
+	}
+
+	if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid dev state :%d",
+			csid_hw->hw_intf->hw_idx,
+			csid_hw->hw_info->hw_state);
+		return -EINVAL;
+	}
+
+	if (res->res_id == CAM_TFE_CSID_PATH_RES_IPP) {
+		time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_pxl_timestamp_curr1_sof_addr);
+		time_stamp->time_stamp_val = (uint64_t) time_32;
+		time_stamp->time_stamp_val = time_stamp->time_stamp_val << 32;
+		time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_pxl_timestamp_curr0_sof_addr);
+	} else {
+		id = res->res_id;
+		rdi_reg = csid_reg->rdi_reg[id];
+		time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			rdi_reg->csid_rdi_timestamp_curr1_sof_addr);
+		time_stamp->time_stamp_val = (uint64_t) time_32;
+		time_stamp->time_stamp_val = time_stamp->time_stamp_val << 32;
+
+		time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			rdi_reg->csid_rdi_timestamp_curr0_sof_addr);
+	}
+
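+	/*
+	 * curr1/curr0 hold the upper/lower 32 bits of the SOF qtimer count;
+	 * combine them and scale the ticks with the qtimer mul/div factors.
+	 */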
+	time_stamp->time_stamp_val |= (uint64_t) time_32;
+	time_stamp->time_stamp_val = mul_u64_u32_div(
+		time_stamp->time_stamp_val,
+		CAM_TFE_CSID_QTIMER_MUL_FACTOR,
+		CAM_TFE_CSID_QTIMER_DIV_FACTOR);
+
+	ktime_get_boottime_ts64(&ts);
+	time_stamp->boot_timestamp = (uint64_t)((ts.tv_sec * 1000000000) +
+		ts.tv_nsec);
+
+	return 0;
+}
+
+static int cam_tfe_csid_set_csid_debug(struct cam_tfe_csid_hw   *csid_hw,
+	void *cmd_args)
+{
+	uint32_t  *csid_debug;
+
+	csid_debug = (uint32_t  *) cmd_args;
+	csid_hw->csid_debug = *csid_debug;
+	CAM_DBG(CAM_ISP, "CSID:%d set csid debug value:%d",
+		csid_hw->hw_intf->hw_idx, csid_hw->csid_debug);
+
+	return 0;
+}
+
+static int cam_tfe_csid_get_hw_caps(void *hw_priv,
+	void *get_hw_cap_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_tfe_csid_hw_caps           *hw_caps;
+	struct cam_tfe_csid_hw                *csid_hw;
+	struct cam_hw_info                    *csid_hw_info;
+	const struct cam_tfe_csid_reg_offset  *csid_reg;
+
+	if (!hw_priv || !get_hw_cap_args) {
+		CAM_ERR(CAM_ISP, "CSID: Invalid args");
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_tfe_csid_hw   *)csid_hw_info->core_info;
+	csid_reg = csid_hw->csid_info->csid_reg;
+	hw_caps = (struct cam_tfe_csid_hw_caps *) get_hw_cap_args;
+
+	hw_caps->num_rdis = csid_reg->cmn_reg->num_rdis;
+	hw_caps->num_pix = csid_hw->pxl_pipe_enable;
+	hw_caps->major_version = csid_reg->cmn_reg->major_version;
+	hw_caps->minor_version = csid_reg->cmn_reg->minor_version;
+	hw_caps->version_incr = csid_reg->cmn_reg->version_incr;
+
+	CAM_DBG(CAM_ISP,
+		"CSID:%d No rdis:%d, no pix:%d, major:%d minor:%d ver :%d",
+		csid_hw->hw_intf->hw_idx, hw_caps->num_rdis,
+		hw_caps->num_pix, hw_caps->major_version,
+		hw_caps->minor_version, hw_caps->version_incr);
+
+	return rc;
+}
+
+static int cam_tfe_csid_reset(void *hw_priv,
+	void *reset_args, uint32_t arg_size)
+{
+	struct cam_tfe_csid_hw          *csid_hw;
+	struct cam_hw_info              *csid_hw_info;
+	struct cam_tfe_csid_reset_cfg_args  *reset;
+	int rc = 0;
+
+	if (!hw_priv || !reset_args || (arg_size !=
+		sizeof(struct cam_tfe_csid_reset_cfg_args))) {
+		CAM_ERR(CAM_ISP, "CSID:Invalid args");
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_tfe_csid_hw   *)csid_hw_info->core_info;
+	reset   = (struct cam_tfe_csid_reset_cfg_args  *)reset_args;
+
+	switch (reset->reset_type) {
+	case CAM_TFE_CSID_RESET_GLOBAL:
+		rc = cam_tfe_csid_global_reset(csid_hw);
+		break;
+	case CAM_TFE_CSID_RESET_PATH:
+		rc = cam_tfe_csid_path_reset(csid_hw, reset);
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "CSID:Invalid reset type :%d",
+			reset->reset_type);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+static int cam_tfe_csid_reserve(void *hw_priv,
+	void *reserve_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_tfe_csid_hw                    *csid_hw;
+	struct cam_hw_info                        *csid_hw_info;
+	struct cam_tfe_csid_hw_reserve_resource_args  *reserv;
+
+	if (!hw_priv || !reserve_args || (arg_size !=
+		sizeof(struct cam_tfe_csid_hw_reserve_resource_args))) {
+		CAM_ERR(CAM_ISP, "CSID: Invalid args");
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_tfe_csid_hw   *)csid_hw_info->core_info;
+	reserv = (struct cam_tfe_csid_hw_reserve_resource_args  *)reserve_args;
+
+	if (reserv->res_type != CAM_ISP_RESOURCE_PIX_PATH) {
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid res type :%d",
+			csid_hw->hw_intf->hw_idx, reserv->res_type);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ISP, "res_type %d, CSID: %u",
+		reserv->res_type, csid_hw->hw_intf->hw_idx);
+
+	mutex_lock(&csid_hw->hw_info->hw_mutex);
+	rc = cam_tfe_csid_path_reserve(csid_hw, reserv);
+	mutex_unlock(&csid_hw->hw_info->hw_mutex);
+	return rc;
+}
+
+static int cam_tfe_csid_release(void *hw_priv,
+	void *release_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_tfe_csid_hw          *csid_hw;
+	struct cam_hw_info              *csid_hw_info;
+	struct cam_isp_resource_node    *res;
+	struct cam_tfe_csid_path_cfg    *path_data;
+
+	if (!hw_priv || !release_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		CAM_ERR(CAM_ISP, "CSID: Invalid args");
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_tfe_csid_hw   *)csid_hw_info->core_info;
+	res = (struct cam_isp_resource_node *)release_args;
+
+	if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH) {
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid res type:%d res id%d",
+			csid_hw->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		return -EINVAL;
+	}
+
+	mutex_lock(&csid_hw->hw_info->hw_mutex);
+	if ((res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
+		res->res_id >= CAM_TFE_CSID_PATH_RES_MAX)) {
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid res type:%d res id%d",
+			csid_hw->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if ((res->res_state <= CAM_ISP_RESOURCE_STATE_AVAILABLE) ||
+		(res->res_state >= CAM_ISP_RESOURCE_STATE_STREAMING)) {
+		CAM_WARN(CAM_ISP,
+			"CSID:%d res type:%d Res %d in state %d",
+			csid_hw->hw_intf->hw_idx,
+			res->res_type, res->res_id,
+			res->res_state);
+		goto end;
+	}
+
+	CAM_DBG(CAM_ISP, "CSID:%d res type :%d Resource id:%d",
+		csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
+
+	path_data = (struct cam_tfe_csid_path_cfg *)res->res_priv;
+	if (csid_hw->cid_res[path_data->cid].cnt)
+		csid_hw->cid_res[path_data->cid].cnt--;
+
+	if (csid_hw->csi2_reserve_cnt)
+		csid_hw->csi2_reserve_cnt--;
+
+	if (!csid_hw->csi2_reserve_cnt)
+		memset(&csid_hw->csi2_rx_cfg, 0,
+			sizeof(struct cam_tfe_csid_csi2_rx_cfg));
+
+	CAM_DBG(CAM_ISP, "CSID:%d res id :%d cnt:%d reserv cnt:%d",
+		csid_hw->hw_intf->hw_idx,
+		res->res_id, csid_hw->cid_res[path_data->cid].cnt,
+		csid_hw->csi2_reserve_cnt);
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+
+end:
+	mutex_unlock(&csid_hw->hw_info->hw_mutex);
+	return rc;
+}
+
+static int cam_tfe_csid_reset_retain_sw_reg(
+	struct cam_tfe_csid_hw *csid_hw)
+{
+	int rc = 0;
+	uint32_t status;
+	const struct cam_tfe_csid_reg_offset *csid_reg =
+		csid_hw->csid_info->csid_reg;
+	struct cam_hw_soc_info          *soc_info;
+
+	soc_info = &csid_hw->hw_info->soc_info;
+	/* clear the top interrupt first */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_clear_addr);
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_irq_cmd_addr);
+
+	cam_io_w_mb(csid_reg->cmn_reg->csid_rst_stb,
+		soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_rst_strobes_addr);
+	rc = readl_poll_timeout(soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_status_addr,
+			status, (status & 0x1) == 0x1,
+		CAM_TFE_CSID_TIMEOUT_SLEEP_US, CAM_TFE_CSID_TIMEOUT_ALL_US);
+	if (rc < 0) {
+		CAM_ERR(CAM_ISP, "CSID:%d csid_reset fail rc = %d",
+			  csid_hw->hw_intf->hw_idx, rc);
+		rc = -ETIMEDOUT;
+	} else {
+		CAM_DBG(CAM_ISP, "CSID:%d hw reset completed %d",
+			csid_hw->hw_intf->hw_idx, rc);
+		rc = 0;
+	}
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_clear_addr);
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_irq_cmd_addr);
+
+	return rc;
+}
+
+static int cam_tfe_csid_init_hw(void *hw_priv,
+	void *init_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_tfe_csid_hw                 *csid_hw;
+	struct cam_hw_info                     *csid_hw_info;
+	struct cam_isp_resource_node           *res;
+	const struct cam_tfe_csid_reg_offset   *csid_reg;
+	unsigned long                           flags;
+
+	if (!hw_priv || !init_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		CAM_ERR(CAM_ISP, "CSID: Invalid args");
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_tfe_csid_hw   *)csid_hw_info->core_info;
+	res      = (struct cam_isp_resource_node *)init_args;
+
+	if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH) {
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid res type state %d",
+			csid_hw->hw_intf->hw_idx,
+			res->res_type);
+		return -EINVAL;
+	}
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+
+	mutex_lock(&csid_hw->hw_info->hw_mutex);
+	if (res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
+		res->res_id >= CAM_TFE_CSID_PATH_RES_MAX) {
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid res tpe:%d res id%d",
+			csid_hw->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if ((res->res_type == CAM_ISP_RESOURCE_PIX_PATH) &&
+		(res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED)) {
+		CAM_ERR(CAM_ISP,
+			"CSID:%d res type:%d res_id:%dInvalid state %d",
+			csid_hw->hw_intf->hw_idx,
+			res->res_type, res->res_id, res->res_state);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	CAM_DBG(CAM_ISP, "CSID:%d res type :%d res_id:%d",
+		csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
+
+	/* Initialize the csid hardware */
+	rc = cam_tfe_csid_enable_hw(csid_hw);
+	if (rc)
+		goto end;
+
+	if (res->res_id == CAM_TFE_CSID_PATH_RES_IPP)
+		rc = cam_tfe_csid_init_config_pxl_path(csid_hw, res);
+	else
+		rc = cam_tfe_csid_init_config_rdi_path(csid_hw, res);
+
+	rc = cam_tfe_csid_reset_retain_sw_reg(csid_hw);
+	if (rc < 0)
+		CAM_ERR(CAM_ISP, "CSID: Failed in SW reset");
+
+	if (rc)
+		cam_tfe_csid_disable_hw(csid_hw);
+
+	spin_lock_irqsave(&csid_hw->spin_lock, flags);
+	csid_hw->device_enabled = 1;
+	spin_unlock_irqrestore(&csid_hw->spin_lock, flags);
+end:
+	mutex_unlock(&csid_hw->hw_info->hw_mutex);
+	return rc;
+}
+
+static int cam_tfe_csid_deinit_hw(void *hw_priv,
+	void *deinit_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_tfe_csid_hw                 *csid_hw;
+	struct cam_hw_info                     *csid_hw_info;
+	struct cam_isp_resource_node           *res;
+
+	if (!hw_priv || !deinit_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		CAM_ERR(CAM_ISP, "CSID:Invalid arguments");
+		return -EINVAL;
+	}
+
+	res = (struct cam_isp_resource_node *)deinit_args;
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_tfe_csid_hw   *)csid_hw_info->core_info;
+
+	if (res->res_type == CAM_ISP_RESOURCE_PIX_PATH) {
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid Res type %d",
+			 csid_hw->hw_intf->hw_idx,
+			res->res_type);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ISP, "Enter");
+
+	mutex_lock(&csid_hw->hw_info->hw_mutex);
+	if (res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CAM_DBG(CAM_ISP, "CSID:%d Res:%d already in De-init state",
+			 csid_hw->hw_intf->hw_idx,
+			res->res_id);
+		goto end;
+	}
+
+	CAM_DBG(CAM_ISP, "De-Init IPP Path: %d", res->res_id);
+
+	if (res->res_id == CAM_TFE_CSID_PATH_RES_IPP)
+		rc = cam_tfe_csid_deinit_pxl_path(csid_hw, res);
+	else
+		rc = cam_tfe_csid_deinit_rdi_path(csid_hw, res);
+
+	/* Disable CSID HW */
+	CAM_DBG(CAM_ISP, "Disabling CSID Hw");
+	cam_tfe_csid_disable_hw(csid_hw);
+	CAM_DBG(CAM_ISP, "%s: Exit", __func__);
+
+end:
+	mutex_unlock(&csid_hw->hw_info->hw_mutex);
+	return rc;
+}
+
+static int cam_tfe_csid_start(void *hw_priv, void *start_args,
+			uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_tfe_csid_hw                 *csid_hw;
+	struct cam_hw_info                     *csid_hw_info;
+	struct cam_isp_resource_node           *res;
+	const struct cam_tfe_csid_reg_offset   *csid_reg;
+
+	if (!hw_priv || !start_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		CAM_ERR(CAM_ISP, "CSID: Invalid args");
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_tfe_csid_hw   *)csid_hw_info->core_info;
+	res = (struct cam_isp_resource_node *)start_args;
+	csid_reg = csid_hw->csid_info->csid_reg;
+
+	if (res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
+		res->res_id >= CAM_TFE_CSID_PATH_RES_MAX) {
+		CAM_DBG(CAM_ISP, "CSID:%d Invalid res tpe:%d res id:%d",
+			csid_hw->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* Reset sof irq debug fields */
+	csid_hw->sof_irq_triggered = false;
+	csid_hw->irq_debug_cnt = 0;
+
+	CAM_DBG(CAM_ISP, "CSID:%d res_type :%d res_id:%d",
+		csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
+
+	switch (res->res_type) {
+	case CAM_ISP_RESOURCE_PIX_PATH:
+		if (res->res_id == CAM_TFE_CSID_PATH_RES_IPP)
+			rc = cam_tfe_csid_enable_pxl_path(csid_hw, res);
+		else
+			rc = cam_tfe_csid_enable_rdi_path(csid_hw, res);
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid res type%d",
+			csid_hw->hw_intf->hw_idx, res->res_type);
+		break;
+	}
+end:
+	return rc;
+}
+
+static int cam_tfe_csid_stop(void *hw_priv,
+	void *stop_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_tfe_csid_hw               *csid_hw;
+	struct cam_hw_info                   *csid_hw_info;
+	struct cam_isp_resource_node         *res;
+	struct cam_tfe_csid_hw_stop_args         *csid_stop;
+	uint32_t  i;
+	uint32_t res_mask = 0;
+
+	if (!hw_priv || !stop_args ||
+		(arg_size != sizeof(struct cam_tfe_csid_hw_stop_args))) {
+		CAM_ERR(CAM_ISP, "CSID: Invalid args");
+		return -EINVAL;
+	}
+	csid_stop = (struct cam_tfe_csid_hw_stop_args  *) stop_args;
+
+	if (!csid_stop->num_res) {
+		CAM_ERR(CAM_ISP, "CSID: Invalid args");
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_tfe_csid_hw   *)csid_hw_info->core_info;
+	CAM_DBG(CAM_ISP, "CSID:%d num_res %d",
+		csid_hw->hw_intf->hw_idx,
+		csid_stop->num_res);
+
+	/* Stop the resource first */
+	for (i = 0; i < csid_stop->num_res; i++) {
+		res = csid_stop->node_res[i];
+		CAM_DBG(CAM_ISP, "CSID:%d res_type %d res_id %d",
+			csid_hw->hw_intf->hw_idx,
+			res->res_type, res->res_id);
+		switch (res->res_type) {
+		case CAM_ISP_RESOURCE_PIX_PATH:
+			res_mask |= (1 << res->res_id);
+			if (res->res_id == CAM_TFE_CSID_PATH_RES_IPP)
+				rc = cam_tfe_csid_disable_pxl_path(csid_hw,
+					res, csid_stop->stop_cmd);
+			else
+				rc = cam_tfe_csid_disable_rdi_path(csid_hw,
+					res, csid_stop->stop_cmd);
+
+			break;
+		default:
+			CAM_ERR(CAM_ISP, "CSID:%d Invalid res type%d",
+				csid_hw->hw_intf->hw_idx,
+				res->res_type);
+			break;
+		}
+	}
+
+	if (res_mask)
+		rc = cam_tfe_csid_poll_stop_status(csid_hw, res_mask);
+
+	for (i = 0; i < csid_stop->num_res; i++) {
+		res = csid_stop->node_res[i];
+		res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
+	}
+
+	CAM_DBG(CAM_ISP,  "%s: Exit", __func__);
+	return rc;
+}
+
+static int cam_tfe_csid_read(void *hw_priv,
+	void *read_args, uint32_t arg_size)
+{
+	CAM_ERR(CAM_ISP, "CSID: un supported");
+	return -EINVAL;
+}
+
+static int cam_tfe_csid_write(void *hw_priv,
+	void *write_args, uint32_t arg_size)
+{
+	CAM_ERR(CAM_ISP, "CSID: un supported");
+	return -EINVAL;
+}
+
+static int cam_tfe_csid_sof_irq_debug(
+	struct cam_tfe_csid_hw *csid_hw, void *cmd_args)
+{
+	int i = 0;
+	uint32_t val = 0;
+	bool sof_irq_enable = false;
+	const struct cam_tfe_csid_reg_offset    *csid_reg;
+	struct cam_hw_soc_info                  *soc_info;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
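+	/*
+	 * Toggle the SOF info irq in each active path irq mask; used by the
+	 * SOF-freeze debug flow logged below.
+	 */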
+	if (*((uint32_t *)cmd_args) == 1)
+		sof_irq_enable = true;
+
+	if (csid_hw->hw_info->hw_state ==
+		CAM_HW_STATE_POWER_DOWN) {
+		CAM_WARN(CAM_ISP,
+			"CSID powered down unable to %s sof irq",
+			sof_irq_enable ? "enable" : "disable");
+		return 0;
+	}
+
+	if (csid_reg->ipp_reg) {
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_pxl_irq_mask_addr);
+
+		if (val) {
+			if (sof_irq_enable)
+				val |= TFE_CSID_PATH_INFO_INPUT_SOF;
+			else
+				val &= ~TFE_CSID_PATH_INFO_INPUT_SOF;
+
+			cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+				csid_reg->ipp_reg->csid_pxl_irq_mask_addr);
+			val = 0;
+		}
+	}
+
+	for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[i]->csid_rdi_irq_mask_addr);
+		if (val) {
+			if (sof_irq_enable)
+				val |= TFE_CSID_PATH_INFO_INPUT_SOF;
+			else
+				val &= ~TFE_CSID_PATH_INFO_INPUT_SOF;
+
+			cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+				csid_reg->rdi_reg[i]->csid_rdi_irq_mask_addr);
+			val = 0;
+		}
+	}
+
+	if (sof_irq_enable) {
+		csid_hw->csid_debug |= TFE_CSID_DEBUG_ENABLE_SOF_IRQ;
+		csid_hw->sof_irq_triggered = true;
+	} else {
+		csid_hw->csid_debug &= ~TFE_CSID_DEBUG_ENABLE_SOF_IRQ;
+		csid_hw->sof_irq_triggered = false;
+	}
+
+	CAM_INFO(CAM_ISP, "SOF freeze: CSID SOF irq %s",
+		sof_irq_enable ? "enabled" : "disabled");
+
+	return 0;
+}
+
+static int cam_tfe_csid_set_csid_clock(
+	struct cam_tfe_csid_hw *csid_hw, void *cmd_args)
+{
+	struct cam_tfe_csid_clock_update_args *clk_update = NULL;
+
+	if (!csid_hw)
+		return -EINVAL;
+
+	clk_update =
+		(struct cam_tfe_csid_clock_update_args *)cmd_args;
+
+	csid_hw->clk_rate = clk_update->clk_rate;
+	CAM_DBG(CAM_ISP, "CSID clock rate %llu", csid_hw->clk_rate);
+
+	return 0;
+}
+
+static int cam_tfe_csid_get_regdump(struct cam_tfe_csid_hw *csid_hw,
+	void *cmd_args)
+{
+	struct cam_tfe_csid_reg_offset    *csid_reg;
+	struct cam_hw_soc_info            *soc_info;
+	struct cam_isp_resource_node      *res;
+	struct cam_tfe_csid_path_cfg      *path_data;
+	uint32_t id;
+	int val;
+
+	csid_reg = (struct cam_tfe_csid_reg_offset   *)
+			csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	res = (struct cam_isp_resource_node  *)cmd_args;
+	path_data = (struct cam_tfe_csid_path_cfg   *)res->res_priv;
+
+	if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH ||
+		res->res_id >= CAM_TFE_CSID_PATH_RES_MAX) {
+		CAM_DBG(CAM_ISP, "CSID:%d Invalid res_type:%d res id%d",
+			csid_hw->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		return -EINVAL;
+	}
+
+	if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid dev state :%d",
+			csid_hw->hw_intf->hw_idx,
+			csid_hw->hw_info->hw_state);
+		return -EINVAL;
+	}
+
+	if (res->res_id == CAM_TFE_CSID_PATH_RES_IPP) {
+		CAM_INFO(CAM_ISP, "Dumping CSID:%d IPP registers ",
+			csid_hw->hw_intf->hw_idx);
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_pxl_cfg0_addr);
+		CAM_INFO(CAM_ISP, "offset 0x%x=0x08%x",
+			csid_reg->ipp_reg->csid_pxl_cfg0_addr, val);
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_pxl_cfg1_addr);
+		CAM_INFO(CAM_ISP, "offset 0x%x=0x08%x",
+			csid_reg->ipp_reg->csid_pxl_cfg1_addr, val);
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_pxl_ctrl_addr);
+		CAM_INFO(CAM_ISP, "offset 0x%x=0x08%x",
+			csid_reg->ipp_reg->csid_pxl_ctrl_addr, val);
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_pxl_hcrop_addr);
+		CAM_INFO(CAM_ISP, "offset 0x%x=0x08%x",
+			csid_reg->ipp_reg->csid_pxl_hcrop_addr, val);
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_pxl_vcrop_addr);
+		CAM_INFO(CAM_ISP, "offset 0x%x=0x08%x",
+			csid_reg->ipp_reg->csid_pxl_vcrop_addr, val);
+	} else {
+		id = res->res_id;
+		CAM_INFO(CAM_ISP, "Dumping CSID:%d RDI:%d registers ",
+			csid_hw->hw_intf->hw_idx, id);
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+		CAM_INFO(CAM_ISP, "offset 0x%x=0x08%x",
+			csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr, val);
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_cfg1_addr);
+		CAM_INFO(CAM_ISP, "offset 0x%x=0x08%x",
+			csid_reg->rdi_reg[id]->csid_rdi_cfg1_addr, val);
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
+		CAM_INFO(CAM_ISP, "offset 0x%x=0x08%x",
+			csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr, val);
+	}
+	CAM_INFO(CAM_ISP,
+		"start pix:%d end pix:%d start line:%d end line:%d w:%d h:%d",
+		path_data->start_pixel, path_data->end_pixel,
+		path_data->start_line, path_data->end_line,
+		path_data->width, path_data->height);
+	CAM_INFO(CAM_ISP,
+		"clock:%d crop_enable:%d vc:%d dt:%d informat:%d outformat:%d",
+		path_data->clk_rate, path_data->crop_enable,
+		path_data->vc, path_data->dt,
+		path_data->in_format, path_data->out_format);
+
+	return 0;
+}
+
+static int cam_tfe_csid_process_cmd(void *hw_priv,
+	uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_tfe_csid_hw               *csid_hw;
+	struct cam_hw_info                   *csid_hw_info;
+
+	if (!hw_priv || !cmd_args) {
+		CAM_ERR(CAM_ISP, "CSID: Invalid arguments");
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_tfe_csid_hw   *)csid_hw_info->core_info;
+
+	switch (cmd_type) {
+	case CAM_TFE_CSID_CMD_GET_TIME_STAMP:
+		rc = cam_tfe_csid_get_time_stamp(csid_hw, cmd_args);
+		break;
+	case CAM_TFE_CSID_SET_CSID_DEBUG:
+		rc = cam_tfe_csid_set_csid_debug(csid_hw, cmd_args);
+		break;
+	case CAM_TFE_CSID_SOF_IRQ_DEBUG:
+		rc = cam_tfe_csid_sof_irq_debug(csid_hw, cmd_args);
+		break;
+	case CAM_ISP_HW_CMD_CSID_CLOCK_UPDATE:
+		rc = cam_tfe_csid_set_csid_clock(csid_hw, cmd_args);
+		break;
+	case CAM_TFE_CSID_CMD_GET_REG_DUMP:
+		rc = cam_tfe_csid_get_regdump(csid_hw, cmd_args);
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "CSID:%d unsupported cmd:%d",
+			csid_hw->hw_intf->hw_idx, cmd_type);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+irqreturn_t cam_tfe_csid_irq(int irq_num, void *data)
+{
+	struct cam_tfe_csid_hw                         *csid_hw;
+	struct cam_hw_soc_info                         *soc_info;
+	const struct cam_tfe_csid_reg_offset           *csid_reg;
+	const struct cam_tfe_csid_csi2_rx_reg_offset   *csi2_reg;
+	uint32_t                   irq_status[TFE_CSID_IRQ_REG_MAX] = {0};
+	bool fatal_err_detected = false;
+	uint32_t sof_irq_debug_en = 0;
+	unsigned long flags;
+	uint32_t i, val;
+
+	if (!data) {
+		CAM_ERR(CAM_ISP, "CSID: Invalid arguments");
+		return IRQ_HANDLED;
+	}
+
+	csid_hw = (struct cam_tfe_csid_hw *)data;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	csi2_reg = csid_reg->csi2_reg;
+
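+	/*
+	 * Read every irq status register first, then write the values back
+	 * to the clear registers and latch them with the irq cmd write so
+	 * no event is lost.
+	 */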
+	/* read */
+	irq_status[TFE_CSID_IRQ_REG_TOP] =
+		cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_status_addr);
+
+	irq_status[TFE_CSID_IRQ_REG_RX] =
+		cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_status_addr);
+
+	if (csid_hw->pxl_pipe_enable)
+		irq_status[TFE_CSID_IRQ_REG_IPP] =
+			cam_io_r_mb(soc_info->reg_map[0].mem_base +
+				csid_reg->ipp_reg->csid_pxl_irq_status_addr);
+
+	for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++)
+		irq_status[i] =
+		cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[i]->csid_rdi_irq_status_addr);
+
+	/* clear */
+	cam_io_w_mb(irq_status[TFE_CSID_IRQ_REG_TOP],
+		soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_clear_addr);
+
+	cam_io_w_mb(irq_status[TFE_CSID_IRQ_REG_RX],
+		soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_clear_addr);
+
+	if (csid_hw->pxl_pipe_enable)
+		cam_io_w_mb(irq_status[TFE_CSID_IRQ_REG_IPP],
+			soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_pxl_irq_clear_addr);
+
+	for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
+		cam_io_w_mb(irq_status[i],
+			soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[i]->csid_rdi_irq_clear_addr);
+	}
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_irq_cmd_addr);
+
+	CAM_ERR_RATE_LIMIT(CAM_ISP,
+		"CSID %d irq status 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
+		csid_hw->hw_intf->hw_idx, irq_status[TFE_CSID_IRQ_REG_TOP],
+		irq_status[TFE_CSID_IRQ_REG_RX],
+		irq_status[TFE_CSID_IRQ_REG_IPP],
+		irq_status[TFE_CSID_IRQ_REG_RDI0],
+		irq_status[TFE_CSID_IRQ_REG_RDI1],
+		irq_status[TFE_CSID_IRQ_REG_RDI2]);
+
+	/* Software register reset complete*/
+	if (irq_status[TFE_CSID_IRQ_REG_TOP])
+		complete(&csid_hw->csid_top_complete);
+
+	if (irq_status[TFE_CSID_IRQ_REG_RX] &
+		BIT(csid_reg->csi2_reg->csi2_rst_done_shift_val))
+		complete(&csid_hw->csid_csi2_complete);
+
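+	/*
+	 * Lane fifo overflows are fatal immediately; EOT/SOT, underflow and
+	 * unbounded-frame errors become fatal only once the error count
+	 * crosses the threshold checked below.
+	 */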
+	spin_lock_irqsave(&csid_hw->spin_lock, flags);
+	if (csid_hw->device_enabled == 1) {
+		if (irq_status[TFE_CSID_IRQ_REG_RX] &
+			TFE_CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW) {
+			fatal_err_detected = true;
+		}
+
+		if (irq_status[TFE_CSID_IRQ_REG_RX] &
+			TFE_CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW) {
+			fatal_err_detected = true;
+		}
+
+		if (irq_status[TFE_CSID_IRQ_REG_RX] &
+			TFE_CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW) {
+			fatal_err_detected = true;
+		}
+		if (irq_status[TFE_CSID_IRQ_REG_RX] &
+			TFE_CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW) {
+			fatal_err_detected = true;
+		}
+
+		if (irq_status[TFE_CSID_IRQ_REG_RX] &
+			TFE_CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION)
+			csid_hw->error_irq_count++;
+
+		if (irq_status[TFE_CSID_IRQ_REG_RX] &
+			TFE_CSID_CSI2_RX_ERROR_CPHY_SOT_RECEPTION)
+			csid_hw->error_irq_count++;
+
+		if (irq_status[TFE_CSID_IRQ_REG_RX] &
+			TFE_CSID_CSI2_RX_ERROR_STREAM_UNDERFLOW)
+			csid_hw->error_irq_count++;
+
+		if (irq_status[TFE_CSID_IRQ_REG_RX] &
+			TFE_CSID_CSI2_RX_ERROR_UNBOUNDED_FRAME)
+			csid_hw->error_irq_count++;
+
+	}
+	spin_unlock_irqrestore(&csid_hw->spin_lock, flags);
+
+	if (csid_hw->error_irq_count >
+		CAM_TFE_CSID_MAX_IRQ_ERROR_COUNT) {
+		fatal_err_detected = true;
+		csid_hw->error_irq_count = 0;
+	}
+
+	CAM_INFO(CAM_ISP,
+		"CSID %d irq status 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
+		csid_hw->hw_intf->hw_idx,
+		irq_status[TFE_CSID_IRQ_REG_TOP],
+		irq_status[TFE_CSID_IRQ_REG_RX],
+		irq_status[TFE_CSID_IRQ_REG_IPP],
+		irq_status[TFE_CSID_IRQ_REG_RDI0],
+		irq_status[TFE_CSID_IRQ_REG_RDI1],
+		irq_status[TFE_CSID_IRQ_REG_RDI2]);
+
+	if (fatal_err_detected) {
+		/* Reset the Rx CFG registers */
+		cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+			csid_reg->csi2_reg->csid_csi2_rx_cfg0_addr);
+		cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+			csid_reg->csi2_reg->csid_csi2_rx_cfg1_addr);
+		cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+			csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
+	}
+
+	if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_EOT_IRQ) {
+		if (irq_status[TFE_CSID_IRQ_REG_RX] &
+			TFE_CSID_CSI2_RX_INFO_PHY_DL0_EOT_CAPTURED) {
+			CAM_INFO_RATE_LIMIT(CAM_ISP,
+				"CSID:%d PHY_DL0_EOT_CAPTURED",
+				csid_hw->hw_intf->hw_idx);
+		}
+		if (irq_status[TFE_CSID_IRQ_REG_RX] &
+			TFE_CSID_CSI2_RX_INFO_PHY_DL1_EOT_CAPTURED) {
+			CAM_INFO_RATE_LIMIT(CAM_ISP,
+				"CSID:%d PHY_DL1_EOT_CAPTURED",
+				csid_hw->hw_intf->hw_idx);
+		}
+		if (irq_status[TFE_CSID_IRQ_REG_RX] &
+			TFE_CSID_CSI2_RX_INFO_PHY_DL2_EOT_CAPTURED) {
+			CAM_INFO_RATE_LIMIT(CAM_ISP,
+				"CSID:%d PHY_DL2_EOT_CAPTURED",
+				csid_hw->hw_intf->hw_idx);
+		}
+		if (irq_status[TFE_CSID_IRQ_REG_RX] &
+			TFE_CSID_CSI2_RX_INFO_PHY_DL3_EOT_CAPTURED) {
+			CAM_INFO_RATE_LIMIT(CAM_ISP,
+				"CSID:%d PHY_DL3_EOT_CAPTURED",
+				csid_hw->hw_intf->hw_idx);
+		}
+	}
+
+	if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_SOT_IRQ) {
+		if (irq_status[TFE_CSID_IRQ_REG_RX] &
+			TFE_CSID_CSI2_RX_INFO_PHY_DL0_SOT_CAPTURED) {
+			CAM_INFO_RATE_LIMIT(CAM_ISP,
+				"CSID:%d PHY_DL0_SOT_CAPTURED",
+				csid_hw->hw_intf->hw_idx);
+		}
+		if (irq_status[TFE_CSID_IRQ_REG_RX] &
+			TFE_CSID_CSI2_RX_INFO_PHY_DL1_SOT_CAPTURED) {
+			CAM_INFO_RATE_LIMIT(CAM_ISP,
+				"CSID:%d PHY_DL1_SOT_CAPTURED",
+				csid_hw->hw_intf->hw_idx);
+		}
+		if (irq_status[TFE_CSID_IRQ_REG_RX] &
+			TFE_CSID_CSI2_RX_INFO_PHY_DL2_SOT_CAPTURED) {
+			CAM_INFO_RATE_LIMIT(CAM_ISP,
+				"CSID:%d PHY_DL2_SOT_CAPTURED",
+				csid_hw->hw_intf->hw_idx);
+		}
+		if (irq_status[TFE_CSID_IRQ_REG_RX] &
+			TFE_CSID_CSI2_RX_INFO_PHY_DL3_SOT_CAPTURED) {
+			CAM_INFO_RATE_LIMIT(CAM_ISP,
+				"CSID:%d PHY_DL3_SOT_CAPTURED",
+				csid_hw->hw_intf->hw_idx);
+		}
+	}
+
+	if ((csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE) &&
+		(irq_status[TFE_CSID_IRQ_REG_RX] &
+		TFE_CSID_CSI2_RX_INFO_LONG_PKT_CAPTURED)) {
+		CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d LONG_PKT_CAPTURED",
+			csid_hw->hw_intf->hw_idx);
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csi2_reg->csid_csi2_rx_captured_long_pkt_0_addr);
+		CAM_INFO_RATE_LIMIT(CAM_ISP,
+			"CSID:%d long packet VC :%d DT:%d WC:%d",
+			csid_hw->hw_intf->hw_idx,
+			(val >> 22), ((val >> 16) & 0x3F), (val & 0xFFFF));
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csi2_reg->csid_csi2_rx_captured_long_pkt_1_addr);
+		CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d long packet ECC :%d",
+			csid_hw->hw_intf->hw_idx, val);
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csi2_reg->csid_csi2_rx_captured_long_pkt_ftr_addr);
+		CAM_INFO_RATE_LIMIT(CAM_ISP,
+			"CSID:%d long pkt cal CRC:%d expected CRC:%d",
+			csid_hw->hw_intf->hw_idx, (val >> 16), (val & 0xFFFF));
+	}
+	if ((csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE) &&
+		(irq_status[TFE_CSID_IRQ_REG_RX] &
+			TFE_CSID_CSI2_RX_INFO_SHORT_PKT_CAPTURED)) {
+		CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d SHORT_PKT_CAPTURED",
+			csid_hw->hw_intf->hw_idx);
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csi2_reg->csid_csi2_rx_captured_short_pkt_0_addr);
+		CAM_INFO_RATE_LIMIT(CAM_ISP,
+			"CSID:%d short pkt VC :%d DT:%d LC:%d",
+			csid_hw->hw_intf->hw_idx,
+			(val >> 22), ((val >> 16) & 0x1F), (val & 0xFFFF));
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csi2_reg->csid_csi2_rx_captured_short_pkt_1_addr);
+		CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d short packet ECC :%d",
+			csid_hw->hw_intf->hw_idx, val);
+	}
+
+	if ((csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE) &&
+		(irq_status[TFE_CSID_IRQ_REG_RX] &
+			TFE_CSID_CSI2_RX_INFO_CPHY_PKT_HDR_CAPTURED)) {
+		CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d CPHY_PKT_HDR_CAPTURED",
+			csid_hw->hw_intf->hw_idx);
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csi2_reg->csid_csi2_rx_captured_cphy_pkt_hdr_addr);
+		CAM_INFO_RATE_LIMIT(CAM_ISP,
+			"CSID:%d cphy packet VC :%d DT:%d WC:%d",
+			csid_hw->hw_intf->hw_idx,
+			(val >> 22), ((val >> 16) & 0x1F), (val & 0xFFFF));
+	}
+
+	/* handle the IPP path interrupts */
+	if (csid_hw->pxl_pipe_enable) {
+		/* IPP reset done bit */
+		if (irq_status[TFE_CSID_IRQ_REG_IPP] &
+			BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
+			CAM_DBG(CAM_ISP, "CSID IPP reset complete");
+			complete(&csid_hw->csid_ipp_complete);
+		}
+
+		if ((irq_status[TFE_CSID_IRQ_REG_IPP] &
+			TFE_CSID_PATH_INFO_INPUT_SOF) &&
+			(csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_SOF_IRQ)) {
+			CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d IPP SOF received",
+				csid_hw->hw_intf->hw_idx);
+			if (csid_hw->sof_irq_triggered)
+				csid_hw->irq_debug_cnt++;
+		}
+
+		if ((irq_status[TFE_CSID_IRQ_REG_IPP] &
+			TFE_CSID_PATH_INFO_INPUT_EOF) &&
+			(csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_EOF_IRQ)) {
+			CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d IPP EOF received",
+				csid_hw->hw_intf->hw_idx);
+		}
+
+		if (irq_status[TFE_CSID_IRQ_REG_IPP] &
+			TFE_CSID_PATH_ERROR_FIFO_OVERFLOW) {
+			/* Stop IPP path immediately */
+			cam_io_w_mb(CAM_TFE_CSID_HALT_IMMEDIATELY,
+				soc_info->reg_map[0].mem_base +
+				csid_reg->ipp_reg->csid_pxl_ctrl_addr);
+		}
+	}
+
+	for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
+		if (irq_status[i] &
+			BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
+			CAM_DBG(CAM_ISP, "CSID RDI%d reset complete", i);
+			complete(&csid_hw->csid_rdin_complete[i]);
+		}
+
+		if ((irq_status[i] & TFE_CSID_PATH_INFO_INPUT_SOF) &&
+			(csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_SOF_IRQ)) {
+			CAM_INFO_RATE_LIMIT(CAM_ISP,
+				"CSID RDI:%d SOF received", i);
+			if (csid_hw->sof_irq_triggered)
+				csid_hw->irq_debug_cnt++;
+		}
+
+		if ((irq_status[i] & TFE_CSID_PATH_INFO_INPUT_EOF) &&
+			(csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_EOF_IRQ)) {
+			CAM_INFO_RATE_LIMIT(CAM_ISP,
+				"CSID RDI:%d EOF received", i);
+		}
+
+		if (irq_status[i] & TFE_CSID_PATH_ERROR_FIFO_OVERFLOW) {
+			/* Stop RDI path immediately */
+			cam_io_w_mb(CAM_TFE_CSID_HALT_IMMEDIATELY,
+				soc_info->reg_map[0].mem_base +
+				csid_reg->rdi_reg[i]->csid_rdi_ctrl_addr);
+		}
+	}
+
+	if (csid_hw->irq_debug_cnt >= CAM_TFE_CSID_IRQ_SOF_DEBUG_CNT_MAX) {
+		cam_tfe_csid_sof_irq_debug(csid_hw, &sof_irq_debug_en);
+		csid_hw->irq_debug_cnt = 0;
+	}
+
+	CAM_DBG(CAM_ISP, "IRQ Handling exit");
+	return IRQ_HANDLED;
+}
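The rate-limited logs above unpack the captured CSI-2 packet header words with open-coded shifts (VC from bits [31:22], DT from bits [21:16], word/line count from bits [15:0]). A minimal sketch of that decode, assuming the field layout implied by the masks in the handler; the helper name is illustrative and not part of the driver:

static inline void tfe_csid_decode_long_pkt_hdr(uint32_t val,
	uint32_t *vc, uint32_t *dt, uint32_t *wc)
{
	*vc = val >> 22;           /* virtual channel */
	*dt = (val >> 16) & 0x3F;  /* data type */
	*wc = val & 0xFFFF;        /* word count in bytes */
}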
+
+int cam_tfe_csid_hw_probe_init(struct cam_hw_intf  *csid_hw_intf,
+	uint32_t csid_idx)
+{
+	int rc = -EINVAL;
+	uint32_t i, val, clk_lvl;
+	struct cam_tfe_csid_path_cfg         *path_data;
+	struct cam_hw_info                   *csid_hw_info;
+	struct cam_tfe_csid_hw               *tfe_csid_hw = NULL;
+	const struct cam_tfe_csid_reg_offset *csid_reg;
+
+	if (csid_idx >= CAM_TFE_CSID_HW_NUM_MAX) {
+		CAM_ERR(CAM_ISP, "Invalid csid index:%d", csid_idx);
+		return rc;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *) csid_hw_intf->hw_priv;
+	tfe_csid_hw  = (struct cam_tfe_csid_hw  *) csid_hw_info->core_info;
+
+	tfe_csid_hw->hw_intf = csid_hw_intf;
+	tfe_csid_hw->hw_info = csid_hw_info;
+	csid_reg = tfe_csid_hw->csid_info->csid_reg;
+
+	CAM_DBG(CAM_ISP, "type %d index %d",
+		tfe_csid_hw->hw_intf->hw_type, csid_idx);
+
+	tfe_csid_hw->device_enabled = 0;
+	tfe_csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
+	mutex_init(&tfe_csid_hw->hw_info->hw_mutex);
+	spin_lock_init(&tfe_csid_hw->hw_info->hw_lock);
+	spin_lock_init(&tfe_csid_hw->spin_lock);
+	init_completion(&tfe_csid_hw->hw_info->hw_complete);
+
+	init_completion(&tfe_csid_hw->csid_top_complete);
+	init_completion(&tfe_csid_hw->csid_csi2_complete);
+	init_completion(&tfe_csid_hw->csid_ipp_complete);
+	for (i = 0; i < CAM_TFE_CSID_RDI_MAX; i++)
+		init_completion(&tfe_csid_hw->csid_rdin_complete[i]);
+
+	rc = cam_tfe_csid_init_soc_resources(&tfe_csid_hw->hw_info->soc_info,
+			cam_tfe_csid_irq, tfe_csid_hw);
+	if (rc < 0) {
+		CAM_ERR(CAM_ISP, "CSID:%d Failed to init_soc", csid_idx);
+		goto err;
+	}
+	rc = cam_soc_util_get_clk_level(&tfe_csid_hw->hw_info->soc_info,
+		tfe_csid_hw->clk_rate,
+		tfe_csid_hw->hw_info->soc_info.src_clk_idx, &clk_lvl);
+	CAM_DBG(CAM_ISP, "CSID clock lvl %u", clk_lvl);
+
+	rc = cam_tfe_csid_enable_soc_resources(&tfe_csid_hw->hw_info->soc_info,
+		clk_lvl);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "CSID:%d Enable SOC failed",
+			tfe_csid_hw->hw_intf->hw_idx);
+		goto err;
+	}
+
+	tfe_csid_hw->hw_intf->hw_ops.get_hw_caps = cam_tfe_csid_get_hw_caps;
+	tfe_csid_hw->hw_intf->hw_ops.init        = cam_tfe_csid_init_hw;
+	tfe_csid_hw->hw_intf->hw_ops.deinit      = cam_tfe_csid_deinit_hw;
+	tfe_csid_hw->hw_intf->hw_ops.reset       = cam_tfe_csid_reset;
+	tfe_csid_hw->hw_intf->hw_ops.reserve     = cam_tfe_csid_reserve;
+	tfe_csid_hw->hw_intf->hw_ops.release     = cam_tfe_csid_release;
+	tfe_csid_hw->hw_intf->hw_ops.start       = cam_tfe_csid_start;
+	tfe_csid_hw->hw_intf->hw_ops.stop        = cam_tfe_csid_stop;
+	tfe_csid_hw->hw_intf->hw_ops.read        = cam_tfe_csid_read;
+	tfe_csid_hw->hw_intf->hw_ops.write       = cam_tfe_csid_write;
+	tfe_csid_hw->hw_intf->hw_ops.process_cmd = cam_tfe_csid_process_cmd;
+
+	/* reset the cid values */
+	for (i = 0; i < CAM_TFE_CSID_CID_MAX; i++) {
+		tfe_csid_hw->cid_res[i].vc  = 0;
+		tfe_csid_hw->cid_res[i].dt  = 0;
+		tfe_csid_hw->cid_res[i].cnt = 0;
+	}
+
+	/* TFE2 may be fused out on some parts; check the fuse register */
+	if (tfe_csid_hw->hw_intf->hw_idx == 2) {
+		val = cam_io_r_mb(
+			tfe_csid_hw->hw_info->soc_info.reg_map[1].mem_base +
+			csid_reg->cmn_reg->top_tfe2_fuse_reg);
+		if (val) {
+			CAM_INFO(CAM_ISP, "TFE 2 is not supported by hardware");
+			rc = -EINVAL;
+			goto err;
+		}
+	}
+
+	val = cam_io_r_mb(
+		tfe_csid_hw->hw_info->soc_info.reg_map[1].mem_base +
+		csid_reg->cmn_reg->top_tfe2_pix_pipe_fuse_reg);
+
+	/* Initialize the IPP resources */
+	if (!(val && (tfe_csid_hw->hw_intf->hw_idx == 2))) {
+		CAM_DBG(CAM_ISP, "initializing the pix path");
+
+		tfe_csid_hw->ipp_res.res_type = CAM_ISP_RESOURCE_PIX_PATH;
+		tfe_csid_hw->ipp_res.res_id = CAM_TFE_CSID_PATH_RES_IPP;
+		tfe_csid_hw->ipp_res.res_state =
+			CAM_ISP_RESOURCE_STATE_AVAILABLE;
+		tfe_csid_hw->ipp_res.hw_intf = tfe_csid_hw->hw_intf;
+		path_data = kzalloc(sizeof(*path_data),
+					GFP_KERNEL);
+		if (!path_data) {
+			rc = -ENOMEM;
+			goto err;
+		}
+		tfe_csid_hw->ipp_res.res_priv = path_data;
+		tfe_csid_hw->pxl_pipe_enable = 1;
+	}
+
+	/* Initialize the RDI resource */
+	for (i = 0; i < tfe_csid_hw->csid_info->csid_reg->cmn_reg->num_rdis;
+			i++) {
+		/* res id ranges from RDI0 to RDI2 */
+		tfe_csid_hw->rdi_res[i].res_type =
+			CAM_ISP_RESOURCE_PIX_PATH;
+		tfe_csid_hw->rdi_res[i].res_id = i;
+		tfe_csid_hw->rdi_res[i].res_state =
+			CAM_ISP_RESOURCE_STATE_AVAILABLE;
+		tfe_csid_hw->rdi_res[i].hw_intf = tfe_csid_hw->hw_intf;
+
+		path_data = kzalloc(sizeof(*path_data),
+			GFP_KERNEL);
+		if (!path_data) {
+			rc = -ENOMEM;
+			goto err;
+		}
+		tfe_csid_hw->rdi_res[i].res_priv = path_data;
+	}
+
+	tfe_csid_hw->csid_debug = 0;
+	tfe_csid_hw->error_irq_count = 0;
+
+	rc = cam_tfe_csid_disable_soc_resources(
+		&tfe_csid_hw->hw_info->soc_info);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "CSID:%d Disable CSID SOC failed",
+			tfe_csid_hw->hw_intf->hw_idx);
+		goto err;
+	}
+
+	return 0;
+err:
+	/* rc is always non-zero here; free any allocated path private data */
+	kfree(tfe_csid_hw->ipp_res.res_priv);
+	for (i = 0; i <
+		tfe_csid_hw->csid_info->csid_reg->cmn_reg->num_rdis;
+		i++)
+		kfree(tfe_csid_hw->rdi_res[i].res_priv);
+
+	return rc;
+}
+
+int cam_tfe_csid_hw_deinit(struct cam_tfe_csid_hw *tfe_csid_hw)
+{
+	int rc = -EINVAL;
+	uint32_t i;
+
+	if (!tfe_csid_hw) {
+		CAM_ERR(CAM_ISP, "Invalid param");
+		return rc;
+	}
+
+	/* release the private data memory from resources */
+	kfree(tfe_csid_hw->ipp_res.res_priv);
+
+	for (i = 0; i <
+		tfe_csid_hw->csid_info->csid_reg->cmn_reg->num_rdis;
+		i++) {
+		kfree(tfe_csid_hw->rdi_res[i].res_priv);
+	}
+
+	cam_tfe_csid_deinit_soc_resources(&tfe_csid_hw->hw_info->soc_info);
+
+	return 0;
+}

+ 412 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_core.h

@@ -0,0 +1,412 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_TFE_CSID_HW_H_
+#define _CAM_TFE_CSID_HW_H_
+
+#include "cam_hw.h"
+#include "cam_tfe_csid_hw_intf.h"
+#include "cam_tfe_csid_soc.h"
+
+#define CAM_TFE_CSID_CID_MAX                          4
+
+#define TFE_CSID_CSI2_RX_INFO_PHY_DL0_EOT_CAPTURED    BIT(0)
+#define TFE_CSID_CSI2_RX_INFO_PHY_DL1_EOT_CAPTURED    BIT(1)
+#define TFE_CSID_CSI2_RX_INFO_PHY_DL2_EOT_CAPTURED    BIT(2)
+#define TFE_CSID_CSI2_RX_INFO_PHY_DL3_EOT_CAPTURED    BIT(3)
+#define TFE_CSID_CSI2_RX_INFO_PHY_DL0_SOT_CAPTURED    BIT(4)
+#define TFE_CSID_CSI2_RX_INFO_PHY_DL1_SOT_CAPTURED    BIT(5)
+#define TFE_CSID_CSI2_RX_INFO_PHY_DL2_SOT_CAPTURED    BIT(6)
+#define TFE_CSID_CSI2_RX_INFO_PHY_DL3_SOT_CAPTURED    BIT(7)
+#define TFE_CSID_CSI2_RX_INFO_LONG_PKT_CAPTURED       BIT(8)
+#define TFE_CSID_CSI2_RX_INFO_SHORT_PKT_CAPTURED      BIT(9)
+#define TFE_CSID_CSI2_RX_INFO_CPHY_PKT_HDR_CAPTURED   BIT(10)
+#define TFE_CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION     BIT(11)
+#define TFE_CSID_CSI2_RX_ERROR_CPHY_SOT_RECEPTION     BIT(12)
+#define TFE_CSID_CSI2_RX_ERROR_CPHY_PH_CRC            BIT(13)
+#define TFE_CSID_CSI2_RX_WARNING_ECC                  BIT(14)
+#define TFE_CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW    BIT(15)
+#define TFE_CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW    BIT(16)
+#define TFE_CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW    BIT(17)
+#define TFE_CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW    BIT(18)
+#define TFE_CSID_CSI2_RX_ERROR_CRC                    BIT(19)
+#define TFE_CSID_CSI2_RX_ERROR_ECC                    BIT(20)
+#define TFE_CSID_CSI2_RX_ERROR_MMAPPED_VC_DT          BIT(21)
+#define TFE_CSID_CSI2_RX_ERROR_UNMAPPED_VC_DT         BIT(22)
+#define TFE_CSID_CSI2_RX_ERROR_STREAM_UNDERFLOW       BIT(23)
+#define TFE_CSID_CSI2_RX_ERROR_UNBOUNDED_FRAME        BIT(24)
+#define TFE_CSID_CSI2_RX_INFO_RST_DONE                BIT(27)
+
+#define TFE_CSID_PATH_INFO_RST_DONE                   BIT(1)
+#define TFE_CSID_PATH_ERROR_FIFO_OVERFLOW             BIT(2)
+#define TFE_CSID_PATH_INFO_INPUT_EOF                  BIT(9)
+#define TFE_CSID_PATH_INFO_INPUT_EOL                  BIT(10)
+#define TFE_CSID_PATH_INFO_INPUT_SOL                  BIT(11)
+#define TFE_CSID_PATH_INFO_INPUT_SOF                  BIT(12)
+#define TFE_CSID_PATH_IPP_ERROR_CCIF_VIOLATION        BIT(15)
+#define TFE_CSID_PATH_IPP_OVERFLOW_IRQ                BIT(16)
+#define TFE_CSID_PATH_IPP_FRAME_DROP                  BIT(17)
+#define TFE_CSID_PATH_RDI_FRAME_DROP                  BIT(16)
+#define TFE_CSID_PATH_RDI_OVERFLOW_IRQ                BIT(17)
+#define TFE_CSID_PATH_RDI_ERROR_CCIF_VIOLATION        BIT(18)
+
+/*
+ * Debug mask values enable the corresponding interrupts, and the debug logs
+ * provide the necessary information
+ */
+#define TFE_CSID_DEBUG_ENABLE_SOF_IRQ                 BIT(0)
+#define TFE_CSID_DEBUG_ENABLE_EOF_IRQ                 BIT(1)
+#define TFE_CSID_DEBUG_ENABLE_SOT_IRQ                 BIT(2)
+#define TFE_CSID_DEBUG_ENABLE_EOT_IRQ                 BIT(3)
+#define TFE_CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE       BIT(4)
+#define TFE_CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE        BIT(5)
+#define TFE_CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE        BIT(6)
+#define TFE_CSID_DEBUG_ENABLE_HBI_VBI_INFO            BIT(7)
+#define TFE_CSID_DEBUG_DISABLE_EARLY_EOF              BIT(8)
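These debug bits are OR-ed into csid_hw->csid_debug and checked in the IRQ handler before each capture or log. A hedged sketch of composing such a mask; how the value actually reaches csid_debug (for example via a debugfs node) is not shown in this patch:

/* Example only: log SOF/EOF IRQs and capture long packets */
csid_hw->csid_debug = TFE_CSID_DEBUG_ENABLE_SOF_IRQ |
	TFE_CSID_DEBUG_ENABLE_EOF_IRQ |
	TFE_CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE;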
+
+/* enum cam_tfe_csid_path_halt_mode selects the path halt mode control */
+enum cam_tfe_csid_path_halt_mode {
+	TFE_CSID_HALT_MODE_INTERNAL,
+	TFE_CSID_HALT_MODE_GLOBAL,
+	TFE_CSID_HALT_MODE_MASTER,
+	TFE_CSID_HALT_MODE_SLAVE,
+};
+
+/**
+ * enum cam_tfe_csid_path_timestamp_stb_sel - select the sof/eof strobes used
+ *        to capture the timestamp
+ */
+enum cam_tfe_csid_path_timestamp_stb_sel {
+	TFE_CSID_TIMESTAMP_STB_PRE_HALT,
+	TFE_CSID_TIMESTAMP_STB_POST_HALT,
+	TFE_CSID_TIMESTAMP_STB_POST_IRQ,
+	TFE_CSID_TIMESTAMP_STB_MAX,
+};
+
+struct cam_tfe_csid_pxl_reg_offset {
+	/* Pxl path register offsets*/
+	uint32_t csid_pxl_irq_status_addr;
+	uint32_t csid_pxl_irq_mask_addr;
+	uint32_t csid_pxl_irq_clear_addr;
+	uint32_t csid_pxl_irq_set_addr;
+
+	uint32_t csid_pxl_cfg0_addr;
+	uint32_t csid_pxl_cfg1_addr;
+	uint32_t csid_pxl_ctrl_addr;
+	uint32_t csid_pxl_hcrop_addr;
+	uint32_t csid_pxl_vcrop_addr;
+	uint32_t csid_pxl_rst_strobes_addr;
+	uint32_t csid_pxl_status_addr;
+	uint32_t csid_pxl_misr_val_addr;
+	uint32_t csid_pxl_timestamp_curr0_sof_addr;
+	uint32_t csid_pxl_timestamp_curr1_sof_addr;
+	uint32_t csid_pxl_timestamp_perv0_sof_addr;
+	uint32_t csid_pxl_timestamp_perv1_sof_addr;
+	uint32_t csid_pxl_timestamp_curr0_eof_addr;
+	uint32_t csid_pxl_timestamp_curr1_eof_addr;
+	uint32_t csid_pxl_timestamp_perv0_eof_addr;
+	uint32_t csid_pxl_timestamp_perv1_eof_addr;
+	uint32_t csid_pxl_err_recovery_cfg0_addr;
+	uint32_t csid_pxl_err_recovery_cfg1_addr;
+	uint32_t csid_pxl_err_recovery_cfg2_addr;
+
+	/* configuration */
+	uint32_t pix_store_en_shift_val;
+	uint32_t early_eof_en_shift_val;
+	uint32_t halt_master_sel_shift;
+	uint32_t halt_mode_shift;
+	uint32_t halt_master_sel_master_val;
+	uint32_t halt_master_sel_slave_val;
+};
+
+struct cam_tfe_csid_rdi_reg_offset {
+	uint32_t csid_rdi_irq_status_addr;
+	uint32_t csid_rdi_irq_mask_addr;
+	uint32_t csid_rdi_irq_clear_addr;
+	uint32_t csid_rdi_irq_set_addr;
+
+	/*RDI N register address */
+	uint32_t csid_rdi_cfg0_addr;
+	uint32_t csid_rdi_cfg1_addr;
+	uint32_t csid_rdi_ctrl_addr;
+	uint32_t csid_rdi_rst_strobes_addr;
+	uint32_t csid_rdi_status_addr;
+	uint32_t csid_rdi_misr_val0_addr;
+	uint32_t csid_rdi_misr_val1_addr;
+	uint32_t csid_rdi_timestamp_curr0_sof_addr;
+	uint32_t csid_rdi_timestamp_curr1_sof_addr;
+	uint32_t csid_rdi_timestamp_prev0_sof_addr;
+	uint32_t csid_rdi_timestamp_prev1_sof_addr;
+	uint32_t csid_rdi_timestamp_curr0_eof_addr;
+	uint32_t csid_rdi_timestamp_curr1_eof_addr;
+	uint32_t csid_rdi_timestamp_prev0_eof_addr;
+	uint32_t csid_rdi_timestamp_prev1_eof_addr;
+	uint32_t csid_rdi_err_recovery_cfg0_addr;
+	uint32_t csid_rdi_err_recovery_cfg1_addr;
+	uint32_t csid_rdi_err_recovery_cfg2_addr;
+	uint32_t csid_rdi_byte_cntr_ping_addr;
+	uint32_t csid_rdi_byte_cntr_pong_addr;
+
+	/* configuration */
+	uint32_t packing_format;
+};
+
+struct cam_tfe_csid_csi2_rx_reg_offset {
+	uint32_t csid_csi2_rx_irq_status_addr;
+	uint32_t csid_csi2_rx_irq_mask_addr;
+	uint32_t csid_csi2_rx_irq_clear_addr;
+	uint32_t csid_csi2_rx_irq_set_addr;
+	uint32_t csid_csi2_rx_cfg0_addr;
+	uint32_t csid_csi2_rx_cfg1_addr;
+	uint32_t csid_csi2_rx_capture_ctrl_addr;
+	uint32_t csid_csi2_rx_rst_strobes_addr;
+	uint32_t csid_csi2_rx_cap_unmap_long_pkt_hdr_0_addr;
+	uint32_t csid_csi2_rx_cap_unmap_long_pkt_hdr_1_addr;
+	uint32_t csid_csi2_rx_captured_short_pkt_0_addr;
+	uint32_t csid_csi2_rx_captured_short_pkt_1_addr;
+	uint32_t csid_csi2_rx_captured_long_pkt_0_addr;
+	uint32_t csid_csi2_rx_captured_long_pkt_1_addr;
+	uint32_t csid_csi2_rx_captured_long_pkt_ftr_addr;
+	uint32_t csid_csi2_rx_captured_cphy_pkt_hdr_addr;
+	uint32_t csid_csi2_rx_total_pkts_rcvd_addr;
+	uint32_t csid_csi2_rx_stats_ecc_addr;
+	uint32_t csid_csi2_rx_total_crc_err_addr;
+
+	/* configurations */
+	uint32_t csi2_rst_srb_all;
+	uint32_t csi2_rst_done_shift_val;
+	uint32_t csi2_irq_mask_all;
+	uint32_t csi2_misr_enable_shift_val;
+	uint32_t csi2_vc_mode_shift_val;
+	uint32_t csi2_capture_long_pkt_en_shift;
+	uint32_t csi2_capture_short_pkt_en_shift;
+	uint32_t csi2_capture_cphy_pkt_en_shift;
+	uint32_t csi2_capture_long_pkt_dt_shift;
+	uint32_t csi2_capture_long_pkt_vc_shift;
+	uint32_t csi2_capture_short_pkt_vc_shift;
+	uint32_t csi2_capture_cphy_pkt_dt_shift;
+	uint32_t csi2_capture_cphy_pkt_vc_shift;
+	uint32_t csi2_rx_phy_num_mask;
+	uint32_t csi2_rx_long_pkt_hdr_rst_stb_shift;
+	uint32_t csi2_rx_short_pkt_hdr_rst_stb_shift;
+};
+
+struct cam_tfe_csid_common_reg_offset {
+	/* MIPI CSID registers */
+	uint32_t csid_hw_version_addr;
+	uint32_t csid_cfg0_addr;
+	uint32_t csid_ctrl_addr;
+	uint32_t csid_rst_strobes_addr;
+
+	uint32_t csid_test_bus_ctrl_addr;
+	uint32_t csid_top_irq_status_addr;
+	uint32_t csid_top_irq_mask_addr;
+	uint32_t csid_top_irq_clear_addr;
+	uint32_t csid_top_irq_set_addr;
+	uint32_t csid_irq_cmd_addr;
+
+	/* configurations */
+	uint32_t major_version;
+	uint32_t minor_version;
+	uint32_t version_incr;
+	uint32_t num_rdis;
+	uint32_t num_pix;
+	uint32_t csid_reg_rst_stb;
+	uint32_t csid_rst_stb;
+	uint32_t csid_rst_stb_sw_all;
+	uint32_t ipp_path_rst_stb_all;
+	uint32_t rdi_path_rst_stb_all;
+	uint32_t path_rst_done_shift_val;
+	uint32_t path_en_shift_val;
+	uint32_t dt_id_shift_val;
+	uint32_t vc_shift_val;
+	uint32_t dt_shift_val;
+	uint32_t fmt_shift_val;
+	uint32_t plain_fmt_shit_val;
+	uint32_t crop_v_en_shift_val;
+	uint32_t crop_h_en_shift_val;
+	uint32_t crop_shift;
+	uint32_t ipp_irq_mask_all;
+	uint32_t rdi_irq_mask_all;
+	uint32_t top_tfe2_pix_pipe_fuse_reg;
+	uint32_t top_tfe2_fuse_reg;
+};
+
+/**
+ * struct cam_tfe_csid_reg_offset- CSID instance register info
+ *
+ * @cmn_reg:  csid common registers info
+ * @csi2_reg: csi2 rx register offset information
+ * @ipp_reg:  ipp register offset information
+ * @rdi_reg:  rdi register offset information
+ *
+ */
+struct cam_tfe_csid_reg_offset {
+	const struct cam_tfe_csid_common_reg_offset   *cmn_reg;
+	const struct cam_tfe_csid_csi2_rx_reg_offset  *csi2_reg;
+	const struct cam_tfe_csid_pxl_reg_offset      *ipp_reg;
+	const struct cam_tfe_csid_rdi_reg_offset *rdi_reg[CAM_TFE_CSID_RDI_MAX];
+};
+
+/**
+ * struct cam_tfe_csid_hw_info- CSID HW info
+ *
+ * @csid_reg:        csid register offsets
+ * @hw_dts_version:  HW DTS version
+ * @csid_max_clk:    maximum csid clock
+ *
+ */
+struct cam_tfe_csid_hw_info {
+	const struct cam_tfe_csid_reg_offset   *csid_reg;
+	uint32_t                                hw_dts_version;
+	uint32_t                                csid_max_clk;
+};
+
+/**
+ * struct cam_tfe_csid_csi2_rx_cfg- csid csi2 rx configuration data
+ * @phy_sel:     input resource type for sensor only
+ * @lane_type:   lane type: c-phy or d-phy
+ * @lane_num:    active lane number
+ * @lane_cfg:    lane configurations: 4 bits per lane
+ *
+ */
+struct cam_tfe_csid_csi2_rx_cfg  {
+	uint32_t                        phy_sel;
+	uint32_t                        lane_type;
+	uint32_t                        lane_num;
+	uint32_t                        lane_cfg;
+};
+
+/**
+ * struct cam_tfe_csid_cid_data- cid configuration private data
+ *
+ * @vc:          Virtual channel
+ * @dt:          Data type
+ * @cnt:         Cid resource reference count.
+ *
+ */
+struct cam_tfe_csid_cid_data {
+	uint32_t                     vc;
+	uint32_t                     dt;
+	uint32_t                     cnt;
+};
+
+/**
+ * struct cam_tfe_csid_path_cfg- csid path configuration details. It is stored
+ *                          as private data for IPP/RDI paths
+ * @vc:             Virtual channel number
+ * @dt:             Data type number
+ * @cid:            cid number, it is the same as the DT_ID number in HW
+ * @in_format:      input decode format
+ * @out_format:     output format
+ * @crop_enable:    crop is enabled or disabled; if enabled,
+ *                  the remaining parameters are valid
+ * @start_pixel:    start pixel
+ * @end_pixel:      end pixel
+ * @width:          width
+ * @start_line:     start line
+ * @end_line:       end line
+ * @height:         height
+ * @sync_mode:      Applicable for IPP/RDI path reservation.
+ *                  Reserves the path for master IPP or slave IPP:
+ *                  master (set value 1), slave (set value 2);
+ *                  for RDI, set the mode to none
+ * @master_idx:     For slave reservation, give the master TFE instance index.
+ *                  The slave will synchronize with the master start and stop
+ *                  operations
+ * @clk_rate:       Clock rate
+ * @sensor_width:   Sensor width in pixels
+ * @sensor_height:  Sensor height in pixels
+ * @sensor_fps:     Sensor fps
+ * @sensor_hbi:     Sensor horizontal blanking interval
+ * @sensor_vbi:     Sensor vertical blanking interval
+ *
+ */
+struct cam_tfe_csid_path_cfg {
+	uint32_t                        vc;
+	uint32_t                        dt;
+	uint32_t                        cid;
+	uint32_t                        in_format;
+	uint32_t                        out_format;
+	bool                            crop_enable;
+	uint32_t                        start_pixel;
+	uint32_t                        end_pixel;
+	uint32_t                        width;
+	uint32_t                        start_line;
+	uint32_t                        end_line;
+	uint32_t                        height;
+	enum cam_isp_hw_sync_mode       sync_mode;
+	uint32_t                        master_idx;
+	uint64_t                        clk_rate;
+	uint32_t                        sensor_width;
+	uint32_t                        sensor_height;
+	uint32_t                        sensor_fps;
+	uint32_t                        sensor_hbi;
+	uint32_t                        sensor_vbi;
+};
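For the dual-TFE reservation described by @sync_mode and @master_idx above, a configuration sketch; the CAM_ISP_HW_SYNC_* literals are assumed from cam_isp_hw.h and the index values are illustrative:

/* Master IPP path on TFE0; slave IPP path pointing back at it */
struct cam_tfe_csid_path_cfg master_cfg = {
	.sync_mode  = CAM_ISP_HW_SYNC_MASTER,
};

struct cam_tfe_csid_path_cfg slave_cfg = {
	.sync_mode  = CAM_ISP_HW_SYNC_SLAVE,
	.master_idx = 0,	/* synchronize with TFE0 */
};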
+
+/**
+ * struct cam_tfe_csid_hw- csid hw device resources data
+ *
+ * @hw_intf:                  contain the csid hw interface information
+ * @hw_info:                  csid hw device information
+ * @csid_info:                csid hw specific information
+ * @in_res_id:                csid in resource type
+ * @csi2_rx_cfg:              csi2 rx decoder configuration for csid
+ * @csi2_reserve_cnt:         csi2 reservation count value
+ * @pxl_pipe_enable:          flag set when the pixel (IPP) pipe is available
+ * @ipp_res:                  image pixel path resource
+ * @rdi_res:                  raw dump image path resources
+ * @cid_res:                  cid resources values
+ * @csid_top_complete:        csid top reset completion
+ * @csid_csi2_complete:       csi2 reset completion
+ * @csid_ipp_complete:        ipp reset completion
+ * @csid_rdin_complete:       rdi n reset completion
+ * @csid_debug:               csid debug information to enable the SOT, EOT,
+ *                            SOF, EOF, measure etc in the csid hw
+ * @clk_rate:                 Clock rate
+ * @sof_irq_triggered:        Flag is set on receiving event to enable sof irq
+ *                            in case of SOF freeze.
+ * @irq_debug_cnt:            Counter to track sof irq's when above flag is set.
+ * @error_irq_count:          Error IRQ count; if a continuous error irq comes,
+ *                            need to stop the CSID and mask interrupts.
+ * @device_enabled:           Set once the CSID is powered on and the
+ *                            initial configuration is done.
+ * @spin_lock:                csid spin lock
+ * @event_cb:                 Callback function to hw mgr in case of hw events
+ * @event_cb_priv:            Context data
+ *
+ */
+struct cam_tfe_csid_hw {
+	struct cam_hw_intf                 *hw_intf;
+	struct cam_hw_info                 *hw_info;
+	struct cam_tfe_csid_hw_info        *csid_info;
+	uint32_t                            in_res_id;
+	struct cam_tfe_csid_csi2_rx_cfg     csi2_rx_cfg;
+	uint32_t                            csi2_reserve_cnt;
+	uint32_t                            pxl_pipe_enable;
+	struct cam_isp_resource_node        ipp_res;
+	struct cam_isp_resource_node        rdi_res[CAM_TFE_CSID_RDI_MAX];
+	struct cam_tfe_csid_cid_data        cid_res[CAM_TFE_CSID_CID_MAX];
+	struct completion                   csid_top_complete;
+	struct completion                   csid_csi2_complete;
+	struct completion                   csid_ipp_complete;
+	struct completion     csid_rdin_complete[CAM_TFE_CSID_RDI_MAX];
+	uint64_t                            csid_debug;
+	uint64_t                            clk_rate;
+	bool                                sof_irq_triggered;
+	uint32_t                            irq_debug_cnt;
+	uint32_t                            error_irq_count;
+	uint32_t                            device_enabled;
+	spinlock_t                          spin_lock;
+	cam_hw_mgr_event_cb_func            event_cb;
+	void                               *event_cb_priv;
+};
+
+int cam_tfe_csid_hw_probe_init(struct cam_hw_intf  *csid_hw_intf,
+	uint32_t csid_idx);
+
+int cam_tfe_csid_hw_deinit(struct cam_tfe_csid_hw *tfe_csid_hw);
+
+#endif /* _CAM_TFE_CSID_HW_H_ */

+ 139 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_dev.c

@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include "cam_tfe_csid_core.h"
+#include "cam_tfe_csid_dev.h"
+#include "cam_tfe_csid_hw_intf.h"
+#include "cam_debug_util.h"
+
+static struct cam_hw_intf *cam_tfe_csid_hw_list[CAM_TFE_CSID_HW_NUM_MAX] = {
+	0, 0, 0};
+
+static char csid_dev_name[8];
+
+int cam_tfe_csid_probe(struct platform_device *pdev)
+{
+	struct cam_hw_intf             *csid_hw_intf;
+	struct cam_hw_info             *csid_hw_info;
+	struct cam_tfe_csid_hw         *csid_dev = NULL;
+	const struct of_device_id      *match_dev = NULL;
+	struct cam_tfe_csid_hw_info    *csid_hw_data = NULL;
+	uint32_t                        csid_dev_idx;
+	int                             rc = 0;
+
+	CAM_DBG(CAM_ISP, "probe called");
+
+	csid_hw_intf = kzalloc(sizeof(*csid_hw_intf), GFP_KERNEL);
+	if (!csid_hw_intf) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	csid_hw_info = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!csid_hw_info) {
+		rc = -ENOMEM;
+		goto free_hw_intf;
+	}
+
+	csid_dev = kzalloc(sizeof(struct cam_tfe_csid_hw), GFP_KERNEL);
+	if (!csid_dev) {
+		rc = -ENOMEM;
+		goto free_hw_info;
+	}
+
+	/* get tfe csid hw index */
+	rc = of_property_read_u32(pdev->dev.of_node, "cell-index",
+		&csid_dev_idx);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Failed to read cell-index rc=%d", rc);
+		goto free_dev;
+	}
+	/* get tfe csid hw information */
+	match_dev = of_match_device(pdev->dev.driver->of_match_table,
+		&pdev->dev);
+	if (!match_dev) {
+		CAM_ERR(CAM_ISP, "No matching table for the tfe csid hw");
+		rc = -EINVAL;
+		goto free_dev;
+	}
+
+	memset(csid_dev_name, 0, sizeof(csid_dev_name));
+	snprintf(csid_dev_name, sizeof(csid_dev_name),
+		"csid%1u", csid_dev_idx);
+
+	csid_hw_intf->hw_idx = csid_dev_idx;
+	csid_hw_intf->hw_type = CAM_ISP_HW_TYPE_TFE_CSID;
+	csid_hw_intf->hw_priv = csid_hw_info;
+
+	csid_hw_info->core_info = csid_dev;
+	csid_hw_info->soc_info.pdev = pdev;
+	csid_hw_info->soc_info.dev = &pdev->dev;
+	csid_hw_info->soc_info.dev_name = csid_dev_name;
+	csid_hw_info->soc_info.index = csid_dev_idx;
+
+	csid_hw_data = (struct cam_tfe_csid_hw_info  *)match_dev->data;
+	/* need to setup the pdev before call the tfe hw probe init */
+	csid_dev->csid_info = csid_hw_data;
+
+	rc = cam_tfe_csid_hw_probe_init(csid_hw_intf, csid_dev_idx);
+	if (rc)
+		goto free_dev;
+
+	platform_set_drvdata(pdev, csid_dev);
+	CAM_DBG(CAM_ISP, "CSID:%d probe successful",
+		csid_hw_intf->hw_idx);
+
+	if (csid_hw_intf->hw_idx < CAM_TFE_CSID_HW_NUM_MAX) {
+		cam_tfe_csid_hw_list[csid_hw_intf->hw_idx] = csid_hw_intf;
+	} else {
+		rc = -EINVAL;
+		goto free_dev;
+	}
+
+	return 0;
+
+free_dev:
+	kfree(csid_dev);
+free_hw_info:
+	kfree(csid_hw_info);
+free_hw_intf:
+	kfree(csid_hw_intf);
+err:
+	return rc;
+}
+
+int cam_tfe_csid_remove(struct platform_device *pdev)
+{
+	struct cam_tfe_csid_hw         *csid_dev = NULL;
+	struct cam_hw_intf             *csid_hw_intf;
+	struct cam_hw_info             *csid_hw_info;
+
+	csid_dev = (struct cam_tfe_csid_hw *)platform_get_drvdata(pdev);
+	csid_hw_intf = csid_dev->hw_intf;
+	csid_hw_info = csid_dev->hw_info;
+
+	CAM_DBG(CAM_ISP, "CSID:%d remove",
+		csid_dev->hw_intf->hw_idx);
+
+	cam_tfe_csid_hw_deinit(csid_dev);
+
+	/* release the csid device memory */
+	kfree(csid_dev);
+	kfree(csid_hw_info);
+	kfree(csid_hw_intf);
+	return 0;
+}
+
+int cam_tfe_csid_hw_init(struct cam_hw_intf **tfe_csid_hw,
+	uint32_t hw_idx)
+{
+	int rc = 0;
+
+	if (hw_idx < CAM_TFE_CSID_HW_NUM_MAX && cam_tfe_csid_hw_list[hw_idx]) {
+		*tfe_csid_hw = cam_tfe_csid_hw_list[hw_idx];
+	} else {
+		*tfe_csid_hw = NULL;
+		rc = -ENODEV;
+	}
+
+	return rc;
+}
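A caller such as the TFE hw manager would presumably look up a probed instance through this helper before touching its ops table. A sketch only, with the error handling assumed:

struct cam_hw_intf *csid_intf = NULL;

if (cam_tfe_csid_hw_init(&csid_intf, 0) || !csid_intf) {
	CAM_ERR(CAM_ISP, "TFE CSID 0 is not available");
	return -ENODEV;
}
/* csid_intf->hw_ops (init, reserve, start, ...) can now be used */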

+ 16 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_dev.h

@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_TFE_CSID_DEV_H_
+#define _CAM_TFE_CSID_DEV_H_
+
+#include "cam_isp_hw.h"
+
+irqreturn_t cam_tfe_csid_irq(int irq_num, void *data);
+
+int cam_tfe_csid_probe(struct platform_device *pdev);
+int cam_tfe_csid_remove(struct platform_device *pdev);
+
+#endif /*_CAM_TFE_CSID_DEV_H_ */

+ 209 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_soc.c

@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+#include <linux/slab.h>
+#include "cam_tfe_csid_soc.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+
+int cam_tfe_csid_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t csid_irq_handler, void *irq_data)
+{
+	int rc = 0;
+	struct cam_cpas_register_params   cpas_register_param;
+	struct cam_tfe_csid_soc_private      *soc_private;
+
+	soc_private = kzalloc(sizeof(struct cam_tfe_csid_soc_private),
+		GFP_KERNEL);
+	if (!soc_private)
+		return -ENOMEM;
+
+	soc_info->soc_private = soc_private;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc < 0)
+		goto free_soc_private;
+
+	/* Need to see if we want to post-process the clock list */
+	rc = cam_soc_util_request_platform_resource(soc_info, csid_irq_handler,
+		irq_data);
+
+	if (rc < 0) {
+		CAM_ERR(CAM_ISP,
+			"Error Request platform resources failed rc=%d", rc);
+		goto free_soc_private;
+	}
+
+	memset(&cpas_register_param, 0, sizeof(cpas_register_param));
+	strlcpy(cpas_register_param.identifier, "csid",
+		CAM_HW_IDENTIFIER_LENGTH);
+	cpas_register_param.cell_index = soc_info->index;
+	cpas_register_param.dev = soc_info->dev;
+	rc = cam_cpas_register_client(&cpas_register_param);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "CPAS registration failed rc=%d", rc);
+		goto release_soc;
+	}
+
+	soc_private->cpas_handle = cpas_register_param.client_handle;
+
+	return rc;
+
+release_soc:
+	cam_soc_util_release_platform_resource(soc_info);
+free_soc_private:
+	kfree(soc_private);
+
+	return rc;
+}
+
+int cam_tfe_csid_deinit_soc_resources(
+	struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+	struct cam_tfe_csid_soc_private       *soc_private;
+
+	soc_private = soc_info->soc_private;
+	if (!soc_private) {
+		CAM_ERR(CAM_ISP, "Error soc_private NULL");
+		return -ENODEV;
+	}
+
+	rc = cam_cpas_unregister_client(soc_private->cpas_handle);
+	if (rc)
+		CAM_ERR(CAM_ISP, "CPAS unregistration failed rc=%d", rc);
+
+	rc = cam_soc_util_release_platform_resource(soc_info);
+
+	return rc;
+}
+
+int cam_tfe_csid_enable_soc_resources(
+	struct cam_hw_soc_info *soc_info, enum cam_vote_level clk_level)
+{
+	int rc = 0;
+	struct cam_tfe_csid_soc_private       *soc_private;
+	struct cam_ahb_vote ahb_vote;
+	struct cam_axi_vote axi_vote = {0};
+
+	soc_private = soc_info->soc_private;
+
+	ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	ahb_vote.vote.level = CAM_SVS_VOTE;
+	axi_vote.num_paths = 1;
+	axi_vote.axi_path[0].path_data_type = CAM_AXI_PATH_DATA_ALL;
+	axi_vote.axi_path[0].transac_type = CAM_AXI_TRANSACTION_WRITE;
+
+	axi_vote.axi_path[0].camnoc_bw = CAM_CPAS_DEFAULT_AXI_BW;
+	axi_vote.axi_path[0].mnoc_ab_bw = CAM_CPAS_DEFAULT_AXI_BW;
+	axi_vote.axi_path[0].mnoc_ib_bw = CAM_CPAS_DEFAULT_AXI_BW;
+
+	CAM_DBG(CAM_ISP, "csid camnoc_bw:%lld mnoc_ab_bw:%lld mnoc_ib_bw:%lld ",
+		axi_vote.axi_path[0].camnoc_bw,
+		axi_vote.axi_path[0].mnoc_ab_bw,
+		axi_vote.axi_path[0].mnoc_ib_bw);
+
+	rc = cam_cpas_start(soc_private->cpas_handle, &ahb_vote, &axi_vote);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Error CPAS start failed");
+		rc = -EFAULT;
+		goto end;
+	}
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		clk_level, true);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "enable platform failed");
+		goto stop_cpas;
+	}
+
+	return rc;
+
+stop_cpas:
+	cam_cpas_stop(soc_private->cpas_handle);
+end:
+	return rc;
+}
+
+int cam_tfe_csid_disable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+	struct cam_tfe_csid_soc_private       *soc_private;
+
+	if (!soc_info) {
+		CAM_ERR(CAM_ISP, "Error Invalid params");
+		return -EINVAL;
+	}
+	soc_private = soc_info->soc_private;
+	if (!soc_private) {
+		CAM_ERR(CAM_ISP, "Error soc_private NULL");
+		return -ENODEV;
+	}
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+	if (rc)
+		CAM_ERR(CAM_ISP, "Disable platform failed");
+
+	rc = cam_cpas_stop(soc_private->cpas_handle);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Error CPAS stop failed rc=%d", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+int cam_tfe_csid_enable_tfe_force_clock_on(struct cam_hw_soc_info  *soc_info,
+	uint32_t cpas_tfe_base_offset)
+{
+	int rc = 0;
+	struct cam_tfe_csid_soc_private       *soc_private;
+	uint32_t                           cpas_tfe_force_clk_offset;
+
+	if (!soc_info) {
+		CAM_ERR(CAM_ISP, "Error Invalid params");
+		return -EINVAL;
+	}
+
+	soc_private = soc_info->soc_private;
+	cpas_tfe_force_clk_offset =
+		cpas_tfe_base_offset + (0x4 * soc_info->index);
+	rc = cam_cpas_reg_write(soc_private->cpas_handle, CAM_CPAS_REG_CPASTOP,
+		cpas_tfe_force_clk_offset, 1, 1);
+
+	if (rc)
+		CAM_ERR(CAM_ISP, "CPAS set TFE:%d Force clock On failed",
+			soc_info->index);
+	else
+		CAM_DBG(CAM_ISP, "CPAS set TFE:%d Force clock On",
+			soc_info->index);
+
+	return rc;
+}
+
+int cam_tfe_csid_disable_tfe_force_clock_on(struct cam_hw_soc_info *soc_info,
+	uint32_t cpas_tfe_base_offset)
+{
+	int rc = 0;
+	struct cam_tfe_csid_soc_private       *soc_private;
+	uint32_t                           cpas_tfe_force_clk_offset;
+
+	if (!soc_info) {
+		CAM_ERR(CAM_ISP, "Error Invalid params");
+		return -EINVAL;
+	}
+
+	soc_private = soc_info->soc_private;
+	cpas_tfe_force_clk_offset =
+		cpas_tfe_base_offset + (0x4 * soc_info->index);
+	rc = cam_cpas_reg_write(soc_private->cpas_handle, CAM_CPAS_REG_CPASTOP,
+		cpas_tfe_force_clk_offset, 1, 0);
+
+	if (rc)
+		CAM_ERR(CAM_ISP, "CPAS set TFE:%d Force clock Off failed",
+			soc_info->index);
+	else
+		CAM_DBG(CAM_ISP, "CPAS set TFE:%d Force clock Off",
+			soc_info->index);
+
+	return rc;
+}

+ 119 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_soc.h

@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_TFE_CSID_SOC_H_
+#define _CAM_TFE_CSID_SOC_H_
+
+#include "cam_isp_hw.h"
+
+/*
+ * struct cam_tfe_csid_soc_private:
+ *
+ * @Brief:                   Private SOC data specific to CSID HW Driver
+ *
+ * @cpas_handle:             Handle returned on registering with CPAS driver.
+ *                           This handle is used for all further interface
+ *                           with CPAS.
+ */
+struct cam_tfe_csid_soc_private {
+	uint32_t cpas_handle;
+};
+
+/**
+ * struct cam_tfe_csid_device_soc_info - CSID SOC info object
+ *
+ * @csi_vdd_voltage:       Csi vdd voltage value
+ *
+ */
+struct cam_tfe_csid_device_soc_info {
+	int                             csi_vdd_voltage;
+};
+
+/**
+ * cam_tfe_csid_init_soc_resources()
+ *
+ * @brief:                 Csid initialization function for the soc info
+ *
+ * @soc_info:              Soc info structure pointer
+ * @csid_irq_handler:      Irq handler function to be registered
+ * @irq_data:              Irq data for the callback function
+ *
+ */
+int cam_tfe_csid_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t csid_irq_handler, void *irq_data);
+
+
+/**
+ * cam_tfe_csid_deinit_soc_resources()
+ *
+ * @brief:                 Csid de-initialization function for the soc info
+ *
+ * @soc_info:              Soc info structure pointer
+ *
+ */
+int cam_tfe_csid_deinit_soc_resources(struct cam_hw_soc_info *soc_info);
+
+/**
+ * cam_tfe_csid_enable_soc_resources()
+ *
+ * @brief:                 Csid soc resource enable function
+ *
+ * @soc_info:              Soc info structure pointer
+ * @clk_lvl:               Vote level to start with
+ *
+ */
+int cam_tfe_csid_enable_soc_resources(struct cam_hw_soc_info  *soc_info,
+	uint32_t clk_lvl);
+
+/**
+ * cam_tfe_csid_disable_soc_resources()
+ *
+ * @brief:                 Csid soc resource disable function
+ *
+ * @soc_info:              Soc info structure pointer
+ *
+ */
+int cam_tfe_csid_disable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+/**
+ * cam_tfe_csid_enable_tfe_force_clock_on()
+ *
+ * @brief:                 If the CSID test generator is used in the dual ISP
+ *                         case, enable the TFE force clock through CPAS
+ *                         before starting the test generator
+ *
+ * @soc_info:              Soc info structure pointer
+ * @cpas_tfe_base_offset:  Cpas tfe force clock base reg offset value
+ *
+ */
+int cam_tfe_csid_enable_tfe_force_clock_on(struct cam_hw_soc_info  *soc_info,
+	uint32_t cpas_tfe_base_offset);
+
+/**
+ * cam_tfe_csid_disable_tfe_force_clock_on()
+ *
+ * @brief:                 Disable the TFE force clock after the dual ISP
+ *                         CSID test generator stops
+ *
+ * @soc_info:              Soc info structure pointer
+ * @cpas_tfe_base_offset:  Cpas tfe force clock base reg offset value
+ *
+ */
+int cam_tfe_csid_disable_tfe_force_clock_on(struct cam_hw_soc_info *soc_info,
+	uint32_t cpas_tfe_base_offset);
+
+/**
+ * cam_tfe_csid_get_vote_level()
+ *
+ * @brief:                 Get the vote level from clock rate
+ *
+ * @soc_info:              Soc info structure pointer
+ * @clock_rate             Clock rate
+ *
+ */
+uint32_t cam_tfe_csid_get_vote_level(struct cam_hw_soc_info *soc_info,
+	uint64_t clock_rate);
+
+#endif /* _CAM_TFE_CSID_SOC_H_ */
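Taken together, these helpers bracket all CSID register access. A minimal usage sketch, assuming cam_tfe_csid_init_soc_resources() has already registered the IRQ handler and that CAM_SVS_VOTE is an acceptable starting level:

rc = cam_tfe_csid_enable_soc_resources(soc_info, CAM_SVS_VOTE);
if (rc)
	return rc;
/* ... program CSID registers ... */
rc = cam_tfe_csid_disable_soc_resources(soc_info);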

+ 13 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/Makefile

@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_utils
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_core
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_cdm/
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_cpas/include
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_isp/isp_hw_mgr/include
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_isp/isp_hw_mgr/
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_isp/isp_hw_mgr/isp_hw/include
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_isp/isp_hw_mgr/hw_utils/include
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_isp/isp_hw_mgr/hw_utils/irq_controller
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_tfe_soc.o cam_tfe_dev.o cam_tfe_core.o cam_tfe_bus.o cam_tfe.o

+ 42 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe.c

@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include "cam_tfe530.h"
+#include "cam_tfe_hw_intf.h"
+#include "cam_tfe_core.h"
+#include "cam_tfe_dev.h"
+
+static const struct of_device_id cam_tfe_dt_match[] = {
+	{
+		.compatible = "qcom,tfe530",
+		.data = &cam_tfe530,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, cam_tfe_dt_match);
+
+static struct platform_driver cam_tfe_driver = {
+	.probe = cam_tfe_probe,
+	.remove = cam_tfe_remove,
+	.driver = {
+		.name = "cam_tfe",
+		.of_match_table = cam_tfe_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+int cam_tfe_init_module(void)
+{
+	return platform_driver_register(&cam_tfe_driver);
+}
+
+void cam_tfe_exit_module(void)
+{
+	platform_driver_unregister(&cam_tfe_driver);
+}
+
+MODULE_DESCRIPTION("CAM TFE driver");
+MODULE_LICENSE("GPL v2");

+ 813 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe530.h

@@ -0,0 +1,813 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+
+#ifndef _CAM_TFE530_H_
+#define _CAM_TFE530_H_
+#include "cam_tfe_core.h"
+#include "cam_tfe_bus.h"
+
+
+static struct cam_tfe_top_reg_offset_common  tfe530_top_common_reg = {
+	.hw_version                   = 0x00001000,
+	.hw_capability                = 0x00001004,
+	.lens_feature                 = 0x00001008,
+	.stats_feature                = 0x0000100C,
+	.zoom_feature                 = 0x00001010,
+	.global_reset_cmd             = 0x00001014,
+	.core_cgc_ctrl                = 0x00001018,
+	.ahb_cgc_ctrl                 = 0x0000101C,
+	.core_cfg_0                   = 0x00001024,
+	.core_cfg_1                   = 0x00001028,
+	.reg_update_cmd               = 0x0000102C,
+	.diag_config                  = 0x00001060,
+	.diag_sensor_status_0         = 0x00001064,
+	.diag_sensor_status_1         = 0x00001068,
+	.diag_sensor_frame_cnt_status = 0x0000106C,
+	.violation_status             = 0x00001070,
+	.stats_throttle_cnt_cfg_0     = 0x00001074,
+	.stats_throttle_cnt_cfg_1     = 0x00001078,
+	.debug_0                      = 0x000010A0,
+	.debug_1                      = 0x000010A4,
+	.debug_2                      = 0x000010A8,
+	.debug_3                      = 0x000010AC,
+	.debug_cfg                    = 0x000010DC,
+	.perf_cnt_cfg                 = 0x000010E0,
+	.perf_pixel_count             = 0x000010E4,
+	.perf_line_count              = 0x000010E8,
+	.perf_stall_count             = 0x000010EC,
+	.perf_always_count            = 0x000010F0,
+	.perf_count_status            = 0x000010F4,
+};
+
+static struct cam_tfe_camif_reg  tfe530_camif_reg = {
+	.hw_version                   = 0x00001200,
+	.hw_status                    = 0x00001204,
+	.module_cfg                   = 0x00001260,
+	.pdaf_raw_crop_width_cfg      = 0x00001268,
+	.pdaf_raw_crop_height_cfg     = 0x0000126C,
+	.line_skip_pattern            = 0x00001270,
+	.pixel_skip_pattern           = 0x00001274,
+	.period_cfg                   = 0x00001278,
+	.irq_subsample_pattern        = 0x0000127C,
+	.epoch_irq_cfg                = 0x00001280,
+	.debug_1                      = 0x000013F0,
+	.debug_0                      = 0x000013F4,
+	.test_bus_ctrl                = 0x000013F8,
+	.spare                        = 0x000013FC,
+	.reg_update_cmd               = 0x0000102C,
+};
+
+static struct cam_tfe_camif_reg_data tfe530_camif_reg_data = {
+	.extern_reg_update_mask       = 0x00000001,
+	.dual_tfe_pix_en_shift        = 0x00000001,
+	.extern_reg_update_shift      = 0x0,
+	.camif_pd_rdi2_src_sel_shift  = 0x2,
+	.dual_tfe_sync_sel_shift      = 18,
+	.pixel_pattern_shift          = 24,
+	.pixel_pattern_mask           = 0x7000000,
+	.module_enable_shift          = 0,
+	.pix_out_enable_shift         = 8,
+	.pdaf_output_enable_shift     = 9,
+	.dsp_mode_shift               = 0,
+	.dsp_mode_mask                = 0,
+	.dsp_en_shift                 = 0,
+	.dsp_en_mask                  = 0,
+	.reg_update_cmd_data          = 0x1,
+	.epoch_line_cfg               = 0x00140014,
+	.sof_irq_mask                 = 0x00000001,
+	.epoch0_irq_mask              = 0x00000004,
+	.epoch1_irq_mask              = 0x00000008,
+	.eof_irq_mask                 = 0x00000002,
+	.reg_update_irq_mask          = 0x00000001,
+	.error_irq_mask0              = 0x00010100,
+	.error_irq_mask2              = 0x00000023,
+	.subscribe_irq_mask           = {
+		0x00000000,
+		0x00000007,
+		0x00000000,
+	},
+	.enable_diagnostic_hw         = 0x1,
+	.perf_cnt_start_cmd_shift     = 0,
+	.perf_cnt_continuous_shift    = 2,
+	.perf_client_sel_shift        = 8,
+	.perf_window_start_shift      = 16,
+	.perf_window_end_shift        = 20,
+};
+
+static struct cam_tfe_rdi_reg  tfe530_rdi0_reg = {
+	.rdi_hw_version              = 0x00001400,
+	.rdi_hw_status               = 0x00001404,
+	.rdi_module_config           = 0x00001460,
+	.rdi_skip_period             = 0x00001468,
+	.rdi_irq_subsample_pattern   = 0x0000146C,
+	.rdi_epoch_irq               = 0x00001470,
+	.rdi_debug_1                 = 0x000015F0,
+	.rdi_debug_0                 = 0x000015F4,
+	.rdi_test_bus_ctrl           = 0x000015F8,
+	.rdi_spare                   = 0x000015FC,
+	.reg_update_cmd              = 0x0000102C,
+};
+
+static struct cam_tfe_rdi_reg_data tfe530_rdi0_reg_data = {
+	.reg_update_cmd_data         = 0x2,
+	.epoch_line_cfg              = 0x00140014,
+	.pixel_pattern_shift         = 24,
+	.pixel_pattern_mask          = 0x07000000,
+	.rdi_out_enable_shift        = 0,
+
+	.sof_irq_mask                = 0x00000010,
+	.epoch0_irq_mask             = 0x00000040,
+	.epoch1_irq_mask             = 0x00000080,
+	.eof_irq_mask                = 0x00000020,
+	.error_irq_mask0             = 0x00020200,
+	.error_irq_mask2             = 0x00000004,
+	.subscribe_irq_mask          = {
+		0x00000000,
+		0x00000030,
+		0x00000000,
+	},
+	.enable_diagnostic_hw        = 0x1,
+};
+
+static struct cam_tfe_rdi_reg  tfe530_rdi1_reg = {
+	.rdi_hw_version              = 0x00001600,
+	.rdi_hw_status               = 0x00001604,
+	.rdi_module_config           = 0x00001660,
+	.rdi_skip_period             = 0x00001668,
+	.rdi_irq_subsample_pattern   = 0x0000166C,
+	.rdi_epoch_irq               = 0x00001670,
+	.rdi_debug_1                 = 0x000017F0,
+	.rdi_debug_0                 = 0x000017F4,
+	.rdi_test_bus_ctrl           = 0x000017F8,
+	.rdi_spare                   = 0x000017FC,
+	.reg_update_cmd              = 0x0000102C,
+};
+
+static struct cam_tfe_rdi_reg_data tfe530_rdi1_reg_data = {
+	.reg_update_cmd_data         = 0x4,
+	.epoch_line_cfg              = 0x00140014,
+	.pixel_pattern_shift         = 24,
+	.pixel_pattern_mask          = 0x07000000,
+	.rdi_out_enable_shift        = 0,
+
+	.sof_irq_mask                = 0x00000100,
+	.epoch0_irq_mask             = 0x00000400,
+	.epoch1_irq_mask             = 0x00000800,
+	.eof_irq_mask                = 0x00000200,
+	.error_irq_mask0             = 0x00040400,
+	.error_irq_mask2             = 0x00000008,
+	.subscribe_irq_mask          = {
+		0x00000000,
+		0x00000300,
+		0x00000000,
+	},
+	.enable_diagnostic_hw        = 0x1,
+};
+
+static struct cam_tfe_rdi_reg  tfe530_rdi2_reg = {
+	.rdi_hw_version              = 0x00001800,
+	.rdi_hw_status               = 0x00001804,
+	.rdi_module_config           = 0x00001860,
+	.rdi_skip_period             = 0x00001868,
+	.rdi_irq_subsample_pattern   = 0x0000186C,
+	.rdi_epoch_irq               = 0x00001870,
+	.rdi_debug_1                 = 0x000019F0,
+	.rdi_debug_0                 = 0x000019F4,
+	.rdi_test_bus_ctrl           = 0x000019F8,
+	.rdi_spare                   = 0x000019FC,
+	.reg_update_cmd              = 0x0000102C,
+};
+
+static struct cam_tfe_rdi_reg_data tfe530_rdi2_reg_data = {
+	.reg_update_cmd_data         = 0x8,
+	.epoch_line_cfg              = 0x00140014,
+	.pixel_pattern_shift         = 24,
+	.pixel_pattern_mask          = 0x07000000,
+	.rdi_out_enable_shift        = 0,
+
+	.sof_irq_mask                = 0x00001000,
+	.epoch0_irq_mask             = 0x00004000,
+	.epoch1_irq_mask             = 0x00008000,
+	.eof_irq_mask                = 0x00002000,
+	.error_irq_mask0             = 0x00080800,
+	.error_irq_mask2             = 0x00000010,
+	.subscribe_irq_mask          = {
+		0x00000000,
+		0x00003000,
+		0x00000000,
+	},
+	.enable_diagnostic_hw        = 0x1,
+};
+
+static struct  cam_tfe_top_hw_info tfe530_top_hw_info = {
+	.common_reg = &tfe530_top_common_reg,
+	.camif_hw_info = {
+		.camif_reg = &tfe530_camif_reg,
+		.reg_data  = &tfe530_camif_reg_data,
+	},
+	.rdi_hw_info  = {
+		{
+			.rdi_reg  = &tfe530_rdi0_reg,
+			.reg_data = &tfe530_rdi0_reg_data,
+		},
+		{
+			.rdi_reg  = &tfe530_rdi1_reg,
+			.reg_data = &tfe530_rdi1_reg_data,
+		},
+		{
+			.rdi_reg  = &tfe530_rdi2_reg,
+			.reg_data = &tfe530_rdi2_reg_data,
+		},
+	},
+	.in_port = {
+		CAM_TFE_CAMIF_VER_1_0,
+		CAM_TFE_RDI_VER_1_0,
+		CAM_TFE_RDI_VER_1_0,
+		CAM_TFE_RDI_VER_1_0
+	},
+	.reg_dump_data  = {
+		.num_reg_dump_entries    = 19,
+		.num_lut_dump_entries    = 0,
+		.bus_start_addr          = 0x2000,
+		.bus_write_top_end_addr  = 0x2120,
+		.bus_client_start_addr   = 0x2200,
+		.bus_client_offset       = 0x100,
+		.num_bus_clients         = 10,
+		.reg_entry = {
+			{
+				.start_offset = 0x1000,
+				.end_offset   = 0x10F4,
+			},
+			{
+				.start_offset = 0x1260,
+				.end_offset   = 0x1280,
+			},
+			{
+				.start_offset = 0x13F0,
+				.end_offset   = 0x13FC,
+			},
+			{
+				.start_offset = 0x1460,
+				.end_offset   = 0x1470,
+			},
+			{
+				.start_offset = 0x15F0,
+				.end_offset   = 0x15FC,
+			},
+			{
+				.start_offset = 0x1660,
+				.end_offset   = 0x1670,
+			},
+			{
+				.start_offset = 0x17F0,
+				.end_offset   = 0x17FC,
+			},
+			{
+				.start_offset = 0x1860,
+				.end_offset   = 0x1870,
+			},
+			{
+				.start_offset = 0x19F0,
+				.end_offset   = 0x19FC,
+			},
+			{
+				.start_offset = 0x2660,
+				.end_offset   = 0x2694,
+			},
+			{
+				.start_offset = 0x2860,
+				.end_offset   = 0x2884,
+			},
+			{
+				.start_offset = 0x2A60,
+				.end_offset   = 0x2B34,
+			},
+			{
+				.start_offset = 0x2C60,
+				.end_offset   = 0x2C80,
+			},
+			{
+				.start_offset = 0x2E60,
+				.end_offset   = 0x2E7C,
+			},
+			{
+				.start_offset = 0x3060,
+				.end_offset   = 0x3110,
+			},
+			{
+				.start_offset = 0x3260,
+				.end_offset   = 0x3278,
+			},
+			{
+				.start_offset = 0x3460,
+				.end_offset   = 0x3478,
+			},
+			{
+				.start_offset = 0x3660,
+				.end_offset   = 0x3684,
+			},
+			{
+				.start_offset = 0x3860,
+				.end_offset   = 0x3884,
+			},
+		},
+		.lut_entry = {
+			{
+				.lut_word_size = 1,
+				.lut_bank_sel  = 0x40,
+				.lut_addr_size = 180,
+				.dmi_reg_offset = 0x2800,
+			},
+			{
+				.lut_word_size = 1,
+				.lut_bank_sel  = 0x41,
+				.lut_addr_size = 180,
+				.dmi_reg_offset = 0x3000,
+			},
+		},
+	},
+};
+
+static struct cam_tfe_bus_hw_info  tfe530_bus_hw_info = {
+	.common_reg = {
+		.hw_version  = 0x00001A00,
+		.cgc_ovd     = 0x00001A08,
+		.comp_cfg_0  = 0x00001A0C,
+		.comp_cfg_1  = 0x00001A10,
+		.frameheader_cfg  = {
+			0x00001A34,
+			0x00001A38,
+			0x00001A3C,
+			0x00001A40,
+		},
+		.pwr_iso_cfg = 0x00001A5C,
+		.overflow_status_clear = 0x00001A60,
+		.ccif_violation_status = 0x00001A64,
+		.overflow_status       = 0x00001A68,
+		.image_size_violation_status = 0x00001A70,
+		.perf_count_cfg = {
+			0x00001A74,
+			0x00001A78,
+			0x00001A7C,
+			0x00001A80,
+			0x00001A84,
+			0x00001A88,
+			0x00001A8C,
+			0x00001A90,
+		},
+		.perf_count_val = {
+			0x00001A94,
+			0x00001A98,
+			0x00001A9C,
+			0x00001AA0,
+			0x00001AA4,
+			0x00001AA8,
+			0x00001AAC,
+			0x00001AB0,
+		},
+		.perf_count_status = 0x00001AB4,
+		.debug_status_top_cfg = 0x00001AD4,
+		.debug_status_top = 0x00001AD8,
+		.test_bus_ctrl = 0x00001ADC,
+		.irq_mask = {
+			0x00001A18,
+			0x00001A1C,
+		},
+		.irq_clear = {
+			0x00001A20,
+			0x00001A24,
+		},
+		.irq_status = {
+			0x00001A28,
+			0x00001A2C,
+		},
+		.irq_cmd = 0x00001A30,
+	},
+	.num_client = CAM_TFE_BUS_MAX_CLIENTS,
+	.bus_client_reg = {
+		/* BUS Client 0 BAYER */
+		{
+			.cfg                   = 0x00001C00,
+			.image_addr            = 0x00001C04,
+			.frame_incr            = 0x00001C08,
+			.image_cfg_0           = 0x00001C0C,
+			.image_cfg_1           = 0x00001C10,
+			.image_cfg_2           = 0x00001C14,
+			.packer_cfg            = 0x00001C18,
+			.bw_limit              = 0x00001C1C,
+			.frame_header_addr     = 0x00001C20,
+			.frame_header_incr     = 0x00001C24,
+			.frame_header_cfg      = 0x00001C28,
+			.line_done_cfg         = 0x00000000,
+			.irq_subsample_period  = 0x00001C30,
+			.irq_subsample_pattern = 0x00001C34,
+			.framedrop_period      = 0x00001C38,
+			.framedrop_pattern     = 0x00001C3C,
+			.addr_status_0         = 0x00001C68,
+			.addr_status_1         = 0x00001C6C,
+			.addr_status_2         = 0x00001C70,
+			.addr_status_3         = 0x00001C74,
+			.debug_status_cfg      = 0x00001C78,
+			.debug_status_0        = 0x00001C7C,
+			.debug_status_1        = 0x00001C80,
+			.comp_group            = CAM_TFE_BUS_COMP_GRP_0,
+		},
+		/* BUS Client 1 IDEAL RAW*/
+		{
+			.cfg                   = 0x00001D00,
+			.image_addr            = 0x00001D04,
+			.frame_incr            = 0x00001D08,
+			.image_cfg_0           = 0x00001D0C,
+			.image_cfg_1           = 0x00001D10,
+			.image_cfg_2           = 0x00001D14,
+			.packer_cfg            = 0x00001D18,
+			.bw_limit              = 0x00001D1C,
+			.frame_header_addr     = 0x00001D20,
+			.frame_header_incr     = 0x00001D24,
+			.frame_header_cfg      = 0x00001D28,
+			.line_done_cfg         = 0x00000000,
+			.irq_subsample_period  = 0x00001D30,
+			.irq_subsample_pattern = 0x00001D34,
+			.framedrop_period      = 0x00001D38,
+			.framedrop_pattern     = 0x00001D3C,
+			.addr_status_0         = 0x00001D68,
+			.addr_status_1         = 0x00001D6C,
+			.addr_status_2         = 0x00001D70,
+			.addr_status_3         = 0x00001D74,
+			.debug_status_cfg      = 0x00001D78,
+			.debug_status_0        = 0x00001D7C,
+			.debug_status_1        = 0x00001D80,
+			.comp_group            = CAM_TFE_BUS_COMP_GRP_1,
+		},
+		/* BUS Client 2 Stats BE Tintless */
+		{
+			.cfg                   = 0x00001E00,
+			.image_addr            = 0x00001E04,
+			.frame_incr            = 0x00001E08,
+			.image_cfg_0           = 0x00001E0C,
+			.image_cfg_1           = 0x00001E10,
+			.image_cfg_2           = 0x00001E14,
+			.packer_cfg            = 0x00001E18,
+			.bw_limit              = 0x00001E1C,
+			.frame_header_addr     = 0x00001E20,
+			.frame_header_incr     = 0x00001E24,
+			.frame_header_cfg      = 0x00001E28,
+			.line_done_cfg         = 0x00001E00,
+			.irq_subsample_period  = 0x00001E30,
+			.irq_subsample_pattern = 0x00001E34,
+			.framedrop_period      = 0x00001E38,
+			.framedrop_pattern     = 0x00001E3C,
+			.addr_status_0         = 0x00001E68,
+			.addr_status_1         = 0x00001E6C,
+			.addr_status_2         = 0x00001E70,
+			.addr_status_3         = 0x00001E74,
+			.debug_status_cfg      = 0x00001E78,
+			.debug_status_0        = 0x00001E7C,
+			.debug_status_1        = 0x00001E80,
+			.comp_group            = CAM_TFE_BUS_COMP_GRP_2,
+		},
+		/* BUS Client 3 Stats Bhist */
+		{
+			.cfg                   = 0x00001F00,
+			.image_addr            = 0x00001F04,
+			.frame_incr            = 0x00001F08,
+			.image_cfg_0           = 0x00001F0C,
+			.image_cfg_1           = 0x00001F10,
+			.image_cfg_2           = 0x00001F14,
+			.packer_cfg            = 0x00001F18,
+			.bw_limit              = 0x00001F1C,
+			.frame_header_addr     = 0x00001F20,
+			.frame_header_incr     = 0x00001F24,
+			.frame_header_cfg      = 0x00001F28,
+			.line_done_cfg         = 0x00000000,
+			.irq_subsample_period  = 0x00001F30,
+			.irq_subsample_pattern = 0x00001F34,
+			.framedrop_period      = 0x00001F38,
+			.framedrop_pattern     = 0x00001F3C,
+			.addr_status_0         = 0x00001F68,
+			.addr_status_1         = 0x00001F6C,
+			.addr_status_2         = 0x00001F70,
+			.addr_status_3         = 0x00001F74,
+			.debug_status_cfg      = 0x00001F78,
+			.debug_status_0        = 0x00001F7C,
+			.debug_status_1        = 0x00001F80,
+			.comp_group            = CAM_TFE_BUS_COMP_GRP_2,
+		},
+		/* BUS Client 4 Stats AWB BG */
+		{
+			.cfg                   = 0x00002000,
+			.image_addr            = 0x00002004,
+			.frame_incr            = 0x00002008,
+			.image_cfg_0           = 0x0000200C,
+			.image_cfg_1           = 0x00002010,
+			.image_cfg_2           = 0x00002014,
+			.packer_cfg            = 0x00002018,
+			.bw_limit              = 0x0000201C,
+			.frame_header_addr     = 0x00002020,
+			.frame_header_incr     = 0x00002024,
+			.frame_header_cfg      = 0x00002028,
+			.line_done_cfg         = 0x00000000,
+			.irq_subsample_period  = 0x00002030,
+			.irq_subsample_pattern = 0x00002034,
+			.framedrop_period      = 0x00002038,
+			.framedrop_pattern     = 0x0000203C,
+			.addr_status_0         = 0x00002068,
+			.addr_status_1         = 0x0000206C,
+			.addr_status_2         = 0x00002070,
+			.addr_status_3         = 0x00002074,
+			.debug_status_cfg      = 0x00002078,
+			.debug_status_0        = 0x0000207C,
+			.debug_status_1        = 0x00002080,
+			.comp_group            = CAM_TFE_BUS_COMP_GRP_3,
+		},
+		/* BUS Client 5 Stats AEC BG */
+		{
+			.cfg                   = 0x00002100,
+			.image_addr            = 0x00002104,
+			.frame_incr            = 0x00002108,
+			.image_cfg_0           = 0x0000210C,
+			.image_cfg_1           = 0x00002110,
+			.image_cfg_2           = 0x00002114,
+			.packer_cfg            = 0x00002118,
+			.bw_limit              = 0x0000211C,
+			.frame_header_addr     = 0x00002120,
+			.frame_header_incr     = 0x00002124,
+			.frame_header_cfg      = 0x00002128,
+			.line_done_cfg         = 0x00000000,
+			.irq_subsample_period  = 0x00002130,
+			.irq_subsample_pattern = 0x00002134,
+			.framedrop_period      = 0x00002138,
+			.framedrop_pattern     = 0x0000213C,
+			.addr_status_0         = 0x00002168,
+			.addr_status_1         = 0x0000216C,
+			.addr_status_2         = 0x00002170,
+			.addr_status_3         = 0x00002174,
+			.debug_status_cfg      = 0x00002178,
+			.debug_status_0        = 0x0000217C,
+			.debug_status_1        = 0x00002180,
+			.comp_group            = CAM_TFE_BUS_COMP_GRP_3,
+		},
+		/* BUS Client 6 Stats BAF */
+		{
+			.cfg                   = 0x00002200,
+			.image_addr            = 0x00002204,
+			.frame_incr            = 0x00002208,
+			.image_cfg_0           = 0x0000220C,
+			.image_cfg_1           = 0x00002210,
+			.image_cfg_2           = 0x00002214,
+			.packer_cfg            = 0x00002218,
+			.bw_limit              = 0x0000221C,
+			.frame_header_addr     = 0x00002220,
+			.frame_header_incr     = 0x00002224,
+			.frame_header_cfg      = 0x00002228,
+			.line_done_cfg         = 0x00000000,
+			.irq_subsample_period  = 0x00002230,
+			.irq_subsample_pattern = 0x00002234,
+			.framedrop_period      = 0x00002238,
+			.framedrop_pattern     = 0x0000223C,
+			.addr_status_0         = 0x00002268,
+			.addr_status_1         = 0x0000226C,
+			.addr_status_2         = 0x00002270,
+			.addr_status_3         = 0x00002274,
+			.debug_status_cfg      = 0x00002278,
+			.debug_status_0        = 0x0000227C,
+			.debug_status_1        = 0x00002280,
+			.comp_group            = CAM_TFE_BUS_COMP_GRP_4,
+		},
+		/* BUS Client 7 RDI0 */
+		{
+			.cfg                   = 0x00002300,
+			.image_addr            = 0x00002304,
+			.frame_incr            = 0x00002308,
+			.image_cfg_0           = 0x0000230C,
+			.image_cfg_1           = 0x00002310,
+			.image_cfg_2           = 0x00002314,
+			.packer_cfg            = 0x00002318,
+			.bw_limit              = 0x0000231C,
+			.frame_header_addr     = 0x00002320,
+			.frame_header_incr     = 0x00002324,
+			.frame_header_cfg      = 0x00002328,
+			.line_done_cfg         = 0x00000000,
+			.irq_subsample_period  = 0x00002330,
+			.irq_subsample_pattern = 0x00002334,
+			.framedrop_period      = 0x00002338,
+			.framedrop_pattern     = 0x0000233C,
+			.addr_status_0         = 0x00002368,
+			.addr_status_1         = 0x0000236C,
+			.addr_status_2         = 0x00002370,
+			.addr_status_3         = 0x00002374,
+			.debug_status_cfg      = 0x00002378,
+			.debug_status_0        = 0x0000237C,
+			.debug_status_1        = 0x00002380,
+			.comp_group            = CAM_TFE_BUS_COMP_GRP_5,
+		},
+		/* BUS Client 8 RDI1 */
+		{
+			.cfg                   = 0x00002400,
+			.image_addr            = 0x00002404,
+			.frame_incr            = 0x00002408,
+			.image_cfg_0           = 0x0000240C,
+			.image_cfg_1           = 0x00002410,
+			.image_cfg_2           = 0x00002414,
+			.packer_cfg            = 0x00002418,
+			.bw_limit              = 0x0000241C,
+			.frame_header_addr     = 0x00002420,
+			.frame_header_incr     = 0x00002424,
+			.frame_header_cfg      = 0x00002428,
+			.line_done_cfg         = 0x00000000,
+			.irq_subsample_period  = 0x00002430,
+			.irq_subsample_pattern = 0x00002434,
+			.framedrop_period      = 0x00002438,
+			.framedrop_pattern     = 0x0000243C,
+			.addr_status_0         = 0x00002468,
+			.addr_status_1         = 0x0000246C,
+			.addr_status_2         = 0x00002470,
+			.addr_status_3         = 0x00002474,
+			.debug_status_cfg      = 0x00002478,
+			.debug_status_0        = 0x0000247C,
+			.debug_status_1        = 0x00002480,
+			.comp_group            = CAM_TFE_BUS_COMP_GRP_6,
+		},
+		/* BUS Client 9 PDAF/RDI2 */
+		{
+			.cfg                   = 0x00002500,
+			.image_addr            = 0x00002504,
+			.frame_incr            = 0x00002508,
+			.image_cfg_0           = 0x0000250C,
+			.image_cfg_1           = 0x00002510,
+			.image_cfg_2           = 0x00002514,
+			.packer_cfg            = 0x00002518,
+			.bw_limit              = 0x0000251C,
+			.frame_header_addr     = 0x00002520,
+			.frame_header_incr     = 0x00002524,
+			.frame_header_cfg      = 0x00002528,
+			.line_done_cfg         = 0x00000000,
+			.irq_subsample_period  = 0x00002530,
+			.irq_subsample_pattern = 0x00002534,
+			.framedrop_period      = 0x00002538,
+			.framedrop_pattern     = 0x0000253C,
+			.addr_status_0         = 0x00002568,
+			.addr_status_1         = 0x0000256C,
+			.addr_status_2         = 0x00002570,
+			.addr_status_3         = 0x00002574,
+			.debug_status_cfg      = 0x00002578,
+			.debug_status_0        = 0x0000257C,
+			.debug_status_1        = 0x00002580,
+			.comp_group            = CAM_TFE_BUS_COMP_GRP_7,
+		},
+	},
+	.num_out  = CAM_TFE_BUS_TFE_OUT_MAX,
+	.tfe_out_hw_info = {
+		{
+			.tfe_out_id       = CAM_TFE_BUS_TFE_OUT_RDI0,
+			.max_width        = -1,
+			.max_height       = -1,
+			.composite_group  = CAM_TFE_BUS_COMP_GRP_5,
+			.rup_group_id     = CAM_TFE_BUS_RUP_GRP_1,
+		},
+		{
+			.tfe_out_id       = CAM_TFE_BUS_TFE_OUT_RDI1,
+			.max_width        = -1,
+			.max_height       = -1,
+			.composite_group  = CAM_TFE_BUS_COMP_GRP_6,
+			.rup_group_id     = CAM_TFE_BUS_RUP_GRP_2,
+		},
+		{
+			.tfe_out_id       = CAM_TFE_BUS_TFE_OUT_RDI2,
+			.max_width        = -1,
+			.max_height       = -1,
+			.composite_group  = CAM_TFE_BUS_COMP_GRP_7,
+			.rup_group_id     = CAM_TFE_BUS_RUP_GRP_3,
+		},
+		{
+			.tfe_out_id       = CAM_TFE_BUS_TFE_OUT_FULL,
+			.max_width        = 4096,
+			.max_height       = 4096,
+			.composite_group  = CAM_TFE_BUS_COMP_GRP_0,
+			.rup_group_id     = CAM_TFE_BUS_RUP_GRP_0,
+		},
+		{
+			.tfe_out_id       = CAM_TFE_BUS_TFE_OUT_RAW_DUMP,
+			.max_width        = 4096,
+			.max_height       = 4096,
+			.composite_group  = CAM_TFE_BUS_COMP_GRP_1,
+			.rup_group_id     = CAM_TFE_BUS_RUP_GRP_0,
+		},
+		{
+			.tfe_out_id       = CAM_TFE_BUS_TFE_OUT_PDAF,
+			.max_width        = 4096,
+			.max_height       = 4096,
+			.composite_group  = CAM_TFE_BUS_COMP_GRP_7,
+			.rup_group_id     = CAM_TFE_BUS_RUP_GRP_3,
+		},
+		{
+			.tfe_out_id       = CAM_TFE_BUS_TFE_OUT_STATS_HDR_BE,
+			.max_width        = -1,
+			.max_height       = -1,
+			.composite_group  = CAM_TFE_BUS_COMP_GRP_3,
+			.rup_group_id     = CAM_TFE_BUS_RUP_GRP_0,
+		},
+		{
+			.tfe_out_id       = CAM_TFE_BUS_TFE_OUT_STATS_HDR_BHIST,
+			.max_width        = -1,
+			.max_height       = -1,
+			.composite_group  = CAM_TFE_BUS_COMP_GRP_2,
+			.rup_group_id     = CAM_TFE_BUS_RUP_GRP_0,
+		},
+		{
+			.tfe_out_id       = CAM_TFE_BUS_TFE_OUT_STATS_TL_BG,
+			.max_width        = -1,
+			.max_height       = -1,
+			.composite_group  = CAM_TFE_BUS_COMP_GRP_2,
+			.rup_group_id     = CAM_TFE_BUS_RUP_GRP_0,
+		},
+		{
+			.tfe_out_id       = CAM_TFE_BUS_TFE_OUT_STATS_AWB_BG,
+			.max_width        = -1,
+			.max_height       = -1,
+			.composite_group  = CAM_TFE_BUS_COMP_GRP_3,
+			.rup_group_id     = CAM_TFE_BUS_RUP_GRP_0,
+		},
+		{
+			.tfe_out_id       = CAM_TFE_BUS_TFE_OUT_STATS_BF,
+			.max_width        = -1,
+			.max_height       = -1,
+			.composite_group  = CAM_TFE_BUS_COMP_GRP_4,
+			.rup_group_id     = CAM_TFE_BUS_RUP_GRP_0,
+		},
+	},
+	.comp_done_shift          = 8,
+	.top_bus_wr_irq_shift     = 1,
+	.comp_buf_done_mask = 0xFF00,
+	.comp_rup_done_mask = 0xF,
+	.bus_irq_error_mask = {
+		0xD0000000,
+		0x00000000,
+	},
+};
+
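+/*
+ * Top-level TFE530 HW description: three banks of top IRQ registers,
+ * two banks of bus IRQ registers, and the bus/top block tables defined
+ * in this header.
+ */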
+struct cam_tfe_hw_info cam_tfe530 = {
+	.top_irq_mask = {
+		0x00001034,
+		0x00001038,
+		0x0000103C,
+	},
+	.top_irq_clear = {
+		0x00001040,
+		0x00001044,
+		0x00001048,
+	},
+	.top_irq_status = {
+		0x0000104C,
+		0x00001050,
+		0x00001054,
+	},
+	.top_irq_cmd                       = 0x00001030,
+	.global_clear_bitmask              = 0x00000001,
+
+	.bus_irq_mask = {
+		0x00001A18,
+		0x00001A1C,
+	},
+	.bus_irq_clear = {
+		0x00001A20,
+		0x00001A24,
+	},
+	.bus_irq_status = {
+		0x00001A28,
+		0x00001A2C,
+	},
+	.bus_irq_cmd = 0x00001A30,
+	.bus_violation_reg = 0x00001A64,
+	.bus_overflow_reg = 0x00001A68,
+	.bus_image_size_vilation_reg = 0x00001A70,
+	.bus_overflow_clear_cmd = 0x00001A60,
+	.debug_status_top = 0x00001AD8,
+
+	.reset_irq_mask = {
+		0x00000001,
+		0x00000000,
+		0x00000000,
+	},
+	.error_irq_mask = {
+		0x000F0F00,
+		0x00000000,
+		0x0000003F,
+	},
+	.bus_reg_irq_mask = {
+		0x00000002,
+		0x00000000,
+		0x00000000,
+	},
+
+	.bus_version                   = CAM_TFE_BUS_1_0,
+	.bus_hw_info                   = &tfe530_bus_hw_info,
+
+	.top_version                   = CAM_TFE_TOP_1_0,
+	.top_hw_info                   = &tfe530_top_hw_info,
+};
+
+#endif /* _CAM_TFE530_H_ */

+ 2149 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_bus.c

@@ -0,0 +1,2149 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/ratelimit.h>
+#include <linux/slab.h>
+#include <media/cam_tfe.h>
+#include <media/cam_isp_tfe.h>
+#include "cam_io_util.h"
+#include "cam_debug_util.h"
+#include "cam_cdm_util.h"
+#include "cam_hw_intf.h"
+#include "cam_isp_hw_mgr_intf.h"
+#include "cam_tfe_hw_intf.h"
+#include "cam_irq_controller.h"
+#include "cam_tasklet_util.h"
+#include "cam_tfe_bus.h"
+#include "cam_tfe_irq.h"
+#include "cam_tfe_soc.h"
+#include "cam_cpas_api.h"
+
+static const char drv_name[] = "tfe_bus";
+
+#define CAM_TFE_BUS_IRQ_REG0                0
+#define CAM_TFE_BUS_IRQ_REG1                1
+
+#define CAM_TFE_BUS_PAYLOAD_MAX             256
+
+#define CAM_TFE_RDI_BUS_DEFAULT_WIDTH               0xFFFF
+#define CAM_TFE_RDI_BUS_DEFAULT_STRIDE              0xFFFF
+
+#define CAM_TFE_MAX_OUT_RES_PER_COMP_GRP    2
+
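+/*
+ * Worst-case CDM payload sizing: one (offset, value) pair for every
+ * register in a bus client block, for every plane of a buffer.
+ */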
+#define MAX_BUF_UPDATE_REG_NUM   \
+	(sizeof(struct cam_tfe_bus_reg_offset_bus_client) / 4)
+#define MAX_REG_VAL_PAIR_SIZE    \
+	(MAX_BUF_UPDATE_REG_NUM * 2 * CAM_PACKET_MAX_PLANES)
+
+enum cam_tfe_bus_packer_format {
+	PACKER_FMT_PLAIN_128,
+	PACKER_FMT_PLAIN_8,
+	PACKER_FMT_PLAIN_8_ODD_EVEN,
+	PACKER_FMT_PLAIN_8_LSB_MSB_10,
+	PACKER_FMT_PLAIN_8_LSB_MSB_10_ODD_EVEN,
+	PACKER_FMT_PLAIN_16_10BPP,
+	PACKER_FMT_PLAIN_16_12BPP,
+	PACKER_FMT_PLAIN_16_14BPP,
+	PACKER_FMT_PLAIN_16_16BPP,
+	PACKER_FMT_PLAIN_32,
+	PACKER_FMT_PLAIN_64,
+	PACKER_FMT_TP_10,
+	PACKET_FMT_MIPI10,
+	PACKET_FMT_MIPI12,
+	PACKER_FMT_MAX,
+};
+
+struct cam_tfe_bus_common_data {
+	uint32_t                                    core_index;
+	void __iomem                               *mem_base;
+	struct cam_hw_intf                         *hw_intf;
+	void                                       *tfe_core_data;
+	struct cam_tfe_bus_reg_offset_common       *common_reg;
+	uint32_t       io_buf_update[MAX_REG_VAL_PAIR_SIZE];
+
+	spinlock_t                                  spin_lock;
+	struct mutex                                bus_mutex;
+	uint32_t                                    secure_mode;
+	uint32_t                                    num_sec_out;
+	uint32_t                                    comp_done_shift;
+	bool                                        is_lite;
+	cam_hw_mgr_event_cb_func                    event_cb;
+	bool                        rup_irq_enable[CAM_TFE_BUS_RUP_GRP_MAX];
+};
+
+struct cam_tfe_bus_wm_resource_data {
+	uint32_t             index;
+	struct cam_tfe_bus_common_data            *common_data;
+	struct cam_tfe_bus_reg_offset_bus_client  *hw_regs;
+
+	uint32_t             offset;
+	uint32_t             width;
+	uint32_t             height;
+	uint32_t             stride;
+	uint32_t             format;
+	uint32_t             pack_fmt;
+	uint32_t             burst_len;
+
+	uint32_t             irq_subsample_period;
+	uint32_t             irq_subsample_pattern;
+	uint32_t             framedrop_period;
+	uint32_t             framedrop_pattern;
+
+	uint32_t             en_cfg;
+	uint32_t             is_dual;
+};
+
+struct cam_tfe_bus_comp_grp_data {
+	enum cam_tfe_bus_comp_grp_id            comp_grp_id;
+	struct cam_tfe_bus_common_data         *common_data;
+
+	uint32_t                                is_master;
+	uint32_t                                is_dual;
+	uint32_t                                addr_sync_mode;
+	uint32_t                                composite_mask;
+
+	uint32_t                                acquire_dev_cnt;
+	uint32_t                                source_grp;
+
+	struct cam_isp_resource_node
+		*out_rsrc[CAM_TFE_MAX_OUT_RES_PER_COMP_GRP];
+};
+
+struct cam_tfe_bus_tfe_out_data {
+	uint32_t                         out_id;
+	uint32_t                         composite_group;
+	uint32_t                         rup_group_id;
+	uint32_t                         source_group;
+	struct cam_tfe_bus_common_data  *common_data;
+
+	uint32_t                         num_wm;
+	struct cam_isp_resource_node    *wm_res[PLANE_MAX];
+
+	struct cam_isp_resource_node    *comp_grp;
+	struct list_head                 tfe_out_list;
+
+	uint32_t                         is_master;
+	uint32_t                         is_dual;
+
+	uint32_t                         format;
+	uint32_t                         max_width;
+	uint32_t                         max_height;
+	struct cam_cdm_utils_ops        *cdm_util_ops;
+	uint32_t                         secure_mode;
+	void                            *priv;
+	cam_hw_mgr_event_cb_func         event_cb;
+};
+
+struct cam_tfe_bus_priv {
+	struct cam_tfe_bus_common_data      common_data;
+	uint32_t                            num_client;
+	uint32_t                            num_out;
+	uint32_t                            top_bus_wr_irq_shift;
+
+	struct cam_isp_resource_node  bus_client[CAM_TFE_BUS_MAX_CLIENTS];
+	struct cam_isp_resource_node  comp_grp[CAM_TFE_BUS_COMP_GRP_MAX];
+	struct cam_isp_resource_node  tfe_out[CAM_TFE_BUS_TFE_OUT_MAX];
+
+	struct list_head                    free_comp_grp;
+	struct list_head                    used_comp_grp;
+
+	void                               *tasklet_info;
+	uint32_t                            comp_buf_done_mask;
+	uint32_t                            comp_rup_done_mask;
+	uint32_t           bus_irq_error_mask[CAM_TFE_BUS_IRQ_REGISTERS_MAX];
+};
+
+static bool cam_tfe_bus_can_be_secure(uint32_t out_id)
+{
+	switch (out_id) {
+	case CAM_TFE_BUS_TFE_OUT_FULL:
+	case CAM_TFE_BUS_TFE_OUT_RAW_DUMP:
+	case CAM_TFE_BUS_TFE_OUT_RDI0:
+	case CAM_TFE_BUS_TFE_OUT_RDI1:
+	case CAM_TFE_BUS_TFE_OUT_RDI2:
+		return true;
+
+	case CAM_TFE_BUS_TFE_OUT_STATS_HDR_BE:
+	case CAM_TFE_BUS_TFE_OUT_STATS_HDR_BHIST:
+	case CAM_TFE_BUS_TFE_OUT_STATS_TL_BG:
+	case CAM_TFE_BUS_TFE_OUT_STATS_BF:
+	case CAM_TFE_BUS_TFE_OUT_STATS_AWB_BG:
+	default:
+		return false;
+	}
+}
+
+static enum cam_tfe_bus_tfe_out_id
+	cam_tfe_bus_get_out_res_id(uint32_t out_res_id)
+{
+	switch (out_res_id) {
+	case CAM_ISP_TFE_OUT_RES_FULL:
+		return CAM_TFE_BUS_TFE_OUT_FULL;
+	case CAM_ISP_TFE_OUT_RES_RAW_DUMP:
+		return CAM_TFE_BUS_TFE_OUT_RAW_DUMP;
+	case CAM_ISP_TFE_OUT_RES_PDAF:
+		return CAM_TFE_BUS_TFE_OUT_PDAF;
+	case CAM_ISP_TFE_OUT_RES_RDI_0:
+		return CAM_TFE_BUS_TFE_OUT_RDI0;
+	case CAM_ISP_TFE_OUT_RES_RDI_1:
+		return CAM_TFE_BUS_TFE_OUT_RDI1;
+	case CAM_ISP_TFE_OUT_RES_RDI_2:
+		return CAM_TFE_BUS_TFE_OUT_RDI2;
+	case CAM_ISP_TFE_OUT_RES_STATS_HDR_BE:
+		return CAM_TFE_BUS_TFE_OUT_STATS_HDR_BE;
+	case CAM_ISP_TFE_OUT_RES_STATS_HDR_BHIST:
+		return CAM_TFE_BUS_TFE_OUT_STATS_HDR_BHIST;
+	case CAM_ISP_TFE_OUT_RES_STATS_TL_BG:
+		return CAM_TFE_BUS_TFE_OUT_STATS_TL_BG;
+	case CAM_ISP_TFE_OUT_RES_STATS_BF:
+		return CAM_TFE_BUS_TFE_OUT_STATS_BF;
+	case CAM_ISP_TFE_OUT_RES_STATS_AWB_BG:
+		return CAM_TFE_BUS_TFE_OUT_STATS_AWB_BG;
+	default:
+		return CAM_TFE_BUS_TFE_OUT_MAX;
+	}
+}
+
+static int cam_tfe_bus_get_num_wm(
+	enum cam_tfe_bus_tfe_out_id           out_res_id,
+	uint32_t                              format)
+{
+	switch (out_res_id) {
+	case CAM_TFE_BUS_TFE_OUT_RDI0:
+	case CAM_TFE_BUS_TFE_OUT_RDI1:
+	case CAM_TFE_BUS_TFE_OUT_RDI2:
+		switch (format) {
+		case CAM_FORMAT_MIPI_RAW_8:
+		case CAM_FORMAT_MIPI_RAW_10:
+		case CAM_FORMAT_MIPI_RAW_12:
+		case CAM_FORMAT_MIPI_RAW_14:
+		case CAM_FORMAT_MIPI_RAW_16:
+		case CAM_FORMAT_PLAIN8:
+		case CAM_FORMAT_PLAIN16_10:
+		case CAM_FORMAT_PLAIN16_12:
+		case CAM_FORMAT_PLAIN16_14:
+		case CAM_FORMAT_PLAIN16_16:
+		case CAM_FORMAT_PLAIN128:
+			return 1;
+		default:
+			break;
+		}
+		break;
+	case CAM_TFE_BUS_TFE_OUT_PDAF:
+		switch (format) {
+		case CAM_FORMAT_PLAIN8:
+		case CAM_FORMAT_PLAIN16_10:
+		case CAM_FORMAT_PLAIN16_12:
+		case CAM_FORMAT_PLAIN16_14:
+			return 1;
+		default:
+			break;
+		}
+		break;
+
+	case CAM_TFE_BUS_TFE_OUT_FULL:
+		switch (format) {
+		case CAM_FORMAT_MIPI_RAW_8:
+		case CAM_FORMAT_MIPI_RAW_10:
+		case CAM_FORMAT_MIPI_RAW_12:
+		case CAM_FORMAT_PLAIN8:
+		case CAM_FORMAT_PLAIN16_10:
+		case CAM_FORMAT_PLAIN16_12:
+			return 1;
+		default:
+			break;
+		}
+		break;
+	case CAM_TFE_BUS_TFE_OUT_RAW_DUMP:
+		switch (format) {
+		case CAM_FORMAT_ARGB_14:
+		case CAM_FORMAT_PLAIN8:
+		case CAM_FORMAT_PLAIN16_10:
+		case CAM_FORMAT_PLAIN16_12:
+		case CAM_FORMAT_PLAIN16_14:
+		case CAM_FORMAT_MIPI_RAW_8:
+		case CAM_FORMAT_MIPI_RAW_10:
+		case CAM_FORMAT_MIPI_RAW_12:
+			return 1;
+		default:
+			break;
+		}
+		break;
+	case CAM_TFE_BUS_TFE_OUT_STATS_HDR_BE:
+	case CAM_TFE_BUS_TFE_OUT_STATS_HDR_BHIST:
+	case CAM_TFE_BUS_TFE_OUT_STATS_TL_BG:
+	case CAM_TFE_BUS_TFE_OUT_STATS_BF:
+	case CAM_TFE_BUS_TFE_OUT_STATS_AWB_BG:
+		switch (format) {
+		case CAM_FORMAT_PLAIN64:
+			return 1;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+
+	CAM_ERR(CAM_ISP, "Unsupported format %u for resource id %u",
+		format, out_res_id);
+
+	return -EINVAL;
+}
+
+static int cam_tfe_bus_get_wm_idx(
+	enum cam_tfe_bus_tfe_out_id tfe_out_res_id,
+	enum cam_tfe_bus_plane_type plane)
+{
+	int wm_idx = -1;
+
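+	/*
+	 * The WM-to-output mapping is fixed in HW:
+	 * WM0 FULL, WM1 RAW_DUMP, WM2 TL_BG, WM3 HDR_BHIST, WM4 AWB_BG,
+	 * WM5 HDR_BE, WM6 BF, WM7 RDI0, WM8 RDI1, WM9 RDI2/PDAF.
+	 */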
+	switch (tfe_out_res_id) {
+	case CAM_TFE_BUS_TFE_OUT_RDI0:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 7;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_TFE_BUS_TFE_OUT_RDI1:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 8;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_TFE_BUS_TFE_OUT_RDI2:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 9;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_TFE_BUS_TFE_OUT_PDAF:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 9;
+			break;
+		default:
+			break;
+		}
+		break;
+
+	case CAM_TFE_BUS_TFE_OUT_FULL:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 0;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_TFE_BUS_TFE_OUT_RAW_DUMP:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 1;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_TFE_BUS_TFE_OUT_STATS_HDR_BE:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 5;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_TFE_BUS_TFE_OUT_STATS_HDR_BHIST:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 3;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_TFE_BUS_TFE_OUT_STATS_AWB_BG:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 4;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_TFE_BUS_TFE_OUT_STATS_TL_BG:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 2;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_TFE_BUS_TFE_OUT_STATS_BF:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 6;
+			break;
+		default:
+			break;
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	return wm_idx;
+}
+
+static enum cam_tfe_bus_packer_format
+	cam_tfe_bus_get_packer_fmt(uint32_t out_fmt, int wm_index)
+{
+	switch (out_fmt) {
+	case CAM_FORMAT_MIPI_RAW_6:
+	case CAM_FORMAT_MIPI_RAW_8:
+	case CAM_FORMAT_MIPI_RAW_10:
+	case CAM_FORMAT_MIPI_RAW_12:
+	case CAM_FORMAT_MIPI_RAW_14:
+	case CAM_FORMAT_MIPI_RAW_16:
+	case CAM_FORMAT_MIPI_RAW_20:
+	case CAM_FORMAT_PLAIN16_8:
+	case CAM_FORMAT_PLAIN128:
+	case CAM_FORMAT_PD8:
+		return PACKER_FMT_PLAIN_128;
+	case CAM_FORMAT_PLAIN8:
+		return PACKER_FMT_PLAIN_8;
+	case CAM_FORMAT_Y_ONLY:
+		return PACKER_FMT_PLAIN_8_LSB_MSB_10;
+	case CAM_FORMAT_PLAIN16_10:
+		return PACKER_FMT_PLAIN_16_10BPP;
+	case CAM_FORMAT_PLAIN16_12:
+		return PACKER_FMT_PLAIN_16_12BPP;
+	case CAM_FORMAT_PLAIN16_14:
+		return PACKER_FMT_PLAIN_16_14BPP;
+	case CAM_FORMAT_PLAIN16_16:
+		return PACKER_FMT_PLAIN_16_16BPP;
+	case CAM_FORMAT_ARGB:
+		return PACKER_FMT_PLAIN_32;
+	case CAM_FORMAT_PLAIN64:
+	case CAM_FORMAT_PD10:
+		return PACKER_FMT_PLAIN_64;
+	case CAM_FORMAT_TP10:
+		return PACKER_FMT_TP_10;
+	default:
+		return PACKER_FMT_MAX;
+	}
+}
+
+static int cam_tfe_bus_acquire_wm(
+	struct cam_tfe_bus_priv               *bus_priv,
+	struct cam_isp_tfe_out_port_info      *out_port_info,
+	struct cam_isp_resource_node         **wm_res,
+	void                                  *tasklet,
+	enum cam_tfe_bus_tfe_out_id            tfe_out_res_id,
+	enum cam_tfe_bus_plane_type            plane,
+	uint32_t                              *client_done_mask,
+	uint32_t                               is_dual,
+	enum cam_tfe_bus_comp_grp_id          *comp_grp_id)
+{
+	struct cam_isp_resource_node         *wm_res_local = NULL;
+	struct cam_tfe_bus_wm_resource_data  *rsrc_data = NULL;
+	int wm_idx = 0;
+
+	*wm_res = NULL;
+
+	/* No need to allocate: the BUS TFE OUT to WM mapping is fixed. */
+	wm_idx = cam_tfe_bus_get_wm_idx(tfe_out_res_id, plane);
+	if (wm_idx < 0 || wm_idx >= bus_priv->num_client) {
+		CAM_ERR(CAM_ISP, "Unsupported TFE out %d plane %d",
+			tfe_out_res_id, plane);
+		return -EINVAL;
+	}
+
+	wm_res_local = &bus_priv->bus_client[wm_idx];
+	if (wm_res_local->res_state != CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+		CAM_ERR(CAM_ISP, "WM:%d not available state:%d",
+			wm_idx, wm_res_local->res_state);
+		return -EALREADY;
+	}
+	wm_res_local->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+	wm_res_local->tasklet_info = tasklet;
+
+	rsrc_data = wm_res_local->res_priv;
+	rsrc_data->format = out_port_info->format;
+	rsrc_data->pack_fmt = cam_tfe_bus_get_packer_fmt(rsrc_data->format,
+		wm_idx);
+
+	rsrc_data->width = out_port_info->width;
+	rsrc_data->height = out_port_info->height;
+	rsrc_data->stride = out_port_info->stride;
+	rsrc_data->is_dual = is_dual;
+	/* Set WM offset value to default */
+	rsrc_data->offset  = 0;
+
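+	/*
+	 * en_cfg bit 0 enables the WM; bit 16 appears to select frame-based
+	 * mode, which the clients below with no programmed line geometry
+	 * (MIPI-packed RDI and stats) set, while image clients stay
+	 * line-based with bit 0 only.
+	 */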
+	if (rsrc_data->index > 6) {
+		/* WMs 7-9 map to RDI0/RDI1/RDI2 */
+		switch (rsrc_data->format) {
+		case CAM_FORMAT_MIPI_RAW_6:
+		case CAM_FORMAT_MIPI_RAW_8:
+		case CAM_FORMAT_MIPI_RAW_10:
+		case CAM_FORMAT_MIPI_RAW_12:
+		case CAM_FORMAT_MIPI_RAW_14:
+		case CAM_FORMAT_MIPI_RAW_16:
+		case CAM_FORMAT_PLAIN128:
+			rsrc_data->width = CAM_TFE_RDI_BUS_DEFAULT_WIDTH;
+			rsrc_data->height = 0;
+			rsrc_data->stride = CAM_TFE_RDI_BUS_DEFAULT_STRIDE;
+			rsrc_data->pack_fmt = 0xA;
+			rsrc_data->en_cfg = (0x1 << 16) | 0x1;
+			break;
+		case CAM_FORMAT_PLAIN8:
+			rsrc_data->en_cfg = 0x1;
+			rsrc_data->pack_fmt = 0xA;
+			rsrc_data->stride = rsrc_data->width * 2;
+			break;
+		case CAM_FORMAT_PLAIN16_10:
+		case CAM_FORMAT_PLAIN16_12:
+		case CAM_FORMAT_PLAIN16_14:
+		case CAM_FORMAT_PLAIN16_16:
+			rsrc_data->width = CAM_TFE_RDI_BUS_DEFAULT_WIDTH;
+			rsrc_data->height = 0;
+			rsrc_data->stride = CAM_TFE_RDI_BUS_DEFAULT_STRIDE;
+			rsrc_data->pack_fmt = 0xA;
+			rsrc_data->en_cfg = (0x1 << 16) | 0x1;
+			break;
+		case CAM_FORMAT_PLAIN64:
+			rsrc_data->en_cfg = 0x1;
+			rsrc_data->pack_fmt = 0xA;
+			break;
+		default:
+			CAM_ERR(CAM_ISP, "Unsupported RDI format %d",
+				rsrc_data->format);
+			return -EINVAL;
+		}
+	} else if (rsrc_data->index == 0) {
+		/* WM 0 FULL_OUT */
+		switch (rsrc_data->format) {
+		case CAM_FORMAT_MIPI_RAW_8:
+			rsrc_data->pack_fmt = 0x1;
+			break;
+		case CAM_FORMAT_MIPI_RAW_10:
+			rsrc_data->pack_fmt = 0xc;
+			break;
+		case CAM_FORMAT_MIPI_RAW_12:
+			rsrc_data->pack_fmt = 0xd;
+			break;
+		case CAM_FORMAT_PLAIN8:
+			rsrc_data->pack_fmt = 0x1;
+			break;
+		case CAM_FORMAT_PLAIN16_10:
+			rsrc_data->pack_fmt = 0x5;
+			rsrc_data->pack_fmt |= 0x10;
+			break;
+		case CAM_FORMAT_PLAIN16_12:
+			rsrc_data->pack_fmt = 0x6;
+			rsrc_data->pack_fmt |= 0x10;
+			break;
+		default:
+			CAM_ERR(CAM_ISP, "Invalid format %d",
+				rsrc_data->format);
+			return -EINVAL;
+		}
+
+		rsrc_data->en_cfg = 0x1;
+	} else if (rsrc_data->index >= 2 && rsrc_data->index <= 6) {
+		/* WM 2-6 stats */
+		rsrc_data->width = 0;
+		rsrc_data->height = 0;
+		rsrc_data->stride = 1;
+		rsrc_data->en_cfg = (0x1 << 16) | 0x1;
+	} else if (rsrc_data->index == 1) {
+		/* WM 1 Raw dump */
+		rsrc_data->stride = rsrc_data->width;
+		rsrc_data->en_cfg = 0x1;
+		/* LSB aligned */
+		rsrc_data->pack_fmt |= 0x10;
+	} else {
+		CAM_ERR(CAM_ISP, "Invalid WM:%d requested", rsrc_data->index);
+		return -EINVAL;
+	}
+
+	*wm_res = wm_res_local;
+	*comp_grp_id = rsrc_data->hw_regs->comp_group;
+	*client_done_mask |= (1 << wm_idx);
+
+	CAM_DBG(CAM_ISP,
+		"WM:%d processed width:%d height:%d format:0x%x comp_group:%d packer format:0x%x",
+		rsrc_data->index, rsrc_data->width, rsrc_data->height,
+		rsrc_data->format, *comp_grp_id, rsrc_data->pack_fmt);
+	return 0;
+}
+
+static int cam_tfe_bus_release_wm(void   *bus_priv,
+	struct cam_isp_resource_node     *wm_res)
+{
+	struct cam_tfe_bus_wm_resource_data   *rsrc_data = wm_res->res_priv;
+
+	rsrc_data->offset = 0;
+	rsrc_data->width = 0;
+	rsrc_data->height = 0;
+	rsrc_data->stride = 0;
+	rsrc_data->format = 0;
+	rsrc_data->pack_fmt = 0;
+	rsrc_data->burst_len = 0;
+	rsrc_data->irq_subsample_period = 0;
+	rsrc_data->irq_subsample_pattern = 0;
+	rsrc_data->framedrop_period = 0;
+	rsrc_data->framedrop_pattern = 0;
+	rsrc_data->en_cfg = 0;
+	rsrc_data->is_dual = 0;
+
+	wm_res->tasklet_info = NULL;
+	wm_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+
+	CAM_DBG(CAM_ISP, "TFE:%d Release WM:%d",
+		rsrc_data->common_data->core_index, rsrc_data->index);
+
+	return 0;
+}
+
+static int cam_tfe_bus_start_wm(struct cam_isp_resource_node *wm_res)
+{
+	struct cam_tfe_bus_wm_resource_data   *rsrc_data =
+		wm_res->res_priv;
+	struct cam_tfe_bus_common_data        *common_data =
+		rsrc_data->common_data;
+
+	cam_io_w(0xf, common_data->mem_base + rsrc_data->hw_regs->bw_limit);
+
+	cam_io_w((rsrc_data->height << 16) | rsrc_data->width,
+		common_data->mem_base + rsrc_data->hw_regs->image_cfg_0);
+	cam_io_w(rsrc_data->pack_fmt,
+		common_data->mem_base + rsrc_data->hw_regs->packer_cfg);
+
+	/* Configure stride for RDIs on full TFE and TFE lite */
+	if (rsrc_data->index > 6)
+		cam_io_w_mb(rsrc_data->stride, (common_data->mem_base +
+			rsrc_data->hw_regs->image_cfg_2));
+
+	/* Enable WM */
+	cam_io_w_mb(rsrc_data->en_cfg, common_data->mem_base +
+		rsrc_data->hw_regs->cfg);
+
+	CAM_DBG(CAM_ISP, "TFE:%d WM:%d width = %d, height = %d",
+		common_data->core_index, rsrc_data->index,
+		rsrc_data->width, rsrc_data->height);
+	CAM_DBG(CAM_ISP, "WM:%d pk_fmt = %d", rsrc_data->index,
+		rsrc_data->pack_fmt);
+	CAM_DBG(CAM_ISP, "WM:%d stride = %d, burst len = %d",
+		rsrc_data->index, rsrc_data->stride, 0xf);
+	CAM_DBG(CAM_ISP, "TFE:%d Start WM:%d offset 0x%x val 0x%x",
+		common_data->core_index, rsrc_data->index,
+		(uint32_t) rsrc_data->hw_regs->cfg, rsrc_data->en_cfg);
+
+	wm_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
+	return 0;
+}
+
+static int cam_tfe_bus_stop_wm(struct cam_isp_resource_node *wm_res)
+{
+	struct cam_tfe_bus_wm_resource_data   *rsrc_data =
+		wm_res->res_priv;
+	struct cam_tfe_bus_common_data        *common_data =
+		rsrc_data->common_data;
+
+	/* Disable WM */
+	cam_io_w_mb(0x0, common_data->mem_base + rsrc_data->hw_regs->cfg);
+	CAM_DBG(CAM_ISP, "TFE:%d Stop WM:%d",
+		rsrc_data->common_data->core_index, rsrc_data->index);
+
+	wm_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+
+	return 0;
+}
+
+static int cam_tfe_bus_init_wm_resource(uint32_t index,
+	struct cam_tfe_bus_priv    *bus_priv,
+	struct cam_tfe_bus_hw_info *hw_info,
+	struct cam_isp_resource_node    *wm_res)
+{
+	struct cam_tfe_bus_wm_resource_data *rsrc_data;
+
+	rsrc_data = kzalloc(sizeof(struct cam_tfe_bus_wm_resource_data),
+		GFP_KERNEL);
+	if (!rsrc_data) {
+		CAM_ERR(CAM_ISP, "Failed to alloc for WM res priv");
+		return -ENOMEM;
+	}
+	wm_res->res_priv = rsrc_data;
+
+	rsrc_data->index = index;
+	rsrc_data->hw_regs = &hw_info->bus_client_reg[index];
+	rsrc_data->common_data = &bus_priv->common_data;
+
+	wm_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+	INIT_LIST_HEAD(&wm_res->list);
+
+	wm_res->start = cam_tfe_bus_start_wm;
+	wm_res->stop = cam_tfe_bus_stop_wm;
+	wm_res->hw_intf = bus_priv->common_data.hw_intf;
+
+	return 0;
+}
+
+static int cam_tfe_bus_deinit_wm_resource(
+	struct cam_isp_resource_node    *wm_res)
+{
+	struct cam_tfe_bus_wm_resource_data *rsrc_data;
+
+	wm_res->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
+	INIT_LIST_HEAD(&wm_res->list);
+
+	wm_res->start = NULL;
+	wm_res->stop = NULL;
+	wm_res->top_half_handler = NULL;
+	wm_res->bottom_half_handler = NULL;
+	wm_res->hw_intf = NULL;
+
+	rsrc_data = wm_res->res_priv;
+	wm_res->res_priv = NULL;
+	if (!rsrc_data)
+		return -ENOMEM;
+	kfree(rsrc_data);
+
+	return 0;
+}
+
+static void cam_tfe_bus_add_wm_to_comp_grp(
+	struct cam_isp_resource_node    *comp_grp,
+	uint32_t                         composite_mask)
+{
+	struct cam_tfe_bus_comp_grp_data  *rsrc_data = comp_grp->res_priv;
+
+	rsrc_data->composite_mask |= composite_mask;
+}
+
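+/*
+ * Returns true if the comp group was already on the used list
+ * (previously acquired). Returns false after moving a matching free
+ * group to the used list, or with *comp_grp left NULL if no match.
+ */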
+static bool cam_tfe_bus_match_comp_grp(
+	struct cam_tfe_bus_priv                *bus_priv,
+	struct cam_isp_resource_node          **comp_grp,
+	uint32_t                                comp_grp_id)
+{
+	struct cam_tfe_bus_comp_grp_data       *rsrc_data = NULL;
+	struct cam_isp_resource_node           *comp_grp_local = NULL;
+
+	list_for_each_entry(comp_grp_local,
+		&bus_priv->used_comp_grp, list) {
+		rsrc_data = comp_grp_local->res_priv;
+		if (rsrc_data->comp_grp_id == comp_grp_id) {
+			/* Match found */
+			*comp_grp = comp_grp_local;
+			return true;
+		}
+	}
+
+	list_for_each_entry(comp_grp_local,
+		&bus_priv->free_comp_grp, list) {
+		rsrc_data = comp_grp_local->res_priv;
+		if (rsrc_data->comp_grp_id == comp_grp_id) {
+			/* Match found */
+			*comp_grp = comp_grp_local;
+			list_del(&comp_grp_local->list);
+			list_add_tail(&comp_grp_local->list,
+			&bus_priv->used_comp_grp);
+			return false;
+		}
+	}
+
+	*comp_grp = NULL;
+	return false;
+}
+
+static int cam_tfe_bus_acquire_comp_grp(
+	struct cam_tfe_bus_priv             *bus_priv,
+	struct cam_isp_tfe_out_port_info    *out_port_info,
+	void                                *tasklet,
+	uint32_t                             is_dual,
+	uint32_t                             is_master,
+	struct cam_isp_resource_node       **comp_grp,
+	enum cam_tfe_bus_comp_grp_id         comp_grp_id,
+	struct cam_isp_resource_node        *out_rsrc,
+	uint32_t                             source_group)
+{
+	int rc = 0;
+	struct cam_isp_resource_node      *comp_grp_local = NULL;
+	struct cam_tfe_bus_comp_grp_data  *rsrc_data = NULL;
+	bool previously_acquired  = false;
+
+	if (comp_grp_id >= CAM_TFE_BUS_COMP_GRP_0 &&
+		comp_grp_id <= CAM_TFE_BUS_COMP_GRP_7) {
+		/* Check if matching comp_grp has already been acquired */
+		previously_acquired = cam_tfe_bus_match_comp_grp(
+			bus_priv, &comp_grp_local, comp_grp_id);
+	}
+
+	if (!comp_grp_local) {
+		CAM_ERR(CAM_ISP, "Invalid comp_grp_id:%d", comp_grp_id);
+		return -ENODEV;
+	}
+
+	rsrc_data = comp_grp_local->res_priv;
+	/* out_rsrc[] holds at most CAM_TFE_MAX_OUT_RES_PER_COMP_GRP entries */
+	if (rsrc_data->acquire_dev_cnt >= CAM_TFE_MAX_OUT_RES_PER_COMP_GRP) {
+		CAM_ERR(CAM_ISP, "Too many acquires comp_grp_id:%d", comp_grp_id);
+		return -ENODEV;
+	}
+
+	if (!previously_acquired) {
+		comp_grp_local->tasklet_info = tasklet;
+		comp_grp_local->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+
+		rsrc_data->is_master = is_master;
+		rsrc_data->is_dual = is_dual;
+
+		if (is_master)
+			rsrc_data->addr_sync_mode = 0;
+		else
+			rsrc_data->addr_sync_mode = 1;
+	} else {
+		rsrc_data = comp_grp_local->res_priv;
+		/* Do not support runtime change in composite mask */
+		if (comp_grp_local->res_state ==
+			CAM_ISP_RESOURCE_STATE_STREAMING) {
+			CAM_ERR(CAM_ISP, "Invalid State %d Comp Grp %u",
+				comp_grp_local->res_state,
+				rsrc_data->comp_grp_id);
+			return -EBUSY;
+		}
+	}
+
+	CAM_DBG(CAM_ISP, "Acquire comp_grp id:%u", rsrc_data->comp_grp_id);
+	rsrc_data->source_grp = source_group;
+	rsrc_data->out_rsrc[rsrc_data->acquire_dev_cnt] = out_rsrc;
+	rsrc_data->acquire_dev_cnt++;
+	*comp_grp = comp_grp_local;
+
+	return rc;
+}
+
+static int cam_tfe_bus_release_comp_grp(
+	struct cam_tfe_bus_priv              *bus_priv,
+	struct cam_isp_resource_node         *comp_grp)
+{
+	struct cam_isp_resource_node      *comp_grp_local = NULL;
+	struct cam_tfe_bus_comp_grp_data  *comp_rsrc_data = NULL;
+	int match_found = 0;
+
+	if (!comp_grp) {
+		CAM_ERR(CAM_ISP, "Invalid Params Comp Grp %pK", comp_grp);
+		return -EINVAL;
+	}
+
+	if (comp_grp->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+		CAM_ERR(CAM_ISP, "Already released Comp Grp");
+		return 0;
+	}
+
+	if (comp_grp->res_state == CAM_ISP_RESOURCE_STATE_STREAMING) {
+		CAM_ERR(CAM_ISP, "Invalid State %d",
+			comp_grp->res_state);
+		return -EBUSY;
+	}
+
+	comp_rsrc_data = comp_grp->res_priv;
+	CAM_DBG(CAM_ISP, "Comp Grp id %u", comp_rsrc_data->comp_grp_id);
+
+	list_for_each_entry(comp_grp_local, &bus_priv->used_comp_grp, list) {
+		if (comp_grp_local == comp_grp) {
+			match_found = 1;
+			break;
+		}
+	}
+
+	if (!match_found) {
+		CAM_ERR(CAM_ISP, "Could not find comp_grp_id:%u",
+			comp_rsrc_data->comp_grp_id);
+		return -ENODEV;
+	}
+
+	comp_rsrc_data->acquire_dev_cnt--;
+	if (comp_rsrc_data->acquire_dev_cnt == 0) {
+		list_del(&comp_grp_local->list);
+
+		comp_rsrc_data->addr_sync_mode = 0;
+		comp_rsrc_data->composite_mask = 0;
+
+		comp_grp_local->tasklet_info = NULL;
+		comp_grp_local->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+
+		list_add_tail(&comp_grp_local->list, &bus_priv->free_comp_grp);
+		CAM_DBG(CAM_ISP, "Comp Grp id %u released",
+			comp_rsrc_data->comp_grp_id);
+	}
+
+	return 0;
+}
+
+static int cam_tfe_bus_start_comp_grp(
+	struct cam_isp_resource_node *comp_grp)
+{
+	int rc = 0;
+	uint32_t val;
+	struct cam_tfe_bus_comp_grp_data *rsrc_data = NULL;
+	struct cam_tfe_bus_common_data   *common_data = NULL;
+	uint32_t     bus_irq_reg_mask_0  = 0;
+
+	rsrc_data = comp_grp->res_priv;
+	common_data = rsrc_data->common_data;
+
+	CAM_DBG(CAM_ISP, "TFE:%d comp_grp_id:%d streaming state:%d mask:0x%x",
+		common_data->core_index, rsrc_data->comp_grp_id,
+		comp_grp->res_state, rsrc_data->composite_mask);
+
+	if (comp_grp->res_state == CAM_ISP_RESOURCE_STATE_STREAMING)
+		return 0;
+
+	if (rsrc_data->is_dual) {
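+		/*
+		 * Dual TFE: master and slave program different bit
+		 * positions of comp_cfg_0 for the same comp group; both
+		 * enable the group in comp_cfg_1.
+		 */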
+		if (rsrc_data->is_master) {
+			val = cam_io_r(common_data->mem_base +
+				common_data->common_reg->comp_cfg_0);
+			val |= (0x1 << (rsrc_data->comp_grp_id + 16));
+			cam_io_w_mb(val, common_data->mem_base +
+				common_data->common_reg->comp_cfg_0);
+
+			val = cam_io_r(common_data->mem_base +
+				common_data->common_reg->comp_cfg_1);
+			val |= (0x1 << rsrc_data->comp_grp_id);
+			cam_io_w_mb(val, common_data->mem_base +
+				common_data->common_reg->comp_cfg_1);
+		} else {
+			val = cam_io_r(common_data->mem_base +
+				common_data->common_reg->comp_cfg_0);
+			val |= (0x1 << rsrc_data->comp_grp_id);
+			cam_io_w(val, common_data->mem_base +
+				common_data->common_reg->comp_cfg_0);
+
+			val = cam_io_r(common_data->mem_base +
+				common_data->common_reg->comp_cfg_1);
+			val |= (0x1 << rsrc_data->comp_grp_id);
+			cam_io_w(val, common_data->mem_base +
+				common_data->common_reg->comp_cfg_1);
+		}
+	}
+
+	if (rsrc_data->is_dual && !rsrc_data->is_master)
+		goto end;
+
+	/* Update the composite done mask in the bus IRQ mask */
+	bus_irq_reg_mask_0 = cam_io_r(common_data->mem_base +
+		common_data->common_reg->irq_mask[CAM_TFE_BUS_IRQ_REG0]);
+	bus_irq_reg_mask_0 |= (0x1 << (rsrc_data->comp_grp_id +
+		rsrc_data->common_data->comp_done_shift));
+	cam_io_w_mb(bus_irq_reg_mask_0, common_data->mem_base +
+		common_data->common_reg->irq_mask[CAM_TFE_BUS_IRQ_REG0]);
+
+	CAM_DBG(CAM_ISP, "TFE:%d start COMP_GRP:%d bus_irq_mask_0 0x%x",
+		common_data->core_index, rsrc_data->comp_grp_id,
+		bus_irq_reg_mask_0);
+
+end:
+	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
+	return rc;
+}
+
+static int cam_tfe_bus_stop_comp_grp(
+	struct cam_isp_resource_node          *comp_grp)
+{
+	struct cam_tfe_bus_comp_grp_data *rsrc_data = NULL;
+	struct cam_tfe_bus_common_data *common_data = NULL;
+	uint32_t      bus_irq_reg_mask_0 = 0;
+
+	if (comp_grp->res_state == CAM_ISP_RESOURCE_STATE_RESERVED)
+		return 0;
+
+	rsrc_data = (struct cam_tfe_bus_comp_grp_data *)comp_grp->res_priv;
+	common_data = rsrc_data->common_data;
+
+	/* Clear the composite done mask in the bus IRQ mask */
+	bus_irq_reg_mask_0  = cam_io_r(common_data->mem_base +
+		common_data->common_reg->irq_mask[CAM_TFE_BUS_IRQ_REG0]);
+	bus_irq_reg_mask_0 &= ~(0x1 << (rsrc_data->comp_grp_id +
+		rsrc_data->common_data->comp_done_shift));
+	cam_io_w_mb(bus_irq_reg_mask_0, common_data->mem_base +
+		common_data->common_reg->irq_mask[CAM_TFE_BUS_IRQ_REG0]);
+	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+
+	return 0;
+}
+
+static int cam_tfe_bus_init_comp_grp(uint32_t index,
+	struct cam_hw_soc_info          *soc_info,
+	struct cam_tfe_bus_priv         *bus_priv,
+	struct cam_tfe_bus_hw_info      *hw_info,
+	struct cam_isp_resource_node    *comp_grp)
+{
+	struct cam_tfe_bus_comp_grp_data *rsrc_data = NULL;
+
+	rsrc_data = kzalloc(sizeof(struct cam_tfe_bus_comp_grp_data),
+		GFP_KERNEL);
+	if (!rsrc_data)
+		return -ENOMEM;
+
+	comp_grp->res_priv = rsrc_data;
+
+	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+	INIT_LIST_HEAD(&comp_grp->list);
+
+	comp_grp->res_id = index;
+	rsrc_data->comp_grp_id   = index;
+	rsrc_data->common_data     = &bus_priv->common_data;
+
+	list_add_tail(&comp_grp->list, &bus_priv->free_comp_grp);
+
+	comp_grp->hw_intf = bus_priv->common_data.hw_intf;
+
+	return 0;
+}
+
+static int cam_tfe_bus_deinit_comp_grp(
+	struct cam_isp_resource_node    *comp_grp)
+{
+	struct cam_tfe_bus_comp_grp_data *rsrc_data =
+		comp_grp->res_priv;
+
+	comp_grp->start = NULL;
+	comp_grp->stop = NULL;
+	comp_grp->top_half_handler = NULL;
+	comp_grp->bottom_half_handler = NULL;
+	comp_grp->hw_intf = NULL;
+
+	list_del_init(&comp_grp->list);
+	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
+
+	comp_grp->res_priv = NULL;
+
+	if (!rsrc_data) {
+		CAM_ERR(CAM_ISP, "comp_grp_priv is NULL");
+		return -ENODEV;
+	}
+	kfree(rsrc_data);
+
+	return 0;
+}
+
+static int cam_tfe_bus_get_secure_mode(void *priv, void *cmd_args,
+	uint32_t arg_size)
+{
+	bool *mode = cmd_args;
+	struct cam_isp_resource_node *res =
+		(struct cam_isp_resource_node *) priv;
+	struct cam_tfe_bus_tfe_out_data *rsrc_data =
+		(struct cam_tfe_bus_tfe_out_data *)res->res_priv;
+
+	*mode = (rsrc_data->secure_mode == CAM_SECURE_MODE_SECURE);
+
+	return 0;
+}
+
+static int cam_tfe_bus_acquire_tfe_out(void *priv, void *acquire_args,
+	uint32_t args_size)
+{
+	struct cam_tfe_bus_priv                *bus_priv = priv;
+	struct cam_tfe_acquire_args            *acq_args = acquire_args;
+	struct cam_tfe_hw_tfe_out_acquire_args *out_acquire_args;
+	struct cam_isp_resource_node           *rsrc_node = NULL;
+	struct cam_tfe_bus_tfe_out_data        *rsrc_data = NULL;
+	enum cam_tfe_bus_tfe_out_id             tfe_out_res_id;
+	enum cam_tfe_bus_comp_grp_id            comp_grp_id;
+	int                                     rc = -ENODEV;
+	uint32_t                                secure_caps = 0, mode;
+	/* i and num_wm must be signed: see the unwind loop and error check */
+	int32_t                                 i, num_wm;
+	uint32_t                                format, client_done_mask = 0;
+
+	if (!bus_priv || !acquire_args) {
+		CAM_ERR(CAM_ISP, "Invalid Param");
+		return -EINVAL;
+	}
+
+	out_acquire_args = &acq_args->tfe_out;
+	format = out_acquire_args->out_port_info->format;
+
+	CAM_DBG(CAM_ISP, "resid 0x%x fmt:%d, sec mode:%d wm mode:%d",
+		out_acquire_args->out_port_info->res_id, format,
+		out_acquire_args->out_port_info->secure_mode,
+		out_acquire_args->out_port_info->wm_mode);
+	CAM_DBG(CAM_ISP, "width:%d, height:%d stride:%d",
+		out_acquire_args->out_port_info->width,
+		out_acquire_args->out_port_info->height,
+		out_acquire_args->out_port_info->stride);
+
+	tfe_out_res_id = cam_tfe_bus_get_out_res_id(
+		out_acquire_args->out_port_info->res_id);
+	if (tfe_out_res_id == CAM_TFE_BUS_TFE_OUT_MAX)
+		return -ENODEV;
+
+	num_wm = cam_tfe_bus_get_num_wm(tfe_out_res_id, format);
+	if (num_wm < 1)
+		return -EINVAL;
+
+	rsrc_node = &bus_priv->tfe_out[tfe_out_res_id];
+	if (rsrc_node->res_state != CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+		CAM_ERR(CAM_ISP, "Resource not available: Res_id %d state:%d",
+			tfe_out_res_id, rsrc_node->res_state);
+		return -EBUSY;
+	}
+
+	rsrc_data = rsrc_node->res_priv;
+	rsrc_data->common_data->event_cb = acq_args->event_cb;
+	rsrc_data->event_cb = acq_args->event_cb;
+	rsrc_data->priv = acq_args->priv;
+
+	secure_caps = cam_tfe_bus_can_be_secure(rsrc_data->out_id);
+	mode = out_acquire_args->out_port_info->secure_mode;
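+	/*
+	 * All secure-capable outputs on a TFE must share one secure mode:
+	 * the first acquire latches it and later acquires must match.
+	 */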
+	mutex_lock(&rsrc_data->common_data->bus_mutex);
+	if (secure_caps) {
+		if (!rsrc_data->common_data->num_sec_out) {
+			rsrc_data->secure_mode = mode;
+			rsrc_data->common_data->secure_mode = mode;
+		} else {
+			if (mode == rsrc_data->common_data->secure_mode) {
+				rsrc_data->secure_mode =
+					rsrc_data->common_data->secure_mode;
+			} else {
+				rc = -EINVAL;
+				CAM_ERR_RATE_LIMIT(CAM_ISP,
+					"Mismatch: drvr mode[%d], acquire mode[%d]",
+					rsrc_data->common_data->secure_mode,
+					mode);
+				mutex_unlock(
+					&rsrc_data->common_data->bus_mutex);
+				return -EINVAL;
+			}
+		}
+		rsrc_data->common_data->num_sec_out++;
+	}
+	mutex_unlock(&rsrc_data->common_data->bus_mutex);
+
+	bus_priv->tasklet_info = acq_args->tasklet;
+	rsrc_data->num_wm = num_wm;
+	rsrc_node->rdi_only_ctx = 0;
+	rsrc_node->res_id = out_acquire_args->out_port_info->res_id;
+	rsrc_node->cdm_ops = out_acquire_args->cdm_ops;
+	rsrc_data->cdm_util_ops = out_acquire_args->cdm_ops;
+
+	/* Acquire WM and retrieve COMP GRP ID */
+	for (i = 0; i < num_wm; i++) {
+		rc = cam_tfe_bus_acquire_wm(bus_priv,
+			out_acquire_args->out_port_info,
+			&rsrc_data->wm_res[i],
+			acq_args->tasklet,
+			tfe_out_res_id,
+			i,
+			&client_done_mask,
+			out_acquire_args->is_dual,
+			&comp_grp_id);
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"TFE:%d WM acquire failed for Out %d rc=%d",
+				rsrc_data->common_data->core_index,
+				tfe_out_res_id, rc);
+			goto release_wm;
+		}
+	}
+
+	/* Acquire composite group using COMP GRP ID */
+	rc = cam_tfe_bus_acquire_comp_grp(bus_priv,
+		out_acquire_args->out_port_info,
+		acq_args->tasklet,
+		out_acquire_args->is_dual,
+		out_acquire_args->is_master,
+		&rsrc_data->comp_grp,
+		comp_grp_id,
+		rsrc_node,
+		rsrc_data->source_group);
+	if (rc) {
+		CAM_ERR(CAM_ISP,
+			"TFE:%d Comp_Grp acquire failed for Out %d rc=%d",
+			rsrc_data->common_data->core_index,
+			tfe_out_res_id, rc);
+		goto release_wm;
+	}
+
+	rsrc_data->is_dual = out_acquire_args->is_dual;
+	rsrc_data->is_master = out_acquire_args->is_master;
+
+	cam_tfe_bus_add_wm_to_comp_grp(rsrc_data->comp_grp,
+		client_done_mask);
+
+	rsrc_node->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+	out_acquire_args->rsrc_node = rsrc_node;
+
+	return rc;
+
+release_wm:
+	for (i--; i >= 0; i--)
+		cam_tfe_bus_release_wm(bus_priv,
+			rsrc_data->wm_res[i]);
+
+	if (rsrc_data->comp_grp)
+		cam_tfe_bus_release_comp_grp(bus_priv, rsrc_data->comp_grp);
+
+	return rc;
+}
+
+static int cam_tfe_bus_release_tfe_out(void *priv, void *release_args,
+	uint32_t args_size)
+{
+	struct cam_tfe_bus_priv               *bus_priv = priv;
+	struct cam_isp_resource_node          *tfe_out = NULL;
+	struct cam_tfe_bus_tfe_out_data       *rsrc_data = NULL;
+	uint32_t                               secure_caps = 0;
+	uint32_t i;
+
+	if (!bus_priv || !release_args) {
+		CAM_ERR(CAM_ISP, "Invalid input bus_priv %pK release_args %pK",
+			bus_priv, release_args);
+		return -EINVAL;
+	}
+
+	tfe_out = (struct cam_isp_resource_node *)release_args;
+	rsrc_data = (struct cam_tfe_bus_tfe_out_data *)tfe_out->res_priv;
+
+	if (tfe_out->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CAM_ERR(CAM_ISP, "Invalid resource state:%d res id:%d",
+			tfe_out->res_state, tfe_out->res_id);
+	}
+
+	for (i = 0; i < rsrc_data->num_wm; i++)
+		cam_tfe_bus_release_wm(bus_priv, rsrc_data->wm_res[i]);
+
+	rsrc_data->num_wm = 0;
+
+	if (rsrc_data->comp_grp)
+		cam_tfe_bus_release_comp_grp(bus_priv, rsrc_data->comp_grp);
+
+	rsrc_data->comp_grp = NULL;
+
+	tfe_out->tasklet_info = NULL;
+	tfe_out->cdm_ops = NULL;
+	rsrc_data->cdm_util_ops = NULL;
+
+	secure_caps = cam_tfe_bus_can_be_secure(rsrc_data->out_id);
+	mutex_lock(&rsrc_data->common_data->bus_mutex);
+	if (secure_caps) {
+		if (rsrc_data->secure_mode ==
+			rsrc_data->common_data->secure_mode) {
+			rsrc_data->common_data->num_sec_out--;
+			rsrc_data->secure_mode =
+				CAM_SECURE_MODE_NON_SECURE;
+		} else {
+			/*
+			 * The validity of the mode is checked while
+			 * acquiring the output port, so this point should
+			 * never be reached unless there is some corruption.
+			 */
+			CAM_ERR(CAM_ISP, "driver[%d], resource[%d] mismatch",
+				rsrc_data->common_data->secure_mode,
+				rsrc_data->secure_mode);
+		}
+
+		if (!rsrc_data->common_data->num_sec_out)
+			rsrc_data->common_data->secure_mode =
+				CAM_SECURE_MODE_NON_SECURE;
+	}
+	mutex_unlock(&rsrc_data->common_data->bus_mutex);
+
+	if (tfe_out->res_state == CAM_ISP_RESOURCE_STATE_RESERVED)
+		tfe_out->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+
+	return 0;
+}
+
+static int cam_tfe_bus_start_tfe_out(void *hw_priv,
+	void *start_hw_args, uint32_t arg_size)
+{
+	struct cam_isp_resource_node     *tfe_out = hw_priv;
+	struct cam_tfe_bus_tfe_out_data  *rsrc_data = NULL;
+	struct cam_tfe_bus_common_data   *common_data = NULL;
+	uint32_t bus_irq_reg_mask_0 = 0;
+	uint32_t rup_group_id = 0;
+	int rc = 0, i;
+
+	if (!tfe_out) {
+		CAM_ERR(CAM_ISP, "Invalid input");
+		return -EINVAL;
+	}
+
+	rsrc_data = tfe_out->res_priv;
+	common_data = rsrc_data->common_data;
+	rup_group_id = rsrc_data->rup_group_id;
+
+	CAM_DBG(CAM_ISP, "TFE:%d Start resource index %d",
+		common_data->core_index, rsrc_data->out_id);
+
+	if (tfe_out->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CAM_ERR(CAM_ISP, "TFE:%d Invalid resource state:%d",
+			common_data->core_index, tfe_out->res_state);
+		return -EACCES;
+	}
+
+	for (i = 0; i < rsrc_data->num_wm; i++)
+		rc = cam_tfe_bus_start_wm(rsrc_data->wm_res[i]);
+
+	rc = cam_tfe_bus_start_comp_grp(rsrc_data->comp_grp);
+
+	if (rsrc_data->is_dual && !rsrc_data->is_master &&
+		!tfe_out->rdi_only_ctx)
+		goto end;
+
+	if (common_data->rup_irq_enable[rup_group_id])
+		goto end;
+
+	/* Update the composite reg-update mask in the bus IRQ mask */
+	bus_irq_reg_mask_0 = cam_io_r(common_data->mem_base +
+		common_data->common_reg->irq_mask[CAM_TFE_BUS_IRQ_REG0]);
+	bus_irq_reg_mask_0 |= (0x1 << rup_group_id);
+	cam_io_w_mb(bus_irq_reg_mask_0, common_data->mem_base +
+		common_data->common_reg->irq_mask[CAM_TFE_BUS_IRQ_REG0]);
+	common_data->rup_irq_enable[rup_group_id] = true;
+
+end:
+	tfe_out->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+	return rc;
+}
+
+static int cam_tfe_bus_stop_tfe_out(void *hw_priv,
+	void *stop_hw_args, uint32_t arg_size)
+{
+	struct cam_isp_resource_node      *tfe_out = hw_priv;
+	struct cam_tfe_bus_tfe_out_data   *rsrc_data = NULL;
+	struct cam_tfe_bus_common_data    *common_data = NULL;
+	uint32_t bus_irq_reg_mask_0 = 0,  rup_group = 0;
+	int rc = 0, i;
+
+	if (!tfe_out) {
+		CAM_ERR(CAM_ISP, "Invalid input");
+		return -EINVAL;
+	}
+
+	rsrc_data = tfe_out->res_priv;
+	common_data = rsrc_data->common_data;
+	rup_group = rsrc_data->rup_group_id;
+
+	if (tfe_out->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE ||
+		tfe_out->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CAM_DBG(CAM_ISP, "tfe_out res_state is %d", tfe_out->res_state);
+		return rc;
+	}
+
+	rc = cam_tfe_bus_stop_comp_grp(rsrc_data->comp_grp);
+
+	for (i = 0; i < rsrc_data->num_wm; i++)
+		rc = cam_tfe_bus_stop_wm(rsrc_data->wm_res[i]);
+
+	if (!common_data->rup_irq_enable[rup_group])
+		goto end;
+
+	/* Disable the composite reg-update mask in the bus IRQ mask register */
+	bus_irq_reg_mask_0 = cam_io_r(common_data->mem_base +
+		common_data->common_reg->irq_mask[CAM_TFE_BUS_IRQ_REG0]);
+	bus_irq_reg_mask_0 &= ~(0x1 << rup_group);
+	cam_io_w_mb(bus_irq_reg_mask_0, common_data->mem_base +
+		common_data->common_reg->irq_mask[CAM_TFE_BUS_IRQ_REG0]);
+	common_data->rup_irq_enable[rup_group] = false;
+
+end:
+	tfe_out->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+	return rc;
+}
+
+static int cam_tfe_bus_init_tfe_out_resource(uint32_t  index,
+	struct cam_tfe_bus_priv                  *bus_priv,
+	struct cam_tfe_bus_hw_info               *hw_info)
+{
+	struct cam_isp_resource_node         *tfe_out = NULL;
+	struct cam_tfe_bus_tfe_out_data *rsrc_data = NULL;
+	int rc = 0;
+	int32_t tfe_out_id = hw_info->tfe_out_hw_info[index].tfe_out_id;
+
+	if (tfe_out_id < 0 ||
+		tfe_out_id >= CAM_TFE_BUS_TFE_OUT_MAX) {
+		CAM_ERR(CAM_ISP, "Init TFE Out failed, Invalid type=%d",
+			tfe_out_id);
+		return -EINVAL;
+	}
+
+	tfe_out = &bus_priv->tfe_out[tfe_out_id];
+	if (tfe_out->res_state != CAM_ISP_RESOURCE_STATE_UNAVAILABLE ||
+		tfe_out->res_priv) {
+		CAM_ERR(CAM_ISP, "tfe_out_id %d has already been initialized",
+			tfe_out_id);
+		return -EFAULT;
+	}
+
+	rsrc_data = kzalloc(sizeof(struct cam_tfe_bus_tfe_out_data),
+		GFP_KERNEL);
+	if (!rsrc_data) {
+		rc = -ENOMEM;
+		return rc;
+	}
+
+	tfe_out->res_priv = rsrc_data;
+
+	tfe_out->res_type = CAM_ISP_RESOURCE_TFE_OUT;
+	tfe_out->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+	INIT_LIST_HEAD(&tfe_out->list);
+
+	rsrc_data->composite_group =
+		hw_info->tfe_out_hw_info[index].composite_group;
+	rsrc_data->rup_group_id    =
+		hw_info->tfe_out_hw_info[index].rup_group_id;
+	rsrc_data->out_id          =
+		hw_info->tfe_out_hw_info[index].tfe_out_id;
+	rsrc_data->common_data     = &bus_priv->common_data;
+	rsrc_data->max_width       =
+		hw_info->tfe_out_hw_info[index].max_width;
+	rsrc_data->max_height      =
+		hw_info->tfe_out_hw_info[index].max_height;
+	rsrc_data->secure_mode  = CAM_SECURE_MODE_NON_SECURE;
+
+	tfe_out->hw_intf = bus_priv->common_data.hw_intf;
+
+	return 0;
+}
+
+static int cam_tfe_bus_deinit_tfe_out_resource(
+	struct cam_isp_resource_node    *tfe_out)
+{
+	struct cam_tfe_bus_tfe_out_data *rsrc_data = tfe_out->res_priv;
+
+	if (tfe_out->res_state == CAM_ISP_RESOURCE_STATE_UNAVAILABLE) {
+		/*
+		 * This is not an error. It can happen if the resource was
+		 * never supported in the HW.
+		 */
+		CAM_DBG(CAM_ISP, "Res %d already deinitialized",
+			tfe_out->res_id);
+		return 0;
+	}
+
+	tfe_out->start = NULL;
+	tfe_out->stop = NULL;
+	tfe_out->top_half_handler = NULL;
+	tfe_out->bottom_half_handler = NULL;
+	tfe_out->hw_intf = NULL;
+
+	tfe_out->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
+	INIT_LIST_HEAD(&tfe_out->list);
+	tfe_out->res_priv = NULL;
+
+	if (!rsrc_data)
+		return -ENOMEM;
+	kfree(rsrc_data);
+
+	return 0;
+}
+
+static const char *cam_tfe_bus_rup_type(
+	uint32_t group_id)
+{
+	switch (group_id) {
+	case CAM_ISP_HW_TFE_IN_CAMIF:
+		return "CAMIF RUP";
+	case CAM_ISP_HW_TFE_IN_RDI0:
+		return "RDI0 RUP";
+	case CAM_ISP_HW_TFE_IN_RDI1:
+		return "RDI1 RUP";
+	case CAM_ISP_HW_TFE_IN_RDI2:
+		return "RDI2 RUP";
+	default:
+		return "invalid rup group";
+	}
+}
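+
+/*
+ * RUP group indices correspond 1:1 to the TFE input resources
+ * (CAMIF, RDI0-2), so the asserted group bit is reported directly
+ * as the event res_id.
+ */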
+static int cam_tfe_bus_rup_bottom_half(
+	struct cam_tfe_bus_priv            *bus_priv,
+	struct cam_tfe_irq_evt_payload *evt_payload)
+{
+	struct cam_tfe_bus_common_data     *common_data;
+	struct cam_tfe_bus_tfe_out_data    *out_rsrc_data;
+	struct cam_isp_hw_event_info        evt_info;
+	uint32_t i, j;
+
+	common_data = &bus_priv->common_data;
+	evt_info.hw_idx = bus_priv->common_data.core_index;
+	evt_info.res_type = CAM_ISP_RESOURCE_TFE_OUT;
+
+	for (i = 0; i < CAM_TFE_BUS_RUP_GRP_MAX; i++) {
+		if (!(evt_payload->bus_irq_val[0] &
+			bus_priv->comp_rup_done_mask))
+			break;
+
+		if (evt_payload->bus_irq_val[0] & BIT(i)) {
+			for (j = 0; j < CAM_TFE_BUS_TFE_OUT_MAX; j++) {
+				out_rsrc_data =
+					(struct cam_tfe_bus_tfe_out_data *)
+					bus_priv->tfe_out[j].res_priv;
+				if ((out_rsrc_data->rup_group_id == i) &&
+					(bus_priv->tfe_out[j].res_state ==
+					CAM_ISP_RESOURCE_STATE_STREAMING))
+					break;
+			}
+
+			if (j == CAM_TFE_BUS_TFE_OUT_MAX) {
+				CAM_ERR(CAM_ISP,
+					"TFE:%d no active out rsrc for RUP grp %d status[0]:0x%x",
+					bus_priv->common_data.core_index,
+					i, evt_payload->bus_irq_val[0]);
+				continue;
+			}
+
+			CAM_DBG(CAM_ISP, "TFE:%d Received %s",
+				bus_priv->common_data.core_index,
+				cam_tfe_bus_rup_type(i));
+			evt_info.res_id = i;
+			if (out_rsrc_data->event_cb) {
+				out_rsrc_data->event_cb(
+					out_rsrc_data->priv,
+					CAM_ISP_HW_EVENT_REG_UPDATE,
+					(void *)&evt_info);
+				/* reset the rup bit */
+				evt_payload->bus_irq_val[0] &= ~BIT(i);
+			} else {
+				CAM_ERR(CAM_ISP,
+					"TFE:%d No event cb id:%u evt id:%d",
+					bus_priv->common_data.core_index,
+					out_rsrc_data->out_id, evt_info.res_id);
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int cam_tfe_bus_bufdone_bottom_half(
+	struct cam_tfe_bus_priv            *bus_priv,
+	struct cam_tfe_irq_evt_payload *evt_payload)
+{
+	struct cam_tfe_bus_common_data     *common_data;
+	struct cam_tfe_bus_tfe_out_data    *out_rsrc_data;
+	struct cam_isp_hw_event_info        evt_info;
+	struct cam_isp_resource_node       *out_rsrc = NULL;
+	struct cam_tfe_bus_comp_grp_data   *comp_rsrc_data;
+	uint32_t i, j;
+
+	common_data = &bus_priv->common_data;
+
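+	/*
+	 * A composite buf-done covers every out resource acquired on the
+	 * comp group, so the event is fanned out to each registered
+	 * out_rsrc before the comp-done bit is cleared from the payload.
+	 */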
+	for (i = 0; i < CAM_TFE_BUS_COMP_GRP_MAX; i++) {
+		if (!(evt_payload->bus_irq_val[0] &
+			bus_priv->comp_buf_done_mask))
+			break;
+
+		comp_rsrc_data = (struct cam_tfe_bus_comp_grp_data  *)
+			bus_priv->comp_grp[i].res_priv;
+
+		if (evt_payload->bus_irq_val[0] &
+			BIT(comp_rsrc_data->comp_grp_id +
+			bus_priv->common_data.comp_done_shift)) {
+			for (j = 0; j < comp_rsrc_data->acquire_dev_cnt; j++) {
+				out_rsrc = comp_rsrc_data->out_rsrc[j];
+				out_rsrc_data = out_rsrc->res_priv;
+				evt_info.res_type = out_rsrc->res_type;
+				evt_info.hw_idx = out_rsrc->hw_intf->hw_idx;
+				evt_info.res_id = out_rsrc->res_id;
+				out_rsrc_data->event_cb(out_rsrc_data->priv,
+					CAM_ISP_HW_EVENT_DONE,
+					(void *)&evt_info);
+			}
+
+			evt_payload->bus_irq_val[0] &=
+				~BIT(comp_rsrc_data->comp_grp_id +
+				bus_priv->common_data.comp_done_shift);
+		}
+	}
+
+	return 0;
+}
+
+static int cam_tfe_bus_bottom_half(void   *priv,
+	bool rup_process, struct cam_tfe_irq_evt_payload   *evt_payload)
+{
+	struct cam_tfe_bus_priv          *bus_priv;
+	uint32_t val;
+
+	if (!priv || !evt_payload) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid priv param");
+		return -EINVAL;
+	}
+	bus_priv = (struct cam_tfe_bus_priv   *) priv;
+
+	/* If any bus error is asserted, mask all bus error IRQs */
+	if (evt_payload->bus_irq_val[0] & bus_priv->bus_irq_error_mask[0]) {
+		val = cam_io_r(bus_priv->common_data.mem_base +
+			bus_priv->common_data.common_reg->irq_mask[0]);
+		val &= ~bus_priv->bus_irq_error_mask[0];
+		cam_io_w(val, bus_priv->common_data.mem_base +
+			bus_priv->common_data.common_reg->irq_mask[0]);
+	}
+
+	if (rup_process) {
+		if (evt_payload->bus_irq_val[0] &
+			bus_priv->comp_rup_done_mask)
+			cam_tfe_bus_rup_bottom_half(bus_priv, evt_payload);
+	} else {
+		if (evt_payload->bus_irq_val[0] &
+			bus_priv->comp_buf_done_mask)
+			cam_tfe_bus_bufdone_bottom_half(bus_priv, evt_payload);
+	}
+
+	return 0;
+}
+
+static int cam_tfe_bus_update_wm(void *priv, void *cmd_args,
+	uint32_t arg_size)
+{
+	struct cam_tfe_bus_priv              *bus_priv;
+	struct cam_isp_hw_get_cmd_update     *update_buf;
+	struct cam_buf_io_cfg                *io_cfg;
+	struct cam_tfe_bus_tfe_out_data      *tfe_out_data = NULL;
+	struct cam_tfe_bus_wm_resource_data  *wm_data = NULL;
+	uint32_t *reg_val_pair;
+	uint32_t i, j, size = 0;
+	uint32_t frame_inc = 0, val;
+
+	bus_priv = (struct cam_tfe_bus_priv  *) priv;
+	update_buf = (struct cam_isp_hw_get_cmd_update *) cmd_args;
+
+	tfe_out_data = (struct cam_tfe_bus_tfe_out_data *)
+		update_buf->res->res_priv;
+
+	if (!tfe_out_data || !tfe_out_data->cdm_util_ops) {
+		CAM_ERR(CAM_ISP, "Failed! Invalid data");
+		return -EINVAL;
+	}
+
+	if (update_buf->wm_update->num_buf != tfe_out_data->num_wm) {
+		CAM_ERR(CAM_ISP,
+			"Invalid number of buffers:%d, required:%d",
+			update_buf->wm_update->num_buf, tfe_out_data->num_wm);
+		return -EINVAL;
+	}
+
+	reg_val_pair = &tfe_out_data->common_data->io_buf_update[0];
+	io_cfg = update_buf->wm_update->io_cfg;
+
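+	/*
+	 * Build (register offset, value) pairs in io_buf_update[]; the CDM
+	 * replays them later with a single reg-random command.
+	 */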
+	for (i = 0, j = 0; i < tfe_out_data->num_wm; i++) {
+		if (j >= (MAX_REG_VAL_PAIR_SIZE - MAX_BUF_UPDATE_REG_NUM * 2)) {
+			CAM_ERR(CAM_ISP,
+				"reg_val_pair %d exceeds the array limit %zu",
+				j, MAX_REG_VAL_PAIR_SIZE);
+			return -ENOMEM;
+		}
+
+		wm_data = tfe_out_data->wm_res[i]->res_priv;
+		/* update width register */
+		val = cam_io_r_mb(wm_data->common_data->mem_base +
+			wm_data->hw_regs->image_cfg_0);
+		/* mask previously written width but preserve height */
+		val = val & 0xFFFF0000;
+		val |= wm_data->width;
+		CAM_TFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+			wm_data->hw_regs->image_cfg_0, val);
+		CAM_DBG(CAM_ISP, "WM:%d image height and width 0x%x",
+			wm_data->index, reg_val_pair[j-1]);
+
+		val = io_cfg->planes[i].plane_stride;
+		CAM_DBG(CAM_ISP, "stride before alignment %d", val);
+		val = ALIGNUP(val, 16);
+		if (val != io_cfg->planes[i].plane_stride &&
+			val != wm_data->stride)
+			CAM_WARN(CAM_ISP, "stride %u not aligned, expected %u",
+				io_cfg->planes[i].plane_stride, val);
+
+		val = wm_data->offset;
+		CAM_TFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+			wm_data->hw_regs->image_cfg_1, val);
+		CAM_DBG(CAM_ISP, "WM:%d xinit 0x%x",
+			wm_data->index, reg_val_pair[j-1]);
+
+		if (wm_data->index < 7) {
+			CAM_TFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+				wm_data->hw_regs->image_cfg_2,
+				io_cfg->planes[i].plane_stride);
+			wm_data->stride = val;
+			CAM_DBG(CAM_ISP, "WM %d image stride 0x%x",
+				wm_data->index, reg_val_pair[j-1]);
+		}
+
+		frame_inc = io_cfg->planes[i].plane_stride *
+			io_cfg->planes[i].slice_height;
+
+		CAM_TFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+			wm_data->hw_regs->image_addr,
+			update_buf->wm_update->image_buf[i]);
+		CAM_DBG(CAM_ISP, "WM %d image address 0x%x",
+			wm_data->index, reg_val_pair[j-1]);
+
+		CAM_TFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+			wm_data->hw_regs->frame_incr, frame_inc);
+		CAM_DBG(CAM_ISP, "WM %d frame_inc %d",
+			wm_data->index, reg_val_pair[j-1]);
+
+		/* enable the WM */
+		CAM_TFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+			wm_data->hw_regs->cfg,
+			wm_data->en_cfg);
+	}
+
+	size = tfe_out_data->cdm_util_ops->cdm_required_size_reg_random(j/2);
+
+	/* cdm util returns dwords, need to convert to bytes */
+	if ((size * 4) > update_buf->cmd.size) {
+		CAM_ERR(CAM_ISP,
+			"Failed! Buf size:%d insufficient, expected size:%d",
+			update_buf->cmd.size, size);
+		return -ENOMEM;
+	}
+
+	tfe_out_data->cdm_util_ops->cdm_write_regrandom(
+		update_buf->cmd.cmd_buf_addr, j/2, reg_val_pair);
+
+	/* cdm util returns dwords, need to convert to bytes */
+	update_buf->cmd.used_bytes = size * 4;
+
+	return 0;
+}
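
Note on the pattern shared by cam_tfe_bus_update_wm() above and cam_tfe_bus_update_hfr() below: register offset/value pairs are accumulated into io_buf_update[], the CDM utils report the reg-random command size in dwords, and the caller's command buffer is validated in bytes before cdm_write_regrandom() packs the pairs. A minimal host-side sketch of that sizing check (the one-dword command header is an assumption for illustration; the real size comes from cdm_required_size_reg_random()):

#include <stdint.h>
#include <stdio.h>

#define MAX_REG_VAL_PAIR_SIZE 256	/* stand-in for the driver-side limit */

/* stand-in for cdm_required_size_reg_random(): assume one header dword
 * plus two dwords (offset, value) per register write
 */
static uint32_t required_size_reg_random(uint32_t num_regs)
{
	return 1 + num_regs * 2;
}

int main(void)
{
	uint32_t reg_val_pair[MAX_REG_VAL_PAIR_SIZE];
	uint32_t j = 0, size, cmd_buf_bytes = 64;

	/* accumulate pairs the way CAM_TFE_ADD_REG_VAL_PAIR() does */
	reg_val_pair[j++] = 0x1234;	/* hypothetical register offset */
	reg_val_pair[j++] = 0xdeadbeef;	/* value to program */

	/* CDM sizes are in dwords; the command buffer is in bytes */
	size = required_size_reg_random(j / 2);
	if (size * 4 > cmd_buf_bytes) {
		printf("buffer too small, need %u bytes\n", size * 4);
		return -1;
	}
	printf("%u reg/val pairs, %u bytes used\n", j / 2, size * 4);
	return 0;
}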
+
+static int cam_tfe_bus_update_hfr(void *priv, void *cmd_args,
+	uint32_t arg_size)
+{
+	struct cam_tfe_bus_priv                  *bus_priv;
+	struct cam_isp_hw_get_cmd_update         *update_hfr;
+	struct cam_tfe_bus_tfe_out_data          *tfe_out_data = NULL;
+	struct cam_tfe_bus_wm_resource_data      *wm_data = NULL;
+	struct cam_isp_tfe_port_hfr_config       *hfr_cfg = NULL;
+	uint32_t *reg_val_pair;
+	uint32_t  i, j, size = 0;
+
+	bus_priv = (struct cam_tfe_bus_priv  *) priv;
+	update_hfr =  (struct cam_isp_hw_get_cmd_update *) cmd_args;
+
+	tfe_out_data = (struct cam_tfe_bus_tfe_out_data *)
+		update_hfr->res->res_priv;
+
+	if (!tfe_out_data || !tfe_out_data->cdm_util_ops) {
+		CAM_ERR(CAM_ISP, "Failed! Invalid data");
+		return -EINVAL;
+	}
+
+	reg_val_pair = &tfe_out_data->common_data->io_buf_update[0];
+	hfr_cfg = (struct cam_isp_tfe_port_hfr_config *)update_hfr->data;
+
+	for (i = 0, j = 0; i < tfe_out_data->num_wm; i++) {
+		if (j >= (MAX_REG_VAL_PAIR_SIZE - MAX_BUF_UPDATE_REG_NUM * 2)) {
+			CAM_ERR(CAM_ISP,
+				"reg_val_pair %d exceeds the array limit %zu",
+				j, MAX_REG_VAL_PAIR_SIZE);
+			return -ENOMEM;
+		}
+
+		wm_data = tfe_out_data->wm_res[i]->res_priv;
+		CAM_TFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+			wm_data->hw_regs->framedrop_pattern,
+			hfr_cfg->framedrop_pattern);
+		wm_data->framedrop_pattern = hfr_cfg->framedrop_pattern;
+		CAM_DBG(CAM_ISP, "WM:%d framedrop pattern 0x%x",
+			wm_data->index, wm_data->framedrop_pattern);
+
+		CAM_TFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+			wm_data->hw_regs->framedrop_period,
+			hfr_cfg->framedrop_period);
+		wm_data->framedrop_period = hfr_cfg->framedrop_period;
+		CAM_DBG(CAM_ISP, "WM:%d framedrop period 0x%x",
+			wm_data->index, wm_data->framedrop_period);
+
+		CAM_TFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+			wm_data->hw_regs->irq_subsample_period,
+			hfr_cfg->subsample_period);
+		wm_data->irq_subsample_period = hfr_cfg->subsample_period;
+		CAM_DBG(CAM_ISP, "WM:%d irq subsample period 0x%x",
+			wm_data->index, wm_data->irq_subsample_period);
+
+		CAM_TFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+			wm_data->hw_regs->irq_subsample_pattern,
+			hfr_cfg->subsample_pattern);
+		wm_data->irq_subsample_pattern = hfr_cfg->subsample_pattern;
+		CAM_DBG(CAM_ISP, "WM:%d irq subsample pattern 0x%x",
+			wm_data->index, wm_data->irq_subsample_pattern);
+	}
+
+	size = tfe_out_data->cdm_util_ops->cdm_required_size_reg_random(j/2);
+
+	/* cdm util returns dwords, need to convert to bytes */
+	if ((size * 4) > update_hfr->cmd.size) {
+		CAM_ERR(CAM_ISP,
+			"Failed! Buf size:%d insufficient, expected size:%d",
+			update_hfr->cmd.size, size);
+		return -ENOMEM;
+	}
+
+	tfe_out_data->cdm_util_ops->cdm_write_regrandom(
+		update_hfr->cmd.cmd_buf_addr, j/2, reg_val_pair);
+
+	/* cdm util returns dwords, need to convert to bytes */
+	update_hfr->cmd.used_bytes = size * 4;
+
+	return 0;
+}
+
+static int cam_tfe_bus_update_stripe_cfg(void *priv, void *cmd_args,
+	uint32_t arg_size)
+{
+	struct cam_tfe_bus_priv                     *bus_priv;
+	struct cam_tfe_dual_update_args             *stripe_args;
+	struct cam_tfe_bus_tfe_out_data             *tfe_out_data = NULL;
+	struct cam_tfe_bus_wm_resource_data         *wm_data = NULL;
+	struct cam_isp_tfe_dual_stripe_config       *stripe_config;
+	uint32_t i;
+
+	bus_priv = (struct cam_tfe_bus_priv  *) priv;
+	stripe_args = (struct cam_tfe_dual_update_args *)cmd_args;
+
+	tfe_out_data = (struct cam_tfe_bus_tfe_out_data *)
+		stripe_args->res->res_priv;
+
+	if (!tfe_out_data) {
+		CAM_ERR(CAM_ISP, "Failed! Invalid data");
+		return -EINVAL;
+	}
+
+	if (stripe_args->res->res_id < CAM_ISP_TFE_OUT_RES_BASE ||
+		stripe_args->res->res_id >= CAM_ISP_TFE_OUT_RES_MAX)
+		return 0;
+
+	stripe_config = (struct cam_isp_tfe_dual_stripe_config *)
+		stripe_args->stripe_config;
+
+	for (i = 0; i < tfe_out_data->num_wm; i++) {
+		stripe_config = &stripe_args->stripe_config[i];
+		wm_data = tfe_out_data->wm_res[i]->res_priv;
+		wm_data->width = stripe_config->width;
+		wm_data->offset = stripe_config->offset;
+		CAM_DBG(CAM_ISP, "id:%x WM:%d width:0x%x offset:%x",
+			stripe_args->res->res_id, wm_data->index,
+			wm_data->width, wm_data->offset);
+	}
+
+	return 0;
+}
+
+static int cam_tfe_bus_init_hw(void *hw_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_tfe_bus_priv    *bus_priv = hw_priv;
+	uint32_t                   i, top_irq_reg_mask[3] = {0};
+	int rc = -EINVAL;
+
+	if (!bus_priv) {
+		CAM_ERR(CAM_ISP, "Invalid args");
+		return -EINVAL;
+	}
+
+	top_irq_reg_mask[0] = (1 << bus_priv->top_bus_wr_irq_shift);
+
+	rc  = cam_tfe_irq_config(bus_priv->common_data.tfe_core_data,
+		top_irq_reg_mask, CAM_TFE_TOP_IRQ_REG_NUM, true);
+	if (rc)
+		return rc;
+
+	/* configure the error irq */
+	for (i = 0; i < CAM_TFE_BUS_IRQ_REGISTERS_MAX; i++)
+		cam_io_w(bus_priv->bus_irq_error_mask[i],
+			bus_priv->common_data.mem_base +
+			bus_priv->common_data.common_reg->irq_mask[i]);
+
+	return 0;
+}
+
+static int cam_tfe_bus_deinit_hw(void *hw_priv,
+	void *deinit_hw_args, uint32_t arg_size)
+{
+	struct cam_tfe_bus_priv    *bus_priv = hw_priv;
+	uint32_t                    top_irq_reg_mask[3] = {0};
+	int                              rc = 0;
+
+	if (!bus_priv) {
+		CAM_ERR(CAM_ISP, "Error: Invalid args");
+		return -EINVAL;
+	}
+	top_irq_reg_mask[0] = (1 << bus_priv->top_bus_wr_irq_shift);
+	rc  = cam_tfe_irq_config(bus_priv->common_data.tfe_core_data,
+		top_irq_reg_mask, CAM_TFE_TOP_IRQ_REG_NUM, false);
+	if (rc)
+		return rc;
+
+	/* disable the bus irqs */
+	cam_io_w(0, bus_priv->common_data.mem_base +
+		bus_priv->common_data.common_reg->irq_mask[0]);
+
+	cam_io_w_mb(0, bus_priv->common_data.mem_base +
+		bus_priv->common_data.common_reg->irq_mask[1]);
+
+	return rc;
+}
+
+static int cam_tfe_bus_process_cmd(void *priv,
+	uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
+{
+	struct cam_tfe_bus_priv      *bus_priv;
+	int rc = -EINVAL;
+	uint32_t i, val;
+
+	if (!priv || !cmd_args) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid input arguments");
+		return -EINVAL;
+	}
+
+	switch (cmd_type) {
+	case CAM_ISP_HW_CMD_GET_BUF_UPDATE:
+		rc = cam_tfe_bus_update_wm(priv, cmd_args, arg_size);
+		break;
+	case CAM_ISP_HW_CMD_GET_HFR_UPDATE:
+		rc = cam_tfe_bus_update_hfr(priv, cmd_args, arg_size);
+		break;
+	case CAM_ISP_HW_CMD_GET_SECURE_MODE:
+		rc = cam_tfe_bus_get_secure_mode(priv, cmd_args, arg_size);
+		break;
+	case CAM_ISP_HW_CMD_STRIPE_UPDATE:
+		rc = cam_tfe_bus_update_stripe_cfg(priv,
+			cmd_args, arg_size);
+		break;
+	case CAM_ISP_HW_CMD_STOP_BUS_ERR_IRQ:
+		bus_priv = (struct cam_tfe_bus_priv  *) priv;
+		/* disable the bus error interrupts */
+		for (i = 0; i < CAM_TFE_BUS_IRQ_REGISTERS_MAX; i++) {
+			val = cam_io_r(bus_priv->common_data.mem_base +
+				bus_priv->common_data.common_reg->irq_mask[i]);
+			val &= ~bus_priv->bus_irq_error_mask[i];
+			cam_io_w(val, bus_priv->common_data.mem_base +
+				bus_priv->common_data.common_reg->irq_mask[i]);
+		}
+		break;
+	default:
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid bus process command:%d",
+			cmd_type);
+		break;
+	}
+
+	return rc;
+}
+
+int cam_tfe_bus_init(
+	struct cam_hw_soc_info               *soc_info,
+	struct cam_hw_intf                   *hw_intf,
+	void                                 *bus_hw_info,
+	void                                 *core_data,
+	struct cam_tfe_bus                  **tfe_bus)
+{
+	int i, rc = 0;
+	struct cam_tfe_bus_priv    *bus_priv = NULL;
+	struct cam_tfe_bus         *tfe_bus_local;
+	struct cam_tfe_bus_hw_info *hw_info = bus_hw_info;
+
+	if (!soc_info || !hw_intf || !bus_hw_info) {
+		CAM_ERR(CAM_ISP,
+			"Invalid params soc_info:%pK hw_intf:%pK hw_info:%pK",
+			soc_info, hw_intf, bus_hw_info);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	tfe_bus_local = kzalloc(sizeof(struct cam_tfe_bus), GFP_KERNEL);
+	if (!tfe_bus_local) {
+		CAM_DBG(CAM_ISP, "Failed to alloc for tfe_bus");
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	bus_priv = kzalloc(sizeof(struct cam_tfe_bus_priv),
+		GFP_KERNEL);
+	if (!bus_priv) {
+		CAM_DBG(CAM_ISP, "Failed to alloc for tfe_bus_priv");
+		rc = -ENOMEM;
+		goto free_bus_local;
+	}
+	tfe_bus_local->bus_priv = bus_priv;
+
+	bus_priv->num_client                   = hw_info->num_client;
+	bus_priv->num_out                      = hw_info->num_out;
+	bus_priv->top_bus_wr_irq_shift         = hw_info->top_bus_wr_irq_shift;
+	bus_priv->common_data.comp_done_shift  = hw_info->comp_done_shift;
+
+	bus_priv->common_data.num_sec_out      = 0;
+	bus_priv->common_data.secure_mode      = CAM_SECURE_MODE_NON_SECURE;
+	bus_priv->common_data.core_index       = soc_info->index;
+	bus_priv->common_data.mem_base         =
+		CAM_SOC_GET_REG_MAP_START(soc_info, TFE_CORE_BASE_IDX);
+	bus_priv->common_data.hw_intf          = hw_intf;
+	bus_priv->common_data.tfe_core_data    = core_data;
+	bus_priv->common_data.common_reg       = &hw_info->common_reg;
+	bus_priv->comp_buf_done_mask      = hw_info->comp_buf_done_mask;
+	bus_priv->comp_rup_done_mask      = hw_info->comp_rup_done_mask;
+
+	for (i = 0; i < CAM_TFE_BUS_IRQ_REGISTERS_MAX; i++)
+		bus_priv->bus_irq_error_mask[i] =
+			hw_info->bus_irq_error_mask[i];
+
+	if (strnstr(soc_info->compatible, "lite",
+		strlen(soc_info->compatible)) != NULL)
+		bus_priv->common_data.is_lite = true;
+	else
+		bus_priv->common_data.is_lite = false;
+
+	for (i = 0; i < CAM_TFE_BUS_RUP_GRP_MAX; i++)
+		bus_priv->common_data.rup_irq_enable[i] = false;
+
+	mutex_init(&bus_priv->common_data.bus_mutex);
+
+	INIT_LIST_HEAD(&bus_priv->free_comp_grp);
+	INIT_LIST_HEAD(&bus_priv->used_comp_grp);
+
+	for (i = 0; i < bus_priv->num_client; i++) {
+		rc = cam_tfe_bus_init_wm_resource(i, bus_priv, bus_hw_info,
+			&bus_priv->bus_client[i]);
+		if (rc < 0) {
+			CAM_ERR(CAM_ISP, "Init WM failed rc=%d", rc);
+			goto deinit_wm;
+		}
+	}
+
+	for (i = 0; i < CAM_TFE_BUS_COMP_GRP_MAX; i++) {
+		rc = cam_tfe_bus_init_comp_grp(i, soc_info,
+			bus_priv, bus_hw_info,
+			&bus_priv->comp_grp[i]);
+		if (rc < 0) {
+			CAM_ERR(CAM_ISP, "Init Comp Grp failed rc=%d", rc);
+			goto deinit_comp_grp;
+		}
+	}
+
+	for (i = 0; i < bus_priv->num_out; i++) {
+		rc = cam_tfe_bus_init_tfe_out_resource(i, bus_priv,
+			bus_hw_info);
+		if (rc < 0) {
+			CAM_ERR(CAM_ISP, "Init TFE Out failed rc=%d", rc);
+			goto deinit_tfe_out;
+		}
+	}
+
+	spin_lock_init(&bus_priv->common_data.spin_lock);
+
+	tfe_bus_local->hw_ops.reserve      = cam_tfe_bus_acquire_tfe_out;
+	tfe_bus_local->hw_ops.release      = cam_tfe_bus_release_tfe_out;
+	tfe_bus_local->hw_ops.start        = cam_tfe_bus_start_tfe_out;
+	tfe_bus_local->hw_ops.stop         = cam_tfe_bus_stop_tfe_out;
+	tfe_bus_local->hw_ops.init         = cam_tfe_bus_init_hw;
+	tfe_bus_local->hw_ops.deinit       = cam_tfe_bus_deinit_hw;
+	tfe_bus_local->bottom_half_handler = cam_tfe_bus_bottom_half;
+	tfe_bus_local->hw_ops.process_cmd  = cam_tfe_bus_process_cmd;
+
+	*tfe_bus = tfe_bus_local;
+
+	return rc;
+
+deinit_tfe_out:
+	if (i < 0)
+		i = CAM_TFE_BUS_TFE_OUT_MAX;
+	for (--i; i >= 0; i--)
+		cam_tfe_bus_deinit_tfe_out_resource(&bus_priv->tfe_out[i]);
+
+deinit_comp_grp:
+	if (i < 0)
+		i = CAM_TFE_BUS_COMP_GRP_MAX;
+	for (--i; i >= 0; i--)
+		cam_tfe_bus_deinit_comp_grp(&bus_priv->comp_grp[i]);
+
+deinit_wm:
+	if (i < 0)
+		i = bus_priv->num_client;
+	for (--i; i >= 0; i--)
+		cam_tfe_bus_deinit_wm_resource(&bus_priv->bus_client[i]);
+
+	kfree(tfe_bus_local->bus_priv);
+
+free_bus_local:
+	kfree(tfe_bus_local);
+
+end:
+	return rc;
+}
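
The error path of cam_tfe_bus_init() uses the fall-through unwind idiom: a failure at loop index i releases entries [0, i) of the failing stage, then falls into the labels below it, where the `if (i < 0)` guard (i is -1 after a completed unwind loop) resets i to the previous stage's full count. A self-contained sketch of the idiom with two hypothetical init stages:

#include <stdio.h>

#define NUM_WM   3	/* hypothetical stage sizes */
#define NUM_COMP 2

static int init_wm(int i)      { printf("init wm %d\n", i); return 0; }
static void deinit_wm(int i)   { printf("deinit wm %d\n", i); }
static int init_comp(int i)    { return (i == 1) ? -1 : 0; } /* fails at 1 */
static void deinit_comp(int i) { printf("deinit comp %d\n", i); }

int main(void)
{
	int i, rc = 0;

	for (i = 0; i < NUM_WM; i++) {
		rc = init_wm(i);
		if (rc)
			goto undo_wm;
	}
	for (i = 0; i < NUM_COMP; i++) {
		rc = init_comp(i);
		if (rc)
			goto undo_comp;
	}
	return 0;

undo_comp:
	/* i is the failing index: undo [0, i), leaving i == -1 */
	for (--i; i >= 0; i--)
		deinit_comp(i);
undo_wm:
	if (i < 0)
		i = NUM_WM;	/* fell through: previous stage was complete */
	for (--i; i >= 0; i--)
		deinit_wm(i);
	return rc;
}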
+
+int cam_tfe_bus_deinit(
+	struct cam_tfe_bus                  **tfe_bus)
+{
+	int i, rc = 0;
+	struct cam_tfe_bus_priv         *bus_priv = NULL;
+	struct cam_tfe_bus              *tfe_bus_local;
+
+	if (!tfe_bus || !*tfe_bus) {
+		CAM_ERR(CAM_ISP, "Invalid input");
+		return -EINVAL;
+	}
+	tfe_bus_local = *tfe_bus;
+	bus_priv = tfe_bus_local->bus_priv;
+
+	if (!bus_priv) {
+		CAM_ERR(CAM_ISP, "bus_priv is NULL");
+		rc = -ENODEV;
+		goto free_bus_local;
+	}
+
+	for (i = 0; i < bus_priv->num_client; i++) {
+		rc = cam_tfe_bus_deinit_wm_resource(
+			&bus_priv->bus_client[i]);
+		if (rc < 0)
+			CAM_ERR(CAM_ISP,
+				"Deinit WM failed rc=%d", rc);
+	}
+
+	for (i = 0; i < CAM_TFE_BUS_COMP_GRP_MAX; i++) {
+		rc = cam_tfe_bus_deinit_comp_grp(&bus_priv->comp_grp[i]);
+		if (rc < 0)
+			CAM_ERR(CAM_ISP,
+				"Deinit Comp Grp failed rc=%d", rc);
+	}
+
+	for (i = 0; i < CAM_TFE_BUS_TFE_OUT_MAX; i++) {
+		rc = cam_tfe_bus_deinit_tfe_out_resource(
+			&bus_priv->tfe_out[i]);
+		if (rc < 0)
+			CAM_ERR(CAM_ISP,
+				"Deinit TFE Out failed rc=%d", rc);
+	}
+
+	INIT_LIST_HEAD(&bus_priv->free_comp_grp);
+	INIT_LIST_HEAD(&bus_priv->used_comp_grp);
+
+	mutex_destroy(&bus_priv->common_data.bus_mutex);
+	kfree(tfe_bus_local->bus_priv);
+
+free_bus_local:
+	kfree(tfe_bus_local);
+
+	*tfe_bus = NULL;
+
+	return rc;
+}

+ 240 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_bus.h

@@ -0,0 +1,240 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+
+#ifndef _CAM_TFE_BUS_H_
+#define _CAM_TFE_BUS_H_
+
+#include "cam_soc_util.h"
+#include "cam_isp_hw.h"
+#include "cam_tfe_hw_intf.h"
+
+#define CAM_TFE_BUS_MAX_CLIENTS         10
+#define CAM_TFE_BUS_MAX_SUB_GRPS         4
+#define CAM_TFE_BUS_MAX_PERF_CNT_REG     8
+#define CAM_TFE_BUS_MAX_IRQ_REGISTERS    2
+
+#define CAM_TFE_BUS_1_0             0x1000
+
+
+#define CAM_TFE_ADD_REG_VAL_PAIR(buf_array, index, offset, val)    \
+	do {                                               \
+		buf_array[(index)++] = offset;             \
+		buf_array[(index)++] = val;                \
+	} while (0)
+
+#define ALIGNUP(value, alignment) \
+	((((value) + (alignment) - 1) / (alignment)) * (alignment))
+
+typedef int (*CAM_BUS_HANDLER_BOTTOM_HALF)(void      *bus_priv,
+	bool rup_process, struct cam_tfe_irq_evt_payload   *evt_payload);
+
+enum cam_tfe_bus_plane_type {
+	PLANE_Y,
+	PLANE_C,
+	PLANE_MAX,
+};
+
+
+enum cam_tfe_bus_tfe_core_id {
+	CAM_TFE_BUS_TFE_CORE_0,
+	CAM_TFE_BUS_TFE_CORE_1,
+	CAM_TFE_BUS_TFE_CORE_2,
+	CAM_TFE_BUS_TFE_CORE_MAX,
+};
+
+enum cam_tfe_bus_comp_grp_id {
+	CAM_TFE_BUS_COMP_GRP_0,
+	CAM_TFE_BUS_COMP_GRP_1,
+	CAM_TFE_BUS_COMP_GRP_2,
+	CAM_TFE_BUS_COMP_GRP_3,
+	CAM_TFE_BUS_COMP_GRP_4,
+	CAM_TFE_BUS_COMP_GRP_5,
+	CAM_TFE_BUS_COMP_GRP_6,
+	CAM_TFE_BUS_COMP_GRP_7,
+	CAM_TFE_BUS_COMP_GRP_MAX,
+};
+
+enum cam_tfe_bus_rup_grp_id {
+	CAM_TFE_BUS_RUP_GRP_0,
+	CAM_TFE_BUS_RUP_GRP_1,
+	CAM_TFE_BUS_RUP_GRP_2,
+	CAM_TFE_BUS_RUP_GRP_3,
+	CAM_TFE_BUS_RUP_GRP_MAX,
+};
+
+enum cam_tfe_bus_tfe_out_id {
+	CAM_TFE_BUS_TFE_OUT_RDI0,
+	CAM_TFE_BUS_TFE_OUT_RDI1,
+	CAM_TFE_BUS_TFE_OUT_RDI2,
+	CAM_TFE_BUS_TFE_OUT_FULL,
+	CAM_TFE_BUS_TFE_OUT_RAW_DUMP,
+	CAM_TFE_BUS_TFE_OUT_PDAF,
+	CAM_TFE_BUS_TFE_OUT_STATS_HDR_BE,
+	CAM_TFE_BUS_TFE_OUT_STATS_HDR_BHIST,
+	CAM_TFE_BUS_TFE_OUT_STATS_TL_BG,
+	CAM_TFE_BUS_TFE_OUT_STATS_AWB_BG,
+	CAM_TFE_BUS_TFE_OUT_STATS_BF,
+	CAM_TFE_BUS_TFE_OUT_MAX,
+};
+
+/*
+ * struct cam_tfe_bus_reg_offset_common:
+ *
+ * @Brief:        Common registers across all BUS Clients
+ */
+struct cam_tfe_bus_reg_offset_common {
+	uint32_t hw_version;
+	uint32_t cgc_ovd;
+	uint32_t comp_cfg_0;
+	uint32_t comp_cfg_1;
+	uint32_t frameheader_cfg[4];
+	uint32_t pwr_iso_cfg;
+	uint32_t overflow_status_clear;
+	uint32_t ccif_violation_status;
+	uint32_t overflow_status;
+	uint32_t image_size_violation_status;
+	uint32_t perf_count_cfg[CAM_TFE_BUS_MAX_PERF_CNT_REG];
+	uint32_t perf_count_val[CAM_TFE_BUS_MAX_PERF_CNT_REG];
+	uint32_t perf_count_status;
+	uint32_t debug_status_top_cfg;
+	uint32_t debug_status_top;
+	uint32_t test_bus_ctrl;
+	uint32_t irq_mask[CAM_TFE_BUS_IRQ_REGISTERS_MAX];
+	uint32_t irq_clear[CAM_TFE_BUS_IRQ_REGISTERS_MAX];
+	uint32_t irq_status[CAM_TFE_BUS_IRQ_REGISTERS_MAX];
+	uint32_t irq_cmd;
+};
+
+/*
+ * struct cam_tfe_bus_reg_offset_bus_client:
+ *
+ * @Brief:        Register offsets for BUS Clients
+ */
+struct cam_tfe_bus_reg_offset_bus_client {
+	uint32_t cfg;
+	uint32_t image_addr;
+	uint32_t frame_incr;
+	uint32_t image_cfg_0;
+	uint32_t image_cfg_1;
+	uint32_t image_cfg_2;
+	uint32_t packer_cfg;
+	uint32_t bw_limit;
+	uint32_t frame_header_addr;
+	uint32_t frame_header_incr;
+	uint32_t frame_header_cfg;
+	uint32_t line_done_cfg;
+	uint32_t irq_subsample_period;
+	uint32_t irq_subsample_pattern;
+	uint32_t framedrop_period;
+	uint32_t framedrop_pattern;
+	uint32_t addr_status_0;
+	uint32_t addr_status_1;
+	uint32_t addr_status_2;
+	uint32_t addr_status_3;
+	uint32_t debug_status_cfg;
+	uint32_t debug_status_0;
+	uint32_t debug_status_1;
+	uint32_t comp_group;
+};
+
+/*
+ * struct cam_tfe_bus_tfe_out_hw_info:
+ *
+ * @Brief:           HW capability of TFE Bus Client
+ * @tfe_out_id:       TFE out port id
+ * @max_width:        Max width supported by the out port
+ * @max_height:       Max height supported by the out port
+ * @composite_group:  Out port composite group id
+ * @rup_group_id:     Reg update group id of the out port
+ */
+struct cam_tfe_bus_tfe_out_hw_info {
+	enum cam_tfe_bus_tfe_out_id         tfe_out_id;
+	uint32_t                            max_width;
+	uint32_t                            max_height;
+	uint32_t                            composite_group;
+	uint32_t                            rup_group_id;
+};
+
+/*
+ * struct cam_tfe_bus_hw_info:
+ *
+ * @Brief:                 HW register info for entire Bus
+ *
+ * @common_reg:            Common register details
+ * @num_client:            Total number of write clients
+ * @bus_client_reg:        Bus client register info
+ * @num_out:               Number of TFE out ports
+ * @tfe_out_hw_info:       TFE output capability
+ * @comp_done_shift:       Mask shift for comp done mask
+ * @top_bus_wr_irq_shift:  Mask shift for top level BUS WR irq
+ * @comp_buf_done_mask:    Composite buf done bits mask
+ * @comp_rup_done_mask:    Reg update done mask
+ * @bus_irq_error_mask:    Bus irq error mask bits
+ */
+struct cam_tfe_bus_hw_info {
+	struct cam_tfe_bus_reg_offset_common common_reg;
+	uint32_t num_client;
+	struct cam_tfe_bus_reg_offset_bus_client
+		bus_client_reg[CAM_TFE_BUS_MAX_CLIENTS];
+	uint32_t num_out;
+	struct cam_tfe_bus_tfe_out_hw_info
+		tfe_out_hw_info[CAM_TFE_BUS_TFE_OUT_MAX];
+	uint32_t comp_done_shift;
+	uint32_t top_bus_wr_irq_shift;
+	uint32_t comp_buf_done_mask;
+	uint32_t comp_rup_done_mask;
+	uint32_t bus_irq_error_mask[CAM_TFE_BUS_IRQ_REGISTERS_MAX];
+};
+
+/*
+ * struct cam_tfe_bus:
+ *
+ * @Brief:                   Bus interface structure
+ *
+ * @bus_priv:                Private data of bus
+ * @hw_ops:                  Hardware interface functions
+ * @bottom_half_handler:     Bottom Half handler function
+ */
+struct cam_tfe_bus {
+	void                          *bus_priv;
+	struct cam_hw_ops              hw_ops;
+	CAM_BUS_HANDLER_BOTTOM_HALF    bottom_half_handler;
+};
+
+/*
+ * cam_tfe_bus_init()
+ *
+ * @Brief:                   Initialize Bus layer
+ *
+ * @soc_info:                Soc Information for the associated HW
+ * @hw_intf:                 HW Interface of HW to which this resource belongs
+ * @bus_hw_info:             BUS HW info that contains details of BUS registers
+ * @core_data:               Core data pointer used for top irq config
+ * @tfe_bus:                 Pointer to tfe_bus structure which will be filled
+ *                           and returned on successful initialize
+ *
+ * @Return:                  0: Success
+ *                           Non-zero: Failure
+ */
+int cam_tfe_bus_init(
+	struct cam_hw_soc_info               *soc_info,
+	struct cam_hw_intf                   *hw_intf,
+	void                                 *bus_hw_info,
+	void                                 *core_data,
+	struct cam_tfe_bus                  **tfe_bus);
+
+/*
+ * cam_tfe_bus_deinit()
+ *
+ * @Brief:                   Deinitialize Bus layer
+ *
+ * @tfe_bus:                 Pointer to tfe_bus structure to deinitialize
+ *
+ * @Return:                  0: Success
+ *                           Non-zero: Failure
+ */
+int cam_tfe_bus_deinit(struct cam_tfe_bus     **tfe_bus);
+
+#endif /* _CAM_TFE_BUS_H_ */
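
ALIGNUP() above rounds a value up to the next multiple of the alignment with pure integer arithmetic; cam_tfe_bus_update_wm() uses it to pad plane strides to 16 bytes. A quick worked check:

#include <stdio.h>

/* same rounding as the driver's ALIGNUP() */
#define ALIGNUP(value, alignment) \
	((((value) + (alignment) - 1) / (alignment)) * (alignment))

int main(void)
{
	printf("%d\n", ALIGNUP(1930, 16));	/* 1936: rounded up */
	printf("%d\n", ALIGNUP(1936, 16));	/* 1936: already aligned */
	return 0;
}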

+ 2529 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_core.c

@@ -0,0 +1,2529 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/ratelimit.h>
+#include <media/cam_tfe.h>
+#include "cam_cdm_util.h"
+#include "cam_tasklet_util.h"
+#include "cam_isp_hw_mgr_intf.h"
+#include "cam_tfe_soc.h"
+#include "cam_tfe_core.h"
+#include "cam_tfe_bus.h"
+#include "cam_debug_util.h"
+#include "cam_cpas_api.h"
+
+static const char drv_name[] = "tfe";
+
+#define CAM_TFE_HW_RESET_HW_AND_REG_VAL       0x1
+#define CAM_TFE_HW_RESET_HW_VAL               0x10000
+#define CAM_TFE_DELAY_BW_REDUCTION_NUM_FRAMES 3
+#define CAM_TFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX  2
+
+struct cam_tfe_top_common_data {
+	struct cam_hw_soc_info                     *soc_info;
+	struct cam_hw_intf                         *hw_intf;
+	struct cam_tfe_top_reg_offset_common       *common_reg;
+	struct cam_tfe_reg_dump_data               *reg_dump_data;
+};
+
+struct cam_tfe_top_priv {
+	struct cam_tfe_top_common_data      common_data;
+	struct cam_isp_resource_node        in_rsrc[CAM_TFE_TOP_IN_PORT_MAX];
+	unsigned long                       hw_clk_rate;
+	struct cam_axi_vote                 applied_axi_vote;
+	struct cam_axi_vote             req_axi_vote[CAM_TFE_TOP_IN_PORT_MAX];
+	unsigned long                   req_clk_rate[CAM_TFE_TOP_IN_PORT_MAX];
+	struct cam_axi_vote             last_vote[CAM_TFE_TOP_IN_PORT_MAX *
+					CAM_TFE_DELAY_BW_REDUCTION_NUM_FRAMES];
+	uint32_t                        last_counter;
+	uint64_t                        total_bw_applied;
+	enum cam_tfe_bw_control_action
+		axi_vote_control[CAM_TFE_TOP_IN_PORT_MAX];
+	uint32_t                          irq_prepared_mask[3];
+	void                            *tasklet_info;
+};
+
+struct cam_tfe_camif_data {
+	void __iomem                                *mem_base;
+	struct cam_hw_intf                          *hw_intf;
+	struct cam_tfe_top_reg_offset_common        *common_reg;
+	struct cam_tfe_camif_reg                    *camif_reg;
+	struct cam_tfe_camif_reg_data               *reg_data;
+	struct cam_hw_soc_info                      *soc_info;
+
+
+	cam_hw_mgr_event_cb_func           event_cb;
+	void                              *priv;
+	enum cam_isp_hw_sync_mode          sync_mode;
+	uint32_t                           dsp_mode;
+	uint32_t                           pix_pattern;
+	uint32_t                           first_pixel;
+	uint32_t                           first_line;
+	uint32_t                           last_pixel;
+	uint32_t                           last_line;
+	bool                               enable_sof_irq_debug;
+	uint32_t                           irq_debug_cnt;
+	uint32_t                           camif_debug;
+	uint32_t                           camif_pd_enable;
+	uint32_t                           dual_tfe_sync_sel;
+};
+
+struct cam_tfe_rdi_data {
+	void __iomem                                *mem_base;
+	struct cam_hw_intf                          *hw_intf;
+	struct cam_tfe_top_reg_offset_common        *common_reg;
+	struct cam_tfe_rdi_reg                      *rdi_reg;
+	struct cam_tfe_rdi_reg_data                 *reg_data;
+	cam_hw_mgr_event_cb_func                     event_cb;
+	void                                        *priv;
+	enum cam_isp_hw_sync_mode                    sync_mode;
+	uint32_t                                     pix_pattern;
+};
+
+static int cam_tfe_validate_pix_pattern(uint32_t pattern)
+{
+	int rc;
+
+	switch (pattern) {
+	case CAM_ISP_TFE_PATTERN_BAYER_RGRGRG:
+	case CAM_ISP_TFE_PATTERN_BAYER_GRGRGR:
+	case CAM_ISP_TFE_PATTERN_BAYER_BGBGBG:
+	case CAM_ISP_TFE_PATTERN_BAYER_GBGBGB:
+	case CAM_ISP_TFE_PATTERN_YUV_YCBYCR:
+	case CAM_ISP_TFE_PATTERN_YUV_YCRYCB:
+	case CAM_ISP_TFE_PATTERN_YUV_CBYCRY:
+	case CAM_ISP_TFE_PATTERN_YUV_CRYCBY:
+		rc = 0;
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "Error Invalid pix pattern:%d", pattern);
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+static int cam_tfe_get_evt_payload(struct cam_tfe_hw_core_info *core_info,
+	struct cam_tfe_irq_evt_payload    **evt_payload)
+{
+	spin_lock(&core_info->spin_lock);
+	if (list_empty(&core_info->free_payload_list)) {
+		*evt_payload = NULL;
+		spin_unlock(&core_info->spin_lock);
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload, core id 0x%x",
+			core_info->core_index);
+		return -ENODEV;
+	}
+
+	*evt_payload = list_first_entry(&core_info->free_payload_list,
+		struct cam_tfe_irq_evt_payload, list);
+	list_del_init(&(*evt_payload)->list);
+	spin_unlock(&core_info->spin_lock);
+
+	return 0;
+}
+
+int cam_tfe_put_evt_payload(void             *core_info,
+	struct cam_tfe_irq_evt_payload  **evt_payload)
+{
+	struct cam_tfe_hw_core_info        *tfe_core_info = core_info;
+	unsigned long                       flags;
+
+	if (!core_info) {
+		CAM_ERR(CAM_ISP, "Invalid param core_info NULL");
+		return -EINVAL;
+	}
+	if (*evt_payload == NULL) {
+		CAM_ERR(CAM_ISP, "No payload to put");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&tfe_core_info->spin_lock, flags);
+	(*evt_payload)->error_type = 0;
+	list_add_tail(&(*evt_payload)->list, &tfe_core_info->free_payload_list);
+	*evt_payload = NULL;
+	spin_unlock_irqrestore(&tfe_core_info->spin_lock, flags);
+
+	return 0;
+}
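
cam_tfe_get_evt_payload()/cam_tfe_put_evt_payload() implement a fixed, preallocated pool: the IRQ top half takes a payload from a spinlock-protected free list and the tasklet bottom half returns it, so nothing is ever allocated in interrupt context and an IRQ storm simply drops events once the pool is empty. A minimal userspace sketch of the same discipline (array-backed instead of list_head, a mutex standing in for the spinlock):

#include <pthread.h>
#include <stdio.h>

#define POOL_SIZE 4	/* hypothetical pool depth */

struct payload {
	int in_use;
	int irq_status;	/* filled by the top half in the real driver */
};

static struct payload pool[POOL_SIZE];
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

/* take a free payload; NULL when exhausted (the driver logs and drops) */
static struct payload *get_payload(void)
{
	struct payload *p = NULL;
	int i;

	pthread_mutex_lock(&pool_lock);
	for (i = 0; i < POOL_SIZE; i++) {
		if (!pool[i].in_use) {
			pool[i].in_use = 1;
			p = &pool[i];
			break;
		}
	}
	pthread_mutex_unlock(&pool_lock);
	return p;
}

/* return the payload and clear the caller's pointer, as the driver does */
static void put_payload(struct payload **p)
{
	pthread_mutex_lock(&pool_lock);
	(*p)->in_use = 0;
	*p = NULL;
	pthread_mutex_unlock(&pool_lock);
}

int main(void)
{
	struct payload *p = get_payload();

	if (!p)
		return -1;	/* event dropped */
	p->irq_status = 0x42;
	put_payload(&p);
	return 0;
}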
+
+int cam_tfe_get_hw_caps(void *hw_priv, void *get_hw_cap_args,
+		uint32_t arg_size)
+{
+	return -EPERM;
+}
+
+void cam_tfe_get_timestamp(struct cam_isp_timestamp *time_stamp)
+{
+	struct timespec ts;
+
+	ts = ktime_to_timespec(ktime_get_boottime());
+	time_stamp->mono_time.tv_sec    = ts.tv_sec;
+	time_stamp->mono_time.tv_usec   = ts.tv_nsec/1000;
+}
+
+int cam_tfe_irq_config(void     *tfe_core_data,
+	uint32_t  *irq_mask, uint32_t num_reg, bool enable)
+{
+	struct cam_tfe_hw_core_info    *core_info;
+	struct cam_tfe_top_priv        *top_priv;
+	struct cam_hw_soc_info         *soc_info;
+	void __iomem                   *mem_base;
+	bool                            need_lock;
+	unsigned long                   flags = 0;
+	uint32_t i, val;
+
+	if (!tfe_core_data) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"Invalid core data");
+		return -EINVAL;
+	}
+
+	core_info = (struct cam_tfe_hw_core_info    *)tfe_core_data;
+	top_priv = (struct cam_tfe_top_priv        *)core_info->top_priv;
+	soc_info = (struct cam_hw_soc_info  *)top_priv->common_data.soc_info;
+	mem_base = soc_info->reg_map[TFE_CORE_BASE_IDX].mem_base;
+
+	need_lock = !in_irq();
+	if (need_lock)
+		spin_lock_irqsave(&core_info->spin_lock, flags);
+
+	for (i = 0; i < num_reg; i++) {
+		val = cam_io_r_mb(mem_base  +
+			core_info->tfe_hw_info->top_irq_mask[i]);
+		if (enable)
+			val |= irq_mask[i];
+		else
+			val &= ~irq_mask[i];
+		cam_io_w_mb(val, mem_base +
+			core_info->tfe_hw_info->top_irq_mask[i]);
+	}
+	if (need_lock)
+		spin_unlock_irqrestore(&core_info->spin_lock, flags);
+
+	return 0;
+}
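
cam_tfe_irq_config() is the single choke point through which sub-modules enable or disable their bits in the top-level IRQ mask bank via read-modify-write; the bus layer, for instance, passes a one-bit mask built from top_bus_wr_irq_shift (see cam_tfe_bus_init_hw() earlier in this change). A standalone sketch of the mask handling, with an array standing in for the MMIO registers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOP_IRQ_REG_NUM 3	/* matches the 3-register top IRQ bank */

static uint32_t fake_mask_regs[TOP_IRQ_REG_NUM];	/* stands in for MMIO */

/* read-modify-write every mask register, as cam_tfe_irq_config() does */
static void irq_config(const uint32_t *irq_mask, uint32_t num_reg, bool enable)
{
	uint32_t i, val;

	for (i = 0; i < num_reg; i++) {
		val = fake_mask_regs[i];	/* cam_io_r_mb() in the driver */
		if (enable)
			val |= irq_mask[i];
		else
			val &= ~irq_mask[i];
		fake_mask_regs[i] = val;	/* cam_io_w_mb() in the driver */
	}
}

int main(void)
{
	uint32_t mask[TOP_IRQ_REG_NUM] = {0};
	uint32_t top_bus_wr_irq_shift = 1;	/* hypothetical shift value */

	mask[0] = 1u << top_bus_wr_irq_shift;
	irq_config(mask, TOP_IRQ_REG_NUM, true);	/* bus init path */
	printf("mask0 after enable:  0x%x\n", fake_mask_regs[0]);
	irq_config(mask, TOP_IRQ_REG_NUM, false);	/* bus deinit path */
	printf("mask0 after disable: 0x%x\n", fake_mask_regs[0]);
	return 0;
}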
+
+static void cam_tfe_log_error_irq_status(
+	struct cam_tfe_hw_core_info          *core_info,
+	struct cam_tfe_top_priv              *top_priv,
+	struct cam_tfe_irq_evt_payload       *evt_payload)
+{
+	struct cam_tfe_hw_info               *hw_info;
+	void __iomem                         *mem_base;
+	struct cam_hw_soc_info               *soc_info;
+	struct cam_tfe_soc_private           *soc_private;
+	struct cam_tfe_camif_data            *camif_data;
+	struct cam_tfe_rdi_data              *rdi_data;
+	uint32_t  i, val_0, val_1, val_2, val_3;
+
+	hw_info = core_info->tfe_hw_info;
+	mem_base = top_priv->common_data.soc_info->reg_map[0].mem_base;
+	soc_info = top_priv->common_data.soc_info;
+	soc_private = top_priv->common_data.soc_info->soc_private;
+
+	val_0 = cam_io_r(mem_base  +
+		top_priv->common_data.common_reg->debug_0);
+	val_1 = cam_io_r(mem_base  +
+		top_priv->common_data.common_reg->debug_1);
+	val_2 = cam_io_r(mem_base  +
+		top_priv->common_data.common_reg->debug_2);
+	val_3 = cam_io_r(mem_base  +
+		top_priv->common_data.common_reg->debug_3);
+
+	CAM_INFO(CAM_ISP, "TOP IRQ[0]:0x%x IRQ[1]:0x%x IRQ[2]:0x%x",
+		evt_payload->irq_reg_val[0], evt_payload->irq_reg_val[1],
+		evt_payload->irq_reg_val[2]);
+
+	CAM_INFO(CAM_ISP, "BUS IRQ[0]:0x%x BUS IRQ[1]:0x%x",
+		evt_payload->bus_irq_val[0], evt_payload->bus_irq_val[1]);
+
+	CAM_INFO(CAM_ISP, "ccif violation:0x%x image size:0x%x overflow:0x%x",
+		evt_payload->ccif_violation_status,
+		evt_payload->image_size_violation_status,
+		evt_payload->overflow_status);
+
+	CAM_INFO(CAM_ISP, "Top debug [0]:0x%x [1]:0x%x [2]:0x%x [3]:0x%x",
+		val_0, val_1, val_2, val_3);
+
+	/* read the CAMNOC status only after the debug values are logged,
+	 * since this overwrites val_0
+	 */
+	cam_cpas_reg_read(soc_private->cpas_handle,
+		CAM_CPAS_REG_CAMNOC, 0x20, true, &val_0);
+	CAM_INFO(CAM_ISP, "tfe_niu_MaxWr_Low offset 0x20 val 0x%x",
+		val_0);
+
+	val_0 = cam_io_r(mem_base  +
+		top_priv->common_data.common_reg->perf_pixel_count);
+
+	val_1 = cam_io_r(mem_base  +
+		top_priv->common_data.common_reg->perf_line_count);
+
+	val_2 = cam_io_r(mem_base  +
+		top_priv->common_data.common_reg->perf_stall_count);
+
+	val_3 = cam_io_r(mem_base  +
+		top_priv->common_data.common_reg->perf_always_count);
+
+	CAM_INFO(CAM_ISP,
+		"Top perf cnt pix:0x%x line:0x%x stall:0x%x always:0x%x",
+		val_0, val_1, val_2, val_3);
+
+	for (i = 0; i < CAM_TFE_TOP_IN_PORT_MAX; i++) {
+		if ((top_priv->in_rsrc[i].res_state !=
+			CAM_ISP_RESOURCE_STATE_STREAMING))
+			continue;
+
+		if (top_priv->in_rsrc[i].res_id == CAM_ISP_HW_TFE_IN_CAMIF) {
+			camif_data = (struct cam_tfe_camif_data  *)
+				top_priv->in_rsrc[i].res_priv;
+			val_0 = cam_io_r(mem_base  +
+				camif_data->camif_reg->debug_0);
+			val_1 = cam_io_r(mem_base  +
+				camif_data->camif_reg->debug_1);
+			CAM_INFO(CAM_ISP,
+				"camif debug1:0x%x Height:0x%x, width:0x%x",
+				val_1,
+				((val_0 >> 16) & 0x1FFF),
+				(val_0 & 0x1FFF));
+		} else if ((top_priv->in_rsrc[i].res_id >=
+			CAM_ISP_HW_TFE_IN_RDI0) &&
+			(top_priv->in_rsrc[i].res_id <=
+			CAM_ISP_HW_TFE_IN_RDI2)) {
+			rdi_data = (struct cam_tfe_rdi_data  *)
+				top_priv->in_rsrc[i].res_priv;
+			val_0 = cam_io_r(mem_base  +
+				rdi_data->rdi_reg->rdi_debug_0);
+			val_1 = cam_io_r(mem_base  +
+				rdi_data->rdi_reg->rdi_debug_1);
+			CAM_INFO(CAM_ISP,
+				"RDI res id:%d debug1:0x%x Height:0x%x, width:0x%x",
+				top_priv->in_rsrc[i].res_id,
+				val_1, ((val_0 >> 16) & 0x1FFF),
+				(val_0 & 0x1FFF));
+		}
+	}
+
+	/* Check the overflow errors */
+	if (evt_payload->irq_reg_val[0] & hw_info->error_irq_mask[0]) {
+		if (evt_payload->irq_reg_val[0] & BIT(8))
+			CAM_INFO(CAM_ISP, "PP_FRAME_DROP");
+
+		if (evt_payload->irq_reg_val[0] & BIT(9))
+			CAM_INFO(CAM_ISP, "RDI0_FRAME_DROP");
+
+		if (evt_payload->irq_reg_val[0] & BIT(10))
+			CAM_INFO(CAM_ISP, "RDI1_FRAME_DROP");
+
+		if (evt_payload->irq_reg_val[0] & BIT(11))
+			CAM_INFO(CAM_ISP, "RDI2_FRAME_DROP");
+
+		if (evt_payload->irq_reg_val[0] & BIT(16))
+			CAM_INFO(CAM_ISP, "PP_OVERFLOW");
+
+		if (evt_payload->irq_reg_val[0] & BIT(17))
+			CAM_INFO(CAM_ISP, "RDI0_OVERFLOW");
+
+		if (evt_payload->irq_reg_val[0] & BIT(18))
+			CAM_INFO(CAM_ISP, "RDI1_OVERFLOW");
+
+		if (evt_payload->irq_reg_val[0] & BIT(19))
+			CAM_INFO(CAM_ISP, "RDI2_OVERFLOW");
+	}
+
+	/* Check the violation errors */
+	if (evt_payload->irq_reg_val[2] & hw_info->error_irq_mask[2]) {
+		if (evt_payload->irq_reg_val[2] & BIT(0))
+			CAM_INFO(CAM_ISP, "PP_CAMIF_VIOLATION");
+
+		if (evt_payload->irq_reg_val[2] & BIT(1))
+			CAM_INFO(CAM_ISP, "PP_VIOLATION");
+
+		if (evt_payload->irq_reg_val[2] & BIT(2))
+			CAM_INFO(CAM_ISP, "RDI0_CAMIF_VIOLATION");
+
+		if (evt_payload->irq_reg_val[2] & BIT(3))
+			CAM_INFO(CAM_ISP, "RDI1_CAMIF_VIOLATION");
+
+		if (evt_payload->irq_reg_val[2] & BIT(4))
+			CAM_INFO(CAM_ISP, "RDI2_CAMIF_VIOLATION");
+
+		if (evt_payload->irq_reg_val[2] & BIT(5))
+			CAM_INFO(CAM_ISP, "DIAG_VIOLATION");
+
+		val_0 = cam_io_r(mem_base  +
+		top_priv->common_data.common_reg->violation_status);
+		CAM_INFO(CAM_ISP, "TOP Violation status:0x%x", val_0);
+	}
+
+	/* Check the bus errors */
+	if (evt_payload->bus_irq_val[0] & BIT(29))
+		CAM_INFO(CAM_ISP, "CONS_VIOLATION");
+
+	if (evt_payload->bus_irq_val[0] & BIT(30))
+		CAM_INFO(CAM_ISP, "VIOLATION val 0x%x",
+		evt_payload->ccif_violation_status);
+
+	if (evt_payload->bus_irq_val[0] & BIT(31))
+		CAM_INFO(CAM_ISP, "IMAGE_SIZE_VIOLATION val :0x%x",
+		evt_payload->image_size_violation_status);
+
+	/* clear the bus irq overflow status */
+	if (evt_payload->overflow_status)
+		cam_io_w_mb(1, mem_base  +
+		core_info->tfe_hw_info->bus_overflow_clear_cmd);
+}
+
+static int cam_tfe_error_irq_bottom_half(
+	struct cam_tfe_hw_core_info          *core_info,
+	struct cam_tfe_top_priv              *top_priv,
+	struct cam_tfe_irq_evt_payload       *evt_payload,
+	cam_hw_mgr_event_cb_func              event_cb,
+	void                                 *event_cb_priv)
+{
+	struct cam_isp_hw_event_info         evt_info;
+	struct cam_tfe_hw_info              *hw_info;
+
+	hw_info = core_info->tfe_hw_info;
+	evt_info.hw_idx = core_info->core_index;
+	evt_info.res_type = CAM_ISP_RESOURCE_TFE_IN;
+
+	if (evt_payload->irq_reg_val[0] & hw_info->error_irq_mask[0]) {
+		CAM_ERR(CAM_ISP, "TFE:%d Overflow error irq_status[0]:%x",
+			core_info->core_index,
+			evt_payload->irq_reg_val[0]);
+
+		evt_info.err_type = CAM_TFE_IRQ_STATUS_OVERFLOW;
+		cam_tfe_log_error_irq_status(core_info, top_priv, evt_payload);
+		if (event_cb)
+			event_cb(event_cb_priv,
+				CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info);
+		else
+			CAM_ERR(CAM_ISP, "TFE:%d invalid eventcb:",
+				core_info->core_index);
+	}
+
+	if (evt_payload->irq_reg_val[2] & hw_info->error_irq_mask[2]) {
+		CAM_ERR(CAM_ISP, "TFE:%d Violation error irq_status[2]:%x",
+			core_info->core_index, evt_payload->irq_reg_val[2]);
+
+		evt_info.err_type = CAM_TFE_IRQ_STATUS_VIOLATION;
+		cam_tfe_log_error_irq_status(core_info, top_priv, evt_payload);
+
+		if (event_cb)
+			event_cb(event_cb_priv,
+				CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info);
+		else
+			CAM_ERR(CAM_ISP, "TFE:%d invalid eventcb:",
+				core_info->core_index);
+	}
+
+	return 0;
+}
+
+static int cam_tfe_rdi_irq_bottom_half(
+	struct cam_isp_resource_node         *rdi_node,
+	bool                                  epoch_process,
+	struct cam_tfe_irq_evt_payload       *evt_payload)
+{
+	struct cam_tfe_rdi_data             *rdi_priv;
+	struct cam_isp_hw_event_info         evt_info;
+	struct cam_hw_info                  *hw_info;
+
+	rdi_priv = (struct cam_tfe_rdi_data    *)rdi_node->res_priv;
+	hw_info = rdi_node->hw_intf->hw_priv;
+
+	evt_info.hw_idx   = rdi_node->hw_intf->hw_idx;
+	evt_info.res_id   = rdi_node->res_id;
+	evt_info.res_type = rdi_node->res_type;
+
+	if ((!epoch_process) && (evt_payload->irq_reg_val[1] &
+		rdi_priv->reg_data->eof_irq_mask)) {
+		CAM_DBG(CAM_ISP, "Received EOF");
+		if (rdi_priv->event_cb)
+			rdi_priv->event_cb(rdi_priv->priv,
+				CAM_ISP_HW_EVENT_EOF, (void *)&evt_info);
+	}
+
+	if ((!epoch_process) && (evt_payload->irq_reg_val[1] &
+		rdi_priv->reg_data->sof_irq_mask)) {
+		CAM_DBG(CAM_ISP, "Received SOF");
+		if (rdi_priv->event_cb)
+			rdi_priv->event_cb(rdi_priv->priv,
+				CAM_ISP_HW_EVENT_SOF, (void *)&evt_info);
+	}
+
+	if (epoch_process && (evt_payload->irq_reg_val[1] &
+		rdi_priv->reg_data->epoch0_irq_mask)) {
+		CAM_DBG(CAM_ISP, "Received EPOCH0");
+
+		if (rdi_priv->event_cb)
+			rdi_priv->event_cb(rdi_priv->priv,
+				CAM_ISP_HW_EVENT_EPOCH, (void *)&evt_info);
+	}
+
+	return 0;
+}
+
+static int cam_tfe_camif_irq_bottom_half(
+	struct cam_isp_resource_node         *camif_node,
+	bool                                  epoch_process,
+	struct cam_tfe_irq_evt_payload       *evt_payload)
+{
+	struct cam_tfe_camif_data            *camif_priv;
+	struct cam_isp_hw_event_info          evt_info;
+	struct cam_hw_info                   *hw_info;
+	uint32_t                              val;
+
+	camif_priv = camif_node->res_priv;
+	hw_info = camif_node->hw_intf->hw_priv;
+	evt_info.hw_idx   = camif_node->hw_intf->hw_idx;
+	evt_info.res_id   = camif_node->res_id;
+	evt_info.res_type = camif_node->res_type;
+
+	if ((!epoch_process) && (evt_payload->irq_reg_val[1] &
+		camif_priv->reg_data->eof_irq_mask)) {
+		CAM_DBG(CAM_ISP, "Received EOF");
+
+		if (camif_priv->event_cb)
+			camif_priv->event_cb(camif_priv->priv,
+				CAM_ISP_HW_EVENT_EOF, (void *)&evt_info);
+	}
+
+	if ((!epoch_process) && (evt_payload->irq_reg_val[1] &
+		camif_priv->reg_data->sof_irq_mask)) {
+		if ((camif_priv->enable_sof_irq_debug) &&
+			(camif_priv->irq_debug_cnt <=
+			CAM_TFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX)) {
+			CAM_INFO_RATE_LIMIT(CAM_ISP, "Received SOF");
+
+			camif_priv->irq_debug_cnt++;
+			if (camif_priv->irq_debug_cnt ==
+				CAM_TFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX) {
+				camif_priv->enable_sof_irq_debug =
+					false;
+				camif_priv->irq_debug_cnt = 0;
+			}
+		} else
+			CAM_DBG(CAM_ISP, "Received SOF");
+
+		if (camif_priv->event_cb)
+			camif_priv->event_cb(camif_priv->priv,
+				CAM_ISP_HW_EVENT_SOF, (void *)&evt_info);
+	}
+
+	if (epoch_process  && (evt_payload->irq_reg_val[1] &
+		camif_priv->reg_data->epoch0_irq_mask)) {
+		CAM_DBG(CAM_ISP, "Received EPOCH");
+
+		if (camif_priv->event_cb)
+			camif_priv->event_cb(camif_priv->priv,
+				CAM_ISP_HW_EVENT_EPOCH, (void *)&evt_info);
+	}
+
+	if (camif_priv->camif_debug & CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) {
+		val = cam_io_r(camif_priv->mem_base +
+			camif_priv->common_reg->diag_sensor_status_0);
+		CAM_DBG(CAM_ISP, "TFE_DIAG_SENSOR_STATUS: 0x%x", val);
+	}
+
+	return 0;
+}
+
+static int cam_tfe_irq_bottom_half(void *handler_priv,
+	void *evt_payload_priv)
+{
+	struct cam_tfe_hw_core_info         *core_info;
+	struct cam_tfe_top_priv             *top_priv;
+	struct cam_tfe_irq_evt_payload      *evt_payload;
+	struct cam_tfe_camif_data           *camif_priv;
+	struct cam_tfe_rdi_data             *rdi_priv;
+	cam_hw_mgr_event_cb_func             event_cb = NULL;
+	void                                *event_cb_priv = NULL;
+	uint32_t i;
+
+	if (!handler_priv || !evt_payload_priv) {
+		CAM_ERR(CAM_ISP,
+			"Invalid params handle_priv:%pK, evt_payload_priv:%pK",
+			handler_priv, evt_payload_priv);
+		return 0;
+	}
+
+	core_info = (struct cam_tfe_hw_core_info *)handler_priv;
+	top_priv = (struct cam_tfe_top_priv  *)core_info->top_priv;
+	evt_payload = evt_payload_priv;
+
+	/* process sof and eof */
+	for (i = 0; i < CAM_TFE_TOP_IN_PORT_MAX; i++) {
+		if ((top_priv->in_rsrc[i].res_id ==
+			CAM_ISP_HW_TFE_IN_CAMIF) &&
+			(top_priv->in_rsrc[i].res_state ==
+			CAM_ISP_RESOURCE_STATE_STREAMING)) {
+			camif_priv = (struct cam_tfe_camif_data  *)
+				top_priv->in_rsrc[i].res_priv;
+			event_cb = camif_priv->event_cb;
+			event_cb_priv = camif_priv->priv;
+
+			if (camif_priv->reg_data->subscribe_irq_mask[1] &
+				evt_payload->irq_reg_val[1])
+				cam_tfe_camif_irq_bottom_half(
+					&top_priv->in_rsrc[i], false,
+					evt_payload);
+
+		} else if ((top_priv->in_rsrc[i].res_id >=
+			CAM_ISP_HW_TFE_IN_RDI0) &&
+			(top_priv->in_rsrc[i].res_id <=
+			CAM_ISP_HW_TFE_IN_RDI2) &&
+			(top_priv->in_rsrc[i].res_state ==
+			CAM_ISP_RESOURCE_STATE_STREAMING)) {
+			rdi_priv = (struct cam_tfe_rdi_data *)
+				top_priv->in_rsrc[i].res_priv;
+			event_cb = rdi_priv->event_cb;
+			event_cb_priv = rdi_priv->priv;
+
+			if (rdi_priv->reg_data->subscribe_irq_mask[1] &
+				evt_payload->irq_reg_val[1])
+				cam_tfe_rdi_irq_bottom_half(
+					&top_priv->in_rsrc[i], false,
+					evt_payload);
+		}
+	}
+
+	/* process the irq errors */
+	cam_tfe_error_irq_bottom_half(core_info, top_priv, evt_payload,
+		event_cb, event_cb_priv);
+
+	/* process the reg update in the bus */
+	if (evt_payload->irq_reg_val[0] &
+		core_info->tfe_hw_info->bus_reg_irq_mask[0]) {
+		core_info->tfe_bus->bottom_half_handler(
+			core_info->tfe_bus->bus_priv, true, evt_payload);
+	}
+
+	/* process the epoch */
+	for (i = 0; i < CAM_TFE_TOP_IN_PORT_MAX; i++) {
+		if ((top_priv->in_rsrc[i].res_id ==
+			CAM_ISP_HW_TFE_IN_CAMIF) &&
+			(top_priv->in_rsrc[i].res_state ==
+			CAM_ISP_RESOURCE_STATE_STREAMING)) {
+			camif_priv = (struct cam_tfe_camif_data  *)
+				top_priv->in_rsrc[i].res_priv;
+			if (camif_priv->reg_data->subscribe_irq_mask[1] &
+				evt_payload->irq_reg_val[1])
+				cam_tfe_camif_irq_bottom_half(
+					&top_priv->in_rsrc[i], true,
+					evt_payload);
+		} else if ((top_priv->in_rsrc[i].res_id >=
+			CAM_ISP_HW_TFE_IN_RDI0) &&
+			(top_priv->in_rsrc[i].res_id <=
+			CAM_ISP_HW_TFE_IN_RDI2) &&
+			(top_priv->in_rsrc[i].res_state ==
+			CAM_ISP_RESOURCE_STATE_STREAMING)) {
+			rdi_priv = (struct cam_tfe_rdi_data *)
+				top_priv->in_rsrc[i].res_priv;
+			if (rdi_priv->reg_data->subscribe_irq_mask[1] &
+				evt_payload->irq_reg_val[1])
+				cam_tfe_rdi_irq_bottom_half(
+					&top_priv->in_rsrc[i], true,
+					evt_payload);
+		}
+	}
+
+	/* process the buf done */
+	if (evt_payload->irq_reg_val[0] &
+		core_info->tfe_hw_info->bus_reg_irq_mask[0]) {
+		core_info->tfe_bus->bottom_half_handler(
+			core_info->tfe_bus->bus_priv, false, evt_payload);
+	}
+
+	cam_tfe_put_evt_payload(core_info, &evt_payload);
+
+	return 0;
+}
+
+static int cam_tfe_irq_err_top_half(
+	struct cam_tfe_hw_core_info       *core_info,
+	void __iomem                      *mem_base,
+	uint32_t                          *irq_status)
+{
+	uint32_t i;
+
+	if (irq_status[0] &  core_info->tfe_hw_info->error_irq_mask[0] ||
+		irq_status[2] &  core_info->tfe_hw_info->error_irq_mask[2]) {
+		CAM_ERR(CAM_ISP,
+			"Encountered Error: tfe:%d: Irq_status0=0x%x status2=0x%x",
+			core_info->core_index, irq_status[0],
+			irq_status[2]);
+		for (i = 0; i < CAM_TFE_TOP_IRQ_REG_NUM; i++)
+			cam_io_w(0, mem_base +
+				core_info->tfe_hw_info->top_irq_mask[i]);
+
+		cam_io_w_mb(core_info->tfe_hw_info->global_clear_bitmask,
+			mem_base + core_info->tfe_hw_info->top_irq_cmd);
+	}
+
+	return 0;
+}
+
+irqreturn_t cam_tfe_irq(int irq_num, void *data)
+{
+	struct cam_hw_info             *tfe_hw;
+	struct cam_tfe_hw_core_info    *core_info;
+	struct cam_tfe_top_priv        *top_priv;
+	void __iomem                   *mem_base;
+	struct cam_tfe_irq_evt_payload  *evt_payload;
+	uint32_t  top_irq_status[CAM_TFE_TOP_IRQ_REG_NUM] = {0};
+	uint32_t   bus_irq_status[CAM_TFE_BUS_MAX_IRQ_REGISTERS] = {0};
+	uint32_t  i,  ccif_violation = 0, overflow_status = 0;
+	uint32_t    image_sz_violation = 0;
+	void        *bh_cmd = NULL;
+	int rc = -EINVAL;
+
+	if (!data)
+		return IRQ_NONE;
+
+	tfe_hw = (struct cam_hw_info *)data;
+	core_info = (struct cam_tfe_hw_core_info *)tfe_hw->core_info;
+	top_priv = (struct cam_tfe_top_priv  *)core_info->top_priv;
+	mem_base = top_priv->common_data.soc_info->reg_map[0].mem_base;
+
+	if (tfe_hw->hw_state == CAM_HW_STATE_POWER_DOWN) {
+		CAM_ERR(CAM_ISP, "TFE:%d hw is not powered up",
+			core_info->core_index);
+		return IRQ_HANDLED;
+	}
+
+	for (i = 0; i < CAM_TFE_TOP_IRQ_REG_NUM; i++)
+		top_irq_status[i] = cam_io_r(mem_base  +
+		core_info->tfe_hw_info->top_irq_status[i]);
+
+	for (i = 0; i < CAM_TFE_TOP_IRQ_REG_NUM; i++)
+		cam_io_w(top_irq_status[i], mem_base +
+			core_info->tfe_hw_info->top_irq_clear[i]);
+
+	CAM_DBG(CAM_ISP, "TFE:%d IRQ status_0:0x%x status_1:0x%x status_2:0x%x",
+		core_info->core_index, top_irq_status[0],
+		top_irq_status[1], top_irq_status[2]);
+
+	if (top_irq_status[0] & core_info->tfe_hw_info->bus_reg_irq_mask[0]) {
+		for (i = 0; i < CAM_TFE_BUS_MAX_IRQ_REGISTERS; i++)
+			bus_irq_status[i] = cam_io_r(mem_base  +
+				core_info->tfe_hw_info->bus_irq_status[i]);
+
+		for (i = 0; i < CAM_TFE_BUS_MAX_IRQ_REGISTERS; i++)
+			cam_io_w(bus_irq_status[i], mem_base +
+				core_info->tfe_hw_info->bus_irq_clear[i]);
+
+		ccif_violation =  cam_io_r(mem_base  +
+			core_info->tfe_hw_info->bus_violation_reg);
+		overflow_status = cam_io_r(mem_base  +
+			core_info->tfe_hw_info->bus_overflow_reg);
+		image_sz_violation = cam_io_r(mem_base  +
+			core_info->tfe_hw_info->bus_image_size_vilation_reg);
+
+		cam_io_w(core_info->tfe_hw_info->global_clear_bitmask,
+			mem_base + core_info->tfe_hw_info->bus_irq_cmd);
+
+		CAM_DBG(CAM_ISP, "TFE:%d BUS IRQ status_0:0x%x status_1:0x%x",
+			core_info->core_index, bus_irq_status[0],
+			bus_irq_status[1]);
+	}
+
+	cam_io_w_mb(core_info->tfe_hw_info->global_clear_bitmask,
+		mem_base + core_info->tfe_hw_info->top_irq_cmd);
+
+	/* check reset */
+	if ((top_irq_status[0] & core_info->tfe_hw_info->reset_irq_mask[0]) ||
+		(top_irq_status[1] &
+			core_info->tfe_hw_info->reset_irq_mask[1]) ||
+		(top_irq_status[2] &
+			core_info->tfe_hw_info->reset_irq_mask[2])) {
+		/* Reset ack */
+		complete(&core_info->reset_complete);
+		return IRQ_HANDLED;
+	}
+
+	/* Check the irq errors  */
+	cam_tfe_irq_err_top_half(core_info, mem_base, top_irq_status);
+
+	rc  = cam_tfe_get_evt_payload(core_info, &evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"No free event payload in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ status0=0x%x status1=0x%x",
+			top_irq_status[0], top_irq_status[1]);
+		goto end;
+	}
+
+	cam_tfe_get_timestamp(&evt_payload->ts);
+
+	for (i = 0; i < CAM_TFE_TOP_IRQ_REG_NUM; i++)
+		evt_payload->irq_reg_val[i] = top_irq_status[i];
+
+	for (i = 0; i < CAM_TFE_BUS_MAX_IRQ_REGISTERS; i++)
+		evt_payload->bus_irq_val[i] = bus_irq_status[i];
+
+	evt_payload->ccif_violation_status = ccif_violation;
+	evt_payload->overflow_status = overflow_status;
+	evt_payload->image_size_violation_status = image_sz_violation;
+
+	evt_payload->core_index = core_info->core_index;
+	evt_payload->core_info  = core_info;
+
+	rc = tasklet_bh_api.get_bh_payload_func(
+		top_priv->tasklet_info, &bh_cmd);
+	if (rc || !bh_cmd) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"No payload, IRQ handling frozen");
+		cam_tfe_put_evt_payload(core_info, &evt_payload);
+		goto end;
+	}
+
+	tasklet_bh_api.bottom_half_enqueue_func(
+		top_priv->tasklet_info,
+		bh_cmd,
+		core_info,
+		evt_payload,
+		cam_tfe_irq_bottom_half);
+
+end:
+	return IRQ_HANDLED;
+}
+
+static int cam_tfe_top_set_hw_clk_rate(
+	struct cam_tfe_top_priv *top_priv)
+{
+	struct cam_hw_soc_info        *soc_info = NULL;
+	int                            i, rc = 0;
+	unsigned long                  max_clk_rate = 0;
+
+	soc_info = top_priv->common_data.soc_info;
+
+	for (i = 0; i < CAM_TFE_TOP_IN_PORT_MAX; i++) {
+		if (top_priv->req_clk_rate[i] > max_clk_rate)
+			max_clk_rate = top_priv->req_clk_rate[i];
+	}
+	if (max_clk_rate == top_priv->hw_clk_rate)
+		return 0;
+
+	CAM_DBG(CAM_ISP, "TFE:%d Clock name=%s idx=%d clk=%lu",
+		top_priv->common_data.soc_info->index,
+		soc_info->clk_name[soc_info->src_clk_idx],
+		soc_info->src_clk_idx, max_clk_rate);
+
+	rc = cam_soc_util_set_src_clk_rate(soc_info, max_clk_rate);
+
+	if (!rc)
+		top_priv->hw_clk_rate = max_clk_rate;
+	else
+		CAM_ERR(CAM_ISP, "TFE:%d set src clock rate:%lu failed, rc=%d",
+			top_priv->common_data.soc_info->index, max_clk_rate, rc);
+
+	return rc;
+}
+
+static struct cam_axi_vote *cam_tfe_top_delay_bw_reduction(
+	struct cam_tfe_top_priv *top_priv,
+	uint64_t *to_be_applied_bw)
+{
+	uint32_t i, j;
+	int vote_idx = -1;
+	uint64_t max_bw = 0;
+	uint64_t total_bw;
+	struct cam_axi_vote *curr_l_vote;
+
+	for (i = 0; i < (CAM_TFE_TOP_IN_PORT_MAX *
+		CAM_TFE_DELAY_BW_REDUCTION_NUM_FRAMES); i++) {
+		total_bw = 0;
+		curr_l_vote = &top_priv->last_vote[i];
+		for (j = 0; j < curr_l_vote->num_paths; j++) {
+			if (total_bw >
+				(U64_MAX -
+				curr_l_vote->axi_path[j].camnoc_bw)) {
+				CAM_ERR(CAM_ISP, "Overflow at idx: %d", j);
+				return NULL;
+			}
+
+			total_bw += curr_l_vote->axi_path[j].camnoc_bw;
+		}
+
+		if (total_bw > max_bw) {
+			vote_idx = i;
+			max_bw = total_bw;
+		}
+	}
+
+	if (vote_idx < 0)
+		return NULL;
+
+	*to_be_applied_bw = max_bw;
+
+	return &top_priv->last_vote[vote_idx];
+}
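
cam_tfe_top_delay_bw_reduction() is the core of the delayed-reduction policy used by cam_tfe_top_set_axi_bw_vote() below: every per-frame request is recorded in a circular history, and the vote actually applied is the largest total in that window, so increases take effect immediately while decreases only land once they persist for the full window. A minimal sketch of the policy with scalar bandwidth values:

#include <stdint.h>
#include <stdio.h>

#define HISTORY 3	/* frames over which a BW drop is delayed */

static uint64_t history[HISTORY];
static uint32_t counter;

/* record this frame's request and return the vote to apply: the max of
 * the last HISTORY requests, mirroring what the driver computes over
 * cam_axi_vote path totals
 */
static uint64_t vote_with_delayed_reduction(uint64_t requested)
{
	uint64_t max = 0;
	uint32_t i;

	history[counter] = requested;
	counter = (counter + 1) % HISTORY;
	for (i = 0; i < HISTORY; i++)
		if (history[i] > max)
			max = history[i];
	return max;
}

int main(void)
{
	uint64_t frames[] = { 100, 400, 150, 150, 150 };
	uint32_t i;

	/* the drop from 400 only lands at frame 4, once 400 has aged
	 * out of the 3-frame history
	 */
	for (i = 0; i < 5; i++)
		printf("frame %u: request %llu -> apply %llu\n", i,
			(unsigned long long)frames[i],
			(unsigned long long)
			vote_with_delayed_reduction(frames[i]));
	return 0;
}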
+
+static int cam_tfe_top_set_axi_bw_vote(
+	struct cam_tfe_top_priv *top_priv,
+	bool start_stop)
+{
+	struct cam_axi_vote agg_vote = {0};
+	struct cam_axi_vote *to_be_applied_axi_vote = NULL;
+	struct cam_hw_soc_info   *soc_info = top_priv->common_data.soc_info;
+	struct cam_tfe_soc_private *soc_private = soc_info->soc_private;
+	int rc = 0;
+	uint32_t i;
+	uint32_t num_paths = 0;
+	uint64_t total_bw_new_vote = 0;
+	bool bw_unchanged = true;
+	bool apply_bw_update = false;
+
+	if (!soc_private) {
+		CAM_ERR(CAM_ISP, "Error soc_private NULL");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < CAM_TFE_TOP_IN_PORT_MAX; i++) {
+		if (top_priv->axi_vote_control[i] ==
+			CAM_TFE_BW_CONTROL_INCLUDE) {
+			if (num_paths +
+				top_priv->req_axi_vote[i].num_paths >
+				CAM_CPAS_MAX_PATHS_PER_CLIENT) {
+				CAM_ERR(CAM_ISP,
+					"Required paths(%d) more than max(%d)",
+					num_paths +
+					top_priv->req_axi_vote[i].num_paths,
+					CAM_CPAS_MAX_PATHS_PER_CLIENT);
+				return -EINVAL;
+			}
+
+			memcpy(&agg_vote.axi_path[num_paths],
+				&top_priv->req_axi_vote[i].axi_path[0],
+				top_priv->req_axi_vote[i].num_paths *
+				sizeof(
+				struct cam_axi_per_path_bw_vote));
+			num_paths += top_priv->req_axi_vote[i].num_paths;
+		}
+	}
+
+	agg_vote.num_paths = num_paths;
+
+	for (i = 0; i < agg_vote.num_paths; i++) {
+		CAM_DBG(CAM_PERF,
+			"tfe[%d] : New BW Vote : counter[%d] [%s][%s] [%llu %llu %llu]",
+			top_priv->common_data.hw_intf->hw_idx,
+			top_priv->last_counter,
+			cam_cpas_axi_util_path_type_to_string(
+			agg_vote.axi_path[i].path_data_type),
+			cam_cpas_axi_util_trans_type_to_string(
+			agg_vote.axi_path[i].transac_type),
+			agg_vote.axi_path[i].camnoc_bw,
+			agg_vote.axi_path[i].mnoc_ab_bw,
+			agg_vote.axi_path[i].mnoc_ib_bw);
+
+		total_bw_new_vote += agg_vote.axi_path[i].camnoc_bw;
+	}
+
+	memcpy(&top_priv->last_vote[top_priv->last_counter], &agg_vote,
+		sizeof(struct cam_axi_vote));
+	top_priv->last_counter = (top_priv->last_counter + 1) %
+		(CAM_TFE_TOP_IN_PORT_MAX *
+		CAM_TFE_DELAY_BW_REDUCTION_NUM_FRAMES);
+
+	if ((agg_vote.num_paths != top_priv->applied_axi_vote.num_paths) ||
+		(total_bw_new_vote != top_priv->total_bw_applied))
+		bw_unchanged = false;
+
+	CAM_DBG(CAM_PERF,
+		"tfe[%d] : applied_total=%lld, new_total=%lld unchanged=%d, start_stop=%d",
+		top_priv->common_data.hw_intf->hw_idx,
+		top_priv->total_bw_applied, total_bw_new_vote,
+		bw_unchanged, start_stop);
+
+	if (bw_unchanged) {
+		CAM_DBG(CAM_ISP, "BW config unchanged");
+		return 0;
+	}
+
+	if (start_stop) {
+		/* need to vote current request immediately */
+		to_be_applied_axi_vote = &agg_vote;
+		/* Reset everything, we can start afresh */
+		memset(top_priv->last_vote, 0x0, sizeof(struct cam_axi_vote) *
+			(CAM_TFE_TOP_IN_PORT_MAX *
+			CAM_TFE_DELAY_BW_REDUCTION_NUM_FRAMES));
+		top_priv->last_counter = 0;
+		top_priv->last_vote[top_priv->last_counter] = agg_vote;
+		top_priv->last_counter = (top_priv->last_counter + 1) %
+			(CAM_TFE_TOP_IN_PORT_MAX *
+			CAM_TFE_DELAY_BW_REDUCTION_NUM_FRAMES);
+	} else {
+		/*
+		 * Find max bw request in last few frames. This will be the bw
+		 * that we want to vote to CPAS now.
+		 */
+		to_be_applied_axi_vote =
+			cam_tfe_top_delay_bw_reduction(top_priv,
+			&total_bw_new_vote);
+		if (!to_be_applied_axi_vote) {
+			CAM_ERR(CAM_ISP, "to_be_applied_axi_vote is NULL");
+			return -EINVAL;
+		}
+	}
+
+	for (i = 0; i < to_be_applied_axi_vote->num_paths; i++) {
+		CAM_DBG(CAM_PERF,
+			"tfe[%d] : Apply BW Vote : [%s][%s] [%llu %llu %llu]",
+			top_priv->common_data.hw_intf->hw_idx,
+			cam_cpas_axi_util_path_type_to_string(
+			to_be_applied_axi_vote->axi_path[i].path_data_type),
+			cam_cpas_axi_util_trans_type_to_string(
+			to_be_applied_axi_vote->axi_path[i].transac_type),
+			to_be_applied_axi_vote->axi_path[i].camnoc_bw,
+			to_be_applied_axi_vote->axi_path[i].mnoc_ab_bw,
+			to_be_applied_axi_vote->axi_path[i].mnoc_ib_bw);
+	}
+
+	if ((to_be_applied_axi_vote->num_paths !=
+		top_priv->applied_axi_vote.num_paths) ||
+		(total_bw_new_vote != top_priv->total_bw_applied))
+		apply_bw_update = true;
+
+	CAM_DBG(CAM_PERF,
+		"tfe[%d] : Delayed update: applied_total=%lld, new_total=%lld apply_bw_update=%d, start_stop=%d",
+		top_priv->common_data.hw_intf->hw_idx,
+		top_priv->total_bw_applied, total_bw_new_vote,
+		apply_bw_update, start_stop);
+
+	if (apply_bw_update) {
+		rc = cam_cpas_update_axi_vote(soc_private->cpas_handle,
+			to_be_applied_axi_vote);
+		if (!rc) {
+			memcpy(&top_priv->applied_axi_vote,
+				to_be_applied_axi_vote,
+				sizeof(struct cam_axi_vote));
+			top_priv->total_bw_applied = total_bw_new_vote;
+		} else {
+			CAM_ERR(CAM_ISP, "BW request failed, rc=%d", rc);
+		}
+	}
+
+	return rc;
+}
+
+static int cam_tfe_top_get_base(struct cam_tfe_top_priv *top_priv,
+	void *cmd_args, uint32_t arg_size)
+{
+	uint32_t                          size = 0;
+	uint32_t                          mem_base = 0;
+	struct cam_isp_hw_get_cmd_update *cdm_args  = cmd_args;
+	struct cam_cdm_utils_ops         *cdm_util_ops = NULL;
+
+	if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) {
+		CAM_ERR(CAM_ISP, "Error Invalid cmd size");
+		return -EINVAL;
+	}
+
+	if (!cdm_args || !cdm_args->res || !top_priv ||
+		!top_priv->common_data.soc_info) {
+		CAM_ERR(CAM_ISP, "Error Invalid args");
+		return -EINVAL;
+	}
+
+	cdm_util_ops =
+		(struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops;
+
+	if (!cdm_util_ops) {
+		CAM_ERR(CAM_ISP, "Invalid CDM ops");
+		return -EINVAL;
+	}
+
+	size = cdm_util_ops->cdm_required_size_changebase();
+	/* since cdm returns dwords, we need to convert it into bytes */
+	if ((size * 4) > cdm_args->cmd.size) {
+		CAM_ERR(CAM_ISP, "buf size:%d is not sufficient, expected: %d",
+			cdm_args->cmd.size, size);
+		return -EINVAL;
+	}
+
+	mem_base = CAM_SOC_GET_REG_MAP_CAM_BASE(
+		top_priv->common_data.soc_info, TFE_CORE_BASE_IDX);
+
+	cdm_util_ops->cdm_write_changebase(
+		cdm_args->cmd.cmd_buf_addr, mem_base);
+	cdm_args->cmd.used_bytes = (size * 4);
+
+	return 0;
+}
+
+static int cam_tfe_top_get_reg_update(
+	struct cam_tfe_top_priv *top_priv,
+	void *cmd_args, uint32_t arg_size)
+{
+	uint32_t                          size = 0;
+	uint32_t                          reg_val_pair[2];
+	struct cam_isp_hw_get_cmd_update *cdm_args = cmd_args;
+	struct cam_cdm_utils_ops         *cdm_util_ops = NULL;
+	struct cam_tfe_camif_data        *camif_rsrc_data = NULL;
+	struct cam_tfe_rdi_data          *rdi_rsrc_data = NULL;
+	struct cam_isp_resource_node     *in_res;
+
+	if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) {
+		CAM_ERR(CAM_ISP, "Invalid cmd size");
+		return -EINVAL;
+	}
+
+	if (!cdm_args || !cdm_args->res) {
+		CAM_ERR(CAM_ISP, "Invalid args");
+		return -EINVAL;
+	}
+
+	cdm_util_ops = (struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops;
+
+	if (!cdm_util_ops) {
+		CAM_ERR(CAM_ISP, "Invalid CDM ops");
+		return -EINVAL;
+	}
+
+	in_res = cdm_args->res;
+	size = cdm_util_ops->cdm_required_size_reg_random(1);
+	/* since cdm returns dwords, we need to convert it into bytes */
+	if ((size * 4) > cdm_args->cmd.size) {
+		CAM_ERR(CAM_ISP, "buf size:%d is not sufficient, expected: %d",
+			cdm_args->cmd.size, size);
+		return -EINVAL;
+	}
+
+	if (in_res->res_id == CAM_ISP_HW_TFE_IN_CAMIF) {
+		camif_rsrc_data =  in_res->res_priv;
+		reg_val_pair[0] = camif_rsrc_data->camif_reg->reg_update_cmd;
+		reg_val_pair[1] =
+			camif_rsrc_data->reg_data->reg_update_cmd_data;
+	} else if ((in_res->res_id >= CAM_ISP_HW_TFE_IN_RDI0) &&
+		(in_res->res_id <= CAM_ISP_HW_TFE_IN_RDI2)) {
+		rdi_rsrc_data =  in_res->res_priv;
+		reg_val_pair[0] = rdi_rsrc_data->rdi_reg->reg_update_cmd;
+		reg_val_pair[1] = rdi_rsrc_data->reg_data->reg_update_cmd_data;
+	} else {
+		CAM_ERR(CAM_ISP, "Invalid res id:%d for reg update",
+			in_res->res_id);
+		return -EINVAL;
+	}
+
+	cdm_util_ops->cdm_write_regrandom(cdm_args->cmd.cmd_buf_addr,
+		1, reg_val_pair);
+
+	cdm_args->cmd.used_bytes = size * 4;
+
+	return 0;
+}
+
+static int cam_tfe_top_clock_update(
+	struct cam_tfe_top_priv *top_priv,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_tfe_clock_update_args     *clk_update = NULL;
+	struct cam_isp_resource_node         *res = NULL;
+	struct cam_hw_info                   *hw_info = NULL;
+	int                                   i, rc = 0;
+
+	clk_update =
+		(struct cam_tfe_clock_update_args *)cmd_args;
+	res = clk_update->node_res;
+
+	if (!res || !res->hw_intf->hw_priv) {
+		CAM_ERR(CAM_ISP, "Invalid input res %pK", res);
+		return -EINVAL;
+	}
+
+	hw_info = res->hw_intf->hw_priv;
+
+	if (res->res_type != CAM_ISP_RESOURCE_TFE_IN ||
+		res->res_id >= CAM_ISP_HW_TFE_IN_MAX) {
+		CAM_ERR(CAM_ISP, "TFE:%d Invalid res_type:%d res id%d",
+			res->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < CAM_TFE_TOP_IN_PORT_MAX; i++) {
+		if (top_priv->in_rsrc[i].res_id == res->res_id) {
+			top_priv->req_clk_rate[i] = clk_update->clk_rate;
+			break;
+		}
+	}
+
+	if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+		CAM_DBG(CAM_ISP,
+			"TFE:%d Not ready to set clocks yet :%d",
+			res->hw_intf->hw_idx,
+			hw_info->hw_state);
+	} else {
+		rc = cam_tfe_top_set_hw_clk_rate(top_priv);
+	}
+
+	return rc;
+}
+
+static int cam_tfe_top_bw_update(
+	struct cam_tfe_top_priv *top_priv,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_tfe_bw_update_args        *bw_update = NULL;
+	struct cam_isp_resource_node         *res = NULL;
+	struct cam_hw_info                   *hw_info = NULL;
+	int                                   rc = 0;
+	int                                   i;
+
+	bw_update = (struct cam_tfe_bw_update_args *)cmd_args;
+	res = bw_update->node_res;
+
+	if (!res || !res->hw_intf || !res->hw_intf->hw_priv)
+		return -EINVAL;
+
+	hw_info = res->hw_intf->hw_priv;
+
+	if (res->res_type != CAM_ISP_RESOURCE_TFE_IN ||
+		res->res_id >= CAM_ISP_HW_TFE_IN_MAX) {
+		CAM_ERR(CAM_ISP, "TFE:%d Invalid res_type:%d res id%d",
+			res->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < CAM_ISP_HW_TFE_IN_MAX; i++) {
+		if (top_priv->in_rsrc[i].res_id == res->res_id) {
+			memcpy(&top_priv->req_axi_vote[i], &bw_update->isp_vote,
+				sizeof(struct cam_axi_vote));
+			top_priv->axi_vote_control[i] =
+				CAM_TFE_BW_CONTROL_INCLUDE;
+			break;
+		}
+	}
+
+	if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"TFE:%d Not ready to set BW yet :%d",
+			res->hw_intf->hw_idx,
+			hw_info->hw_state);
+	} else {
+		rc = cam_tfe_top_set_axi_bw_vote(top_priv, false);
+	}
+
+	return rc;
+}
+
+static int cam_tfe_top_bw_control(
+	struct cam_tfe_top_priv *top_priv,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_tfe_bw_control_args       *bw_ctrl = NULL;
+	struct cam_isp_resource_node         *res = NULL;
+	struct cam_hw_info                   *hw_info = NULL;
+	int                                   rc = 0;
+	int                                   i;
+
+	bw_ctrl = (struct cam_tfe_bw_control_args *)cmd_args;
+	res = bw_ctrl->node_res;
+
+	if (!res || !res->hw_intf->hw_priv)
+		return -EINVAL;
+
+	hw_info = res->hw_intf->hw_priv;
+
+	if (res->res_type != CAM_ISP_RESOURCE_TFE_IN ||
+		res->res_id >= CAM_ISP_HW_TFE_IN_MAX) {
+		CAM_ERR(CAM_ISP, "TFE:%d Invalid res_type:%d res id%d",
+			res->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < CAM_TFE_TOP_IN_PORT_MAX; i++) {
+		if (top_priv->in_rsrc[i].res_id == res->res_id) {
+			top_priv->axi_vote_control[i] = bw_ctrl->action;
+			break;
+		}
+	}
+
+	if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"TFE:%d Not ready to set BW yet :%d",
+			res->hw_intf->hw_idx,
+			hw_info->hw_state);
+	} else {
+		rc = cam_tfe_top_set_axi_bw_vote(top_priv, true);
+	}
+
+	return rc;
+}
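
Note: both BW paths above only mark per-port state and defer the actual vote to cam_tfe_top_set_axi_bw_vote, which aggregates the ports flagged INCLUDE. A hedged sketch of that include/exclude aggregation idea (names and numbers are illustrative; the real aggregation lives in the set_axi_bw_vote helper, which is outside this hunk):

#include <stdint.h>
#include <stdio.h>

enum bw_control { BW_EXCLUDE, BW_INCLUDE };

int main(void)
{
	/* Per-input-port requests; numbers are illustrative. */
	uint64_t req_bw[4] = { 100, 200, 0, 50 };
	enum bw_control ctrl[4] = {
		BW_INCLUDE, BW_INCLUDE, BW_EXCLUDE, BW_INCLUDE
	};
	uint64_t total = 0;
	int i;

	/* Only ports flagged INCLUDE contribute to the aggregate vote. */
	for (i = 0; i < 4; i++)
		if (ctrl[i] == BW_INCLUDE)
			total += req_bw[i];

	printf("aggregate vote: %llu\n", (unsigned long long)total);
	return 0;
}
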
+
+static int cam_tfe_top_get_reg_dump(
+	struct cam_tfe_top_priv *top_priv,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_isp_hw_get_cmd_update  *reg_dump_cmd = cmd_args;
+	struct cam_tfe_soc_private        *soc_private;
+	struct cam_tfe_reg_dump_data      *reg_dump_data;
+	struct cam_hw_soc_info            *soc_info;
+	void __iomem                      *mem_base;
+	int i, j, num_reg_dump_entries;
+	uint32_t val_0, val_1, val_2, val_3, wm_offset, start_offset;
+	uint32_t end_offset, lut_word_size, lut_size, lut_bank_sel, lut_dmi_reg;
+
+	if (!reg_dump_cmd) {
+		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
+		return -EINVAL;
+	}
+
+	if ((reg_dump_cmd->res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) ||
+		(reg_dump_cmd->res->res_state ==
+		CAM_ISP_RESOURCE_STATE_AVAILABLE))
+		return 0;
+
+	soc_info = top_priv->common_data.soc_info;
+	soc_private = top_priv->common_data.soc_info->soc_private;
+	mem_base = soc_info->reg_map[TFE_CORE_BASE_IDX].mem_base;
+	CAM_INFO(CAM_ISP, "dump tfe:%d registers",
+		top_priv->common_data.hw_intf->hw_idx);
+
+	reg_dump_data = top_priv->common_data.reg_dump_data;
+	num_reg_dump_entries = reg_dump_data->num_reg_dump_entries;
+	for (i = 0; i < num_reg_dump_entries; i++) {
+		start_offset = reg_dump_data->reg_entry[i].start_offset;
+		end_offset = reg_dump_data->reg_entry[i].end_offset;
+
+		for (j = start_offset; (j + 0xc) <= end_offset; j += 0x10) {
+			val_0 = cam_io_r_mb(mem_base + j);
+			val_1 = cam_io_r_mb(mem_base + j + 4);
+			val_2 = cam_io_r_mb(mem_base + j + 0x8);
+			val_3 = cam_io_r_mb(mem_base + j + 0xc);
+			CAM_INFO(CAM_ISP, "0x%04x=0x%08x 0x%08x 0x%08x 0x%08x",
+				j, val_0, val_1, val_2, val_3);
+		}
+	}
+
+	num_reg_dump_entries = reg_dump_data->num_lut_dump_entries;
+	for (i = 0; i < num_reg_dump_entries; i++) {
+		lut_bank_sel = reg_dump_data->lut_entry[i].lut_bank_sel;
+		lut_size = reg_dump_data->lut_entry[i].lut_addr_size;
+		lut_word_size = reg_dump_data->lut_entry[i].lut_word_size;
+		lut_dmi_reg = reg_dump_data->lut_entry[i].dmi_reg_offset;
+
+		cam_io_w_mb(lut_bank_sel, mem_base + lut_dmi_reg + 4);
+		cam_io_w_mb(0, mem_base + 0xC28);
+
+		for (j = 0; j < lut_size; j++) {
+			val_0 = cam_io_r_mb(mem_base + 0xC30);
+			CAM_INFO(CAM_ISP, "Bank%d:0x%x LO: 0x%x",
+				lut_bank_sel, j, val_0);
+		}
+	}
+	/* No mem selected */
+	cam_io_w_mb(0, mem_base + 0xC24);
+	cam_io_w_mb(0, mem_base + 0xC28);
+
+	start_offset = reg_dump_data->bus_start_addr;
+	end_offset = reg_dump_data->bus_write_top_end_addr;
+
+	CAM_INFO(CAM_ISP, "bus start addr:0x%x end_offset:0x%x",
+		start_offset, end_offset);
+
+	for (i = start_offset; (i + 0xc) <= end_offset; i += 0x10) {
+		val_0 = cam_io_r_mb(mem_base + i);
+		val_1 = cam_io_r_mb(mem_base + i + 4);
+		val_2 = cam_io_r_mb(mem_base + i + 0x8);
+		val_3 = cam_io_r_mb(mem_base + i + 0xc);
+		CAM_INFO(CAM_ISP, "0x%04x=0x%08x 0x%08x 0x%08x 0x%08x",
+			i, val_0, val_1, val_2, val_3);
+	}
+
+	wm_offset = reg_dump_data->bus_client_start_addr;
+
+	CAM_INFO(CAM_ISP, "bus wm offset:0x%x",
+		wm_offset);
+
+	for (j = 0; j < reg_dump_data->num_bus_clients; j++) {
+		for (i = 0x0; (i + 0xc) <= 0x3C; i += 0x10) {
+			val_0 = cam_io_r_mb(mem_base + wm_offset + i);
+			val_1 = cam_io_r_mb(mem_base + wm_offset + i + 4);
+			val_2 = cam_io_r_mb(mem_base + wm_offset + i + 0x8);
+			val_3 = cam_io_r_mb(mem_base + wm_offset + i + 0xc);
+			CAM_INFO(CAM_ISP, "0x%04x=0x%08x 0x%08x 0x%08x 0x%08x",
+				(wm_offset + i), val_0, val_1, val_2, val_3);
+		}
+		for (i = 0x60; (i + 0xc) <= 0x80; i += 0x10) {
+			val_0 = cam_io_r_mb(mem_base + wm_offset + i);
+			val_1 = cam_io_r_mb(mem_base + wm_offset + i + 4);
+			val_2 = cam_io_r_mb(mem_base + wm_offset + i + 0x8);
+			val_3 = cam_io_r_mb(mem_base + wm_offset + i + 0xc);
+			CAM_INFO(CAM_ISP, "0x%04x=0x%08x 0x%08x 0x%08x 0x%08x",
+				(wm_offset + i), val_0, val_1, val_2, val_3);
+		}
+		wm_offset += reg_dump_data->bus_client_offset;
+	}
+
+	cam_cpas_reg_read(soc_private->cpas_handle,
+		CAM_CPAS_REG_CAMNOC, 0x20, true, &val_0);
+	CAM_INFO(CAM_ISP, "tfe_niu_MaxWr_Low offset 0x20 val 0x%x",
+		val_0);
+
+	/* dump the clock votings */
+	CAM_INFO(CAM_ISP, "TFE:%d clk=%ld",
+		top_priv->common_data.hw_intf->hw_idx,
+		top_priv->hw_clk_rate);
+
+	return 0;
+}
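
Note: each dump loop above reads the block in 16-byte windows, four 32-bit words per log line, with the (j + 0xc) <= end_offset guard keeping the last window inside the range. A compilable sketch of the same stride over a fake register block:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t regs[16];                /* fake ioremapped register block */
	uint32_t start = 0x0, end = 0x3c;
	uint32_t i, j;

	for (i = 0; i < 16; i++)
		regs[i] = 0x1000 + i;

	/* Driver's stride: four 32-bit reads per log line, 16 bytes apart. */
	for (j = start; (j + 0xc) <= end; j += 0x10)
		printf("0x%04x=0x%08x 0x%08x 0x%08x 0x%08x\n", j,
		       regs[j / 4], regs[j / 4 + 1],
		       regs[j / 4 + 2], regs[j / 4 + 3]);
	return 0;
}
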
+
+static int cam_tfe_camif_irq_reg_dump(
+	struct cam_tfe_hw_core_info    *core_info,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_tfe_top_priv            *top_priv;
+	struct cam_isp_hw_get_cmd_update   *cmd_update;
+	struct cam_isp_resource_node       *camif_res = NULL;
+	void __iomem                       *mem_base;
+	uint32_t i;
+
+	int rc = 0;
+
+	if (!cmd_args) {
+		CAM_ERR(CAM_ISP, "Error! Invalid input arguments\n");
+		return -EINVAL;
+	}
+	top_priv = (struct cam_tfe_top_priv  *)core_info->top_priv;
+	cmd_update = (struct cam_isp_hw_get_cmd_update  *)cmd_args;
+	camif_res = cmd_update->res;
+	mem_base = top_priv->common_data.soc_info->reg_map[0].mem_base;
+	if ((camif_res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) ||
+		(camif_res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE)) {
+		CAM_ERR(CAM_ISP, "Error! Invalid state\n");
+		return 0;
+	}
+
+	for (i = 0; i < CAM_TFE_TOP_IRQ_REG_NUM; i++) {
+		CAM_INFO(CAM_ISP,
+			"Core Id =%d TOP IRQ status[%d ] val 0x%x",
+			core_info->core_index, i,
+			cam_io_r_mb(mem_base +
+			core_info->tfe_hw_info->top_irq_status[i]));
+	}
+
+	for (i = 0; i < CAM_TFE_BUS_MAX_IRQ_REGISTERS; i++) {
+		CAM_INFO(CAM_ISP,
+			"Core Id =%d BUS IRQ status[%d ] val:0x%x",
+			core_info->core_index, i,
+			cam_io_r_mb(mem_base +
+			core_info->tfe_hw_info->bus_irq_status[i]));
+	}
+
+	return rc;
+}
+
+int cam_tfe_top_reserve(void *device_priv,
+	void *reserve_args, uint32_t arg_size)
+{
+	struct cam_tfe_top_priv                 *top_priv;
+	struct cam_tfe_acquire_args             *args;
+	struct cam_tfe_hw_tfe_in_acquire_args   *acquire_args;
+	struct cam_tfe_camif_data               *camif_data;
+	struct cam_tfe_rdi_data                 *rdi_data;
+	uint32_t i;
+	int rc = -EINVAL;
+
+	if (!device_priv || !reserve_args) {
+		CAM_ERR(CAM_ISP, "Error Invalid input arguments");
+		return -EINVAL;
+	}
+
+	top_priv = (struct cam_tfe_top_priv   *)device_priv;
+	args = (struct cam_tfe_acquire_args *)reserve_args;
+	acquire_args = &args->tfe_in;
+
+	for (i = 0; i < CAM_TFE_TOP_IN_PORT_MAX; i++) {
+		CAM_DBG(CAM_ISP, "i :%d res_id:%d state:%d", i,
+			acquire_args->res_id, top_priv->in_rsrc[i].res_state);
+
+		if ((top_priv->in_rsrc[i].res_id == acquire_args->res_id) &&
+			(top_priv->in_rsrc[i].res_state ==
+			CAM_ISP_RESOURCE_STATE_AVAILABLE)) {
+			rc = cam_tfe_validate_pix_pattern(
+				acquire_args->in_port->pix_pattern);
+			if (rc)
+				return rc;
+
+			if (acquire_args->res_id == CAM_ISP_HW_TFE_IN_CAMIF) {
+				camif_data = (struct cam_tfe_camif_data    *)
+					top_priv->in_rsrc[i].res_priv;
+				camif_data->pix_pattern =
+					acquire_args->in_port->pix_pattern;
+				camif_data->dsp_mode =
+					acquire_args->in_port->dsp_mode;
+				camif_data->first_pixel =
+					acquire_args->in_port->left_start;
+				camif_data->last_pixel =
+					acquire_args->in_port->left_end;
+				camif_data->first_line =
+					acquire_args->in_port->line_start;
+				camif_data->last_line =
+					acquire_args->in_port->line_end;
+				camif_data->camif_pd_enable =
+					acquire_args->camif_pd_enable;
+				camif_data->dual_tfe_sync_sel =
+					acquire_args->dual_tfe_sync_sel_idx;
+				camif_data->sync_mode = acquire_args->sync_mode;
+				camif_data->event_cb = args->event_cb;
+				camif_data->priv = args->priv;
+
+				CAM_DBG(CAM_ISP,
+					"TFE:%d pix_pattern:%d dsp_mode=%d",
+					top_priv->in_rsrc[i].hw_intf->hw_idx,
+					camif_data->pix_pattern,
+					camif_data->dsp_mode);
+			} else {
+				rdi_data = (struct cam_tfe_rdi_data      *)
+					top_priv->in_rsrc[i].res_priv;
+				rdi_data->pix_pattern =
+					acquire_args->in_port->pix_pattern;
+				rdi_data->sync_mode = acquire_args->sync_mode;
+				rdi_data->event_cb = args->event_cb;
+				rdi_data->priv = args->priv;
+			}
+
+			top_priv->in_rsrc[i].cdm_ops = acquire_args->cdm_ops;
+			top_priv->in_rsrc[i].tasklet_info = args->tasklet;
+			top_priv->in_rsrc[i].res_state =
+				CAM_ISP_RESOURCE_STATE_RESERVED;
+			top_priv->tasklet_info = args->tasklet;
+			acquire_args->rsrc_node =
+				&top_priv->in_rsrc[i];
+			rc = 0;
+			break;
+		}
+	}
+
+	return rc;
+}
+
+int cam_tfe_top_release(void *device_priv,
+	void *release_args, uint32_t arg_size)
+{
+	struct cam_tfe_top_priv            *top_priv;
+	struct cam_isp_resource_node       *in_res;
+
+	if (!device_priv || !release_args) {
+		CAM_ERR(CAM_ISP, "Error Invalid input arguments");
+		return -EINVAL;
+	}
+
+	top_priv = (struct cam_tfe_top_priv   *)device_priv;
+	in_res = (struct cam_isp_resource_node *)release_args;
+
+	CAM_DBG(CAM_ISP, "TFE:%d resource id:%d in state %d",
+		in_res->hw_intf->hw_idx, in_res->res_id,
+		in_res->res_state);
+	if (in_res->res_state < CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CAM_ERR(CAM_ISP, "TFE:%d Error Resource Invalid res_state :%d",
+			in_res->hw_intf->hw_idx, in_res->res_state);
+		return -EINVAL;
+	}
+	in_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+	in_res->cdm_ops = NULL;
+	in_res->tasklet_info = NULL;
+	in_res->rdi_only_ctx = 0;
+
+	return 0;
+}
+
+static int cam_tfe_camif_resource_start(
+	struct cam_tfe_hw_core_info         *core_info,
+	struct cam_isp_resource_node        *camif_res)
+{
+	struct cam_tfe_camif_data           *rsrc_data;
+	struct cam_tfe_soc_private          *soc_private;
+	uint32_t                             val = 0;
+	uint32_t                             epoch0_irq_mask;
+	uint32_t                             epoch1_irq_mask;
+	uint32_t                             computed_epoch_line_cfg;
+
+	if (!camif_res || !core_info) {
+		CAM_ERR(CAM_ISP, "Error Invalid input arguments");
+		return -EINVAL;
+	}
+
+	if (camif_res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CAM_ERR(CAM_ISP, "TFE:%d Error Invalid camif res res_state:%d",
+			core_info->core_index, camif_res->res_state);
+		return -EINVAL;
+	}
+
+	rsrc_data = (struct cam_tfe_camif_data  *)camif_res->res_priv;
+	soc_private = rsrc_data->soc_info->soc_private;
+
+	if (!soc_private) {
+		CAM_ERR(CAM_ISP, "TFE:%d Error soc_private NULL",
+			core_info->core_index);
+		return -ENODEV;
+	}
+
+	/* Camif module config */
+	val = cam_io_r(rsrc_data->mem_base +
+		rsrc_data->camif_reg->module_cfg);
+	val &= ~(rsrc_data->reg_data->pixel_pattern_mask);
+	val |= (rsrc_data->pix_pattern <<
+		rsrc_data->reg_data->pixel_pattern_shift);
+	val |= (1 << rsrc_data->reg_data->module_enable_shift);
+	val |= (1 << rsrc_data->reg_data->pix_out_enable_shift);
+	if (rsrc_data->camif_pd_enable)
+		val |= (1 << rsrc_data->reg_data->pdaf_output_enable_shift);
+
+	cam_io_w_mb(val, rsrc_data->mem_base +
+		rsrc_data->camif_reg->module_cfg);
+
+	CAM_DBG(CAM_ISP, "TFE:%d camif module config val:%d",
+		core_info->core_index, val);
+
+	/* Config tfe core */
+	val = 0;
+	if (rsrc_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
+		val = (1 << rsrc_data->reg_data->extern_reg_update_shift);
+
+	if ((rsrc_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE) ||
+		(rsrc_data->sync_mode == CAM_ISP_HW_SYNC_MASTER)) {
+		val |= (1 << rsrc_data->reg_data->dual_tfe_pix_en_shift);
+		val |= ((rsrc_data->dual_tfe_sync_sel + 1) <<
+			rsrc_data->reg_data->dual_tfe_sync_sel_shift);
+	}
+
+	if (!rsrc_data->camif_pd_enable)
+		val |= (1 << rsrc_data->reg_data->camif_pd_rdi2_src_sel_shift);
+
+	cam_io_w_mb(val, rsrc_data->mem_base +
+		rsrc_data->common_reg->core_cfg_0);
+
+	CAM_DBG(CAM_ISP, "TFE:%d core_cfg 0 val:0x%x", core_info->core_index,
+		val);
+
+	val = cam_io_r(rsrc_data->mem_base +
+		rsrc_data->common_reg->core_cfg_1);
+	val &= ~BIT(0);
+	cam_io_w_mb(val, rsrc_data->mem_base +
+		rsrc_data->common_reg->core_cfg_1);
+	CAM_DBG(CAM_ISP, "TFE:%d core_cfg 1 val:0x%x", core_info->core_index,
+		val);
+
+	/* Epoch config */
+	epoch0_irq_mask = ((rsrc_data->last_line -
+			rsrc_data->first_line) / 2) +
+			rsrc_data->first_line;
+	epoch1_irq_mask = rsrc_data->reg_data->epoch_line_cfg &
+			0xFFFF;
+	computed_epoch_line_cfg = (epoch0_irq_mask << 16) |
+			epoch1_irq_mask;
+	cam_io_w_mb(computed_epoch_line_cfg,
+			rsrc_data->mem_base +
+			rsrc_data->camif_reg->epoch_irq_cfg);
+	CAM_DBG(CAM_ISP, "TFE:%d first_line: %u\n"
+			"last_line: %u\n"
+			"epoch_line_cfg: 0x%x",
+			core_info->core_index,
+			rsrc_data->first_line,
+			rsrc_data->last_line,
+			computed_epoch_line_cfg);
+
+	camif_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
+	/* Reg Update */
+	cam_io_w_mb(rsrc_data->reg_data->reg_update_cmd_data,
+		rsrc_data->mem_base + rsrc_data->camif_reg->reg_update_cmd);
+	CAM_DBG(CAM_ISP, "hw id:%d RUP val:%d", camif_res->hw_intf->hw_idx,
+		rsrc_data->reg_data->reg_update_cmd_data);
+
+	/* Disable sof irq debug flag */
+	rsrc_data->enable_sof_irq_debug = false;
+	rsrc_data->irq_debug_cnt = 0;
+
+	if (rsrc_data->camif_debug & CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) {
+		val = cam_io_r_mb(rsrc_data->mem_base +
+			rsrc_data->common_reg->diag_config);
+		val |= rsrc_data->reg_data->enable_diagnostic_hw;
+		cam_io_w_mb(val, rsrc_data->mem_base +
+			rsrc_data->common_reg->diag_config);
+	}
+
+	/* Enable the irq */
+	cam_tfe_irq_config(core_info, rsrc_data->reg_data->subscribe_irq_mask,
+		CAM_TFE_TOP_IRQ_REG_NUM, true);
+
+	/* Program perf counters */
+	val = (1 << rsrc_data->reg_data->perf_cnt_start_cmd_shift) |
+		(1 << rsrc_data->reg_data->perf_cnt_continuous_shift) |
+		(1 << rsrc_data->reg_data->perf_client_sel_shift) |
+		(1 << rsrc_data->reg_data->perf_window_start_shift) |
+		(2 << rsrc_data->reg_data->perf_window_end_shift);
+	cam_io_w_mb(val,
+		rsrc_data->mem_base + rsrc_data->common_reg->perf_cnt_cfg);
+	CAM_DBG(CAM_ISP, "TFE:%d perf_cfg val:%d", core_info->core_index,
+		val);
+
+	/* Enable the top debug registers */
+	cam_io_w_mb(0x1,
+		rsrc_data->mem_base + rsrc_data->common_reg->debug_cfg);
+
+	CAM_DBG(CAM_ISP, "Start Camif TFE %d Done", core_info->core_index);
+	return 0;
+}
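
Note: the epoch programming above puts EPOCH0 at the vertical midpoint of the frame (packed into the upper 16 bits) and keeps EPOCH1 at the lower 16 bits of the register default. A standalone sketch of that packing (the frame geometry and default value below are made-up examples):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t first_line = 0, last_line = 2159; /* illustrative frame */
	uint32_t default_cfg = 0x00140014;         /* assumed reg default */

	/* EPOCH0 fires mid-frame; EPOCH1 keeps the default's low 16 bits. */
	uint32_t epoch0 = ((last_line - first_line) / 2) + first_line;
	uint32_t epoch1 = default_cfg & 0xFFFF;
	uint32_t cfg = (epoch0 << 16) | epoch1;

	printf("epoch_irq_cfg = 0x%08x (EPOCH0 at line %u)\n", cfg, epoch0);
	return 0;
}
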
+
+int cam_tfe_top_start(struct cam_tfe_hw_core_info *core_info,
+	void *start_args, uint32_t arg_size)
+{
+	struct cam_tfe_top_priv                 *top_priv;
+	struct cam_isp_resource_node            *in_res;
+	struct cam_hw_info                      *hw_info = NULL;
+	struct cam_tfe_rdi_data                 *rsrc_rdi_data;
+	uint32_t val;
+	int rc = 0;
+
+	if (!start_args) {
+		CAM_ERR(CAM_ISP, "TFE:%d Error Invalid input arguments",
+			core_info->core_index);
+		return -EINVAL;
+	}
+
+	top_priv = (struct cam_tfe_top_priv *)core_info->top_priv;
+	in_res = (struct cam_isp_resource_node *)start_args;
+	hw_info = (struct cam_hw_info  *)in_res->hw_intf->hw_priv;
+
+	if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+		CAM_ERR(CAM_ISP, "TFE:%d HW not powered up",
+			core_info->core_index);
+		rc = -EPERM;
+		goto end;
+	}
+
+	rc = cam_tfe_top_set_hw_clk_rate(top_priv);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "TFE:%d set_hw_clk_rate failed, rc=%d",
+			hw_info->soc_info.index, rc);
+		return rc;
+	}
+
+	rc = cam_tfe_top_set_axi_bw_vote(top_priv, true);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "TFE:%d set_axi_bw_vote failed, rc=%d",
+			core_info->core_index, rc);
+		return rc;
+	}
+
+	if (in_res->res_id == CAM_ISP_HW_TFE_IN_CAMIF) {
+		cam_tfe_camif_resource_start(core_info, in_res);
+	} else if ((in_res->res_id >= CAM_ISP_HW_TFE_IN_RDI0) &&
+		(in_res->res_id <= CAM_ISP_HW_TFE_IN_RDI2)) {
+		rsrc_rdi_data = (struct cam_tfe_rdi_data *) in_res->res_priv;
+		val = (rsrc_rdi_data->pix_pattern <<
+			rsrc_rdi_data->reg_data->pixel_pattern_shift);
+
+		val |= (1 << rsrc_rdi_data->reg_data->rdi_out_enable_shift);
+		cam_io_w_mb(val, rsrc_rdi_data->mem_base +
+			rsrc_rdi_data->rdi_reg->rdi_module_config);
+
+		/* Epoch config */
+		cam_io_w_mb(rsrc_rdi_data->reg_data->epoch_line_cfg,
+			rsrc_rdi_data->mem_base +
+			rsrc_rdi_data->rdi_reg->rdi_epoch_irq);
+
+		/* Reg Update */
+		cam_io_w_mb(rsrc_rdi_data->reg_data->reg_update_cmd_data,
+			rsrc_rdi_data->mem_base +
+			rsrc_rdi_data->rdi_reg->reg_update_cmd);
+		in_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
+		/* Enable the irq */
+		if (in_res->rdi_only_ctx)
+			cam_tfe_irq_config(core_info,
+				rsrc_rdi_data->reg_data->subscribe_irq_mask,
+				CAM_TFE_TOP_IRQ_REG_NUM, true);
+
+		CAM_DBG(CAM_ISP, "TFE:%d Start RDI %d", core_info->core_index,
+			in_res->res_id - CAM_ISP_HW_TFE_IN_RDI0);
+	}
+
+	core_info->irq_err_config_cnt++;
+	if (core_info->irq_err_config_cnt == 1)
+		cam_tfe_irq_config(core_info,
+			core_info->tfe_hw_info->error_irq_mask,
+			CAM_TFE_TOP_IRQ_REG_NUM, true);
+
+end:
+	return rc;
+}
+
+int cam_tfe_top_stop(struct cam_tfe_hw_core_info *core_info,
+	void *stop_args, uint32_t arg_size)
+{
+	struct cam_tfe_top_priv                 *top_priv;
+	struct cam_isp_resource_node            *in_res;
+	struct cam_hw_info                      *hw_info = NULL;
+	struct cam_tfe_camif_data               *camif_data;
+	struct cam_tfe_rdi_data                 *rsrc_rdi_data;
+	uint32_t val = 0;
+	int i, rc = 0;
+
+	if (!stop_args) {
+		CAM_ERR(CAM_ISP, "TFE:%d Error Invalid input arguments",
+			core_info->core_index);
+		return -EINVAL;
+	}
+
+	top_priv = (struct cam_tfe_top_priv   *)core_info->top_priv;
+	in_res = (struct cam_isp_resource_node *)stop_args;
+	hw_info = (struct cam_hw_info  *)in_res->hw_intf->hw_priv;
+
+	if (in_res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED ||
+		in_res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE)
+		return 0;
+
+	if (in_res->res_id == CAM_ISP_HW_TFE_IN_CAMIF) {
+		camif_data = (struct cam_tfe_camif_data *)in_res->res_priv;
+
+		cam_io_w_mb(0, camif_data->mem_base +
+			camif_data->camif_reg->module_cfg);
+
+		cam_tfe_irq_config(core_info,
+			camif_data->reg_data->subscribe_irq_mask,
+			CAM_TFE_TOP_IRQ_REG_NUM, false);
+
+		if (in_res->res_state == CAM_ISP_RESOURCE_STATE_STREAMING)
+			in_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+
+		val = cam_io_r_mb(camif_data->mem_base +
+				camif_data->common_reg->diag_config);
+		if (val & camif_data->reg_data->enable_diagnostic_hw) {
+			val &= ~camif_data->reg_data->enable_diagnostic_hw;
+			cam_io_w_mb(val, camif_data->mem_base +
+				camif_data->common_reg->diag_config);
+		}
+	} else if ((in_res->res_id >= CAM_ISP_HW_TFE_IN_RDI0) &&
+		(in_res->res_id <= CAM_ISP_HW_TFE_IN_RDI2)) {
+		rsrc_rdi_data = (struct cam_tfe_rdi_data *) in_res->res_priv;
+		cam_io_w_mb(0x0, rsrc_rdi_data->mem_base +
+			rsrc_rdi_data->rdi_reg->rdi_module_config);
+
+		if (in_res->rdi_only_ctx)
+			cam_tfe_irq_config(core_info,
+				rsrc_rdi_data->reg_data->subscribe_irq_mask,
+				CAM_TFE_TOP_IRQ_REG_NUM, false);
+
+		if (in_res->res_state == CAM_ISP_RESOURCE_STATE_STREAMING)
+			in_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+
+	} else {
+		CAM_ERR(CAM_ISP, "TFE:%d Invalid res id:%d",
+			core_info->core_index, in_res->res_id);
+		return -EINVAL;
+	}
+
+	if (!rc) {
+		for (i = 0; i < CAM_TFE_TOP_IN_PORT_MAX; i++) {
+			if (top_priv->in_rsrc[i].res_id == in_res->res_id) {
+				top_priv->req_clk_rate[i] = 0;
+				memset(&top_priv->req_axi_vote[i], 0,
+					sizeof(struct cam_axi_vote));
+				top_priv->axi_vote_control[i] =
+					CAM_TFE_BW_CONTROL_EXCLUDE;
+				break;
+			}
+		}
+	}
+
+	core_info->irq_err_config_cnt--;
+	if (!core_info->irq_err_config_cnt)
+		cam_tfe_irq_config(core_info,
+			core_info->tfe_hw_info->error_irq_mask,
+			CAM_TFE_TOP_IRQ_REG_NUM, false);
+
+	return rc;
+}
+
+int cam_tfe_top_init(
+	struct cam_hw_soc_info                 *soc_info,
+	struct cam_hw_intf                     *hw_intf,
+	void                                   *top_hw_info,
+	struct cam_tfe_hw_core_info            *core_info)
+{
+	struct cam_tfe_top_priv           *top_priv = NULL;
+	struct cam_tfe_top_hw_info        *hw_info = top_hw_info;
+	struct cam_tfe_soc_private        *soc_private = NULL;
+	struct cam_tfe_camif_data         *camif_priv = NULL;
+	struct cam_tfe_rdi_data           *rdi_priv = NULL;
+	int i, j, rc = 0;
+
+	top_priv = kzalloc(sizeof(struct cam_tfe_top_priv),
+		GFP_KERNEL);
+	if (!top_priv) {
+		CAM_DBG(CAM_ISP, "TFE:%DError Failed to alloc for tfe_top_priv",
+			core_info->core_index);
+		rc = -ENOMEM;
+		goto end;
+	}
+	core_info->top_priv = top_priv;
+
+	soc_private = soc_info->soc_private;
+	if (!soc_private) {
+		CAM_ERR(CAM_ISP, "TFE:%d Error soc_private NULL",
+			core_info->core_index);
+		rc = -ENODEV;
+		goto free_tfe_top_priv;
+	}
+
+	top_priv->hw_clk_rate = 0;
+	memset(top_priv->last_vote, 0x0, sizeof(struct cam_axi_vote) *
+		(CAM_TFE_TOP_IN_PORT_MAX *
+		CAM_TFE_DELAY_BW_REDUCTION_NUM_FRAMES));
+	top_priv->last_counter = 0;
+
+	for (i = 0, j = 0; i < CAM_TFE_TOP_IN_PORT_MAX; i++) {
+		top_priv->in_rsrc[i].res_type = CAM_ISP_RESOURCE_TFE_IN;
+		top_priv->in_rsrc[i].hw_intf = hw_intf;
+		top_priv->in_rsrc[i].res_state =
+			CAM_ISP_RESOURCE_STATE_AVAILABLE;
+		top_priv->req_clk_rate[i] = 0;
+		memset(&top_priv->req_axi_vote[i], 0,
+			sizeof(struct cam_axi_vote));
+		top_priv->axi_vote_control[i] =
+			CAM_TFE_BW_CONTROL_EXCLUDE;
+
+		if (hw_info->in_port[i] == CAM_TFE_CAMIF_VER_1_0) {
+			top_priv->in_rsrc[i].res_id =
+				CAM_ISP_HW_TFE_IN_CAMIF;
+
+			camif_priv = kzalloc(sizeof(struct cam_tfe_camif_data),
+				GFP_KERNEL);
+			if (!camif_priv) {
+				CAM_ERR(CAM_ISP,
+					"TFE:%d Failed to alloc for camif_priv",
+					core_info->core_index);
+				rc = -ENOMEM;
+				goto free_tfe_top_priv;
+			}
+
+			top_priv->in_rsrc[i].res_priv = camif_priv;
+
+			camif_priv->mem_base    =
+				soc_info->reg_map[TFE_CORE_BASE_IDX].mem_base;
+			camif_priv->camif_reg   =
+				hw_info->camif_hw_info.camif_reg;
+			camif_priv->common_reg  = hw_info->common_reg;
+			camif_priv->reg_data    =
+				hw_info->camif_hw_info.reg_data;
+			camif_priv->hw_intf     = hw_intf;
+			camif_priv->soc_info    = soc_info;
+
+		} else if (hw_info->in_port[i] ==
+			CAM_TFE_RDI_VER_1_0) {
+			top_priv->in_rsrc[i].res_id =
+				CAM_ISP_HW_TFE_IN_RDI0 + j;
+
+			rdi_priv = kzalloc(sizeof(struct cam_tfe_rdi_data),
+					GFP_KERNEL);
+			if (!rdi_priv) {
+				CAM_ERR(CAM_ISP,
+					"TFE:%d Failed to alloc for rdi_priv",
+					core_info->core_index);
+				rc = -ENOMEM;
+				goto deinit_resources;
+			}
+
+			top_priv->in_rsrc[i].res_priv = rdi_priv;
+
+			rdi_priv->mem_base   =
+				soc_info->reg_map[TFE_CORE_BASE_IDX].mem_base;
+			rdi_priv->hw_intf    = hw_intf;
+			rdi_priv->common_reg = hw_info->common_reg;
+			rdi_priv->rdi_reg    =
+				hw_info->rdi_hw_info[j].rdi_reg;
+			rdi_priv->reg_data =
+				hw_info->rdi_hw_info[j++].reg_data;
+		} else {
+			CAM_WARN(CAM_ISP, "TFE:%d Invalid inport type: %u",
+				core_info->core_index, hw_info->in_port[i]);
+		}
+	}
+
+	top_priv->common_data.soc_info     = soc_info;
+	top_priv->common_data.hw_intf      = hw_intf;
+	top_priv->common_data.common_reg   = hw_info->common_reg;
+	top_priv->common_data.reg_dump_data = &hw_info->reg_dump_data;
+
+	return rc;
+
+deinit_resources:
+	for (--i; i >= 0; i--) {
+
+		top_priv->in_rsrc[i].start = NULL;
+		top_priv->in_rsrc[i].stop  = NULL;
+		top_priv->in_rsrc[i].process_cmd = NULL;
+		top_priv->in_rsrc[i].top_half_handler = NULL;
+		top_priv->in_rsrc[i].bottom_half_handler = NULL;
+
+		if (!top_priv->in_rsrc[i].res_priv)
+			continue;
+
+		kfree(top_priv->in_rsrc[i].res_priv);
+		top_priv->in_rsrc[i].res_priv = NULL;
+		top_priv->in_rsrc[i].res_state =
+			CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
+	}
+free_tfe_top_priv:
+	kfree(core_info->top_priv);
+	core_info->top_priv = NULL;
+end:
+	return rc;
+}
+
+int cam_tfe_top_deinit(struct cam_tfe_top_priv  *top_priv)
+{
+	int i, rc = 0;
+
+	if (!top_priv) {
+		CAM_ERR(CAM_ISP, "Error Invalid input");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < CAM_TFE_TOP_IN_PORT_MAX; i++) {
+		top_priv->in_rsrc[i].res_state =
+			CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
+
+		top_priv->in_rsrc[i].start = NULL;
+		top_priv->in_rsrc[i].stop  = NULL;
+		top_priv->in_rsrc[i].process_cmd = NULL;
+		top_priv->in_rsrc[i].top_half_handler = NULL;
+		top_priv->in_rsrc[i].bottom_half_handler = NULL;
+
+		if (!top_priv->in_rsrc[i].res_priv) {
+			CAM_ERR(CAM_ISP, "Error res_priv is NULL");
+			return -ENODEV;
+		}
+
+		kfree(top_priv->in_rsrc[i].res_priv);
+		top_priv->in_rsrc[i].res_priv = NULL;
+	}
+
+	return rc;
+}
+
+int cam_tfe_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size)
+{
+	struct cam_hw_info                *tfe_hw  = hw_priv;
+	struct cam_hw_soc_info            *soc_info = NULL;
+	struct cam_tfe_hw_core_info       *core_info = NULL;
+	struct cam_tfe_top_priv           *top_priv  = NULL;
+	struct cam_tfe_hw_info            *hw_info = NULL;
+	void __iomem                      *mem_base;
+	uint32_t *reset_reg_args = reset_core_args;
+	uint32_t i, reset_reg_val, irq_status[CAM_TFE_TOP_IRQ_REG_NUM];
+	int rc;
+
+	CAM_DBG(CAM_ISP, "Enter");
+
+	if (!hw_priv || !reset_core_args) {
+		CAM_ERR(CAM_ISP, "Invalid input arguments");
+		return -EINVAL;
+	}
+
+	soc_info = &tfe_hw->soc_info;
+	core_info = (struct cam_tfe_hw_core_info *)tfe_hw->core_info;
+	top_priv = core_info->top_priv;
+	hw_info = core_info->tfe_hw_info;
+	mem_base = tfe_hw->soc_info.reg_map[TFE_CORE_BASE_IDX].mem_base;
+
+	for (i = 0; i < CAM_TFE_TOP_IRQ_REG_NUM; i++)
+		irq_status[i] = cam_io_r(mem_base +
+			core_info->tfe_hw_info->top_irq_status[i]);
+
+	for (i = 0; i < CAM_TFE_TOP_IRQ_REG_NUM; i++)
+		cam_io_w(irq_status[i], mem_base +
+			core_info->tfe_hw_info->top_irq_clear[i]);
+
+	cam_io_w_mb(core_info->tfe_hw_info->global_clear_bitmask,
+		mem_base + core_info->tfe_hw_info->top_irq_cmd);
+
+	/* Mask all irq registers */
+	for (i = 0; i < CAM_TFE_TOP_IRQ_REG_NUM; i++)
+		cam_io_w(0, mem_base +
+			core_info->tfe_hw_info->top_irq_mask[i]);
+
+	cam_tfe_irq_config(core_info, hw_info->reset_irq_mask,
+		CAM_TFE_TOP_IRQ_REG_NUM, true);
+
+	reinit_completion(&core_info->reset_complete);
+
+	CAM_DBG(CAM_ISP, "calling RESET on tfe %d", soc_info->index);
+
+	switch (*reset_reg_args) {
+	case CAM_TFE_HW_RESET_HW_AND_REG:
+		reset_reg_val = CAM_TFE_HW_RESET_HW_AND_REG_VAL;
+		break;
+	default:
+		reset_reg_val = CAM_TFE_HW_RESET_HW_VAL;
+		break;
+	}
+
+	cam_io_w_mb(reset_reg_val, mem_base +
+		top_priv->common_data.common_reg->global_reset_cmd);
+
+	CAM_DBG(CAM_ISP, "TFE:%d waiting for tfe reset complete",
+		core_info->core_index);
+	/* Wait for completion or a timeout of 500ms */
+	rc = wait_for_completion_timeout(&core_info->reset_complete,
+		msecs_to_jiffies(500));
+	if (rc <= 0) {
+		CAM_ERR(CAM_ISP, "TFE:%d Error Reset Timeout",
+			core_info->core_index);
+		rc = -ETIMEDOUT;
+	} else {
+		rc = 0;
+		CAM_DBG(CAM_ISP, "TFE:%d reset complete done (%d)",
+			core_info->core_index, rc);
+	}
+
+	cam_tfe_irq_config(core_info, hw_info->reset_irq_mask,
+		CAM_TFE_TOP_IRQ_REG_NUM, false);
+
+	CAM_DBG(CAM_ISP, "Exit");
+	return rc;
+}
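
Note: wait_for_completion_timeout takes its timeout in jiffies, hence the msecs_to_jiffies(500) conversion above; a raw 500 would have meant 500 scheduler ticks, not 500 ms. A simplified userspace model of that conversion (assuming HZ=100 for the demo; the kernel's real msecs_to_jiffies handles more cases):

#include <stdio.h>

#define HZ 100 /* assumed tick rate for the demo */

/* Simplified model of the kernel's msecs_to_jiffies(). */
static unsigned long msecs_to_jiffies(unsigned int ms)
{
	return (ms * HZ + 999) / 1000; /* round up to a whole tick */
}

int main(void)
{
	/* A raw 500 would be 500 ticks (5 s at HZ=100), not 500 ms. */
	printf("500 ms = %lu jiffies at HZ=%d\n", msecs_to_jiffies(500), HZ);
	return 0;
}
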
+
+int cam_tfe_init_hw(void *hw_priv, void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info                *tfe_hw = hw_priv;
+	struct cam_hw_soc_info            *soc_info = NULL;
+	struct cam_tfe_hw_core_info       *core_info = NULL;
+	struct cam_tfe_top_priv           *top_priv;
+	void __iomem                      *mem_base;
+	int rc = 0;
+	uint32_t                           reset_core_args =
+					CAM_TFE_HW_RESET_HW_AND_REG;
+
+	CAM_DBG(CAM_ISP, "Enter");
+	if (!hw_priv) {
+		CAM_ERR(CAM_ISP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	soc_info = &tfe_hw->soc_info;
+	core_info = (struct cam_tfe_hw_core_info *)tfe_hw->core_info;
+	top_priv = (struct cam_tfe_top_priv *)core_info->top_priv;
+
+	mutex_lock(&tfe_hw->hw_mutex);
+	tfe_hw->open_count++;
+	if (tfe_hw->open_count > 1) {
+		mutex_unlock(&tfe_hw->hw_mutex);
+		CAM_DBG(CAM_ISP, "TFE:%d has already been initialized cnt %d",
+			core_info->core_index, tfe_hw->open_count);
+		return 0;
+	}
+	mutex_unlock(&tfe_hw->hw_mutex);
+
+	/* Turn ON Regulators, Clocks and other SOC resources */
+	rc = cam_tfe_enable_soc_resources(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Enable SOC failed");
+		rc = -EFAULT;
+		goto decrement_open_cnt;
+	}
+	tfe_hw->hw_state = CAM_HW_STATE_POWER_UP;
+
+	mem_base = tfe_hw->soc_info.reg_map[TFE_CORE_BASE_IDX].mem_base;
+	CAM_DBG(CAM_ISP, "TFE:%d Enable soc done", core_info->core_index);
+
+	/* Do HW Reset */
+	rc = cam_tfe_reset(hw_priv, &reset_core_args, sizeof(uint32_t));
+	if (rc) {
+		CAM_ERR(CAM_ISP, "TFE:%d Reset Failed rc=%d",
+			core_info->core_index, rc);
+		goto disable_soc;
+	}
+
+	top_priv->hw_clk_rate = 0;
+	core_info->irq_err_config_cnt = 0;
+	core_info->irq_err_config = false;
+	rc = core_info->tfe_bus->hw_ops.init(core_info->tfe_bus->bus_priv,
+		NULL, 0);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "TFE:%d Top HW init Failed rc=%d",
+			core_info->core_index, rc);
+		goto disable_soc;
+	}
+
+	return rc;
+
+disable_soc:
+	cam_tfe_disable_soc_resources(soc_info);
+	tfe_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+
+decrement_open_cnt:
+	mutex_lock(&tfe_hw->hw_mutex);
+	tfe_hw->open_count--;
+	mutex_unlock(&tfe_hw->hw_mutex);
+	return rc;
+}
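
Note: init/deinit are reference counted under hw_mutex: only the first opener powers the SOC resources and resets the core, and only the last closer tears them down. A minimal pthread-based sketch of that open-count pattern (a userspace stand-in, not the driver's types):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hw_mutex = PTHREAD_MUTEX_INITIALIZER;
static int open_count;

/* First opener powers the block; later openers only take a reference. */
static int hw_init(void)
{
	pthread_mutex_lock(&hw_mutex);
	if (++open_count > 1) {
		pthread_mutex_unlock(&hw_mutex);
		return 0; /* already initialized */
	}
	pthread_mutex_unlock(&hw_mutex);

	printf("powering up hardware\n"); /* SOC enable + reset go here */
	return 0;
}

int main(void)
{
	hw_init(); /* real power-up */
	hw_init(); /* refcount bump only */
	return 0;
}
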
+
+int cam_tfe_deinit_hw(void *hw_priv, void *deinit_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info                *tfe_hw = hw_priv;
+	struct cam_hw_soc_info            *soc_info = NULL;
+	struct cam_tfe_hw_core_info       *core_info = NULL;
+	int rc = 0;
+	uint32_t                           reset_core_args =
+					CAM_TFE_HW_RESET_HW_AND_REG;
+
+	CAM_DBG(CAM_ISP, "Enter");
+	if (!hw_priv) {
+		CAM_ERR(CAM_ISP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	soc_info = &tfe_hw->soc_info;
+	core_info = (struct cam_tfe_hw_core_info *)tfe_hw->core_info;
+
+	mutex_lock(&tfe_hw->hw_mutex);
+	if (!tfe_hw->open_count) {
+		mutex_unlock(&tfe_hw->hw_mutex);
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "TFE:%d Error Unbalanced deinit",
+			core_info->core_index);
+		return -EFAULT;
+	}
+	tfe_hw->open_count--;
+	if (tfe_hw->open_count) {
+		mutex_unlock(&tfe_hw->hw_mutex);
+		CAM_DBG(CAM_ISP, "TFE:%d open_cnt non-zero =%d",
+			core_info->core_index, tfe_hw->open_count);
+		return 0;
+	}
+	mutex_unlock(&tfe_hw->hw_mutex);
+
+	rc = core_info->tfe_bus->hw_ops.deinit(core_info->tfe_bus->bus_priv,
+		NULL, 0);
+	if (rc)
+		CAM_ERR(CAM_ISP, "TFE:%d Bus HW deinit Failed rc=%d",
+			core_info->core_index, rc);
+
+	rc = cam_tfe_reset(hw_priv, &reset_core_args, sizeof(uint32_t));
+
+	/* Turn OFF Regulators, Clocks and other SOC resources */
+	CAM_DBG(CAM_ISP, "TFE:%d Disable SOC resource", core_info->core_index);
+	rc = cam_tfe_disable_soc_resources(soc_info);
+	if (rc)
+		CAM_ERR(CAM_ISP, " TFE:%d Disable SOC failed",
+			core_info->core_index);
+
+	tfe_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+
+	CAM_DBG(CAM_ISP, "Exit");
+	return rc;
+}
+
+int cam_tfe_reserve(void *hw_priv, void *reserve_args, uint32_t arg_size)
+{
+	struct cam_tfe_hw_core_info       *core_info = NULL;
+	struct cam_hw_info                *tfe_hw  = hw_priv;
+	struct cam_tfe_acquire_args       *acquire;
+	int rc = -ENODEV;
+
+	if (!hw_priv || !reserve_args || (arg_size !=
+		sizeof(struct cam_tfe_acquire_args))) {
+		CAM_ERR(CAM_ISP, "Invalid input arguments");
+		return -EINVAL;
+	}
+	core_info = (struct cam_tfe_hw_core_info *)tfe_hw->core_info;
+	acquire = (struct cam_tfe_acquire_args   *)reserve_args;
+
+	CAM_DBG(CAM_ISP, "TFE:%d acquire res type: %d",
+		core_info->core_index, acquire->rsrc_type);
+	mutex_lock(&tfe_hw->hw_mutex);
+	if (acquire->rsrc_type == CAM_ISP_RESOURCE_TFE_IN) {
+		rc = cam_tfe_top_reserve(core_info->top_priv,
+			reserve_args, arg_size);
+	} else if (acquire->rsrc_type == CAM_ISP_RESOURCE_TFE_OUT) {
+		rc = core_info->tfe_bus->hw_ops.reserve(
+			core_info->tfe_bus->bus_priv, acquire,
+			sizeof(*acquire));
+	} else {
+		CAM_ERR(CAM_ISP, "TFE:%d Invalid res type:%d",
+			core_info->core_index, acquire->rsrc_type);
+	}
+
+	mutex_unlock(&tfe_hw->hw_mutex);
+
+	return rc;
+}
+
+int cam_tfe_release(void *hw_priv, void *release_args, uint32_t arg_size)
+{
+	struct cam_tfe_hw_core_info       *core_info = NULL;
+	struct cam_hw_info                *tfe_hw  = hw_priv;
+	struct cam_isp_resource_node      *isp_res;
+	int rc = -ENODEV;
+
+	if (!hw_priv || !release_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		CAM_ERR(CAM_ISP, "Invalid input arguments");
+		return -EINVAL;
+	}
+
+	core_info = (struct cam_tfe_hw_core_info *)tfe_hw->core_info;
+	isp_res = (struct cam_isp_resource_node      *) release_args;
+
+	mutex_lock(&tfe_hw->hw_mutex);
+	if (isp_res->res_type == CAM_ISP_RESOURCE_TFE_IN)
+		rc = cam_tfe_top_release(core_info->top_priv, isp_res,
+			sizeof(*isp_res));
+	else if (isp_res->res_type == CAM_ISP_RESOURCE_TFE_OUT) {
+		rc = core_info->tfe_bus->hw_ops.release(
+			core_info->tfe_bus->bus_priv, isp_res,
+			sizeof(*isp_res));
+	} else {
+		CAM_ERR(CAM_ISP, "TFE:%d Invalid res type:%d",
+			core_info->core_index, isp_res->res_type);
+	}
+
+	mutex_unlock(&tfe_hw->hw_mutex);
+
+	return rc;
+}
+
+int cam_tfe_start(void *hw_priv, void *start_args, uint32_t arg_size)
+{
+	struct cam_tfe_hw_core_info       *core_info = NULL;
+	struct cam_hw_info                *tfe_hw  = hw_priv;
+	struct cam_isp_resource_node      *start_res;
+
+	int rc = 0;
+
+	if (!hw_priv || !start_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		CAM_ERR(CAM_ISP, "Invalid input arguments");
+		return -EINVAL;
+	}
+
+	core_info = (struct cam_tfe_hw_core_info *)tfe_hw->core_info;
+	start_res = (struct cam_isp_resource_node  *)start_args;
+	core_info->tasklet_info = start_res->tasklet_info;
+
+	mutex_lock(&tfe_hw->hw_mutex);
+	if (start_res->res_type == CAM_ISP_RESOURCE_TFE_IN) {
+		rc = cam_tfe_top_start(core_info, start_args,
+			arg_size);
+		if (rc)
+			CAM_ERR(CAM_ISP, "TFE:%d Start failed. type:%d",
+				core_info->core_index, start_res->res_type);
+
+	} else if (start_res->res_type == CAM_ISP_RESOURCE_TFE_OUT) {
+		rc = core_info->tfe_bus->hw_ops.start(start_res, NULL, 0);
+	} else {
+		CAM_ERR(CAM_ISP, "TFE:%d Invalid res type:%d",
+			core_info->core_index, start_res->res_type);
+		rc = -EFAULT;
+	}
+
+	mutex_unlock(&tfe_hw->hw_mutex);
+
+	return rc;
+}
+
+int cam_tfe_stop(void *hw_priv, void *stop_args, uint32_t arg_size)
+{
+	struct cam_tfe_hw_core_info       *core_info = NULL;
+	struct cam_hw_info                *tfe_hw  = hw_priv;
+	struct cam_isp_resource_node      *isp_res;
+	int rc = -EINVAL;
+
+	if (!hw_priv || !stop_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		CAM_ERR(CAM_ISP, "Invalid input arguments");
+		return -EINVAL;
+	}
+
+	core_info = (struct cam_tfe_hw_core_info *)tfe_hw->core_info;
+	isp_res = (struct cam_isp_resource_node  *)stop_args;
+
+	mutex_lock(&tfe_hw->hw_mutex);
+	if (isp_res->res_type == CAM_ISP_RESOURCE_TFE_IN) {
+		rc = cam_tfe_top_stop(core_info, isp_res,
+			sizeof(struct cam_isp_resource_node));
+	} else if (isp_res->res_type == CAM_ISP_RESOURCE_TFE_OUT) {
+		rc = core_info->tfe_bus->hw_ops.stop(isp_res, NULL, 0);
+	} else {
+		CAM_ERR(CAM_ISP, "TFE:%d Invalid res type:%d",
+			core_info->core_index, isp_res->res_type);
+	}
+
+	CAM_DBG(CAM_ISP, "TFE:%d stopped res type:%d res id:%d res_state:%d ",
+		core_info->core_index, isp_res->res_type,
+		isp_res->res_id, isp_res->res_state);
+
+	mutex_unlock(&tfe_hw->hw_mutex);
+
+	return rc;
+}
+
+int cam_tfe_read(void *hw_priv, void *read_args, uint32_t arg_size)
+{
+	return -EPERM;
+}
+
+int cam_tfe_write(void *hw_priv, void *write_args, uint32_t arg_size)
+{
+	return -EPERM;
+}
+
+int cam_tfe_process_cmd(void *hw_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_hw_info                *tfe_hw = hw_priv;
+	struct cam_hw_soc_info            *soc_info = NULL;
+	struct cam_tfe_hw_core_info       *core_info = NULL;
+	struct cam_tfe_hw_info            *hw_info = NULL;
+	int rc = 0;
+
+	if (!hw_priv) {
+		CAM_ERR(CAM_ISP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	soc_info = &tfe_hw->soc_info;
+	core_info = (struct cam_tfe_hw_core_info *)tfe_hw->core_info;
+	hw_info = core_info->tfe_hw_info;
+
+	switch (cmd_type) {
+	case CAM_ISP_HW_CMD_GET_CHANGE_BASE:
+		rc = cam_tfe_top_get_base(core_info->top_priv, cmd_args,
+			arg_size);
+		break;
+	case CAM_ISP_HW_CMD_GET_REG_UPDATE:
+		rc = cam_tfe_top_get_reg_update(core_info->top_priv, cmd_args,
+			arg_size);
+		break;
+	case CAM_ISP_HW_CMD_CLOCK_UPDATE:
+		rc = cam_tfe_top_clock_update(core_info->top_priv, cmd_args,
+			arg_size);
+		break;
+	case CAM_ISP_HW_CMD_BW_UPDATE_V2:
+		rc = cam_tfe_top_bw_update(core_info->top_priv, cmd_args,
+			arg_size);
+		break;
+	case CAM_ISP_HW_CMD_BW_CONTROL:
+		rc = cam_tfe_top_bw_control(core_info->top_priv, cmd_args,
+			arg_size);
+		break;
+	case CAM_ISP_HW_CMD_GET_REG_DUMP:
+		rc = cam_tfe_top_get_reg_dump(core_info->top_priv, cmd_args,
+			arg_size);
+		break;
+	case CAM_ISP_HW_CMD_GET_IRQ_REGISTER_DUMP:
+		rc = cam_tfe_camif_irq_reg_dump(core_info, cmd_args,
+			arg_size);
+		break;
+	case CAM_ISP_HW_CMD_QUERY_REGSPACE_DATA:
+		*((struct cam_hw_soc_info **)cmd_args) = soc_info;
+		break;
+	case CAM_ISP_HW_CMD_GET_BUF_UPDATE:
+	case CAM_ISP_HW_CMD_GET_HFR_UPDATE:
+	case CAM_ISP_HW_CMD_STRIPE_UPDATE:
+	case CAM_ISP_HW_CMD_STOP_BUS_ERR_IRQ:
+	case CAM_ISP_HW_CMD_GET_SECURE_MODE:
+		rc = core_info->tfe_bus->hw_ops.process_cmd(
+			core_info->tfe_bus->bus_priv, cmd_type, cmd_args,
+			arg_size);
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "TFE:%d Invalid cmd type:%d",
+			core_info->core_index, cmd_type);
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+int cam_tfe_core_init(struct cam_tfe_hw_core_info  *core_info,
+	struct cam_hw_soc_info                     *soc_info,
+	struct cam_hw_intf                         *hw_intf,
+	struct cam_tfe_hw_info                     *tfe_hw_info)
+{
+	int rc = -EINVAL;
+	int i;
+
+	rc = cam_tfe_top_init(soc_info, hw_intf, tfe_hw_info->top_hw_info,
+		core_info);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "TFE:%d Error cam_tfe_top_init failed",
+			core_info->core_index);
+		goto end;
+	}
+
+	rc = cam_tfe_bus_init(soc_info, hw_intf,
+		tfe_hw_info->bus_hw_info, core_info,
+		&core_info->tfe_bus);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "TFE:%d Error cam_tfe_bus_init failed",
+			core_info->core_index);
+		goto deinit_top;
+	}
+
+	INIT_LIST_HEAD(&core_info->free_payload_list);
+	for (i = 0; i < CAM_TFE_EVT_MAX; i++) {
+		INIT_LIST_HEAD(&core_info->evt_payload[i].list);
+		list_add_tail(&core_info->evt_payload[i].list,
+			&core_info->free_payload_list);
+	}
+
+	core_info->irq_err_config = false;
+	core_info->irq_err_config_cnt = 0;
+	spin_lock_init(&core_info->spin_lock);
+	init_completion(&core_info->reset_complete);
+
+	return rc;
+
+deinit_top:
+	cam_tfe_top_deinit(core_info->top_priv);
+
+end:
+	return rc;
+}
+
+int cam_tfe_core_deinit(struct cam_tfe_hw_core_info  *core_info,
+	struct cam_tfe_hw_info                       *tfe_hw_info)
+{
+	int                rc = -EINVAL;
+	int                i;
+	unsigned long      flags;
+
+	spin_lock_irqsave(&core_info->spin_lock, flags);
+
+	INIT_LIST_HEAD(&core_info->free_payload_list);
+	for (i = 0; i < CAM_TFE_EVT_MAX; i++)
+		INIT_LIST_HEAD(&core_info->evt_payload[i].list);
+
+	rc = cam_tfe_bus_deinit(&core_info->tfe_bus);
+	if (rc)
+		CAM_ERR(CAM_ISP, "TFE:%d Error cam_tfe_bus_deinit failed rc=%d",
+			core_info->core_index, rc);
+
+	rc = cam_tfe_top_deinit(core_info->top_priv);
+	kfree(core_info->top_priv);
+	core_info->top_priv = NULL;
+
+	if (rc)
+		CAM_ERR(CAM_ISP, "Error cam_tfe_top_deinit failed rc=%d", rc);
+
+	spin_unlock_irqrestore(&core_info->spin_lock, flags);
+
+	return rc;
+}

+ 272 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_core.h

@@ -0,0 +1,272 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+
+#ifndef _CAM_TFE_CORE_H_
+#define _CAM_TFE_CORE_H_
+
+#include <linux/spinlock.h>
+#include "cam_hw_intf.h"
+#include "cam_tfe_bus.h"
+#include "cam_tfe_hw_intf.h"
+#include "cam_tfe_irq.h"
+
+#define CAM_TFE_CAMIF_VER_1_0        0x10
+#define CAM_TFE_RDI_VER_1_0          0x1000
+#define CAM_TFE_TOP_1_0              0x1000
+#define CAM_TFE_TOP_IN_PORT_MAX      4
+#define CAM_TFE_RDI_MAX              4
+
+#define CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS      BIT(0)
+#define CAM_TFE_EVT_MAX                            256
+
+#define CAM_TFE_MAX_REG_DUMP_ENTRIES  20
+#define CAM_TFE_MAX_LUT_DUMP_ENTRIES  10
+
+enum cam_tfe_lut_word_size {
+	CAM_TFE_LUT_WORD_SIZE_32,
+	CAM_TFE_LUT_WORD_SIZE_64,
+	CAM_TFE_LUT_WORD_SIZE_MAX,
+};
+
+struct cam_tfe_reg_dump_entry {
+	uint32_t     start_offset;
+	uint32_t     end_offset;
+};
+
+struct cam_tfe_lut_dump_entry {
+	enum cam_tfe_lut_word_size  lut_word_size;
+	uint32_t                    lut_bank_sel;
+	uint32_t                    lut_addr_size;
+	uint32_t                    dmi_reg_offset;
+};
+struct cam_tfe_reg_dump_data {
+	uint32_t     num_reg_dump_entries;
+	uint32_t     num_lut_dump_entries;
+	uint32_t     bus_start_addr;
+	uint32_t     bus_write_top_end_addr;
+	uint32_t     bus_client_start_addr;
+	uint32_t     bus_client_offset;
+	uint32_t     num_bus_clients;
+	struct cam_tfe_reg_dump_entry
+		reg_entry[CAM_TFE_MAX_REG_DUMP_ENTRIES];
+	struct cam_tfe_lut_dump_entry
+		lut_entry[CAM_TFE_MAX_LUT_DUMP_ENTRIES];
+};
+
+struct cam_tfe_top_reg_offset_common {
+	uint32_t hw_version;
+	uint32_t hw_capability;
+	uint32_t lens_feature;
+	uint32_t stats_feature;
+	uint32_t zoom_feature;
+	uint32_t global_reset_cmd;
+	uint32_t core_cgc_ctrl;
+	uint32_t ahb_cgc_ctrl;
+	uint32_t core_cfg_0;
+	uint32_t core_cfg_1;
+	uint32_t reg_update_cmd;
+	uint32_t diag_config;
+	uint32_t diag_sensor_status_0;
+	uint32_t diag_sensor_status_1;
+	uint32_t diag_sensor_frame_cnt_status;
+	uint32_t violation_status;
+	uint32_t stats_throttle_cnt_cfg_0;
+	uint32_t stats_throttle_cnt_cfg_1;
+	uint32_t debug_0;
+	uint32_t debug_1;
+	uint32_t debug_2;
+	uint32_t debug_3;
+	uint32_t debug_cfg;
+	uint32_t perf_cnt_cfg;
+	uint32_t perf_pixel_count;
+	uint32_t perf_line_count;
+	uint32_t perf_stall_count;
+	uint32_t perf_always_count;
+	uint32_t perf_count_status;
+};
+
+struct cam_tfe_camif_reg {
+	uint32_t     hw_version;
+	uint32_t     hw_status;
+	uint32_t     module_cfg;
+	uint32_t     pdaf_raw_crop_width_cfg;
+	uint32_t     pdaf_raw_crop_height_cfg;
+	uint32_t     line_skip_pattern;
+	uint32_t     pixel_skip_pattern;
+	uint32_t     period_cfg;
+	uint32_t     irq_subsample_pattern;
+	uint32_t     epoch_irq_cfg;
+	uint32_t     debug_1;
+	uint32_t     debug_0;
+	uint32_t     test_bus_ctrl;
+	uint32_t     spare;
+	uint32_t     reg_update_cmd;
+};
+
+struct cam_tfe_camif_reg_data {
+	uint32_t     extern_reg_update_mask;
+	uint32_t     dual_tfe_pix_en_shift;
+	uint32_t     extern_reg_update_shift;
+	uint32_t     camif_pd_rdi2_src_sel_shift;
+	uint32_t     dual_tfe_sync_sel_shift;
+
+	uint32_t     pixel_pattern_shift;
+	uint32_t     pixel_pattern_mask;
+	uint32_t     module_enable_shift;
+	uint32_t     pix_out_enable_shift;
+	uint32_t     pdaf_output_enable_shift;
+
+	uint32_t     dsp_mode_shift;
+	uint32_t     dsp_mode_mask;
+	uint32_t     dsp_en_shift;
+	uint32_t     dsp_en_mask;
+
+	uint32_t     reg_update_cmd_data;
+	uint32_t     epoch_line_cfg;
+	uint32_t     sof_irq_mask;
+	uint32_t     epoch0_irq_mask;
+	uint32_t     epoch1_irq_mask;
+	uint32_t     eof_irq_mask;
+	uint32_t     reg_update_irq_mask;
+	uint32_t     error_irq_mask0;
+	uint32_t     error_irq_mask2;
+	uint32_t     subscribe_irq_mask[CAM_TFE_TOP_IRQ_REG_NUM];
+
+	uint32_t     enable_diagnostic_hw;
+	uint32_t     perf_cnt_start_cmd_shift;
+	uint32_t     perf_cnt_continuous_shift;
+	uint32_t     perf_client_sel_shift;
+	uint32_t     perf_window_start_shift;
+	uint32_t     perf_window_end_shift;
+};
+
+struct cam_tfe_camif_hw_info {
+	struct cam_tfe_camif_reg      *camif_reg;
+	struct cam_tfe_camif_reg_data *reg_data;
+};
+
+struct cam_tfe_rdi_reg {
+	uint32_t     rdi_hw_version;
+	uint32_t     rdi_hw_status;
+	uint32_t     rdi_module_config;
+	uint32_t     rdi_skip_period;
+	uint32_t     rdi_irq_subsample_pattern;
+	uint32_t     rdi_epoch_irq;
+	uint32_t     rdi_debug_1;
+	uint32_t     rdi_debug_0;
+	uint32_t     rdi_test_bus_ctrl;
+	uint32_t     rdi_spare;
+	uint32_t     reg_update_cmd;
+};
+
+struct cam_tfe_rdi_reg_data {
+	uint32_t     reg_update_cmd_data;
+	uint32_t     epoch_line_cfg;
+
+	uint32_t     pixel_pattern_shift;
+	uint32_t     pixel_pattern_mask;
+	uint32_t     rdi_out_enable_shift;
+
+	uint32_t     sof_irq_mask;
+	uint32_t     epoch0_irq_mask;
+	uint32_t     epoch1_irq_mask;
+	uint32_t     eof_irq_mask;
+	uint32_t     error_irq_mask0;
+	uint32_t     error_irq_mask2;
+	uint32_t     subscribe_irq_mask[CAM_TFE_TOP_IRQ_REG_NUM];
+	uint32_t     enable_diagnostic_hw;
+};
+
+struct cam_tfe_rdi_hw_info {
+	struct cam_tfe_rdi_reg              *rdi_reg;
+	struct cam_tfe_rdi_reg_data         *reg_data;
+};
+
+struct cam_tfe_top_hw_info {
+	struct cam_tfe_top_reg_offset_common  *common_reg;
+	struct cam_tfe_camif_hw_info           camif_hw_info;
+	struct cam_tfe_rdi_hw_info             rdi_hw_info[CAM_TFE_RDI_MAX];
+	uint32_t in_port[CAM_TFE_TOP_IN_PORT_MAX];
+	struct cam_tfe_reg_dump_data           reg_dump_data;
+};
+
+struct cam_tfe_hw_info {
+	uint32_t                 top_irq_mask[CAM_TFE_TOP_IRQ_REG_NUM];
+	uint32_t                 top_irq_clear[CAM_TFE_TOP_IRQ_REG_NUM];
+	uint32_t                 top_irq_status[CAM_TFE_TOP_IRQ_REG_NUM];
+	uint32_t                 top_irq_cmd;
+	uint32_t                 global_clear_bitmask;
+
+	uint32_t                 bus_irq_mask[CAM_TFE_BUS_MAX_IRQ_REGISTERS];
+	uint32_t                 bus_irq_clear[CAM_TFE_BUS_MAX_IRQ_REGISTERS];
+	uint32_t                 bus_irq_status[CAM_TFE_BUS_MAX_IRQ_REGISTERS];
+	uint32_t                 bus_irq_cmd;
+
+	uint32_t                 bus_violation_reg;
+	uint32_t                 bus_overflow_reg;
+	uint32_t                 bus_image_size_vilation_reg;
+	uint32_t                 bus_overflow_clear_cmd;
+	uint32_t                 debug_status_top;
+
+	uint32_t                 reset_irq_mask[CAM_TFE_TOP_IRQ_REG_NUM];
+	uint32_t                 error_irq_mask[CAM_TFE_TOP_IRQ_REG_NUM];
+	uint32_t                 bus_reg_irq_mask[CAM_TFE_TOP_IRQ_REG_NUM];
+
+	uint32_t                 top_version;
+	void                    *top_hw_info;
+
+	uint32_t                 bus_version;
+	void                    *bus_hw_info;
+};
+
+struct cam_tfe_hw_core_info {
+	uint32_t                            core_index;
+	struct cam_tfe_hw_info             *tfe_hw_info;
+	void                               *top_priv;
+	struct cam_tfe_bus                 *tfe_bus;
+	void                               *tasklet_info;
+	struct cam_tfe_irq_evt_payload      evt_payload[CAM_TFE_EVT_MAX];
+	struct list_head                    free_payload_list;
+	bool                                irq_err_config;
+	uint32_t                            irq_err_config_cnt;
+	spinlock_t                          spin_lock;
+	struct completion                   reset_complete;
+};
+
+int cam_tfe_get_hw_caps(void *device_priv,
+	void *get_hw_cap_args, uint32_t arg_size);
+int cam_tfe_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_tfe_deinit_hw(void *hw_priv,
+	void *deinit_hw_args, uint32_t arg_size);
+int cam_tfe_reset(void *device_priv,
+	void *reset_core_args, uint32_t arg_size);
+int cam_tfe_reserve(void *device_priv,
+	void *reserve_args, uint32_t arg_size);
+int cam_tfe_release(void *device_priv,
+	void *reserve_args, uint32_t arg_size);
+int cam_tfe_start(void *device_priv,
+	void *start_args, uint32_t arg_size);
+int cam_tfe_stop(void *device_priv,
+	void *stop_args, uint32_t arg_size);
+int cam_tfe_read(void *device_priv,
+	void *read_args, uint32_t arg_size);
+int cam_tfe_write(void *device_priv,
+	void *write_args, uint32_t arg_size);
+int cam_tfe_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size);
+
+irqreturn_t cam_tfe_irq(int irq_num, void *data);
+
+int cam_tfe_core_init(struct cam_tfe_hw_core_info *core_info,
+	struct cam_hw_soc_info             *soc_info,
+	struct cam_hw_intf                 *hw_intf,
+	struct cam_tfe_hw_info             *tfe_hw_info);
+
+int cam_tfe_core_deinit(struct cam_tfe_hw_core_info *core_info,
+	struct cam_tfe_hw_info             *tfe_hw_info);
+
+#endif /* _CAM_TFE_CORE_H_ */
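
Note: the core keeps CAM_TFE_EVT_MAX preallocated IRQ event payloads chained on free_payload_list, so the top half can hand a payload to the tasklet without allocating in interrupt context. A simplified singly-linked analogue of that pool (the real driver uses list_head under core_info->spin_lock):

#include <stdio.h>

#define EVT_MAX 4 /* the driver preallocates CAM_TFE_EVT_MAX payloads */

struct evt_payload {
	int evt_id;
	struct evt_payload *next;
};

static struct evt_payload pool[EVT_MAX];
static struct evt_payload *free_list;

static void payload_put(struct evt_payload *p)
{
	p->next = free_list;
	free_list = p;
}

static struct evt_payload *payload_get(void)
{
	struct evt_payload *p = free_list;

	if (p)
		free_list = p->next;
	return p; /* NULL: the IRQ handler must drop the event */
}

int main(void)
{
	struct evt_payload *p;
	int i;

	for (i = 0; i < EVT_MAX; i++)
		payload_put(&pool[i]); /* mirrors the init-time list fill */

	p = payload_get();
	printf("got payload %p from the pool\n", (void *)p);
	payload_put(p); /* bottom half returns it when done */
	return 0;
}
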

+ 197 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_dev.c

@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include "cam_tfe_dev.h"
+#include "cam_tfe_core.h"
+#include "cam_tfe_soc.h"
+#include "cam_debug_util.h"
+
+static struct cam_hw_intf *cam_tfe_hw_list[CAM_TFE_HW_NUM_MAX] = {0, 0, 0};
+
+static char tfe_dev_name[8];
+
+int cam_tfe_probe(struct platform_device *pdev)
+{
+	struct cam_hw_info                *tfe_hw = NULL;
+	struct cam_hw_intf                *tfe_hw_intf = NULL;
+	const struct of_device_id         *match_dev = NULL;
+	struct cam_tfe_hw_core_info       *core_info = NULL;
+	struct cam_tfe_hw_info            *hw_info = NULL;
+	int                                rc = 0;
+
+	tfe_hw_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!tfe_hw_intf) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	of_property_read_u32(pdev->dev.of_node,
+		"cell-index", &tfe_hw_intf->hw_idx);
+
+	tfe_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!tfe_hw) {
+		rc = -ENOMEM;
+		goto free_tfe_hw_intf;
+	}
+
+	memset(tfe_dev_name, 0, sizeof(tfe_dev_name));
+	snprintf(tfe_dev_name, sizeof(tfe_dev_name),
+		"tfe%1u", tfe_hw_intf->hw_idx);
+
+	tfe_hw->soc_info.pdev = pdev;
+	tfe_hw->soc_info.dev = &pdev->dev;
+	tfe_hw->soc_info.dev_name = tfe_dev_name;
+	tfe_hw_intf->hw_priv = tfe_hw;
+	tfe_hw_intf->hw_ops.get_hw_caps = cam_tfe_get_hw_caps;
+	tfe_hw_intf->hw_ops.init = cam_tfe_init_hw;
+	tfe_hw_intf->hw_ops.deinit = cam_tfe_deinit_hw;
+	tfe_hw_intf->hw_ops.reset = cam_tfe_reset;
+	tfe_hw_intf->hw_ops.reserve = cam_tfe_reserve;
+	tfe_hw_intf->hw_ops.release = cam_tfe_release;
+	tfe_hw_intf->hw_ops.start = cam_tfe_start;
+	tfe_hw_intf->hw_ops.stop = cam_tfe_stop;
+	tfe_hw_intf->hw_ops.read = cam_tfe_read;
+	tfe_hw_intf->hw_ops.write = cam_tfe_write;
+	tfe_hw_intf->hw_ops.process_cmd = cam_tfe_process_cmd;
+	tfe_hw_intf->hw_type = CAM_ISP_HW_TYPE_TFE;
+
+	CAM_DBG(CAM_ISP, "type %d index %d",
+		tfe_hw_intf->hw_type, tfe_hw_intf->hw_idx);
+
+	platform_set_drvdata(pdev, tfe_hw_intf);
+
+	tfe_hw->core_info = kzalloc(sizeof(struct cam_tfe_hw_core_info),
+		GFP_KERNEL);
+	if (!tfe_hw->core_info) {
+		CAM_DBG(CAM_ISP, "Failed to alloc for core");
+		rc = -ENOMEM;
+		goto free_tfe_hw;
+	}
+	core_info = (struct cam_tfe_hw_core_info *)tfe_hw->core_info;
+
+	match_dev = of_match_device(pdev->dev.driver->of_match_table,
+		&pdev->dev);
+	if (!match_dev) {
+		CAM_ERR(CAM_ISP, "Of_match Failed");
+		rc = -EINVAL;
+		goto free_core_info;
+	}
+	hw_info = (struct cam_tfe_hw_info *)match_dev->data;
+	core_info->tfe_hw_info = hw_info;
+	core_info->core_index = tfe_hw_intf->hw_idx;
+
+	rc = cam_tfe_init_soc_resources(&tfe_hw->soc_info, cam_tfe_irq,
+		tfe_hw);
+	if (rc < 0) {
+		CAM_ERR(CAM_ISP, "Failed to init soc rc=%d", rc);
+		goto free_core_info;
+	}
+
+	rc = cam_tfe_core_init(core_info, &tfe_hw->soc_info,
+		tfe_hw_intf, hw_info);
+	if (rc < 0) {
+		CAM_ERR(CAM_ISP, "Failed to init core rc=%d", rc);
+		goto deinit_soc;
+	}
+
+	tfe_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	mutex_init(&tfe_hw->hw_mutex);
+	spin_lock_init(&tfe_hw->hw_lock);
+	init_completion(&tfe_hw->hw_complete);
+
+	if (tfe_hw_intf->hw_idx < CAM_TFE_HW_NUM_MAX)
+		cam_tfe_hw_list[tfe_hw_intf->hw_idx] = tfe_hw_intf;
+
+	cam_tfe_init_hw(tfe_hw, NULL, 0);
+	cam_tfe_deinit_hw(tfe_hw, NULL, 0);
+
+	CAM_DBG(CAM_ISP, "TFE%d probe successful", tfe_hw_intf->hw_idx);
+
+	return rc;
+
+deinit_soc:
+	if (cam_tfe_deinit_soc_resources(&tfe_hw->soc_info))
+		CAM_ERR(CAM_ISP, "Failed to deinit soc");
+free_core_info:
+	kfree(tfe_hw->core_info);
+free_tfe_hw:
+	kfree(tfe_hw);
+free_tfe_hw_intf:
+	kfree(tfe_hw_intf);
+end:
+	return rc;
+}
+
+int cam_tfe_remove(struct platform_device *pdev)
+{
+	struct cam_hw_info                *tfe_hw = NULL;
+	struct cam_hw_intf                *tfe_hw_intf = NULL;
+	struct cam_tfe_hw_core_info       *core_info = NULL;
+	int                                rc = 0;
+
+	tfe_hw_intf = platform_get_drvdata(pdev);
+	if (!tfe_hw_intf) {
+		CAM_ERR(CAM_ISP, "Error! No data in pdev");
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ISP, "type %d index %d",
+		tfe_hw_intf->hw_type, tfe_hw_intf->hw_idx);
+
+	if (tfe_hw_intf->hw_idx < CAM_TFE_HW_NUM_MAX)
+		cam_tfe_hw_list[tfe_hw_intf->hw_idx] = NULL;
+
+	tfe_hw = tfe_hw_intf->hw_priv;
+	if (!tfe_hw) {
+		CAM_ERR(CAM_ISP, "Error! HW data is NULL");
+		rc = -ENODEV;
+		goto free_tfe_hw_intf;
+	}
+
+	core_info = (struct cam_tfe_hw_core_info *)tfe_hw->core_info;
+	if (!core_info) {
+		CAM_ERR(CAM_ISP, "Error! core data NULL");
+		rc = -EINVAL;
+		goto deinit_soc;
+	}
+
+	rc = cam_tfe_core_deinit(core_info, core_info->tfe_hw_info);
+	if (rc < 0)
+		CAM_ERR(CAM_ISP, "Failed to deinit core rc=%d", rc);
+
+	kfree(tfe_hw->core_info);
+
+deinit_soc:
+	rc = cam_tfe_deinit_soc_resources(&tfe_hw->soc_info);
+	if (rc < 0)
+		CAM_ERR(CAM_ISP, "Failed to deinit soc rc=%d", rc);
+
+	mutex_destroy(&tfe_hw->hw_mutex);
+	kfree(tfe_hw);
+
+	CAM_DBG(CAM_ISP, "TFE%d remove successful", tfe_hw_intf->hw_idx);
+
+free_tfe_hw_intf:
+	kfree(tfe_hw_intf);
+
+	return rc;
+}
+
+int cam_tfe_hw_init(struct cam_hw_intf **tfe_hw, uint32_t hw_idx)
+{
+	int rc = 0;
+
+	if (hw_idx < CAM_TFE_HW_NUM_MAX && cam_tfe_hw_list[hw_idx]) {
+		*tfe_hw = cam_tfe_hw_list[hw_idx];
+		rc = 0;
+	} else {
+		*tfe_hw = NULL;
+		rc = -ENODEV;
+	}
+	return rc;
+}
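
Note: cam_tfe_probe records each instance in cam_tfe_hw_list by its cell-index, and cam_tfe_hw_init is the lookup the hw manager uses to fetch a probed interface. A standalone sketch of that register-then-lookup flow (types and the error value are simplified stand-ins):

#include <stdio.h>

#define HW_NUM_MAX 3

struct hw_intf { unsigned int idx; };

static struct hw_intf *hw_list[HW_NUM_MAX];

/* Same contract as cam_tfe_hw_init(): fetch a probed instance by index. */
static int hw_lookup(struct hw_intf **out, unsigned int idx)
{
	if (idx >= HW_NUM_MAX || !hw_list[idx]) {
		*out = NULL;
		return -1; /* -ENODEV in the driver */
	}
	*out = hw_list[idx];
	return 0;
}

int main(void)
{
	static struct hw_intf tfe0 = { 0 };
	struct hw_intf *intf;

	hw_list[0] = &tfe0; /* probe() registers each instance like this */
	printf("tfe0 lookup: %d, tfe2 lookup: %d\n",
	       hw_lookup(&intf, 0), hw_lookup(&intf, 2));
	return 0;
}
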

+ 38 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_dev.h

@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_TFE_DEV_H_
+#define _CAM_TFE_DEV_H_
+
+#include <linux/platform_device.h>
+
+/*
+ * cam_tfe_probe()
+ *
+ * @brief:                   Driver probe function called on Boot
+ *
+ * @pdev:                    Platform Device pointer
+ *
+ * @Return:                  0: Success
+ *                           Non-zero: Failure
+ */
+int cam_tfe_probe(struct platform_device *pdev);
+
+/*
+ * cam_tfe_remove()
+ *
+ * @brief:                   Driver remove function
+ *
+ * @pdev:                    Platform Device pointer
+ *
+ * @Return:                  0: Success
+ *                           Non-zero: Failure
+ */
+int cam_tfe_remove(struct platform_device *pdev);
+
+int cam_tfe_init_module(void);
+void cam_tfe_exit_module(void);
+
+#endif /* _CAM_TFE_DEV_H_ */

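cam_tfe_init_module()/cam_tfe_exit_module() are declared above without further documentation. A minimal sketch of what this pair typically does, mirroring cam_top_tpg_v1.c later in this patch; the cam_tfe_driver platform driver object name is an assumption:

	/* Sketch: register/unregister the TFE platform driver
	 * (driver object name assumed, not taken from this patch)
	 */
	int cam_tfe_init_module(void)
	{
		return platform_driver_register(&cam_tfe_driver);
	}

	void cam_tfe_exit_module(void)
	{
		platform_driver_unregister(&cam_tfe_driver);
	}
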
+ 31 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_irq.h

@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_TFE_IRQ_H_
+#define _CAM_TFE_IRQ_H_
+
+#include <linux/platform_device.h>
+
+#define CAM_TFE_TOP_IRQ_REG_NUM 3
+
+/*
+ * cam_tfe_irq_config()
+ *
+ * @brief:                   TFE hw irq configuration
+ *
+ * @tfe_core_data:           tfe core pointer
+ * @irq_mask:                Irq mask to enable or disable
+ * @num_reg:                 Number of irq mask registers
+ * @enable:                  enable = 1, enable the given irq mask interrupts
+ *                           enable = 0, disable the given irq mask interrupts
+ *
+ * @Return:                  0: Success
+ *                           Non-zero: Failure
+ */
+int cam_tfe_irq_config(void     *tfe_core_data,
+	uint32_t  *irq_mask, uint32_t num_reg, bool enable);
+
+#endif /* _CAM_TFE_IRQ_H_ */

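A hedged usage sketch: enabling a set of top-level TFE interrupts through cam_tfe_irq_config(). The mask bit values here are placeholders; the real bit definitions live in the TFE register layout header (cam_tfe530.h in this patch):

	static int example_enable_top_irqs(void *tfe_core_data)
	{
		uint32_t irq_mask[CAM_TFE_TOP_IRQ_REG_NUM] = {0};

		/* placeholder bits, e.g. SOF/EOF on irq register 0 */
		irq_mask[0] = 0x3;

		/* enable = true turns the masked interrupts on */
		return cam_tfe_irq_config(tfe_core_data, irq_mask,
			CAM_TFE_TOP_IRQ_REG_NUM, true);
	}
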
+ 240 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_soc.c

@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include "cam_cpas_api.h"
+#include "cam_tfe_soc.h"
+#include "cam_debug_util.h"
+
+static bool cam_tfe_cpas_cb(uint32_t client_handle, void *userdata,
+	struct cam_cpas_irq_data *irq_data)
+{
+	bool error_handled = false;
+
+	if (!irq_data)
+		return error_handled;
+
+	CAM_DBG(CAM_ISP, "CPSS error type=%d ",
+		irq_data->irq_type);
+
+	return error_handled;
+}
+
+int cam_tfe_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t tfe_irq_handler, void *irq_data)
+{
+	int                               rc = 0;
+	struct cam_tfe_soc_private       *soc_private;
+	struct cam_cpas_register_params   cpas_register_param;
+
+	soc_private = kzalloc(sizeof(struct cam_tfe_soc_private),
+		GFP_KERNEL);
+	if (!soc_private) {
+		CAM_DBG(CAM_ISP, "Error! soc_private Alloc Failed");
+		return -ENOMEM;
+	}
+	soc_info->soc_private = soc_private;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Error! get DT properties failed rc=%d", rc);
+		goto free_soc_private;
+	}
+
+	rc = cam_soc_util_get_option_clk_by_name(soc_info,
+		CAM_TFE_DSP_CLK_NAME, &soc_private->dsp_clk,
+		&soc_private->dsp_clk_index, &soc_private->dsp_clk_rate);
+	if (rc)
+		CAM_WARN(CAM_ISP, "Option clk get failed with rc %d", rc);
+
+	rc = cam_soc_util_request_platform_resource(soc_info, tfe_irq_handler,
+		irq_data);
+
+	if (rc < 0) {
+		CAM_ERR(CAM_ISP,
+			"Error! Request platform resources failed rc=%d", rc);
+		goto free_soc_private;
+	}
+
+	memset(&cpas_register_param, 0, sizeof(cpas_register_param));
+	strlcpy(cpas_register_param.identifier, "tfe",
+		CAM_HW_IDENTIFIER_LENGTH);
+	cpas_register_param.cell_index = soc_info->index;
+	cpas_register_param.dev = soc_info->dev;
+	cpas_register_param.cam_cpas_client_cb = cam_tfe_cpas_cb;
+	cpas_register_param.userdata = soc_info;
+	rc = cam_cpas_register_client(&cpas_register_param);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "CPAS registration failed rc=%d", rc);
+		goto release_soc;
+	}
+
+	soc_private->cpas_handle = cpas_register_param.client_handle;
+
+	return rc;
+
+release_soc:
+	cam_soc_util_release_platform_resource(soc_info);
+free_soc_private:
+	kfree(soc_private);
+
+	return rc;
+}
+
+int cam_tfe_deinit_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int                               rc = 0;
+	struct cam_tfe_soc_private       *soc_private;
+
+	if (!soc_info) {
+		CAM_ERR(CAM_ISP, "Error! soc_info NULL");
+		return -ENODEV;
+	}
+
+	soc_private = soc_info->soc_private;
+	if (!soc_private) {
+		CAM_ERR(CAM_ISP, "Error! soc_private NULL");
+		return -ENODEV;
+	}
+	rc = cam_cpas_unregister_client(soc_private->cpas_handle);
+	if (rc)
+		CAM_ERR(CAM_ISP, "CPAS0 unregistration failed rc=%d", rc);
+
+	rc = cam_soc_util_release_platform_resource(soc_info);
+	if (rc)
+		CAM_ERR(CAM_ISP,
+			"Error! Release platform resource failed rc=%d", rc);
+
+	rc = cam_soc_util_clk_put(&soc_private->dsp_clk);
+	if (rc < 0)
+		CAM_ERR(CAM_ISP,
+			"Error Put dsp clk failed rc=%d", rc);
+
+	kfree(soc_private);
+
+	return rc;
+}
+
+int cam_tfe_enable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int                               rc = 0;
+	struct cam_tfe_soc_private       *soc_private;
+	struct cam_ahb_vote               ahb_vote;
+	struct cam_axi_vote               axi_vote = {0};
+
+	if (!soc_info) {
+		CAM_ERR(CAM_ISP, "Error! Invalid params");
+		rc = -EINVAL;
+		goto end;
+	}
+	soc_private = soc_info->soc_private;
+
+	ahb_vote.type       = CAM_VOTE_ABSOLUTE;
+	ahb_vote.vote.level = CAM_SVS_VOTE;
+	axi_vote.num_paths = 1;
+	axi_vote.axi_path[0].path_data_type = CAM_AXI_PATH_DATA_IFE_VID;
+	axi_vote.axi_path[0].transac_type = CAM_AXI_TRANSACTION_WRITE;
+	axi_vote.axi_path[0].camnoc_bw = 10640000000L;
+	axi_vote.axi_path[0].mnoc_ab_bw = 10640000000L;
+	axi_vote.axi_path[0].mnoc_ib_bw = 10640000000L;
+
+	rc = cam_cpas_start(soc_private->cpas_handle, &ahb_vote, &axi_vote);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Error! CPAS0 start failed rc=%d", rc);
+		rc = -EFAULT;
+		goto end;
+	}
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		CAM_TURBO_VOTE, true);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Error! enable platform failed rc=%d", rc);
+		goto stop_cpas;
+	}
+
+	return rc;
+
+stop_cpas:
+	cam_cpas_stop(soc_private->cpas_handle);
+end:
+	return rc;
+}
+
+int cam_tfe_soc_enable_clk(struct cam_hw_soc_info *soc_info,
+	const char *clk_name)
+{
+	int  rc = 0;
+	struct cam_tfe_soc_private       *soc_private;
+
+	if (!soc_info) {
+		CAM_ERR(CAM_ISP, "Error Invalid params");
+		rc = -EINVAL;
+		return rc;
+	}
+	soc_private = soc_info->soc_private;
+
+	if (strcmp(clk_name, CAM_TFE_DSP_CLK_NAME) == 0) {
+		rc = cam_soc_util_clk_enable(soc_private->dsp_clk,
+			CAM_TFE_DSP_CLK_NAME, soc_private->dsp_clk_rate);
+		if (rc)
+			CAM_ERR(CAM_ISP,
+				"Error enable dsp clk failed rc=%d", rc);
+	}
+
+	return rc;
+}
+
+int cam_tfe_soc_disable_clk(struct cam_hw_soc_info *soc_info,
+	const char *clk_name)
+{
+	int  rc = 0;
+	struct cam_tfe_soc_private       *soc_private;
+
+	if (!soc_info) {
+		CAM_ERR(CAM_ISP, "Error Invalid params");
+		rc = -EINVAL;
+		return rc;
+	}
+	soc_private = soc_info->soc_private;
+
+	if (strcmp(clk_name, CAM_TFE_DSP_CLK_NAME) == 0) {
+		rc = cam_soc_util_clk_disable(soc_private->dsp_clk,
+			CAM_TFE_DSP_CLK_NAME);
+		if (rc)
+			CAM_ERR(CAM_ISP,
+				"Error disable dsp clk failed rc=%d", rc);
+	}
+
+	return rc;
+}
+
+int cam_tfe_disable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+	struct cam_tfe_soc_private       *soc_private;
+
+	if (!soc_info) {
+		CAM_ERR(CAM_ISP, "Error! Invalid params");
+		rc = -EINVAL;
+		return rc;
+	}
+	soc_private = soc_info->soc_private;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Disable platform failed rc=%d", rc);
+		return rc;
+	}
+
+	rc = cam_cpas_stop(soc_private->cpas_handle);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Error! CPAS stop failed rc=%d", rc);
+		return rc;
+	}
+
+	return rc;
+}

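A sketch of the expected pairing of the enable/disable helpers above around hardware use; the surrounding init/deinit context is assumed and error handling is abbreviated. The hw_state constants are the ones this patch already uses:

	static int example_power_cycle(struct cam_hw_info *tfe_hw)
	{
		int rc;

		rc = cam_tfe_enable_soc_resources(&tfe_hw->soc_info);
		if (rc)
			return rc;
		tfe_hw->hw_state = CAM_HW_STATE_POWER_UP;

		/* ... program and run the TFE ... */

		rc = cam_tfe_disable_soc_resources(&tfe_hw->soc_info);
		if (!rc)
			tfe_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
		return rc;
	}
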
+ 117 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_soc.h

@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_TFE_SOC_H_
+#define _CAM_TFE_SOC_H_
+
+#include "cam_soc_util.h"
+#include "cam_isp_hw.h"
+
+#define CAM_TFE_DSP_CLK_NAME "tfe_dsp_clk"
+
+enum cam_cpas_handle_id {
+	CAM_CPAS_HANDLE_CAMIF,
+	CAM_CPAS_HANDLE_RAW,
+	CAM_CPAS_HANDLE_MAX,
+};
+
+/*
+ * struct cam_tfe_soc_private:
+ *
+ * @Brief:                   Private SOC data specific to TFE HW Driver
+ *
+ * @cpas_handle:             Handle returned on registering with CPAS driver.
+ *                           This handle is used for all further interface
+ *                           with CPAS.
+ * @cpas_version:            CPAS version read from hardware
+ * @dsp_clk:                 Optional DSP clock handle
+ * @dsp_clk_index:           Index of the DSP clock in soc_info
+ * @dsp_clk_rate:            DSP clock rate
+ */
+struct cam_tfe_soc_private {
+	uint32_t    cpas_handle;
+	uint32_t    cpas_version;
+	struct clk *dsp_clk;
+	int32_t     dsp_clk_index;
+	int32_t     dsp_clk_rate;
+};
+
+/*
+ * cam_tfe_init_soc_resources()
+ *
+ * @Brief:                   Initialize SOC resources including private data
+ *
+ * @soc_info:                Device soc information
+ * @handler:                 Irq handler function pointer
+ * @irq_data:                Irq handler function Callback data
+ *
+ * @Return:                  0: Success
+ *                           Non-zero: Failure
+ */
+int cam_tfe_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t tfe_irq_handler, void *irq_data);
+
+/*
+ * cam_tfe_deinit_soc_resources()
+ *
+ * @Brief:                   Deinitialize SOC resources including private data
+ *
+ * @soc_info:                Device soc information
+ *
+ * @Return:                  0: Success
+ *                           Non-zero: Failure
+ */
+int cam_tfe_deinit_soc_resources(struct cam_hw_soc_info *soc_info);
+
+/*
+ * cam_tfe_enable_soc_resources()
+ *
+ * @brief:                   Enable regulator, irq resources, start CPAS
+ *
+ * @soc_info:                Device soc information
+ *
+ * @Return:                  0: Success
+ *                           Non-zero: Failure
+ */
+int cam_tfe_enable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+/*
+ * cam_tfe_disable_soc_resources()
+ *
+ * @brief:                   Disable regulator, irq resources, stop CPAS
+ *
+ * @soc_info:                Device soc information
+ *
+ * @Return:                  0: Success
+ *                           Non-zero: Failure
+ */
+int cam_tfe_disable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+/*
+ * cam_tfe_soc_enable_clk()
+ *
+ * @brief:                   Enable clock with given name
+ *
+ * @soc_info:                Device soc information
+ * @clk_name:                Name of clock to enable
+ *
+ * @Return:                  0: Success
+ *                           Non-zero: Failure
+ */
+int cam_tfe_soc_enable_clk(struct cam_hw_soc_info *soc_info,
+	const char *clk_name);
+
+/*
+ * cam_tfe_soc_disable_clk()
+ *
+ * @brief:                   Disable clock with given name
+ *
+ * @soc_info:                Device soc information
+ * @clk_name:                Name of clock to disable
+ *
+ * @Return:                  0: Success
+ *                           Non-zero: Failure
+ */
+int cam_tfe_soc_disable_clk(struct cam_hw_soc_info *soc_info,
+	const char *clk_name);
+
+#endif /* _CAM_TFE_SOC_H_ */

+ 15 - 0
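A short usage sketch of the optional DSP clock interface declared above; CAM_TFE_DSP_CLK_NAME is the only clock name the current implementation matches, and the wrapper name is illustrative:

	static int example_dsp_clk_pulse(struct cam_hw_soc_info *soc_info)
	{
		int rc;

		rc = cam_tfe_soc_enable_clk(soc_info, CAM_TFE_DSP_CLK_NAME);
		if (rc)
			return rc;

		/* ... DSP path in use ... */

		return cam_tfe_soc_disable_clk(soc_info, CAM_TFE_DSP_CLK_NAME);
	}
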
drivers/cam_isp/isp_hw_mgr/isp_hw/top_tpg/Makefile

@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_utils
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_core
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_cdm/
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_cpas/include
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_isp/isp_hw_mgr/include
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_isp/isp_hw_mgr/hw_utils/include
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_isp/isp_hw_mgr/hw_utils/irq_controller
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_isp/isp_hw_mgr/isp_hw/include
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_smmu/
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_req_mgr/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_top_tpg_dev.o cam_top_tpg_soc.o cam_top_tpg_core.o
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_top_tpg_v1.o

+ 671 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/top_tpg/cam_top_tpg_core.c

@@ -0,0 +1,671 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/iopoll.h>
+#include <linux/slab.h>
+#include <media/cam_tfe.h>
+#include <media/cam_defs.h>
+
+#include "cam_top_tpg_core.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+#include "cam_debug_util.h"
+#include "cam_cpas_api.h"
+
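+/* Map (num_active_dts - 1) to the num_dts field value in TPG_VC_CFG0 */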
+static uint32_t tpg_num_dt_map[CAM_TOP_TPG_MAX_SUPPORTED_DT] = {
+	0,
+	3,
+	1,
+	2
+};
+
+static int cam_top_tpg_get_format(uint32_t in_format,
+	uint32_t *tpg_encode_format)
+{
+	int rc = 0;
+
+	switch (in_format) {
+	case CAM_FORMAT_MIPI_RAW_6:
+		*tpg_encode_format = 0;
+		break;
+	case CAM_FORMAT_MIPI_RAW_8:
+		*tpg_encode_format = 1;
+		break;
+	case CAM_FORMAT_MIPI_RAW_10:
+		*tpg_encode_format = 2;
+		break;
+	case CAM_FORMAT_MIPI_RAW_12:
+		*tpg_encode_format = 3;
+		break;
+	case CAM_FORMAT_MIPI_RAW_14:
+		*tpg_encode_format = 4;
+		break;
+	case CAM_FORMAT_MIPI_RAW_16:
+		*tpg_encode_format = 4;
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "Unsupported input encode format %d",
+			in_format);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+static int cam_top_tpg_get_hw_caps(void *hw_priv,
+	void *get_hw_cap_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_top_tpg_hw_caps           *hw_caps;
+	struct cam_top_tpg_hw                *tpg_hw;
+	struct cam_hw_info                   *tpg_hw_info;
+
+	if (!hw_priv || !get_hw_cap_args) {
+		CAM_ERR(CAM_ISP, "TPG: Invalid args");
+		return -EINVAL;
+	}
+
+	tpg_hw_info = (struct cam_hw_info  *)hw_priv;
+	tpg_hw = (struct cam_top_tpg_hw   *)tpg_hw_info->core_info;
+	hw_caps = (struct cam_top_tpg_hw_caps *) get_hw_cap_args;
+
+	hw_caps->major_version = tpg_hw->tpg_info->tpg_reg->major_version;
+	hw_caps->minor_version = tpg_hw->tpg_info->tpg_reg->minor_version;
+	hw_caps->version_incr = tpg_hw->tpg_info->tpg_reg->version_incr;
+
+	CAM_DBG(CAM_ISP,
+		"TPG:%d major:%d minor:%d ver :%d",
+		tpg_hw->hw_intf->hw_idx, hw_caps->major_version,
+		hw_caps->minor_version, hw_caps->version_incr);
+
+	return rc;
+}
+
+static int cam_top_tpg_reserve(void *hw_priv,
+	void *reserve_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_top_tpg_hw                        *tpg_hw;
+	struct cam_hw_info                           *tpg_hw_info;
+	struct cam_top_tpg_hw_reserve_resource_args  *reserv;
+	struct cam_top_tpg_cfg                       *tpg_data;
+	uint32_t                                      encode_format = 0;
+	uint32_t i;
+
+	if (!hw_priv || !reserve_args || (arg_size !=
+		sizeof(struct cam_top_tpg_hw_reserve_resource_args))) {
+		CAM_ERR(CAM_ISP, "TPG: Invalid args");
+		return -EINVAL;
+	}
+
+	tpg_hw_info = (struct cam_hw_info *)hw_priv;
+	tpg_hw = (struct cam_top_tpg_hw *)tpg_hw_info->core_info;
+	reserv = (struct cam_top_tpg_hw_reserve_resource_args  *)reserve_args;
+
+	if (!reserv->num_inport ||
+		reserv->num_inport > CAM_TOP_TPG_MAX_SUPPORTED_DT) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "TPG: %u invalid input num port:%d",
+			tpg_hw->hw_intf->hw_idx, reserv->num_inport);
+		return -EINVAL;
+	}
+
+	mutex_lock(&tpg_hw->hw_info->hw_mutex);
+	if (tpg_hw->tpg_res.res_state != CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+		mutex_unlock(&tpg_hw->hw_info->hw_mutex);
+		return -EINVAL;
+	}
+
+	if ((reserv->in_port[0]->vc > 0xF) ||
+		(reserv->in_port[0]->lane_num <= 0 ||
+		reserv->in_port[0]->lane_num > 4) ||
+		(reserv->in_port[0]->pix_pattern > 4) ||
+		(reserv->in_port[0]->lane_type >= 2)) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "TPG:%u invalid input %d %d %d %d",
+			tpg_hw->hw_intf->hw_idx, reserv->in_port[0]->vc,
+			reserv->in_port[0]->lane_num,
+			reserv->in_port[0]->pix_pattern,
+			reserv->in_port[0]->lane_type);
+		mutex_unlock(&tpg_hw->hw_info->hw_mutex);
+		return -EINVAL;
+	}
+	rc = cam_top_tpg_get_format(reserv->in_port[0]->format,
+		&encode_format);
+	if (rc)
+		goto error;
+
+	CAM_DBG(CAM_ISP, "TPG: %u enter", tpg_hw->hw_intf->hw_idx);
+
+	tpg_data = (struct cam_top_tpg_cfg *)tpg_hw->tpg_res.res_priv;
+	tpg_data->vc_num = reserv->in_port[0]->vc;
+	tpg_data->phy_sel = reserv->in_port[0]->lane_type;
+	tpg_data->num_active_lanes = reserv->in_port[0]->lane_num;
+	tpg_data->h_blank_count = reserv->in_port[0]->sensor_hbi;
+	tpg_data->v_blank_count = reserv->in_port[0]->sensor_vbi;
+	tpg_data->pix_pattern = reserv->in_port[0]->pix_pattern;
+	tpg_data->dt_cfg[0].data_type = reserv->in_port[0]->dt;
+	tpg_data->dt_cfg[0].frame_height = reserv->in_port[0]->height;
+	if (reserv->in_port[0]->usage_type)
+		tpg_data->dt_cfg[0].frame_width =
+			((reserv->in_port[0]->right_end -
+				reserv->in_port[0]->left_start) + 1);
+	else
+		tpg_data->dt_cfg[0].frame_width =
+			reserv->in_port[0]->left_width;
+	tpg_data->dt_cfg[0].encode_format = encode_format;
+	tpg_data->num_active_dts = 1;
+
+	CAM_DBG(CAM_ISP,
+		"TPG:%u vc_num:%d dt:%d phy:%d lines:%d pattern:%d format:%d",
+		tpg_hw->hw_intf->hw_idx,
+		tpg_data->vc_num, tpg_data->dt_cfg[0].data_type,
+		tpg_data->phy_sel, tpg_data->num_active_lanes,
+		tpg_data->pix_pattern,
+		tpg_data->dt_cfg[0].encode_format);
+
+	CAM_DBG(CAM_ISP, "TPG:%u height:%d width:%d h blank:%d v blank:%d",
+		tpg_hw->hw_intf->hw_idx,
+		tpg_data->dt_cfg[0].frame_height,
+		tpg_data->dt_cfg[0].frame_width,
+		tpg_data->h_blank_count,
+		tpg_data->v_blank_count);
+
+	if (reserv->num_inport == 1)
+		goto end;
+
+	for (i = 1; i < reserv->num_inport; i++) {
+		if ((tpg_data->vc_num != reserv->in_port[i]->vc) ||
+			(tpg_data->phy_sel != reserv->in_port[i]->lane_type) ||
+			(tpg_data->num_active_lanes !=
+				reserv->in_port[i]->lane_num) ||
+			(tpg_data->pix_pattern !=
+			reserv->in_port[i]->pix_pattern)) {
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"TPG: %u invalid DT config for tpg",
+				tpg_hw->hw_intf->hw_idx);
+			rc = -EINVAL;
+			goto error;
+		}
+		rc = cam_top_tpg_get_format(reserv->in_port[i]->format,
+			&encode_format);
+		if (rc)
+			goto error;
+
+		tpg_data->dt_cfg[i].data_type = reserv->in_port[i]->dt;
+		tpg_data->dt_cfg[i].frame_height =
+			reserv->in_port[i]->height;
+		tpg_data->dt_cfg[i].frame_width =
+			reserv->in_port[i]->left_width;
+		tpg_data->dt_cfg[i].encode_format = encode_format;
+		tpg_data->num_active_dts++;
+
+		CAM_DBG(CAM_ISP, "TPG:%u height:%d width:%d dt:%d format:%d",
+			tpg_hw->hw_intf->hw_idx,
+			tpg_data->dt_cfg[i].frame_height,
+			tpg_data->dt_cfg[i].frame_width,
+			tpg_data->dt_cfg[i].data_type,
+			tpg_data->dt_cfg[i].encode_format);
+
+	}
+end:
+	reserv->node_res = &tpg_hw->tpg_res;
+	tpg_hw->tpg_res.res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+error:
+	mutex_unlock(&tpg_hw->hw_info->hw_mutex);
+	CAM_DBG(CAM_ISP, "exit rc %u", rc);
+
+	return rc;
+}
+
+static int cam_top_tpg_release(void *hw_priv,
+	void *release_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_top_tpg_hw           *tpg_hw;
+	struct cam_hw_info              *tpg_hw_info;
+	struct cam_top_tpg_cfg          *tpg_data;
+	struct cam_isp_resource_node    *tpg_res;
+
+	if (!hw_priv || !release_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		CAM_ERR(CAM_ISP, "TPG: Invalid args");
+		return -EINVAL;
+	}
+
+	tpg_hw_info = (struct cam_hw_info  *)hw_priv;
+	tpg_hw = (struct cam_top_tpg_hw   *)tpg_hw_info->core_info;
+	tpg_res = (struct cam_isp_resource_node *)release_args;
+
+	mutex_lock(&tpg_hw->hw_info->hw_mutex);
+	if ((tpg_res->res_type != CAM_ISP_RESOURCE_TPG) ||
+		(tpg_res->res_state <= CAM_ISP_RESOURCE_STATE_AVAILABLE)) {
+		CAM_ERR(CAM_ISP, "TPG:%d Invalid res type:%d res_state:%d",
+			tpg_hw->hw_intf->hw_idx, tpg_res->res_type,
+			tpg_res->res_state);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	CAM_DBG(CAM_ISP, "TPG:%d res type :%d",
+		tpg_hw->hw_intf->hw_idx, tpg_res->res_type);
+
+	tpg_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+	tpg_data = (struct cam_top_tpg_cfg *)tpg_res->res_priv;
+	memset(tpg_data, 0, sizeof(struct cam_top_tpg_cfg));
+
+end:
+	mutex_unlock(&tpg_hw->hw_info->hw_mutex);
+	return rc;
+}
+
+static int cam_top_tpg_init_hw(void *hw_priv,
+	void *init_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_top_tpg_hw                  *tpg_hw;
+	struct cam_hw_info                     *tpg_hw_info;
+	struct cam_isp_resource_node           *tpg_res;
+	const struct cam_top_tpg_reg_offset    *tpg_reg;
+	struct cam_hw_soc_info                 *soc_info;
+	uint32_t val, clk_lvl;
+
+	if (!hw_priv || !init_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		CAM_ERR(CAM_ISP, "TPG: Invalid args");
+		return -EINVAL;
+	}
+
+	tpg_hw_info = (struct cam_hw_info  *)hw_priv;
+	tpg_hw = (struct cam_top_tpg_hw   *)tpg_hw_info->core_info;
+	tpg_res      = (struct cam_isp_resource_node *)init_args;
+	tpg_reg = tpg_hw->tpg_info->tpg_reg;
+	soc_info = &tpg_hw->hw_info->soc_info;
+
+	if (tpg_res->res_type != CAM_ISP_RESOURCE_TPG) {
+		CAM_ERR(CAM_ISP, "TPG:%d Invalid res type state %d",
+			tpg_hw->hw_intf->hw_idx,
+			tpg_res->res_type);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ISP, "TPG:%d init HW res type :%d",
+		tpg_hw->hw_intf->hw_idx, tpg_res->res_type);
+	mutex_lock(&tpg_hw->hw_info->hw_mutex);
+	/* overflow check before increment */
+	if (tpg_hw->hw_info->open_count == UINT_MAX) {
+		CAM_ERR(CAM_ISP, "TPG:%d Open count reached max",
+			tpg_hw->hw_intf->hw_idx);
+		mutex_unlock(&tpg_hw->hw_info->hw_mutex);
+		return -EINVAL;
+	}
+
+	/* Increment ref Count */
+	tpg_hw->hw_info->open_count++;
+	if (tpg_hw->hw_info->open_count > 1) {
+		CAM_DBG(CAM_ISP, "TPG hw has already been enabled");
+		mutex_unlock(&tpg_hw->hw_info->hw_mutex);
+		return rc;
+	}
+
+	rc = cam_soc_util_get_clk_level(soc_info, tpg_hw->clk_rate,
+		soc_info->src_clk_idx, &clk_lvl);
+	CAM_DBG(CAM_ISP, "TPG phy clock level %u", clk_lvl);
+
+	rc = cam_top_tpg_enable_soc_resources(soc_info, clk_lvl);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "TPG:%d Enable SOC failed",
+			tpg_hw->hw_intf->hw_idx);
+		goto err;
+	}
+
+	tpg_hw->hw_info->hw_state = CAM_HW_STATE_POWER_UP;
+
+	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			tpg_reg->tpg_hw_version);
+	CAM_DBG(CAM_ISP, "TPG:%d TPG HW version: 0x%x",
+		tpg_hw->hw_intf->hw_idx, val);
+
+	mutex_unlock(&tpg_hw->hw_info->hw_mutex);
+	return rc;
+
+err:
+	tpg_hw->hw_info->open_count--;
+	mutex_unlock(&tpg_hw->hw_info->hw_mutex);
+	return rc;
+}
+
+static int cam_top_tpg_deinit_hw(void *hw_priv,
+	void *deinit_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_top_tpg_hw                 *tpg_hw;
+	struct cam_hw_info                    *tpg_hw_info;
+	struct cam_isp_resource_node          *tpg_res;
+	struct cam_hw_soc_info                *soc_info;
+
+	if (!hw_priv || !deinit_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		CAM_ERR(CAM_ISP, "TPG:Invalid arguments");
+		return -EINVAL;
+	}
+
+	tpg_res = (struct cam_isp_resource_node *)deinit_args;
+	tpg_hw_info = (struct cam_hw_info  *)hw_priv;
+	tpg_hw = (struct cam_top_tpg_hw   *)tpg_hw_info->core_info;
+
+	if (tpg_res->res_type != CAM_ISP_RESOURCE_TPG) {
+		CAM_ERR(CAM_ISP, "TPG:%d Invalid Res type %d",
+			tpg_hw->hw_intf->hw_idx,
+			tpg_res->res_type);
+		return -EINVAL;
+	}
+
+	mutex_lock(&tpg_hw->hw_info->hw_mutex);
+	/* Check for refcount */
+	if (!tpg_hw->hw_info->open_count) {
+		CAM_WARN(CAM_ISP, "Unbalanced disable_hw");
+		goto end;
+	}
+
+	/* Decrement ref Count */
+	tpg_hw->hw_info->open_count--;
+	if (tpg_hw->hw_info->open_count) {
+		rc = 0;
+		goto end;
+	}
+
+	soc_info = &tpg_hw->hw_info->soc_info;
+	rc = cam_top_tpg_disable_soc_resources(soc_info);
+	if (rc)
+		CAM_ERR(CAM_ISP, "TPG:%d Disable SOC failed",
+			tpg_hw->hw_intf->hw_idx);
+
+	tpg_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
+	CAM_DBG(CAM_ISP, "TPG:%d deint completed", tpg_hw->hw_intf->hw_idx);
+
+end:
+	mutex_unlock(&tpg_hw->hw_info->hw_mutex);
+	return rc;
+}
+
+static int cam_top_tpg_start(void *hw_priv, void *start_args,
+			uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_top_tpg_hw                  *tpg_hw;
+	struct cam_hw_info                     *tpg_hw_info;
+	struct cam_hw_soc_info                 *soc_info;
+	struct cam_isp_resource_node           *tpg_res;
+	const struct cam_top_tpg_reg_offset    *tpg_reg;
+	struct cam_top_tpg_cfg                 *tpg_data;
+	uint32_t i, val;
+
+	if (!hw_priv || !start_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		CAM_ERR(CAM_ISP, "TPG: Invalid args");
+		return -EINVAL;
+	}
+
+	tpg_hw_info = (struct cam_hw_info  *)hw_priv;
+	tpg_hw = (struct cam_top_tpg_hw   *)tpg_hw_info->core_info;
+	tpg_reg = tpg_hw->tpg_info->tpg_reg;
+	tpg_res = (struct cam_isp_resource_node *)start_args;
+	tpg_data = (struct cam_top_tpg_cfg  *)tpg_res->res_priv;
+	soc_info = &tpg_hw->hw_info->soc_info;
+
+	if ((tpg_res->res_type != CAM_ISP_RESOURCE_TPG) ||
+		(tpg_res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED)) {
+		CAM_ERR(CAM_ISP, "TPG:%d Invalid Res type:%d res_state:%d",
+			tpg_hw->hw_intf->hw_idx,
+			tpg_res->res_type, tpg_res->res_state);
+		rc = -EINVAL;
+		goto end;
+	}
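+	/* Program a fixed seed for the TPG LFSR payload generator */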
+	cam_io_w_mb(0x12345678, soc_info->reg_map[0].mem_base +
+		tpg_reg->tpg_lfsr_seed);
+
+	for (i = 0; i < tpg_data->num_active_dts; i++) {
+		val = (((tpg_data->dt_cfg[i].frame_width & 0xFFFF) << 16) |
+			(tpg_data->dt_cfg[i].frame_height & 0x3FFF));
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			tpg_reg->tpg_dt_0_cfg_0 + 0x10 * i);
+		cam_io_w_mb(tpg_data->dt_cfg[i].data_type,
+			soc_info->reg_map[0].mem_base +
+			tpg_reg->tpg_dt_0_cfg_1 + 0x10 * i);
+		val = ((tpg_data->dt_cfg[i].encode_format & 0xF) <<
+			tpg_reg->tpg_dt_encode_format_shift) |
+			tpg_reg->tpg_payload_mode_color;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			tpg_reg->tpg_dt_0_cfg_2 + 0x10 * i);
+	}
+
+	val = (tpg_num_dt_map[tpg_data->num_active_dts-1] <<
+		 tpg_reg->tpg_num_dts_shift_val) | tpg_data->vc_num;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc_cfg0);
+
+	/* HBlank count 500 (0x1F4) and VBlank count 600 (0x258) */
+	cam_io_w_mb(0x2581F4,
+		soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc_cfg1);
+
+	val = (1 << tpg_reg->tpg_split_en_shift);
+	cam_io_w_mb(tpg_data->pix_pattern, soc_info->reg_map[0].mem_base +
+		tpg_reg->tpg_common_gen_cfg);
+	cam_io_w_mb(0xAFFF,
+		soc_info->reg_map[0].mem_base + tpg_reg->tpg_vbi_cfg);
+	CAM_DBG(CAM_ISP, "TPG:%d set TPG VBI to  0xAFFF",
+		tpg_hw->hw_intf->hw_idx);
+
+	/* Set the TOP tpg mux sel*/
+	cam_io_w_mb((1 << tpg_hw->hw_intf->hw_idx),
+		soc_info->reg_map[1].mem_base + tpg_reg->top_mux_reg_offset);
+
+	val = ((tpg_data->num_active_lanes - 1) <<
+		tpg_reg->tpg_num_active_lines_shift) |
+		(1 << tpg_reg->tpg_fe_pkt_en_shift) |
+		(1 << tpg_reg->tpg_fs_pkt_en_shift) |
+		(tpg_data->phy_sel << tpg_reg->tpg_phy_sel_shift_val) |
+		(1 << tpg_reg->tpg_en_shift_val);
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base + tpg_reg->tpg_ctrl);
+
+	tpg_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
+	CAM_DBG(CAM_ISP, "TPG:%d started", tpg_hw->hw_intf->hw_idx);
+
+end:
+	return rc;
+}
+
+static int cam_top_tpg_stop(void *hw_priv,
+	void *stop_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_top_tpg_hw                  *tpg_hw;
+	struct cam_hw_info                     *tpg_hw_info;
+	struct cam_hw_soc_info                 *soc_info;
+	struct cam_isp_resource_node           *tpg_res;
+	const struct cam_top_tpg_reg_offset    *tpg_reg;
+	struct cam_top_tpg_cfg                 *tpg_data;
+
+	if (!hw_priv || !stop_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		CAM_ERR(CAM_ISP, "TPG: Invalid args");
+		return -EINVAL;
+	}
+
+	tpg_hw_info = (struct cam_hw_info  *)hw_priv;
+	tpg_hw = (struct cam_top_tpg_hw   *)tpg_hw_info->core_info;
+	tpg_reg = tpg_hw->tpg_info->tpg_reg;
+	tpg_res = (struct cam_isp_resource_node  *) stop_args;
+	tpg_data = (struct cam_top_tpg_cfg  *)tpg_res->res_priv;
+	soc_info = &tpg_hw->hw_info->soc_info;
+
+	if ((tpg_res->res_type != CAM_ISP_RESOURCE_TPG) ||
+		(tpg_res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING)) {
+		CAM_DBG(CAM_ISP, "TPG:%d Invalid Res type:%d res_state:%d",
+			tpg_hw->hw_intf->hw_idx,
+			tpg_res->res_type, tpg_res->res_state);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		tpg_reg->tpg_ctrl);
+
+	tpg_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+
+	CAM_DBG(CAM_ISP, "TPG:%d stopped", tpg_hw->hw_intf->hw_idx);
+end:
+	return rc;
+}
+
+static int cam_top_tpg_read(void *hw_priv,
+	void *read_args, uint32_t arg_size)
+{
+	CAM_ERR(CAM_ISP, "TPG: un supported");
+
+	return -EINVAL;
+}
+
+static int cam_top_tpg_write(void *hw_priv,
+	void *write_args, uint32_t arg_size)
+{
+	CAM_ERR(CAM_ISP, "TPG: un supported");
+	return -EINVAL;
+}
+
+static int cam_top_tpg_set_phy_clock(
+	struct cam_top_tpg_hw *tpg_hw, void *cmd_args)
+{
+	struct cam_top_tpg_clock_update_args *clk_update = NULL;
+
+	if (!tpg_hw)
+		return -EINVAL;
+
+	clk_update =
+		(struct cam_top_tpg_clock_update_args *)cmd_args;
+
+	tpg_hw->clk_rate = clk_update->clk_rate;
+	CAM_DBG(CAM_ISP, "CSI PHY clock rate %llu", tpg_hw->clk_rate);
+
+	return 0;
+}
+
+static int cam_top_tpg_process_cmd(void *hw_priv,
+	uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_top_tpg_hw               *tpg_hw;
+	struct cam_hw_info                  *tpg_hw_info;
+
+	if (!hw_priv || !cmd_args) {
+		CAM_ERR(CAM_ISP, "CSID: Invalid arguments");
+		return -EINVAL;
+	}
+
+	tpg_hw_info = (struct cam_hw_info  *)hw_priv;
+	tpg_hw = (struct cam_top_tpg_hw   *)tpg_hw_info->core_info;
+
+	switch (cmd_type) {
+	case CAM_ISP_HW_CMD_TPG_PHY_CLOCK_UPDATE:
+		rc = cam_top_tpg_set_phy_clock(tpg_hw, cmd_args);
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "TPG:%d unsupported cmd:%d",
+			tpg_hw->hw_intf->hw_idx, cmd_type);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+int cam_top_tpg_hw_probe_init(struct cam_hw_intf  *tpg_hw_intf,
+	uint32_t tpg_idx)
+{
+	int rc = -EINVAL;
+	struct cam_top_tpg_cfg             *tpg_data;
+	struct cam_hw_info                 *tpg_hw_info;
+	struct cam_top_tpg_hw              *tpg_hw = NULL;
+	uint32_t val = 0;
+
+	if (tpg_idx >= CAM_TOP_TPG_HW_NUM_MAX) {
+		CAM_ERR(CAM_ISP, "Invalid tpg index:%d", tpg_idx);
+		return rc;
+	}
+
+	tpg_hw_info = (struct cam_hw_info  *)tpg_hw_intf->hw_priv;
+	tpg_hw      = (struct cam_top_tpg_hw  *)tpg_hw_info->core_info;
+
+	tpg_hw->hw_intf = tpg_hw_intf;
+	tpg_hw->hw_info = tpg_hw_info;
+
+	CAM_DBG(CAM_ISP, "type %d index %d",
+		tpg_hw->hw_intf->hw_type, tpg_idx);
+
+	tpg_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
+	mutex_init(&tpg_hw->hw_info->hw_mutex);
+	spin_lock_init(&tpg_hw->hw_info->hw_lock);
+	spin_lock_init(&tpg_hw->lock_state);
+	init_completion(&tpg_hw->hw_info->hw_complete);
+
+	init_completion(&tpg_hw->tpg_complete);
+
+	rc = cam_top_tpg_init_soc_resources(&tpg_hw->hw_info->soc_info,
+			tpg_hw);
+	if (rc < 0) {
+		CAM_ERR(CAM_ISP, "TPG:%d Failed to init_soc", tpg_idx);
+		goto err;
+	}
+
+	tpg_hw->hw_intf->hw_ops.get_hw_caps = cam_top_tpg_get_hw_caps;
+	tpg_hw->hw_intf->hw_ops.init        = cam_top_tpg_init_hw;
+	tpg_hw->hw_intf->hw_ops.deinit      = cam_top_tpg_deinit_hw;
+	tpg_hw->hw_intf->hw_ops.reset       = NULL;
+	tpg_hw->hw_intf->hw_ops.reserve     = cam_top_tpg_reserve;
+	tpg_hw->hw_intf->hw_ops.release     = cam_top_tpg_release;
+	tpg_hw->hw_intf->hw_ops.start       = cam_top_tpg_start;
+	tpg_hw->hw_intf->hw_ops.stop        = cam_top_tpg_stop;
+	tpg_hw->hw_intf->hw_ops.read        = cam_top_tpg_read;
+	tpg_hw->hw_intf->hw_ops.write       = cam_top_tpg_write;
+	tpg_hw->hw_intf->hw_ops.process_cmd = cam_top_tpg_process_cmd;
+
+	tpg_hw->tpg_res.res_type = CAM_ISP_RESOURCE_TPG;
+	tpg_hw->tpg_res.res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+	tpg_hw->tpg_res.hw_intf = tpg_hw->hw_intf;
+	tpg_data = kzalloc(sizeof(*tpg_data), GFP_KERNEL);
+	if (!tpg_data) {
+		rc = -ENOMEM;
+		goto deinit_soc;
+	}
+	tpg_hw->tpg_res.res_priv = tpg_data;
+
+	cam_top_tpg_enable_soc_resources(&tpg_hw->hw_info->soc_info,
+		CAM_SVS_VOTE);
+
+	val = cam_io_r_mb(tpg_hw->hw_info->soc_info.reg_map[0].mem_base +
+			tpg_hw->tpg_info->tpg_reg->tpg_hw_version);
+	CAM_DBG(CAM_ISP, "TPG:%d TPG HW version: 0x%x",
+		tpg_hw->hw_intf->hw_idx, val);
+
+	cam_top_tpg_disable_soc_resources(&tpg_hw->hw_info->soc_info);
+
+	return rc;
+
+deinit_soc:
+	cam_top_tpg_deinit_soc_resources(&tpg_hw->hw_info->soc_info);
+err:
+	return rc;
+}
+
+int cam_top_tpg_hw_deinit(struct cam_top_tpg_hw *top_tpg_hw)
+{
+	int rc = -EINVAL;
+
+	if (!top_tpg_hw) {
+		CAM_ERR(CAM_ISP, "Invalid param");
+		return rc;
+	}
+
+	/* release the privdate data memory from resources */
+	kfree(top_tpg_hw->tpg_res.res_priv);
+	cam_top_tpg_deinit_soc_resources(&top_tpg_hw->hw_info->soc_info);
+
+	return 0;
+}

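A sketch of the call sequence the ops table set up in cam_top_tpg_hw_probe_init() expects. The reserve step that produces tpg_res is elided here; each op validates arg_size against sizeof(struct cam_isp_resource_node), and the wrapper name is illustrative:

	static int example_tpg_stream(struct cam_hw_intf *tpg_intf,
		struct cam_isp_resource_node *tpg_res)
	{
		int rc;

		rc = tpg_intf->hw_ops.init(tpg_intf->hw_priv, tpg_res,
			sizeof(*tpg_res));
		if (rc)
			return rc;

		rc = tpg_intf->hw_ops.start(tpg_intf->hw_priv, tpg_res,
			sizeof(*tpg_res));
		if (rc)
			goto deinit;

		/* ... TPG streams frames until stop ... */

		rc = tpg_intf->hw_ops.stop(tpg_intf->hw_priv, tpg_res,
			sizeof(*tpg_res));
	deinit:
		tpg_intf->hw_ops.deinit(tpg_intf->hw_priv, tpg_res,
			sizeof(*tpg_res));
		return rc;
	}
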
+ 153 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/top_tpg/cam_top_tpg_core.h

@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_TOP_TPG_HW_H_
+#define _CAM_TOP_TPG_HW_H_
+
+#include "cam_hw.h"
+#include "cam_top_tpg_hw_intf.h"
+#include "cam_top_tpg_soc.h"
+
+enum cam_top_tpg_encode_format {
+	CAM_TOP_TPG_ENCODE_FORMAT_RAW6,
+	CAM_TOP_TPG_ENCODE_FORMAT_RAW8,
+	CAM_TOP_TPG_ENCODE_FORMAT_RAW10,
+	CAM_TOP_TPG_ENCODE_FORMAT_RAW12,
+	CAM_TOP_TPG_ENCODE_FORMAT_RAW14,
+	CAM_TOP_TPG_ENCODE_FORMAT_RAW16,
+	CAM_TOP_TPG_ENCODE_FORMAT_MAX,
+};
+
+struct cam_top_tpg_reg_offset {
+	uint32_t tpg_hw_version;
+	uint32_t tpg_hw_status;
+	uint32_t tpg_ctrl;
+	uint32_t tpg_vc_cfg0;
+	uint32_t tpg_vc_cfg1;
+	uint32_t tpg_lfsr_seed;
+	uint32_t tpg_dt_0_cfg_0;
+	uint32_t tpg_dt_1_cfg_0;
+	uint32_t tpg_dt_2_cfg_0;
+	uint32_t tpg_dt_3_cfg_0;
+	uint32_t tpg_dt_0_cfg_1;
+	uint32_t tpg_dt_1_cfg_1;
+	uint32_t tpg_dt_2_cfg_1;
+	uint32_t tpg_dt_3_cfg_1;
+	uint32_t tpg_dt_0_cfg_2;
+	uint32_t tpg_dt_1_cfg_2;
+	uint32_t tpg_dt_2_cfg_2;
+	uint32_t tpg_dt_3_cfg_2;
+	uint32_t tpg_color_bar_cfg;
+	uint32_t tpg_common_gen_cfg;
+	uint32_t tpg_vbi_cfg;
+	uint32_t tpg_test_bus_crtl;
+	uint32_t tpg_spare;
+	/* configurations */
+	uint32_t major_version;
+	uint32_t minor_version;
+	uint32_t version_incr;
+	uint32_t tpg_en_shift_val;
+	uint32_t tpg_phy_sel_shift_val;
+	uint32_t tpg_num_active_lines_shift;
+	uint32_t tpg_fe_pkt_en_shift;
+	uint32_t tpg_fs_pkt_en_shift;
+	uint32_t tpg_line_interleaving_mode_shift;
+	uint32_t tpg_num_dts_shift_val;
+	uint32_t tpg_v_blank_cnt_shift;
+	uint32_t tpg_dt_encode_format_shift;
+	uint32_t tpg_payload_mode_color;
+	uint32_t tpg_split_en_shift;
+	uint32_t top_mux_reg_offset;
+};
+
+/**
+ * struct cam_top_tpg_hw_info- tpg hardware info
+ *
+ * @tpg_reg:         tpg register offsets
+ * @hw_dts_version:  HW DTS version
+ * @csid_max_clk:    maximum csid clock
+ * @phy_max_clk:     maximum phy clock
+ *
+ */
+struct cam_top_tpg_hw_info {
+	const struct cam_top_tpg_reg_offset    *tpg_reg;
+	uint32_t                                hw_dts_version;
+	uint32_t                                csid_max_clk;
+	uint32_t                                phy_max_clk;
+};
+
+/**
+ * struct cam_top_tpg_dt_cfg- tpg data type(dt) configuration
+ *
+ * @frame_width:     frame width in pixel
+ * @frame_height:    frame height in pixel
+ * @data_type:       data type(dt) value
+ * @encode_format:   encode format for this data type
+ * @payload_mode:    payload mode, such as color bar or color box
+ *
+ */
+struct cam_top_tpg_dt_cfg {
+	uint32_t                               frame_width;
+	uint32_t                               frame_height;
+	uint32_t                               data_type;
+	uint32_t                               encode_format;
+	uint32_t                               payload_mode;
+};
+
+/**
+ * struct cam_top_tpg_cfg- tpg configuration
+ * @pix_pattern:     pixel pattern output of the tpg
+ * @phy_sel:         phy selection 0:dphy or 1:cphy
+ * @num_active_lanes: Number of active lanes
+ * @vc_num:          Virtual channel number
+ * @v_blank_count:   vertical blanking count value
+ * @h_blank_count:   horizontal blanking count value
+ * @vbi_cnt:         vbi count
+ * @num_active_dts:  number of active dts need to configure
+ * @dt_cfg:          dt configuration values
+ *
+ */
+struct cam_top_tpg_cfg {
+	uint32_t                        pix_pattern;
+	uint32_t                        phy_sel;
+	uint32_t                        num_active_lanes;
+	uint32_t                        vc_num;
+	uint32_t                        v_blank_count;
+	uint32_t                        h_blank_count;
+	uint32_t                        vbi_cnt;
+	uint32_t                        num_active_dts;
+	struct cam_top_tpg_dt_cfg       dt_cfg[4];
+};
+
+/**
+ * struct cam_top_tpg_hw- tpg hw device resources data
+ *
+ * @hw_intf:                  contain the tpg hw interface information
+ * @hw_info:                  tpg hw device information
+ * @tpg_info:                 tpg hw specific information
+ * @tpg_res:                  tpg resource
+ * @clk_rate:                 clock rate
+ * @lock_state:               lock state
+ * @tpg_complete:             tpg completion
+ *
+ */
+struct cam_top_tpg_hw {
+	struct cam_hw_intf              *hw_intf;
+	struct cam_hw_info              *hw_info;
+	struct cam_top_tpg_hw_info      *tpg_info;
+	struct cam_isp_resource_node     tpg_res;
+	uint64_t                         clk_rate;
+	spinlock_t                       lock_state;
+	struct completion                tpg_complete;
+};
+
+int cam_top_tpg_hw_probe_init(struct cam_hw_intf  *tpg_hw_intf,
+	uint32_t tpg_idx);
+
+int cam_top_tpg_hw_deinit(struct cam_top_tpg_hw *top_tpg_hw);
+
+#endif /* _CAM_TOP_TPG_HW_H_ */

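A sketch of how the per-DT fields above are packed into the TPG_DT_n_CFG_0 and _CFG_2 words by cam_top_tpg_start(); the helper names are illustrative, while the bit layout is taken from that routine:

	/* width in bits [31:16], height in bits [13:0] */
	static inline uint32_t example_tpg_dt_cfg0(
		const struct cam_top_tpg_dt_cfg *dt)
	{
		return ((dt->frame_width & 0xFFFF) << 16) |
			(dt->frame_height & 0x3FFF);
	}

	/* encode format plus the fixed color payload mode */
	static inline uint32_t example_tpg_dt_cfg2(
		const struct cam_top_tpg_dt_cfg *dt,
		const struct cam_top_tpg_reg_offset *reg)
	{
		return ((dt->encode_format & 0xF) <<
			reg->tpg_dt_encode_format_shift) |
			reg->tpg_payload_mode_color;
	}
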
+ 140 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/top_tpg/cam_top_tpg_dev.c

@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include "cam_top_tpg_core.h"
+#include "cam_top_tpg_dev.h"
+#include "cam_top_tpg_hw_intf.h"
+#include "cam_debug_util.h"
+
+static struct cam_hw_intf *cam_top_tpg_hw_list[CAM_TOP_TPG_HW_NUM_MAX];
+
+static char tpg_dev_name[8];
+
+int cam_top_tpg_probe(struct platform_device *pdev)
+{
+
+	struct cam_hw_intf             *tpg_hw_intf;
+	struct cam_hw_info             *tpg_hw_info;
+	struct cam_top_tpg_hw          *tpg_dev = NULL;
+	const struct of_device_id      *match_dev = NULL;
+	struct cam_top_tpg_hw_info     *tpg_hw_data = NULL;
+	uint32_t                        tpg_dev_idx;
+	int                             rc = 0;
+
+	CAM_DBG(CAM_ISP, "probe called");
+
+	tpg_hw_intf = kzalloc(sizeof(*tpg_hw_intf), GFP_KERNEL);
+	if (!tpg_hw_intf) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	tpg_hw_info = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!tpg_hw_info) {
+		rc = -ENOMEM;
+		goto free_hw_intf;
+	}
+
+	tpg_dev = kzalloc(sizeof(struct cam_top_tpg_hw), GFP_KERNEL);
+	if (!tpg_dev) {
+		rc = -ENOMEM;
+		goto free_hw_info;
+	}
+
+	/* get top tpg hw index */
+	rc = of_property_read_u32(pdev->dev.of_node, "cell-index",
+		&tpg_dev_idx);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Failed to read cell-index rc=%d", rc);
+		goto free_dev;
+	}
+	/* get top tpg hw information */
+	match_dev = of_match_device(pdev->dev.driver->of_match_table,
+		&pdev->dev);
+	if (!match_dev) {
+		CAM_ERR(CAM_ISP, "No matching table for the top tpg hw");
+		rc = -EINVAL;
+		goto free_dev;
+	}
+
+	memset(tpg_dev_name, 0, sizeof(tpg_dev_name));
+	snprintf(tpg_dev_name, sizeof(tpg_dev_name),
+		"tpg%1u", tpg_dev_idx);
+
+	tpg_hw_intf->hw_idx = tpg_dev_idx;
+	tpg_hw_intf->hw_type = CAM_ISP_HW_TYPE_TPG;
+	tpg_hw_intf->hw_priv = tpg_hw_info;
+
+	tpg_hw_info->core_info = tpg_dev;
+	tpg_hw_info->soc_info.pdev = pdev;
+	tpg_hw_info->soc_info.dev = &pdev->dev;
+	tpg_hw_info->soc_info.dev_name = tpg_dev_name;
+	tpg_hw_info->soc_info.index = tpg_dev_idx;
+
+	tpg_hw_data = (struct cam_top_tpg_hw_info  *)match_dev->data;
+	/* need to setup the pdev before call the tfe hw probe init */
+	tpg_dev->tpg_info = tpg_hw_data;
+
+	rc = cam_top_tpg_hw_probe_init(tpg_hw_intf, tpg_dev_idx);
+	if (rc)
+		goto free_dev;
+
+	platform_set_drvdata(pdev, tpg_dev);
+	CAM_DBG(CAM_ISP, "TPG:%d probe successful",
+		tpg_hw_intf->hw_idx);
+
+	if (tpg_hw_intf->hw_idx < CAM_TOP_TPG_HW_NUM_MAX) {
+		cam_top_tpg_hw_list[tpg_hw_intf->hw_idx] = tpg_hw_intf;
+	} else {
+		rc = -EINVAL;
+		goto free_dev;
+	}
+
+	return 0;
+
+free_dev:
+	kfree(tpg_dev);
+free_hw_info:
+	kfree(tpg_hw_info);
+free_hw_intf:
+	kfree(tpg_hw_intf);
+err:
+	return rc;
+}
+
+int cam_top_tpg_remove(struct platform_device *pdev)
+{
+	struct cam_top_tpg_hw          *tpg_dev = NULL;
+	struct cam_hw_intf             *tpg_hw_intf;
+	struct cam_hw_info             *tpg_hw_info;
+
+	tpg_dev = (struct cam_top_tpg_hw *)platform_get_drvdata(pdev);
+	if (!tpg_dev) {
+		CAM_ERR(CAM_ISP, "Error! No data in pdev");
+		return -EINVAL;
+	}
+	tpg_hw_intf = tpg_dev->hw_intf;
+	tpg_hw_info = tpg_dev->hw_info;
+
+	CAM_DBG(CAM_ISP, "TPG:%d remove",
+		tpg_dev->hw_intf->hw_idx);
+
+	cam_top_tpg_hw_deinit(tpg_dev);
+
+	/* release the tpg device memory */
+	kfree(tpg_dev);
+	kfree(tpg_hw_info);
+	kfree(tpg_hw_intf);
+	return 0;
+}
+
+int cam_top_tpg_hw_init(struct cam_hw_intf **top_tpg_hw,
+	uint32_t hw_idx)
+{
+	int rc = 0;
+
+	if (hw_idx < CAM_TOP_TPG_HW_NUM_MAX &&
+		cam_top_tpg_hw_list[hw_idx]) {
+		*top_tpg_hw = cam_top_tpg_hw_list[hw_idx];
+	} else {
+		*top_tpg_hw = NULL;
+		rc = -ENODEV;
+	}
+
+	return rc;
+}

+ 12 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/top_tpg/cam_top_tpg_dev.h

@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_TOP_TPG_DEV_H_
+#define _CAM_TOP_TPG_DEV_H_
+
+int cam_top_tpg_probe(struct platform_device *pdev);
+int cam_top_tpg_remove(struct platform_device *pdev);
+
+#endif /*_CAM_TOP_TPG_DEV_H_ */

+ 152 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/top_tpg/cam_top_tpg_soc.c

@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+#include <linux/slab.h>
+#include "cam_top_tpg_soc.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+
+int cam_top_tpg_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	void *irq_data)
+{
+	int rc = 0;
+	struct cam_cpas_register_params   cpas_register_param;
+	struct cam_top_tpg_soc_private    *soc_private;
+
+	soc_private = kzalloc(sizeof(struct cam_top_tpg_soc_private),
+		GFP_KERNEL);
+	if (!soc_private)
+		return -ENOMEM;
+
+	soc_info->soc_private = soc_private;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc < 0) {
+		CAM_ERR(CAM_ISP, "Error! get DT properties failed rc=%d", rc);
+		goto free_soc_private;
+	}
+
+	/* Need to see if we want post process the clock list */
+	rc = cam_soc_util_request_platform_resource(soc_info, NULL,
+		irq_data);
+
+	if (rc < 0) {
+		CAM_ERR(CAM_ISP,
+			"Error Request platform resources failed rc=%d", rc);
+		goto free_soc_private;
+	}
+
+	memset(&cpas_register_param, 0, sizeof(cpas_register_param));
+	strlcpy(cpas_register_param.identifier, "tpg",
+		CAM_HW_IDENTIFIER_LENGTH);
+	cpas_register_param.cell_index = soc_info->index;
+	cpas_register_param.dev = soc_info->dev;
+	rc = cam_cpas_register_client(&cpas_register_param);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "CPAS registration failed rc=%d", rc);
+		goto release_soc;
+	}
+	soc_private->cpas_handle = cpas_register_param.client_handle;
+
+	return rc;
+
+release_soc:
+	cam_soc_util_release_platform_resource(soc_info);
+free_soc_private:
+	kfree(soc_private);
+
+	return rc;
+}
+
+int cam_top_tpg_deinit_soc_resources(
+	struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+	struct cam_top_tpg_soc_private       *soc_private;
+
+	soc_private = soc_info->soc_private;
+	if (!soc_private) {
+		CAM_ERR(CAM_ISP, "Error soc_private NULL");
+		return -ENODEV;
+	}
+
+	rc = cam_cpas_unregister_client(soc_private->cpas_handle);
+	if (rc)
+		CAM_ERR(CAM_ISP, "CPAS unregistration failed rc=%d", rc);
+
+	rc = cam_soc_util_release_platform_resource(soc_info);
+
+	return rc;
+}
+
+int cam_top_tpg_enable_soc_resources(
+	struct cam_hw_soc_info *soc_info, enum cam_vote_level clk_level)
+{
+	int rc = 0;
+	struct cam_top_tpg_soc_private       *soc_private;
+	struct cam_ahb_vote ahb_vote;
+	struct cam_axi_vote axi_vote = {0};
+
+	soc_private = soc_info->soc_private;
+
+	ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	ahb_vote.vote.level = CAM_SVS_VOTE;
+	axi_vote.num_paths = 1;
+	axi_vote.axi_path[0].path_data_type = CAM_AXI_PATH_DATA_ALL;
+	axi_vote.axi_path[0].transac_type = CAM_AXI_TRANSACTION_WRITE;
+
+	axi_vote.axi_path[0].camnoc_bw = CAM_CPAS_DEFAULT_AXI_BW;
+	axi_vote.axi_path[0].mnoc_ab_bw = CAM_CPAS_DEFAULT_AXI_BW;
+	axi_vote.axi_path[0].mnoc_ib_bw = CAM_CPAS_DEFAULT_AXI_BW;
+
+	CAM_DBG(CAM_ISP, "csid camnoc_bw:%lld mnoc_ab_bw:%lld mnoc_ib_bw:%lld ",
+		axi_vote.axi_path[0].camnoc_bw,
+		axi_vote.axi_path[0].mnoc_ab_bw,
+		axi_vote.axi_path[0].mnoc_ib_bw);
+
+	rc = cam_cpas_start(soc_private->cpas_handle, &ahb_vote, &axi_vote);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Error CPAS start failed");
+		rc = -EFAULT;
+		goto end;
+	}
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		clk_level, false);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "enable platform failed");
+		goto stop_cpas;
+	}
+
+	return rc;
+
+stop_cpas:
+	cam_cpas_stop(soc_private->cpas_handle);
+end:
+	return rc;
+}
+
+int cam_top_tpg_disable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+	struct cam_top_tpg_soc_private       *soc_private;
+
+	if (!soc_info) {
+		CAM_ERR(CAM_ISP, "Error Invalid params");
+		return -EINVAL;
+	}
+	soc_private = soc_info->soc_private;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, false);
+	if (rc)
+		CAM_ERR(CAM_ISP, "Disable platform failed");
+
+	rc = cam_cpas_stop(soc_private->cpas_handle);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Error CPAS stop failed rc=%d", rc);
+		return rc;
+	}
+
+	return rc;
+}

+ 78 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/top_tpg/cam_top_tpg_soc.h

@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_TOP_TPG_SOC_H_
+#define _CAM_TOP_TPG_SOC_H_
+
+#include "cam_isp_hw.h"
+
+/*
+ * struct cam_top_tpg_soc_private:
+ *
+ * @Brief:                   Private SOC data specific to TPG HW Driver
+ *
+ * @cpas_handle:             Handle returned on registering with CPAS driver.
+ *                           This handle is used for all further interface
+ *                           with CPAS.
+ */
+struct cam_top_tpg_soc_private {
+	uint32_t cpas_handle;
+};
+
+/**
+ * struct cam_top_tpg_device_soc_info - tpg soc SOC info object
+ *
+ * @csi_vdd_voltage:       csi vdd voltage value
+ *
+ */
+struct cam_top_tpg_device_soc_info {
+	int                             csi_vdd_voltage;
+};
+
+/**
+ * cam_top_tpg_init_soc_resources()
+ *
+ * @brief:                 tpg initialization function for the soc info
+ *
+ * @soc_info:              soc info structure pointer
+ * @irq_data:              irq data for the callback function
+ *
+ */
+int cam_top_tpg_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	void *irq_data);
+
+/**
+ * cam_top_tpg_deinit_soc_resources()
+ *
+ * @brief:                 tpg deinitialization function for the soc info
+ *
+ * @soc_info:              soc info structure pointer
+ *
+ */
+int cam_top_tpg_deinit_soc_resources(struct cam_hw_soc_info *soc_info);
+
+/**
+ * cam_top_tpg_enable_soc_resources()
+ *
+ * @brief:                 tpg soc resource enable function
+ *
+ * @soc_info:              soc info structure pointer
+ * @clk_level:             vote level to start with
+ *
+ */
+int cam_top_tpg_enable_soc_resources(struct cam_hw_soc_info *soc_info,
+	enum cam_vote_level clk_level);
+
+/**
+ * cam_top_tpg_disable_soc_resources()
+ *
+ * @brief:                 tpg soc resource disable function
+ *
+ * @soc_info:              soc info structure pointer
+ *
+ */
+int cam_top_tpg_disable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+#endif /* _CAM_TOP_TPG_SOC_H_ */

+ 53 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/top_tpg/cam_top_tpg_v1.c

@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+
+#include <linux/module.h>
+#include "cam_top_tpg_core.h"
+#include "cam_top_tpg_v1.h"
+#include "cam_top_tpg_dev.h"
+
+#define CAM_TOP_TPG_DRV_NAME                    "tpg_v1"
+#define CAM_TOP_TPG_VERSION_V1                  0x10000000
+
+static struct cam_top_tpg_hw_info cam_top_tpg_v1_hw_info = {
+	.tpg_reg = &cam_top_tpg_v1_reg_offset,
+	.hw_dts_version = CAM_TOP_TPG_VERSION_V1,
+	.csid_max_clk = 426400000,
+	.phy_max_clk = 384000000,
+};
+
+static const struct of_device_id cam_top_tpg_v1_dt_match[] = {
+	{
+		.compatible = "qcom,tpgv1",
+		.data = &cam_top_tpg_v1_hw_info,
+	},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, cam_top_tpg_v1_dt_match);
+
+static struct platform_driver cam_top_tpg_v1_driver = {
+	.probe = cam_top_tpg_probe,
+	.remove = cam_top_tpg_remove,
+	.driver = {
+		.name = CAM_TOP_TPG_DRV_NAME,
+		.of_match_table = cam_top_tpg_v1_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+int cam_top_tpg_v1_init_module(void)
+{
+	return platform_driver_register(&cam_top_tpg_v1_driver);
+}
+
+void cam_top_tpg_v1_exit_module(void)
+{
+	platform_driver_unregister(&cam_top_tpg_v1_driver);
+}
+
+MODULE_DESCRIPTION("CAM TOP TPG driver");
+MODULE_LICENSE("GPL v2");

+ 56 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/top_tpg/cam_top_tpg_v1.h

@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_TOP_TPG_V1_H_
+#define _CAM_TOP_TPG_V1_H_
+
+#include "cam_top_tpg_core.h"
+
+static struct cam_top_tpg_reg_offset   cam_top_tpg_v1_reg_offset = {
+	.tpg_hw_version = 0x0,
+	.tpg_hw_status = 0x4,
+	.tpg_ctrl = 0x60,
+	.tpg_vc_cfg0 = 0x64,
+	.tpg_vc_cfg1 = 0x68,
+	.tpg_lfsr_seed = 0x6c,
+	.tpg_dt_0_cfg_0 = 0x70,
+	.tpg_dt_1_cfg_0 = 0x74,
+	.tpg_dt_2_cfg_0 = 0x78,
+	.tpg_dt_3_cfg_0 = 0x7C,
+	.tpg_dt_0_cfg_1 = 0x80,
+	.tpg_dt_1_cfg_1 = 0x84,
+	.tpg_dt_2_cfg_1 = 0x88,
+	.tpg_dt_3_cfg_1 = 0x8C,
+	.tpg_dt_0_cfg_2 = 0x90,
+	.tpg_dt_1_cfg_2 = 0x94,
+	.tpg_dt_2_cfg_2 = 0x98,
+	.tpg_dt_3_cfg_2 = 0x9C,
+	.tpg_color_bar_cfg = 0xA0,
+	.tpg_common_gen_cfg = 0xA4,
+	.tpg_vbi_cfg = 0xA8,
+	.tpg_test_bus_crtl = 0xF8,
+	.tpg_spare = 0xFC,
+	/* configurations */
+	.major_version = 1,
+	.minor_version = 0,
+	.version_incr = 0,
+	.tpg_en_shift_val = 0,
+	.tpg_phy_sel_shift_val = 3,
+	.tpg_num_active_lines_shift = 4,
+	.tpg_fe_pkt_en_shift = 2,
+	.tpg_fs_pkt_en_shift = 1,
+	.tpg_line_interleaving_mode_shift = 10,
+	.tpg_num_dts_shift_val = 8,
+	.tpg_v_blank_cnt_shift = 12,
+	.tpg_dt_encode_format_shift = 16,
+	.tpg_payload_mode_color = 0x8,
+	.tpg_split_en_shift = 5,
+	.top_mux_reg_offset = 0x1C,
+};
+
+int cam_top_tpg_v1_init_module(void);
+void cam_top_tpg_v1_exit_module(void);
+
+#endif /*_CAM_TOP_TPG_V1_H_ */

+ 17 - 0
drivers/camera_main.c

@@ -50,6 +50,10 @@
 
 #include "ope_dev_intf.h"
 
+#include "cam_top_tpg_v1.h"
+#include "cam_tfe_dev.h"
+#include "cam_tfe_csid530.h"
+
 struct camera_submodule_component {
 	int (*init)(void);
 	void (*exit)(void);
@@ -79,6 +83,14 @@ static const struct camera_submodule_component camera_isp[] = {
 #endif
 };
 
+static const struct camera_submodule_component camera_tfe[] = {
+#if IS_ENABLED(CONFIG_SPECTRA_TFE)
+	{&cam_top_tpg_v1_init_module, &cam_top_tpg_v1_exit_module},
+	{&cam_tfe_init_module, &cam_tfe_exit_module},
+	{&cam_tfe_csid530_init_module, &cam_tfe_csid530_exit_module},
+#endif
+};
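
For context, a sketch of how the registration loop in camera_main.c is expected to walk the new "Camera TFE" entry above; the rollback-on-failure shape is illustrative, not a copy of the actual loop:

	static int example_init_components(
		const struct camera_submodule_component *comp, int num)
	{
		int i, rc = 0;

		for (i = 0; i < num; i++) {
			rc = comp[i].init();
			if (rc)
				goto rollback;
		}
		return 0;

	rollback:
		/* unwind the components that did initialize */
		for (i--; i >= 0; i--)
			comp[i].exit();
		return rc;
	}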
+
 static const struct camera_submodule_component camera_sensor[] = {
 #ifdef CONFIG_SPECTRA_SENSOR
 	{&cam_res_mgr_init, &cam_res_mgr_exit},
@@ -151,6 +163,11 @@ static const struct camera_submodule submodule_table[] = {
 		.num_component = ARRAY_SIZE(camera_isp),
 		.component = camera_isp,
 	},
+	{
+		.name = "Camera TFE",
+		.num_component = ARRAY_SIZE(camera_tfe),
+		.component = camera_tfe,
+	},
 	{
 		.name = "Camera SENSOR",
 		.num_component = ARRAY_SIZE(camera_sensor),