Browse Source

msm: camera: cre: Add CRE driver changes

Add new camera driver Camera Reformat Engine (CRE).

CRs-Fixed: 2893978
Change-Id: Ia39b222493393bf7bb72525c86f0060ca0dc77c2
Signed-off-by: Vikram Sharma <[email protected]>
Vikram Sharma 4 years ago
parent
commit
23762a67ed
27 changed files with 7513 additions and 70 deletions
  1. 11 0
      Kbuild
  2. 287 0
      drivers/cam_cre/cam_cre_context.c
  3. 67 0
      drivers/cam_cre/cam_cre_context.h
  4. 232 0
      drivers/cam_cre/cam_cre_dev.c
  5. 43 0
      drivers/cam_cre/cam_cre_dev.h
  6. 2689 0
      drivers/cam_cre/cam_cre_hw_mgr/cam_cre_hw_mgr.c
  7. 447 0
      drivers/cam_cre/cam_cre_hw_mgr/cam_cre_hw_mgr.h
  8. 619 0
      drivers/cam_cre/cam_cre_hw_mgr/cre_hw/bus_rd/cre_bus_rd.c
  9. 98 0
      drivers/cam_cre/cam_cre_hw_mgr/cre_hw/bus_rd/cre_bus_rd.h
  10. 613 0
      drivers/cam_cre/cam_cre_hw_mgr/cre_hw/bus_wr/cre_bus_wr.c
  11. 98 0
      drivers/cam_cre/cam_cre_hw_mgr/cre_hw/bus_wr/cre_bus_wr.h
  12. 580 0
      drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_core.c
  13. 84 0
      drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_core.h
  14. 321 0
      drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_dev.c
  15. 135 0
      drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_dev_intf.h
  16. 340 0
      drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_hw.h
  17. 239 0
      drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_hw_100.h
  18. 83 0
      drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_soc.c
  19. 33 0
      drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_soc.h
  20. 49 0
      drivers/cam_cre/cam_cre_hw_mgr/cre_hw/include/cam_cre_hw_intf.h
  21. 18 0
      drivers/cam_cre/cam_cre_hw_mgr/cre_hw/include/cam_cre_hw_mgr_intf.h
  22. 315 0
      drivers/cam_cre/cam_cre_hw_mgr/cre_hw/top/cre_top.c
  23. 46 0
      drivers/cam_cre/cam_cre_hw_mgr/cre_hw/top/cre_top.h
  24. 1 0
      drivers/cam_utils/cam_debug_util.h
  25. 13 0
      drivers/camera_main.c
  26. 6 0
      drivers/camera_main.h
  27. 46 70
      include/uapi/camera/media/cam_cre.h

+ 11 - 0
Kbuild

@@ -241,6 +241,17 @@ camera-$(CONFIG_SPECTRA_OPE) += \
 	drivers/cam_ope/ope_hw_mgr/ope_hw/bus_rd/ope_bus_rd.o\
 	drivers/cam_ope/ope_hw_mgr/ope_hw/bus_wr/ope_bus_wr.o
 
+camera-$(CONFIG_SPECTRA_CRE) += \
+	drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_core.o \
+	drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_soc.o \
+	drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_dev.o \
+	drivers/cam_cre/cam_cre_hw_mgr/cre_hw/top/cre_top.o \
+	drivers/cam_cre/cam_cre_hw_mgr/cre_hw/bus_rd/cre_bus_rd.o \
+	drivers/cam_cre/cam_cre_hw_mgr/cre_hw/bus_wr/cre_bus_wr.o \
+	drivers/cam_cre/cam_cre_hw_mgr/cam_cre_hw_mgr.o \
+	drivers/cam_cre/cam_cre_dev.o \
+	drivers/cam_cre/cam_cre_context.o
+
 camera-$(CONFIG_SPECTRA_TFE) += \
 	drivers/cam_isp/isp_hw_mgr/isp_hw/ppi_hw/cam_csid_ppi_core.o \
 	drivers/cam_isp/isp_hw_mgr/isp_hw/ppi_hw/cam_csid_ppi_dev.o \

+ 287 - 0
drivers/cam_cre/cam_cre_context.c

@@ -0,0 +1,287 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "cam_trace.h"
+#include "cam_mem_mgr.h"
+#include "cam_cre_context.h"
+#include "cam_context_utils.h"
+#include "cam_debug_util.h"
+#include "cam_packet_util.h"
+#include "cam_context.h"
+
+static const char cre_dev_name[] = "cam-cre";
+
+/*
+ * Start the CRE device from the Acquired state.
+ * On success the context transitions to CAM_CTX_READY and the state
+ * change is traced; on failure the state is left unchanged.
+ */
+static int __cam_cre_start_dev_in_acquired(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_start_dev_to_hw(ctx, cmd);
+	if (!rc) {
+		ctx->state = CAM_CTX_READY;
+		trace_cam_context_state("CRE", ctx);
+	}
+
+	return rc;
+}
+
+
+/* Flush pending requests for this context through the HW layer. */
+static int __cam_cre_ctx_flush_dev_in_ready(struct cam_context *ctx,
+	struct cam_flush_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_flush_dev_to_hw(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_CRE, "Failed to flush device");
+
+	return rc;
+}
+
+/* Trigger a HW-layer dump of the request identified by @cmd. */
+static int __cam_cre_ctx_dump_dev_in_ready(struct cam_context *ctx,
+	struct cam_dump_req_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_dump_dev_to_hw(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_CRE, "Failed to dump device");
+
+	return rc;
+}
+
+/*
+ * Config-dev handler for the Acquired/Ready states.
+ *
+ * Maps the packet handle to validate it, then forwards the command to
+ * the HW layer via cam_context_prepare_dev_to_hw().
+ *
+ * Fixed: the cam_mem_get_cpu_buf() mapping was never released, leaking
+ * a kref on the packet buffer for every config call. The mapped
+ * address itself is not used here beyond handle validation.
+ */
+static int __cam_cre_ctx_config_dev_in_ready(struct cam_context *ctx,
+	struct cam_config_dev_cmd *cmd)
+{
+	int rc;
+	size_t len;
+	uintptr_t packet_addr;
+
+	rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle,
+		&packet_addr, &len);
+	if (rc) {
+		CAM_ERR(CAM_CRE, "[%s][%d] Can not get packet address",
+			ctx->dev_name, ctx->ctx_id);
+		return -EINVAL;
+	}
+
+	rc = cam_context_prepare_dev_to_hw(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_CRE, "Failed to prepare device");
+
+	/* Balance the cam_mem_get_cpu_buf() reference taken above */
+	cam_mem_put_cpu_buf((int32_t) cmd->packet_handle);
+
+	return rc;
+}
+
+/*
+ * Stop the device from the Ready state.
+ * Note: the context is moved back to CAM_CTX_ACQUIRED even if the HW
+ * stop failed — presumably intentional so release can still proceed;
+ * TODO(review) confirm.
+ */
+static int __cam_cre_ctx_stop_dev_in_ready(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_stop_dev_to_hw(ctx);
+	if (rc)
+		CAM_ERR(CAM_CRE, "Failed to stop device");
+
+	ctx->state = CAM_CTX_ACQUIRED;
+	trace_cam_context_state("CRE", ctx);
+	return rc;
+}
+
+/*
+ * Release the device from the Acquired state; the context returns to
+ * CAM_CTX_AVAILABLE regardless of the HW-layer release result.
+ */
+static int __cam_cre_ctx_release_dev_in_acquired(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_release_dev_to_hw(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_CRE, "Unable to release device %d", rc);
+
+	ctx->state = CAM_CTX_AVAILABLE;
+
+	return rc;
+}
+
+/*
+ * Release from the Ready state: stop the device first, then release.
+ * Both steps are attempted even if the first fails; the release
+ * result is what gets returned.
+ */
+static int __cam_cre_ctx_release_dev_in_ready(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = __cam_cre_ctx_stop_dev_in_ready(ctx, NULL);
+	if (rc)
+		CAM_ERR(CAM_CRE, "Failed to stop device");
+
+	rc = __cam_cre_ctx_release_dev_in_acquired(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_CRE, "Failed to release device");
+
+	return rc;
+}
+
+/* IRQ callback: forward a buf-done event from the HW layer to the
+ * generic context buf-done handling.
+ */
+static int __cam_cre_ctx_handle_buf_done_in_ready(void *ctx,
+	uint32_t evt_id, void *done)
+{
+	return cam_context_buf_done_from_hw(ctx, done, evt_id);
+}
+
+/*
+ * SMMU page-fault callback: walk the active request list and ask the
+ * HW layer to match the faulting address against each request's
+ * packet buffers.
+ *
+ * NOTE(review): closest_port is written but never read, and
+ * b_ctx_found is passed down but never checked here — confirm whether
+ * they are placeholders for planned logic.
+ */
+static int cam_cre_context_dump_active_request(void *data,
+	struct cam_smmu_pf_info *pf_info)
+{
+
+	struct cam_context *ctx = (struct cam_context *)data;
+	struct cam_ctx_request          *req = NULL;
+	struct cam_ctx_request          *req_temp = NULL;
+	struct cam_hw_mgr_dump_pf_data  *pf_dbg_entry = NULL;
+	uint32_t  resource_type = 0;
+	int rc = 0;
+	int closest_port;
+	bool b_mem_found = false, b_ctx_found = false;
+
+
+	if (!ctx) {
+		CAM_ERR(CAM_CRE, "Invalid ctx");
+		return -EINVAL;
+	}
+
+	CAM_INFO(CAM_CRE, "iommu fault for cre ctx %d state %d",
+		ctx->ctx_id, ctx->state);
+
+	list_for_each_entry_safe(req, req_temp,
+			&ctx->active_req_list, list) {
+		pf_dbg_entry = &(req->pf_data);
+		closest_port = -1;
+		CAM_INFO(CAM_CRE, "req_id : %lld ", req->request_id);
+
+		rc = cam_context_dump_pf_info_to_hw(ctx, pf_dbg_entry->packet,
+			&b_mem_found, &b_ctx_found, &resource_type, pf_info);
+		if (rc)
+			CAM_ERR(CAM_CRE, "Failed to dump pf info");
+
+		if (b_mem_found)
+			CAM_ERR(CAM_CRE, "Found page fault in req %lld %d",
+				req->request_id, rc);
+	}
+	return rc;
+}
+
+/*
+ * Acquire the device from the Available state; on success the context
+ * transitions to CAM_CTX_ACQUIRED.
+ */
+static int __cam_cre_ctx_acquire_dev_in_available(struct cam_context *ctx,
+	struct cam_acquire_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_acquire_dev_to_hw(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_CRE, "Unable to Acquire device %d", rc);
+	else
+		ctx->state = CAM_CTX_ACQUIRED;
+
+	return rc;
+}
+
+/*
+ * Top-level per-state ioctl/irq dispatch table, indexed by
+ * cam_context state. Note that config/flush/dump are already wired in
+ * the Acquired state (not just Ready) — presumably intentional so a
+ * packet can be configured before start; TODO(review) confirm.
+ */
+static struct cam_ctx_ops
+	cam_cre_ctx_state_machine[CAM_CTX_STATE_MAX] = {
+	/* Uninit */
+	{
+		.ioctl_ops = { },
+		.crm_ops = { },
+		.irq_ops = NULL,
+	},
+	/* Available */
+	{
+		.ioctl_ops = {
+			.acquire_dev = __cam_cre_ctx_acquire_dev_in_available,
+		},
+		.crm_ops = { },
+		.irq_ops = NULL,
+	},
+	/* Acquired */
+	{
+		.ioctl_ops = {
+			.release_dev = __cam_cre_ctx_release_dev_in_acquired,
+			.start_dev = __cam_cre_start_dev_in_acquired,
+			.config_dev = __cam_cre_ctx_config_dev_in_ready,
+			.flush_dev = __cam_cre_ctx_flush_dev_in_ready,
+			.dump_dev = __cam_cre_ctx_dump_dev_in_ready,
+		},
+		.crm_ops = { },
+		.irq_ops = __cam_cre_ctx_handle_buf_done_in_ready,
+		.pagefault_ops = cam_cre_context_dump_active_request,
+	},
+	/* Ready */
+	{
+		.ioctl_ops = {
+			.stop_dev = __cam_cre_ctx_stop_dev_in_ready,
+			.release_dev = __cam_cre_ctx_release_dev_in_ready,
+			.config_dev = __cam_cre_ctx_config_dev_in_ready,
+			.flush_dev = __cam_cre_ctx_flush_dev_in_ready,
+			.dump_dev = __cam_cre_ctx_dump_dev_in_ready,
+		},
+		.crm_ops = {},
+		.irq_ops = __cam_cre_ctx_handle_buf_done_in_ready,
+		.pagefault_ops = cam_cre_context_dump_active_request,
+	},
+	/* Activated */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+		.pagefault_ops = cam_cre_context_dump_active_request,
+	},
+};
+
+/*
+ * Initialize a CRE context: zero it, link every request slot back to
+ * the private context, initialize the generic cam_context, then hook
+ * in the CRE state machine.
+ *
+ * Returns 0 on success, -EFAULT on NULL inputs, or the
+ * cam_context_init() error.
+ */
+int cam_cre_context_init(struct cam_cre_context *ctx,
+	struct cam_context *ctx_base,
+	struct cam_hw_mgr_intf *hw_intf,
+	uint32_t ctx_id)
+{
+	int rc;
+	int i;
+
+	if (!ctx || !ctx_base) {
+		CAM_ERR(CAM_CRE, "Invalid Context");
+		rc = -EFAULT;
+		goto err;
+	}
+
+	memset(ctx, 0, sizeof(*ctx));
+
+	ctx->base = ctx_base;
+
+	/* req_priv lets HW-layer callbacks recover the CRE context */
+	for (i = 0; i < CAM_CTX_REQ_MAX; i++)
+		ctx->req_base[i].req_priv = ctx;
+
+	rc = cam_context_init(ctx_base, cre_dev_name, CAM_CRE, ctx_id,
+		NULL, hw_intf, ctx->req_base, CAM_CTX_REQ_MAX);
+	if (rc) {
+		CAM_ERR(CAM_CRE, "Camera Context Base init failed");
+		goto err;
+	}
+
+	ctx_base->state_machine = cam_cre_ctx_state_machine;
+	ctx_base->ctx_priv = ctx;
+
+err:
+	return rc;
+}
+
+/*
+ * Tear down a CRE context previously set up by cam_cre_context_init()
+ * and zero the structure so stale pointers cannot be reused.
+ */
+int cam_cre_context_deinit(struct cam_cre_context *ctx)
+{
+	if (!ctx || !ctx->base) {
+		CAM_ERR(CAM_CRE, "Invalid params: %pK", ctx);
+		return -EINVAL;
+	}
+
+	cam_context_deinit(ctx->base);
+
+	memset(ctx, 0, sizeof(*ctx));
+
+	return 0;
+}

+ 67 - 0
drivers/cam_cre/cam_cre_context.h

@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CRE_CONTEXT_H_
+#define _CAM_CRE_CONTEXT_H_
+
+#include <media/cam_cre.h>
+
+#include "cam_context.h"
+#include "cam_cre_hw_mgr_intf.h"
+
+/* Max number of per-context HW event callback slots */
+#define CAM_CRE_HW_EVENT_MAX 20
+
+/**
+ * struct cam_cre_context - CRE context
+ * @base: Base cre cam context object
+ * @req_base: Common request structure
+ */
+struct cam_cre_context {
+	struct cam_context *base;
+	struct cam_ctx_request req_base[CAM_CTX_REQ_MAX];
+};
+
+/* cam cre context irq handling function type */
+typedef int (*cam_cre_hw_event_cb_func)(
+	struct cam_cre_context *ctx_cre,
+	void *evt_data);
+
+/**
+ * struct cam_cre_ctx_irq_ops - Function table for handling IRQ callbacks
+ *
+ * @irq_ops: Array of handle function pointers.
+ *
+ */
+struct cam_cre_ctx_irq_ops {
+	cam_cre_hw_event_cb_func irq_ops[CAM_CRE_HW_EVENT_MAX];
+};
+
+/**
+ * cam_cre_context_init()
+ *
+ * @brief: Initialization function for the CRE context
+ *
+ * @ctx: CRE context obj to be initialized
+ * @ctx_base: Context base from cam_context
+ * @hw_intf: CRE hw manager interface
+ * @ctx_id: ID for this context
+ *
+ * @return: 0 on success, negative error code otherwise
+ */
+int cam_cre_context_init(struct cam_cre_context *ctx,
+	struct cam_context *ctx_base,
+	struct cam_hw_mgr_intf *hw_intf,
+	uint32_t ctx_id);
+
+/**
+ * cam_cre_context_deinit()
+ *
+ * @brief: Deinitialize function for the CRE context
+ *
+ * @ctx: CRE context obj to be deinitialized
+ *
+ * @return: 0 on success, -EINVAL on bad input
+ */
+int cam_cre_context_deinit(struct cam_cre_context *ctx);
+
+#endif  /* _CAM_CRE_CONTEXT_H_ */

+ 232 - 0
drivers/cam_cre/cam_cre_dev.c

@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "cam_node.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_cre_hw_mgr.h"
+#include "cam_cre_dev.h"
+#include "cam_debug_util.h"
+#include "cam_smmu_api.h"
+#include "camera_main.h"
+
+#define CAM_CRE_DEV_NAME "cam-cre"
+
+/*
+ * struct cam_cre_subdev - singleton CRE V4L2 subdevice bookkeeping
+ * @sd:       common camera subdevice
+ * @node:     subdevice node pointer
+ * @ctx:      base context storage
+ * @ctx_cre:  CRE private context storage
+ * @cre_lock: protects open_cnt and shutdown
+ * @open_cnt: number of open handles
+ *
+ * NOTE(review): arrays are sized CRE_CTX_MAX here while the bind code
+ * iterates CAM_CRE_CTX_MAX — confirm both macros have the same value.
+ */
+struct cam_cre_subdev {
+	struct cam_subdev sd;
+	struct cam_node *node;
+	struct cam_context ctx[CRE_CTX_MAX];
+	struct cam_cre_context ctx_cre[CRE_CTX_MAX];
+	struct mutex cre_lock;
+	int32_t open_cnt;
+	int32_t reserved;
+};
+static struct cam_cre_subdev g_cre_dev;
+
+/*
+ * SMMU page-fault callback for the CRE subdevice: fan the fault out
+ * to every context on the node so the owning request can be found.
+ *
+ * Fixed: the error log used the CAM_ISP tag (copy-paste from the ISP
+ * driver); it now logs under CAM_CRE like the rest of this file.
+ */
+static void cam_cre_dev_iommu_fault_handler(
+	struct cam_smmu_pf_info *pf_info)
+{
+	int i = 0;
+	struct cam_node *node = NULL;
+
+	if (!pf_info || !pf_info->token) {
+		CAM_ERR(CAM_CRE, "invalid token in page handler cb");
+		return;
+	}
+
+	node = (struct cam_node *)pf_info->token;
+
+	for (i = 0; i < node->ctx_size; i++)
+		cam_context_dump_pf_info(&(node->ctx_list[i]), pf_info);
+}
+
+/* v4l2 subdev open: just track the open handle count under cre_lock. */
+static int cam_cre_subdev_open(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+
+	mutex_lock(&g_cre_dev.cre_lock);
+	g_cre_dev.open_cnt++;
+	mutex_unlock(&g_cre_dev.cre_lock);
+
+	return 0;
+}
+
+/*
+ * v4l2 subdev close: drop the handle count and shut the node down
+ * when the last handle goes away. Note the count is decremented
+ * before the node NULL check, so a NULL node still consumes the
+ * close — TODO(review) confirm that ordering is intended.
+ */
+static int cam_cre_subdev_close(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	int rc = 0;
+	struct cam_node *node = v4l2_get_subdevdata(sd);
+
+
+	mutex_lock(&g_cre_dev.cre_lock);
+	if (g_cre_dev.open_cnt <= 0) {
+		CAM_DBG(CAM_CRE, "CRE subdev is already closed");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	g_cre_dev.open_cnt--;
+
+	if (!node) {
+		CAM_ERR(CAM_CRE, "Node ptr is NULL");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (g_cre_dev.open_cnt == 0)
+		cam_node_shutdown(node);
+
+end:
+	mutex_unlock(&g_cre_dev.cre_lock);
+	return rc;
+}
+
+/* Open/close hooks registered with the v4l2 subdev core */
+static const struct v4l2_subdev_internal_ops cam_cre_subdev_internal_ops = {
+	.close = cam_cre_subdev_close,
+	.open = cam_cre_subdev_open,
+};
+
+/*
+ * Component bind: register the subdev, bring up the HW manager,
+ * initialize all CRE contexts, initialize the node and install the
+ * SMMU fault handler. Errors unwind in reverse order.
+ *
+ * Fixed: cre_lock was initialized last, but the open/close callbacks
+ * (which take it) can run as soon as cam_subdev_probe() registers the
+ * device node — initialize the mutex first.
+ *
+ * NOTE(review): the context loop uses CAM_CRE_CTX_MAX while the
+ * g_cre_dev arrays are sized CRE_CTX_MAX — confirm the macros match.
+ */
+static int cam_cre_subdev_component_bind(struct device *dev,
+	struct device *master_dev, void *data)
+{
+	int rc;
+	int i;
+	struct cam_hw_mgr_intf hw_mgr_intf;
+	struct cam_node *node;
+	int iommu_hdl = -1;
+	struct platform_device *pdev = to_platform_device(dev);
+
+	mutex_init(&g_cre_dev.cre_lock);
+
+	g_cre_dev.sd.internal_ops = &cam_cre_subdev_internal_ops;
+	rc = cam_subdev_probe(&g_cre_dev.sd, pdev, CAM_CRE_DEV_NAME,
+		CAM_CRE_DEVICE_TYPE);
+	if (rc) {
+		CAM_ERR(CAM_CRE, "CRE cam_subdev_probe failed %d", rc);
+		goto err;
+	}
+	node = (struct cam_node *)g_cre_dev.sd.token;
+
+	rc = cam_cre_hw_mgr_init(pdev->dev.of_node,
+		(uint64_t *)&hw_mgr_intf, &iommu_hdl);
+	if (rc) {
+		CAM_ERR(CAM_CRE, "Can not initialize CRE HWmanager %d", rc);
+		goto unregister;
+	}
+
+	for (i = 0; i < CAM_CRE_CTX_MAX; i++) {
+		rc = cam_cre_context_init(&g_cre_dev.ctx_cre[i],
+			&g_cre_dev.ctx[i],
+			&node->hw_mgr_intf,
+			i);
+		if (rc) {
+			CAM_ERR(CAM_CRE, "CRE context init failed %d %d",
+				i, rc);
+			goto ctx_init_fail;
+		}
+	}
+
+	rc = cam_node_init(node, &hw_mgr_intf, g_cre_dev.ctx, CAM_CRE_CTX_MAX,
+		CAM_CRE_DEV_NAME);
+	if (rc) {
+		CAM_ERR(CAM_CRE, "CRE node init failed %d", rc);
+		goto ctx_init_fail;
+	}
+
+	cam_smmu_set_client_page_fault_handler(iommu_hdl,
+		cam_cre_dev_iommu_fault_handler, node);
+
+	CAM_DBG(CAM_CRE, "Component bound successfully");
+
+	return rc;
+
+ctx_init_fail:
+	for (--i; i >= 0; i--)
+		if (cam_cre_context_deinit(&g_cre_dev.ctx_cre[i]))
+			CAM_ERR(CAM_CRE, "deinit fail %d %d", i, rc);
+unregister:
+	if (cam_subdev_remove(&g_cre_dev.sd))
+		CAM_ERR(CAM_CRE, "remove fail %d", rc);
+err:
+	return rc;
+}
+
+/*
+ * Component unbind: deinitialize every CRE context and unregister the
+ * subdev.
+ *
+ * Fixed: the loop was bounded by CAM_CTX_MAX, but bind initializes
+ * only CAM_CRE_CTX_MAX contexts (and the backing arrays are sized for
+ * CRE contexts) — using the generic maximum risks indexing past the
+ * array. Use the same bound as bind.
+ */
+static void cam_cre_subdev_component_unbind(struct device *dev,
+	struct device *master_dev, void *data)
+{
+	int rc;
+	int i;
+
+	for (i = 0; i < CAM_CRE_CTX_MAX; i++) {
+		rc = cam_cre_context_deinit(&g_cre_dev.ctx_cre[i]);
+		if (rc)
+			CAM_ERR(CAM_CRE, "CRE context %d deinit failed %d",
+				i, rc);
+	}
+
+	rc = cam_subdev_remove(&g_cre_dev.sd);
+	if (rc)
+		CAM_ERR(CAM_CRE, "Unregister failed %d", rc);
+}
+
+/* Bind/unbind hooks for the camera component framework.
+ * NOTE(review): "const static" works but "static const" is the
+ * conventional ordering.
+ */
+const static struct component_ops cam_cre_subdev_component_ops = {
+	.bind = cam_cre_subdev_component_bind,
+	.unbind = cam_cre_subdev_component_unbind,
+};
+
+/* Platform remove: detach this device from the component framework. */
+static int cam_cre_subdev_remove(struct platform_device *pdev)
+{
+	component_del(&pdev->dev, &cam_cre_subdev_component_ops);
+	return 0;
+}
+
+/* Platform probe: register with the component framework; real setup
+ * happens later in the component bind callback.
+ */
+static int cam_cre_subdev_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+
+	CAM_DBG(CAM_CRE, "Adding CRE component");
+	rc = component_add(&pdev->dev, &cam_cre_subdev_component_ops);
+	if (rc)
+		CAM_ERR(CAM_CRE, "failed to add component rc: %d", rc);
+
+	return rc;
+
+}
+
+/* Device-tree match table: binds to "qcom,cam-cre" nodes */
+static const struct of_device_id cam_cre_subdev_dt_match[] = {
+	{
+		.compatible = "qcom,cam-cre",
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, cam_cre_subdev_dt_match);
+
+/* Platform driver object registered by cam_cre_subdev_init_module() */
+struct platform_driver cam_cre_subdev_driver = {
+	.probe = cam_cre_subdev_probe,
+	.remove = cam_cre_subdev_remove,
+	.driver = {
+		.name = "cam_cre",
+		.of_match_table = cam_cre_subdev_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+/*
+ * Module entry/exit helpers called from camera_main.c.
+ *
+ * Fixed: both functions referenced "cam_cre_driver", but the driver
+ * object defined above is named cam_cre_subdev_driver — the original
+ * would not link.
+ */
+int cam_cre_subdev_init_module(void)
+{
+	return platform_driver_register(&cam_cre_subdev_driver);
+}
+
+void cam_cre_subdev_exit_module(void)
+{
+	platform_driver_unregister(&cam_cre_subdev_driver);
+}
+
+MODULE_DESCRIPTION("MSM CRE driver");
+MODULE_LICENSE("GPL v2");

+ 43 - 0
drivers/cam_cre/cam_cre_dev.h

@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CRE_DEV_H_
+#define _CAM_CRE_DEV_H_
+
+#include "cam_subdev.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_context.h"
+#include "cam_cre_context.h"
+
+/**
+ * struct cam_cre_dev - Camera CRE V4l2 device node
+ *
+ * @sd: Common camera subdevice node
+ * @node: Pointer to cre subdevice
+ * @ctx: CRE base context storage
+ * @ctx_cre: CRE private context storage
+ * @cre_mutex: CRE dev mutex
+ * @open_cnt: Open device count
+ */
+struct cam_cre_dev {
+	struct cam_subdev sd;
+	struct cam_node *node;
+	struct cam_context ctx[CAM_CRE_CTX_MAX];
+	struct cam_cre_context ctx_cre[CAM_CRE_CTX_MAX];
+	struct mutex cre_mutex;
+	int32_t open_cnt;
+};
+
+/**
+ * @brief : API to register CRE dev to platform framework.
+ * @return 0 on success, negative error code on failure.
+ *
+ * NOTE(review): the .c file defines cam_cre_subdev_init_module /
+ * cam_cre_subdev_exit_module — confirm which names camera_main.c
+ * expects; these declarations do not match the definitions.
+ */
+int cam_cre_dev_init_module(void);
+
+/**
+ * @brief : API to remove CRE dev from platform framework.
+ */
+void cam_cre_dev_exit_module(void);
+#endif /* _CAM_CRE_DEV_H_ */

+ 2689 - 0
drivers/cam_cre/cam_cre_hw_mgr/cam_cre_hw_mgr.c

@@ -0,0 +1,2689 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/timer.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <media/cam_defs.h>
+#include <media/cam_cre.h>
+#include <media/cam_cpas.h>
+
+#include "cam_sync_api.h"
+#include "cam_packet_util.h"
+#include "cam_hw.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_cre_hw_mgr_intf.h"
+#include "cam_cre_hw_mgr.h"
+#include "cre_hw.h"
+#include "cam_smmu_api.h"
+#include "cam_mem_mgr.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_mem_mgr.h"
+#include "cam_debug_util.h"
+#include "cam_soc_util.h"
+#include "cam_cpas_api.h"
+#include "cam_common_util.h"
+#include "cre_dev_intf.h"
+#include "cam_compat.h"
+
+/* CRE fetch-engine / write-engine IRQ status bits */
+#define CAM_CRE_FE_IRQ 0x4
+#define CAM_CRE_WE_IRQ 0x2
+
+/* Singleton CRE HW manager instance */
+static struct cam_cre_hw_mgr *cre_hw_mgr;
+
+/*
+ * Look up the acquire-time resource info matching an IO config's
+ * resource_type for the given direction.
+ *
+ * Returns the matching info, or NULL when no resource matches (logged
+ * for input/output directions) or the direction is unrecognized.
+ *
+ * Cleaned up: the original re-tested the loop counter against the
+ * bound after each loop; those checks were always true because the
+ * loops only fall through at the bound.
+ */
+static struct cam_cre_io_buf_info *cam_cre_mgr_get_rsc(
+	struct cam_cre_ctx *ctx_data,
+	struct cam_buf_io_cfg *in_io_buf)
+{
+	int k;
+
+	if (in_io_buf->direction == CAM_BUF_INPUT) {
+		for (k = 0; k < CRE_MAX_IN_RES; k++) {
+			if (ctx_data->cre_acquire.in_res[k].res_id ==
+				in_io_buf->resource_type)
+				return &ctx_data->cre_acquire.in_res[k];
+		}
+		CAM_ERR(CAM_CRE, "Invalid res_id %d",
+			in_io_buf->resource_type);
+	} else if (in_io_buf->direction == CAM_BUF_OUTPUT) {
+		for (k = 0; k < CRE_MAX_OUT_RES; k++) {
+			if (ctx_data->cre_acquire.out_res[k].res_id ==
+				in_io_buf->resource_type)
+				return &ctx_data->cre_acquire.out_res[k];
+		}
+		CAM_ERR(CAM_CRE, "Invalid res_id %d",
+			in_io_buf->resource_type);
+	}
+
+	return NULL;
+}
+
+/*
+ * Push the request's register buffer to every CRE core via the
+ * CRE_HW_REG_SET_UPDATE process_cmd.
+ * NOTE(review): the process_cmd return value is ignored here —
+ * confirm that is acceptable for this command.
+ */
+static int cam_cre_mgr_update_reg_set(struct cam_cre_hw_mgr *hw_mgr,
+	struct cam_cre_request *cre_req)
+{
+	struct cam_cre_dev_reg_set_update reg_set_upd_cmd;
+	int i;
+
+	reg_set_upd_cmd.cre_reg_buf = cre_req->cre_reg_buf;
+
+	for (i = 0; i < cre_hw_mgr->num_cre; i++) {
+		hw_mgr->cre_dev_intf[i]->hw_ops.process_cmd(
+			hw_mgr->cre_dev_intf[i]->hw_priv,
+			CRE_HW_REG_SET_UPDATE,
+			&reg_set_upd_cmd, sizeof(reg_set_upd_cmd));
+	}
+
+	return 0;
+}
+
+/*
+ * Free every allocated io_buf of a request across all batches and
+ * clear the slots so the function is safe to call more than once.
+ */
+static void cam_cre_free_io_config(struct cam_cre_request *req)
+{
+	int i, j;
+
+	for (i = 0; i < CRE_MAX_BATCH_SIZE; i++) {
+		for (j = 0; j < CRE_MAX_IO_BUFS; j++) {
+			if (req->io_buf[i][j]) {
+				cam_free_clear(req->io_buf[i][j]);
+				req->io_buf[i][j] = NULL;
+			}
+		}
+	}
+}
+
+/*
+ * Build the per-request io_buf descriptors from the packet's IO
+ * configs: resolve each config against the acquire-time resource
+ * info, map every plane through the (secure or non-secure) IOMMU and
+ * record the plane geometry.
+ *
+ * Fixed: the cam_cre_mgr_get_rsc() result was dereferenced without a
+ * NULL check, and io_bufs allocated before a cam_mem_get_io_buf()
+ * failure were leaked.
+ */
+static int cam_cre_mgr_process_cmd_io_buf_req(struct cam_cre_hw_mgr *hw_mgr,
+	struct cam_packet *packet, struct cam_cre_ctx *ctx_data,
+	uint32_t req_idx)
+{
+	int rc = 0;
+	int i, j, k;
+	dma_addr_t iova_addr;
+
+	size_t len;
+	struct cam_cre_request *cre_request;
+	struct cre_io_buf *io_buf;
+	struct plane_info *plane_info;
+
+	uint32_t alignment;
+	bool     is_secure;
+	struct   cam_buf_io_cfg *io_cfg_ptr = NULL;
+	struct   cam_cre_io_buf_info *acq_io_buf;
+
+	io_cfg_ptr = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload +
+			packet->io_configs_offset / 4);
+
+	cre_request = ctx_data->req_list[req_idx];
+	cre_request->num_batch = ctx_data->cre_acquire.batch_size;
+
+	for (i = 0; i < cre_request->num_batch; i++) {
+		for (j = 0; j < packet->num_io_configs; j++) {
+			cre_request->num_io_bufs[i]++;
+			/*
+			 * NOTE(review): io_cfg_ptr is indexed by the batch
+			 * index i here and for all plane accesses below;
+			 * confirm this should not be the io-config index j
+			 * when num_io_configs > 1.
+			 */
+			acq_io_buf = cam_cre_mgr_get_rsc(ctx_data,
+				&io_cfg_ptr[i]);
+			if (!acq_io_buf) {
+				CAM_ERR(CAM_CRE,
+					"Invalid IO config: batch %d cfg %d",
+					i, j);
+				cam_cre_free_io_config(cre_request);
+				return -EINVAL;
+			}
+
+			cre_request->io_buf[i][j] =
+				kzalloc(sizeof(struct cre_io_buf), GFP_KERNEL);
+			if (!cre_request->io_buf[i][j]) {
+				CAM_ERR(CAM_CRE,
+					"IO config allocation failure");
+				cam_cre_free_io_config(cre_request);
+				return -ENOMEM;
+			}
+
+			io_buf = cre_request->io_buf[i][j];
+			io_buf->num_planes = acq_io_buf->num_planes;
+			io_buf->resource_type = acq_io_buf->res_id;
+			io_buf->direction = acq_io_buf->direction;
+			io_buf->fence = acq_io_buf->fence;
+			io_buf->format = acq_io_buf->format;
+			alignment = acq_io_buf->alignment;
+
+			for (k = 0; k < io_buf->num_planes; k++) {
+				is_secure = cam_mem_is_secure_buf(
+					io_cfg_ptr[i].mem_handle[k]);
+				if (is_secure)
+					rc = cam_mem_get_io_buf(
+						io_cfg_ptr[i].mem_handle[k],
+						hw_mgr->iommu_sec_hdl,
+						&iova_addr, &len);
+				else
+					rc = cam_mem_get_io_buf(
+						io_cfg_ptr[i].mem_handle[k],
+						hw_mgr->iommu_hdl,
+						&iova_addr, &len);
+
+				if (rc) {
+					CAM_ERR(CAM_CRE, "get buf failed: %d",
+						rc);
+					/* release io_bufs allocated so far */
+					cam_cre_free_io_config(cre_request);
+					return -EINVAL;
+				}
+				iova_addr += io_cfg_ptr[i].offsets[k];
+				plane_info = &io_buf->p_info[k];
+
+				plane_info->offset    = io_cfg_ptr[i].offsets[k];
+				plane_info->format    = io_buf->format;
+				plane_info->iova_addr = iova_addr;
+				plane_info->width     =
+					io_cfg_ptr[i].planes[k].width;
+				plane_info->height    =
+					io_cfg_ptr[i].planes[k].height;
+				plane_info->stride    =
+					io_cfg_ptr[i].planes[k].plane_stride;
+				plane_info->len       = len;
+				plane_info->alignment = alignment;
+			}
+		}
+	}
+
+	return rc;
+}
+
+/* Kick the CRE HW watchdog, if it has been armed, and count the kick. */
+static void cam_cre_device_timer_reset(struct cam_cre_hw_mgr *hw_mgr)
+{
+	if (!hw_mgr->clk_info.watch_dog)
+		return;
+
+	CAM_DBG(CAM_CRE, "reset timer");
+	crm_timer_reset(hw_mgr->clk_info.watch_dog);
+	hw_mgr->clk_info.watch_dog_reset_counter++;
+}
+
+/*
+ * Issue CRE_HW_RESET to every CRE core; stops at the first failing
+ * core and returns its error.
+ */
+static int cam_cre_mgr_reset_hw(void)
+{
+	struct cam_cre_hw_mgr *hw_mgr = cre_hw_mgr;
+	int i, rc = 0;
+
+	for (i = 0; i < cre_hw_mgr->num_cre; i++) {
+		rc = hw_mgr->cre_dev_intf[i]->hw_ops.process_cmd(
+			hw_mgr->cre_dev_intf[i]->hw_priv, CRE_HW_RESET,
+			NULL, 0);
+		if (rc) {
+			CAM_ERR(CAM_CRE, "CRE Reset failed: %d", rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+
+/*
+ * Wait (up to 30s) for the CRE top to report idle, then retire the
+ * request slot identified by @cookie. The caller is expected to hold
+ * ctx->ctx_mutex; it is released on every exit path.
+ *
+ * Fixed: the CAM_DBG format string had three conversion specifiers
+ * ("lcb=%llu") but only two arguments.
+ *
+ * NOTE(review): wait_for_completion_timeout() returns 0 on timeout
+ * and >0 on completion — verify the branches below are not inverted
+ * (timeout currently only kicks the watchdog; completion resets HW).
+ */
+static void cam_cre_ctx_wait_for_idle_irq(struct cam_cre_ctx *ctx,
+		struct cam_cre_request *cre_req, uint32_t cookie)
+{
+	int rc;
+
+	if (ctx->ctx_state != CRE_CTX_STATE_ACQUIRED) {
+		CAM_ERR(CAM_CRE, "ctx %u is in %d state",
+			ctx->ctx_id, ctx->ctx_state);
+		mutex_unlock(&ctx->ctx_mutex);
+		return;
+	}
+
+	rc = wait_for_completion_timeout(
+		&ctx->cre_top->idle_done, msecs_to_jiffies(30000));
+	if (!rc) {
+		cam_cre_device_timer_reset(cre_hw_mgr);
+	} else {
+		CAM_INFO(CAM_CRE, "After reset of CRE, reapply req");
+		rc = cam_cre_mgr_reset_hw();
+	}
+
+	if (!test_bit(cookie, ctx->bitmap)) {
+		CAM_ERR(CAM_CRE, "Req not present reqIdx = %d for ctx_id = %d",
+			cookie, ctx->ctx_id);
+		goto end;
+	}
+	CAM_DBG(CAM_REQ, "req_id= %llu ctx_id= %d",
+		cre_req->request_id, ctx->ctx_id);
+
+	ctx->req_cnt--;
+
+	cre_req->request_id = 0;
+	cam_cre_free_io_config(ctx->req_list[cookie]);
+	cam_free_clear((void *)ctx->req_list[cookie]);
+	ctx->req_list[cookie] = NULL;
+	clear_bit(cookie, ctx->bitmap);
+end:
+	mutex_unlock(&ctx->ctx_mutex);
+}
+
+
+/*
+ * Ask every CRE core to prepare (translate) the packet into its
+ * register buffer for the request slot @req_idx; stops on the first
+ * failing core.
+ */
+static int cam_cre_mgr_create_cre_reg_buf(struct cam_cre_hw_mgr *hw_mgr,
+	struct cam_packet *packet,
+	struct cam_hw_prepare_update_args *prepare_args,
+	struct cam_cre_ctx *ctx_data, uint32_t req_idx)
+{
+	int i, rc = 0;
+	struct cam_cre_dev_prepare_req prepare_req;
+
+	prepare_req.ctx_data = ctx_data;
+	prepare_req.hw_mgr = hw_mgr;
+	prepare_req.packet = packet;
+	prepare_req.prepare_args = prepare_args;
+	prepare_req.req_idx = req_idx;
+
+	for (i = 0; i < cre_hw_mgr->num_cre; i++) {
+		rc = hw_mgr->cre_dev_intf[i]->hw_ops.process_cmd(
+			hw_mgr->cre_dev_intf[i]->hw_priv,
+			CRE_HW_PREPARE, &prepare_req, sizeof(prepare_req));
+		if (rc) {
+			CAM_ERR(CAM_CRE, "CRE Dev prepare failed: %d", rc);
+			goto end;
+		}
+	}
+
+end:
+	return rc;
+}
+
+/*
+ * Validate and log the per-path votes in @clk_info and return the
+ * number of paths the caller should record.
+ *
+ * Fixed: the original unconditionally returned 0, which zeroed
+ * ctx_data->clk_info.num_paths at the call site even though the
+ * caller copies clk_info->num_paths axi_path entries immediately
+ * afterwards — TODO(review) confirm against the OPE equivalent.
+ */
+static int cam_cre_mgr_calculate_num_path(
+	struct cam_cre_clk_bw_req_internal_v2 *clk_info,
+	struct cam_cre_ctx *ctx_data)
+{
+	int i, path_index = 0;
+
+	for (i = 0; i < CAM_CRE_MAX_PER_PATH_VOTES; i++) {
+		if ((clk_info->axi_path[i].path_data_type <
+			CAM_AXI_PATH_DATA_CRE_START_OFFSET) ||
+			(clk_info->axi_path[i].path_data_type >
+			CAM_AXI_PATH_DATA_CRE_MAX_OFFSET) ||
+			((clk_info->axi_path[i].path_data_type -
+			CAM_AXI_PATH_DATA_CRE_START_OFFSET) >=
+			CAM_CRE_MAX_PER_PATH_VOTES)) {
+			CAM_DBG(CAM_CRE,
+				"Invalid path %d, start offset=%d, max=%d",
+				ctx_data->clk_info.axi_path[i].path_data_type,
+				CAM_AXI_PATH_DATA_CRE_START_OFFSET,
+				CAM_CRE_MAX_PER_PATH_VOTES);
+			continue;
+		}
+
+		path_index = clk_info->axi_path[i].path_data_type -
+			CAM_AXI_PATH_DATA_CRE_START_OFFSET;
+
+		CAM_DBG(CAM_CRE,
+			"clk_info: i[%d]: [%s %s] bw [%lld %lld] num_path: %d",
+			i,
+			cam_cpas_axi_util_trans_type_to_string(
+			clk_info->axi_path[i].transac_type),
+			cam_cpas_axi_util_path_type_to_string(
+			clk_info->axi_path[i].path_data_type),
+			clk_info->axi_path[i].camnoc_bw,
+			clk_info->axi_path[i].mnoc_ab_bw,
+			clk_info->num_paths);
+	}
+
+	return clk_info->num_paths;
+}
+
+/*
+ * Push the consolidated HW-manager bandwidth vote to every CRE core
+ * via CRE_HW_BW_UPDATE. The AHB vote is sent as zero/invalid; only
+ * the AXI vote is active.
+ * NOTE(review): @ctx_data is currently unused here.
+ */
+static int cam_cre_update_cpas_vote(struct cam_cre_hw_mgr *hw_mgr,
+	struct cam_cre_ctx *ctx_data)
+{
+	int i = 0;
+	struct cam_cre_clk_info *clk_info;
+	struct cam_cre_dev_bw_update bw_update = {{0}, {0}, 0, 0};
+
+	clk_info = &hw_mgr->clk_info;
+
+	bw_update.ahb_vote.type = CAM_VOTE_DYNAMIC;
+	bw_update.ahb_vote.vote.freq = 0;
+	bw_update.ahb_vote_valid = false;
+
+	bw_update.axi_vote.num_paths = clk_info->num_paths;
+	memcpy(&bw_update.axi_vote.axi_path[0],
+		&clk_info->axi_path[0],
+		bw_update.axi_vote.num_paths *
+		sizeof(struct cam_axi_per_path_bw_vote));
+
+	bw_update.axi_vote_valid = true;
+	for (i = 0; i < cre_hw_mgr->num_cre; i++) {
+		hw_mgr->cre_dev_intf[i]->hw_ops.process_cmd(
+			hw_mgr->cre_dev_intf[i]->hw_priv,
+			CRE_HW_BW_UPDATE,
+			&bw_update, sizeof(bw_update));
+	}
+	return 0;
+}
+
+/*
+ * Subtract a context's bandwidth contribution from the consolidated
+ * HW-manager vote and re-apply the vote to the cores.
+ *
+ * Fixed: path_index is path_data_type minus the CRE start offset and
+ * goes negative for path types below the offset; only the upper bound
+ * was checked, allowing a negative array index. Both bounds are now
+ * validated (matching the intent of the upper-bound warning).
+ */
+static int cam_cre_mgr_remove_bw(struct cam_cre_hw_mgr *hw_mgr, int ctx_id)
+{
+	int path_index, i, rc = 0;
+	struct cam_cre_ctx *ctx_data = NULL;
+	struct cam_cre_clk_info *hw_mgr_clk_info;
+
+	ctx_data = &hw_mgr->ctx[ctx_id];
+	hw_mgr_clk_info = &hw_mgr->clk_info;
+	for (i = 0; i < ctx_data->clk_info.num_paths; i++) {
+		path_index =
+		ctx_data->clk_info.axi_path[i].path_data_type -
+			CAM_AXI_PATH_DATA_CRE_START_OFFSET;
+		if (path_index < 0 ||
+			path_index >= CAM_CRE_MAX_PER_PATH_VOTES) {
+			CAM_WARN(CAM_CRE,
+				"Invalid path %d, start offset=%d, max=%d",
+				ctx_data->clk_info.axi_path[i].path_data_type,
+				CAM_AXI_PATH_DATA_CRE_START_OFFSET,
+				CAM_CRE_MAX_PER_PATH_VOTES);
+			continue;
+		}
+
+		hw_mgr_clk_info->axi_path[path_index].camnoc_bw -=
+			ctx_data->clk_info.axi_path[i].camnoc_bw;
+		hw_mgr_clk_info->axi_path[path_index].mnoc_ab_bw -=
+			ctx_data->clk_info.axi_path[i].mnoc_ab_bw;
+		hw_mgr_clk_info->axi_path[path_index].mnoc_ib_bw -=
+			ctx_data->clk_info.axi_path[i].mnoc_ib_bw;
+		hw_mgr_clk_info->axi_path[path_index].ddr_ab_bw -=
+			ctx_data->clk_info.axi_path[i].ddr_ab_bw;
+		hw_mgr_clk_info->axi_path[path_index].ddr_ib_bw -=
+			ctx_data->clk_info.axi_path[i].ddr_ib_bw;
+	}
+
+	rc = cam_cre_update_cpas_vote(hw_mgr, ctx_data);
+
+	return rc;
+}
+
+/*
+ * Merge a request's v2 bandwidth vote into the consolidated
+ * HW-manager vote for this context.
+ *
+ * Returns false (no CPAS update needed) when the incoming vote is
+ * identical to the context's current vote; otherwise removes the
+ * context's previous contribution from hw_mgr_clk_info, records the
+ * new per-context vote, adds it back in consolidated per-path slots,
+ * and returns true.
+ *
+ * NOTE(review): path_index (path_data_type minus the CRE start
+ * offset) is only checked against the upper bound in both loops; a
+ * path type below the start offset would yield a negative index.
+ * NOTE(review): confirm cam_cre_mgr_calculate_num_path() returns the
+ * path count expected by the assignment before the memcpy below.
+ */
+static bool cam_cre_update_bw_v2(struct cam_cre_hw_mgr *hw_mgr,
+	struct cam_cre_ctx *ctx_data,
+	struct cam_cre_clk_info *hw_mgr_clk_info,
+	struct cam_cre_clk_bw_req_internal_v2 *clk_info,
+	bool busy)
+{
+	int i, path_index;
+	bool update_required = true;
+
+	for (i = 0; i < clk_info->num_paths; i++)
+		CAM_DBG(CAM_CRE, "clk_info camnoc = %lld busy = %d",
+			clk_info->axi_path[i].camnoc_bw, busy);
+
+	if (clk_info->num_paths == ctx_data->clk_info.num_paths) {
+		update_required = false;
+		for (i = 0; i < clk_info->num_paths; i++) {
+			if ((clk_info->axi_path[i].transac_type ==
+			ctx_data->clk_info.axi_path[i].transac_type) &&
+				(clk_info->axi_path[i].path_data_type ==
+			ctx_data->clk_info.axi_path[i].path_data_type) &&
+				(clk_info->axi_path[i].camnoc_bw ==
+			ctx_data->clk_info.axi_path[i].camnoc_bw) &&
+				(clk_info->axi_path[i].mnoc_ab_bw ==
+			ctx_data->clk_info.axi_path[i].mnoc_ab_bw)) {
+				continue;
+			} else {
+				update_required = true;
+				break;
+			}
+		}
+	}
+	if (!update_required) {
+		CAM_DBG(CAM_CRE,
+		"Incoming BW hasn't changed, no update required");
+		return false;
+	}
+
+	/*
+	 * Remove previous vote of this context from hw mgr first.
+	 * hw_mgr_clk_info has all valid paths, with each path in its own index
+	 */
+	for (i = 0; i < ctx_data->clk_info.num_paths; i++) {
+		path_index =
+		ctx_data->clk_info.axi_path[i].path_data_type -
+			CAM_AXI_PATH_DATA_CRE_START_OFFSET;
+
+		if (path_index >= CAM_CRE_MAX_PER_PATH_VOTES) {
+			CAM_WARN(CAM_CRE,
+				"Invalid path %d, start offset=%d, max=%d",
+				ctx_data->clk_info.axi_path[i].path_data_type,
+				CAM_AXI_PATH_DATA_CRE_START_OFFSET,
+				CAM_CRE_MAX_PER_PATH_VOTES);
+			continue;
+		}
+
+		hw_mgr_clk_info->axi_path[path_index].camnoc_bw -=
+			ctx_data->clk_info.axi_path[i].camnoc_bw;
+		hw_mgr_clk_info->axi_path[path_index].mnoc_ab_bw -=
+			ctx_data->clk_info.axi_path[i].mnoc_ab_bw;
+		hw_mgr_clk_info->axi_path[path_index].mnoc_ib_bw -=
+			ctx_data->clk_info.axi_path[i].mnoc_ib_bw;
+		hw_mgr_clk_info->axi_path[path_index].ddr_ab_bw -=
+			ctx_data->clk_info.axi_path[i].ddr_ab_bw;
+		hw_mgr_clk_info->axi_path[path_index].ddr_ib_bw -=
+			ctx_data->clk_info.axi_path[i].ddr_ib_bw;
+	}
+
+	ctx_data->clk_info.num_paths =
+		cam_cre_mgr_calculate_num_path(clk_info, ctx_data);
+
+	memcpy(&ctx_data->clk_info.axi_path[0],
+		&clk_info->axi_path[0],
+		clk_info->num_paths * sizeof(struct cam_axi_per_path_bw_vote));
+
+	/*
+	 * Add new vote of this context in hw mgr.
+	 * hw_mgr_clk_info has all paths, with each path in its own index
+	 */
+	for (i = 0; i < ctx_data->clk_info.num_paths; i++) {
+		path_index =
+		ctx_data->clk_info.axi_path[i].path_data_type -
+			CAM_AXI_PATH_DATA_CRE_START_OFFSET;
+
+		if (path_index >= CAM_CRE_MAX_PER_PATH_VOTES) {
+			CAM_WARN(CAM_CRE,
+				"Invalid path %d, start offset=%d, max=%d",
+				ctx_data->clk_info.axi_path[i].path_data_type,
+				CAM_AXI_PATH_DATA_CRE_START_OFFSET,
+				CAM_CRE_MAX_PER_PATH_VOTES);
+			continue;
+		}
+
+		hw_mgr_clk_info->axi_path[path_index].path_data_type =
+			ctx_data->clk_info.axi_path[i].path_data_type;
+		hw_mgr_clk_info->axi_path[path_index].transac_type =
+			ctx_data->clk_info.axi_path[i].transac_type;
+		hw_mgr_clk_info->axi_path[path_index].camnoc_bw +=
+			ctx_data->clk_info.axi_path[i].camnoc_bw;
+		hw_mgr_clk_info->axi_path[path_index].mnoc_ab_bw +=
+			ctx_data->clk_info.axi_path[i].mnoc_ab_bw;
+		hw_mgr_clk_info->axi_path[path_index].mnoc_ib_bw +=
+			ctx_data->clk_info.axi_path[i].mnoc_ib_bw;
+		hw_mgr_clk_info->axi_path[path_index].ddr_ab_bw +=
+			ctx_data->clk_info.axi_path[i].ddr_ab_bw;
+		hw_mgr_clk_info->axi_path[path_index].ddr_ib_bw +=
+			ctx_data->clk_info.axi_path[i].ddr_ib_bw;
+		CAM_DBG(CAM_CRE,
+			"Consolidate Path Vote : Dev[%s] i[%d] path_idx[%d] : [%s %s] [%lld %lld]",
+			ctx_data->cre_acquire.dev_name,
+			i, path_index,
+			cam_cpas_axi_util_trans_type_to_string(
+			hw_mgr_clk_info->axi_path[path_index].transac_type),
+			cam_cpas_axi_util_path_type_to_string(
+			hw_mgr_clk_info->axi_path[path_index].path_data_type),
+			hw_mgr_clk_info->axi_path[path_index].camnoc_bw,
+			hw_mgr_clk_info->axi_path[path_index].mnoc_ab_bw);
+	}
+
+	if (hw_mgr_clk_info->num_paths < ctx_data->clk_info.num_paths)
+		hw_mgr_clk_info->num_paths = ctx_data->clk_info.num_paths;
+
+	return true;
+}
+
+static bool cam_cre_check_bw_update(struct cam_cre_hw_mgr *hw_mgr,
+	struct cam_cre_ctx *ctx_data, int idx)
+{
+	bool busy = false, bw_updated = false;
+	int i;
+	struct cam_cre_clk_bw_req_internal_v2 *clk_info_v2;
+	struct cam_cre_clk_info *hw_mgr_clk_info;
+	uint64_t req_id;
+
+	hw_mgr_clk_info = &hw_mgr->clk_info;
+	req_id = ctx_data->req_list[idx]->request_id;
+	if (ctx_data->req_cnt > 1)
+		busy = true;
+
+	clk_info_v2 = &ctx_data->req_list[idx]->clk_info_v2;
+
+	bw_updated = cam_cre_update_bw_v2(hw_mgr, ctx_data,
+		hw_mgr_clk_info, clk_info_v2, busy);
+	for (i = 0; i < hw_mgr_clk_info->num_paths; i++) {
+		CAM_DBG(CAM_CRE,
+			"Final path_type: %s, transac_type: %s, camnoc_bw = %lld mnoc_ab_bw = %lld, mnoc_ib_bw = %lld, device: %s",
+			cam_cpas_axi_util_path_type_to_string(
+			hw_mgr_clk_info->axi_path[i].path_data_type),
+			cam_cpas_axi_util_trans_type_to_string(
+			hw_mgr_clk_info->axi_path[i].transac_type),
+			hw_mgr_clk_info->axi_path[i].camnoc_bw,
+			hw_mgr_clk_info->axi_path[i].mnoc_ab_bw,
+			hw_mgr_clk_info->axi_path[i].mnoc_ib_bw,
+			ctx_data->cre_acquire.dev_name);
+	}
+
+	return bw_updated;
+}
+
+static int cam_cre_mgr_handle_config_err(
+	struct cam_hw_config_args *config_args,
+	struct cam_cre_ctx *ctx_data)
+{
+	struct cam_hw_done_event_data buf_data;
+	struct cam_cre_request *cre_req;
+	uint32_t req_idx;
+
+	cre_req = config_args->priv;
+
+	buf_data.request_id = cre_req->request_id;
+	buf_data.evt_param = CAM_SYNC_CRE_EVENT_CONFIG_ERR;
+	ctx_data->ctxt_event_cb(ctx_data->context_priv, CAM_CTX_EVT_ID_ERROR,
+		&buf_data);
+
+	req_idx = cre_req->req_idx;
+	cre_req->request_id = 0;
+	cam_cre_free_io_config(ctx_data->req_list[req_idx]);
+	cam_free_clear(ctx_data->req_list[req_idx]);
+	ctx_data->req_list[req_idx] = NULL;
+	clear_bit(req_idx, ctx_data->bitmap);
+	return 0;
+}
+
+static bool cam_cre_is_pending_request(struct cam_cre_ctx *ctx_data)
+{
+	return !bitmap_empty(ctx_data->bitmap, CAM_CTX_REQ_MAX);
+}
+
+static int cam_cre_supported_clk_rates(struct cam_cre_hw_mgr *hw_mgr,
+	struct cam_cre_ctx *ctx_data)
+{
+	int i;
+	struct cam_hw_soc_info *soc_info;
+	struct cam_hw_intf *dev_intf = NULL;
+	struct cam_hw_info *dev = NULL;
+
+	dev_intf = hw_mgr->cre_dev_intf[0];
+	if (!dev_intf) {
+		CAM_ERR(CAM_CRE, "dev_intf is invalid");
+		return -EINVAL;
+	}
+
+	dev = (struct cam_hw_info *)dev_intf->hw_priv;
+	soc_info = &dev->soc_info;
+
+	for (i = 0; i < CAM_MAX_VOTE; i++) {
+		ctx_data->clk_info.clk_rate[i] =
+			soc_info->clk_rate[i][soc_info->src_clk_idx];
+		CAM_DBG(CAM_CRE, "clk_info[%d] = %d",
+			i, ctx_data->clk_info.clk_rate[i]);
+	}
+
+	return 0;
+}
+
+static int cam_cre_ctx_clk_info_init(struct cam_cre_ctx *ctx_data)
+{
+	int i;
+
+	ctx_data->clk_info.curr_fc = 0;
+	ctx_data->clk_info.base_clk = 0;
+
+	for (i = 0; i < CAM_CRE_MAX_PER_PATH_VOTES; i++) {
+		ctx_data->clk_info.axi_path[i].camnoc_bw = 0;
+		ctx_data->clk_info.axi_path[i].mnoc_ab_bw = 0;
+		ctx_data->clk_info.axi_path[i].mnoc_ib_bw = 0;
+	}
+
+	cam_cre_supported_clk_rates(cre_hw_mgr, ctx_data);
+
+	return 0;
+}
+
+static int32_t cam_cre_deinit_idle_clk(void *priv, void *data)
+{
+	struct cam_cre_hw_mgr *hw_mgr = (struct cam_cre_hw_mgr *)priv;
+	struct cre_clk_work_data *task_data = (struct cre_clk_work_data *)data;
+	struct cam_cre_clk_info *clk_info =
+		(struct cam_cre_clk_info *)task_data->data;
+	uint32_t id;
+	uint32_t i;
+	struct cam_cre_ctx *ctx_data;
+	struct cam_hw_intf *dev_intf = NULL;
+	int rc = 0;
+	bool busy = false;
+
+	clk_info->base_clk = 0;
+	clk_info->curr_clk = 0;
+	clk_info->over_clked = 0;
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+
+	for (i = 0; i < CRE_CTX_MAX; i++) {
+		ctx_data = &hw_mgr->ctx[i];
+		mutex_lock(&ctx_data->ctx_mutex);
+		if (ctx_data->ctx_state == CRE_CTX_STATE_ACQUIRED) {
+			busy = cam_cre_is_pending_request(ctx_data);
+			if (busy) {
+				mutex_unlock(&ctx_data->ctx_mutex);
+				break;
+			}
+			cam_cre_ctx_clk_info_init(ctx_data);
+		}
+		mutex_unlock(&ctx_data->ctx_mutex);
+	}
+
+	if (busy) {
+		cam_cre_device_timer_reset(hw_mgr);
+		rc = -EBUSY;
+		goto done;
+	}
+
+	dev_intf = hw_mgr->cre_dev_intf[0];
+	id = CRE_HW_CLK_DISABLE;
+
+	CAM_DBG(CAM_CRE, "Disable %d", clk_info->hw_type);
+
+	dev_intf->hw_ops.process_cmd(dev_intf->hw_priv, id,	NULL, 0);
+
+done:
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	return rc;
+}
+
/*
 * Device idle-timer expiry callback. Runs in timer (atomic) context,
 * so the actual clock teardown is deferred to a workqueue task that
 * runs cam_cre_deinit_idle_clk() in process context.
 */
static void cam_cre_device_timer_cb(struct timer_list *timer_data)
{
	unsigned long flags;
	struct crm_workq_task *task;
	struct cre_clk_work_data *task_data;
	/* Recover the wrapping cam_req_mgr_timer from the kernel timer. */
	struct cam_req_mgr_timer *timer =
		container_of(timer_data, struct cam_req_mgr_timer, sys_timer);

	spin_lock_irqsave(&cre_hw_mgr->hw_mgr_lock, flags);
	task = cam_req_mgr_workq_get_task(cre_hw_mgr->timer_work);
	if (!task) {
		/* No free task slot: the idle teardown is simply dropped. */
		CAM_ERR(CAM_CRE, "no empty task");
		spin_unlock_irqrestore(&cre_hw_mgr->hw_mgr_lock, flags);
		return;
	}

	/* timer->parent is the cam_cre_clk_info this watchdog monitors. */
	task_data = (struct cre_clk_work_data *)task->payload;
	task_data->data = timer->parent;
	task_data->type = CRE_WORKQ_TASK_MSG_TYPE;
	task->process_cb = cam_cre_deinit_idle_clk;
	cam_req_mgr_workq_enqueue_task(task, cre_hw_mgr,
		CRM_TASK_PRIORITY_0);
	spin_unlock_irqrestore(&cre_hw_mgr->hw_mgr_lock, flags);
}
+
+static int cam_cre_device_timer_start(struct cam_cre_hw_mgr *hw_mgr)
+{
+	int rc = 0;
+	int i;
+
+	for (i = 0; i < CLK_HW_MAX; i++)  {
+		if (!hw_mgr->clk_info.watch_dog) {
+			rc = crm_timer_init(&hw_mgr->clk_info.watch_dog,
+				CRE_DEVICE_IDLE_TIMEOUT, &hw_mgr->clk_info,
+				&cam_cre_device_timer_cb);
+
+			if (rc)
+				CAM_ERR(CAM_CRE, "Failed to start timer %d", i);
+
+			hw_mgr->clk_info.watch_dog_reset_counter = 0;
+		}
+	}
+
+	return rc;
+}
+
+static void cam_cre_device_timer_stop(struct cam_cre_hw_mgr *hw_mgr)
+{
+	if (hw_mgr->clk_info.watch_dog) {
+		hw_mgr->clk_info.watch_dog_reset_counter = 0;
+		crm_timer_exit(&hw_mgr->clk_info.watch_dog);
+		hw_mgr->clk_info.watch_dog = NULL;
+	}
+}
+
+static int cam_cre_mgr_process_cmd(void *priv, void *data)
+{
+	int rc = 0;
+	struct cre_cmd_work_data *task_data = NULL;
+	struct cam_cre_ctx *ctx_data;
+	struct cam_cre_request *cre_req;
+	struct cam_cre_hw_mgr *hw_mgr = cre_hw_mgr;
+	uint32_t active_req_idx;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_CRE, "Invalid params%pK %pK", data, priv);
+		return -EINVAL;
+	}
+
+	ctx_data = priv;
+	task_data = (struct cre_cmd_work_data *)data;
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+
+	if (ctx_data->ctx_state != CRE_CTX_STATE_ACQUIRED) {
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		CAM_ERR(CAM_CRE, "ctx id :%u is not in use",
+			ctx_data->ctx_id);
+		return -EINVAL;
+	}
+
+	active_req_idx = task_data->req_idx;
+
+	if (active_req_idx >= CAM_CTX_REQ_MAX) {
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		CAM_ERR(CAM_CRE, "Invalid reqIdx = %llu",
+				active_req_idx);
+		return -EINVAL;
+	}
+
+	cre_req = ctx_data->req_list[task_data->req_idx];
+	if (cre_req->request_id > ctx_data->last_flush_req)
+		ctx_data->last_flush_req = 0;
+
+	if (cre_req->request_id <= ctx_data->last_flush_req) {
+		CAM_WARN(CAM_CRE,
+			"request %lld has been flushed, reject packet",
+			cre_req->request_id, ctx_data->last_flush_req);
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EINVAL;
+	}
+
+	if (!cam_cre_is_pending_request(ctx_data)) {
+		CAM_WARN(CAM_CRE, "no pending req, req %lld last flush %lld",
+			cre_req->request_id, ctx_data->last_flush_req);
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EINVAL;
+	}
+
+	hw_mgr = task_data->data;
+	ctx_data->active_req = cre_req;
+	cam_cre_mgr_update_reg_set(hw_mgr, cre_req);
+	cam_cre_ctx_wait_for_idle_irq(ctx_data, cre_req,
+			active_req_idx);
+
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	return rc;
+}
+
+static int cam_get_valid_ctx_id(void)
+{
+	struct cam_cre_hw_mgr *hw_mgr = cre_hw_mgr;
+	int i;
+
+	for (i = 0; i < CRE_CTX_MAX; i++) {
+		if (hw_mgr->ctx[i].ctx_state == CRE_CTX_STATE_ACQUIRED)
+			break;
+	}
+
+	if (i == CRE_CTX_MAX)
+		return -EINVAL;
+
+	return i;
+}
+
+static int32_t cam_cre_mgr_process_msg(void *priv, void *data)
+{
+	struct cre_msg_work_data *task_data;
+	struct cam_hw_done_event_data buf_data;
+	struct cam_cre_hw_mgr *hw_mgr;
+	struct cam_cre_ctx *ctx;
+	struct cam_cre_irq_data irq_data;
+	int32_t ctx_id;
+	uint32_t evt_id;
+	int rc = 0;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_CRE, "Invalid data");
+		return -EINVAL;
+	}
+
+	task_data = data;
+	hw_mgr = priv;
+	ctx_id = cam_get_valid_ctx_id();
+	if (ctx_id < 0) {
+		CAM_ERR(CAM_CRE, "No valid context to handle error");
+		return ctx_id;
+	}
+
+	ctx = &hw_mgr->ctx[ctx_id];
+
+	mutex_lock(&ctx->ctx_mutex);
+	irq_data = task_data->irq_data;
+	if (ctx->ctx_state != CRE_CTX_STATE_ACQUIRED) {
+		CAM_DBG(CAM_CRE, "ctx id: %d not in right state: %d",
+			ctx_id, ctx->ctx_state);
+		mutex_unlock(&ctx->ctx_mutex);
+		return -EINVAL;
+	}
+
+	if (irq_data.error) {
+		evt_id = CAM_CTX_EVT_ID_ERROR;
+		buf_data.evt_param = CAM_SYNC_CRE_EVENT_HW_ERR;
+		buf_data.request_id = ctx->active_req->request_id;
+		ctx->ctxt_event_cb(ctx->context_priv, evt_id, &buf_data);
+		rc = cam_cre_mgr_reset_hw();
+	} else if ((irq_data.top_irq_status & CAM_CRE_WE_IRQ)
+		&& (irq_data.wr_buf_done)) {
+		/* Signal Buf done */
+		evt_id = CAM_CTX_EVT_ID_SUCCESS;
+		buf_data.evt_param = CAM_SYNC_COMMON_EVENT_SUCCESS;
+		buf_data.request_id = ctx->active_req->request_id;
+		ctx->ctxt_event_cb(ctx->context_priv, evt_id, &buf_data);
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+	return rc;
+}
+
/*
 * Decide whether the aggregate clock rate must be reprogrammed for the
 * request at @idx. Re-arms the idle watchdog and latches the request's
 * rt_flag into the context clock info.
 *
 * NOTE(review): rc is never set to true, so the caller's
 * cam_cre_mgr_update_clk_rate() path is currently unreachable from
 * here, and the "Calculate base clk rate" step looks unimplemented —
 * confirm whether this is intentional.
 */
static bool cam_cre_check_clk_update(struct cam_cre_hw_mgr *hw_mgr,
	struct cam_cre_ctx *ctx_data, int idx)
{
	bool rc = false;
	struct cam_cre_clk_bw_request *clk_info;
	uint64_t req_id;
	struct cam_cre_clk_info *hw_mgr_clk_info;

	/* Activity observed: push the idle-clock watchdog out. */
	cam_cre_device_timer_reset(hw_mgr);
	hw_mgr_clk_info = &hw_mgr->clk_info;
	req_id = ctx_data->req_list[idx]->request_id;

	CAM_DBG(CAM_CRE, "req_id = %lld", req_id);

	clk_info = &ctx_data->req_list[idx]->clk_info;

	/* Calculate base clk rate */
	ctx_data->clk_info.rt_flag = clk_info->rt_flag;

	return rc;
}
+
+static int cam_cre_mgr_update_clk_rate(struct cam_cre_hw_mgr *hw_mgr,
+	struct cam_cre_ctx *ctx_data)
+{
+	struct cam_cre_dev_clk_update clk_upd_cmd;
+	int i;
+
+	clk_upd_cmd.clk_rate = hw_mgr->clk_info.curr_clk;
+
+	CAM_DBG(CAM_PERF, "clk_rate %u for dev_type %d", clk_upd_cmd.clk_rate,
+		ctx_data->cre_acquire.dev_type);
+
+	for (i = 0; i < cre_hw_mgr->num_cre; i++) {
+		hw_mgr->cre_dev_intf[i]->hw_ops.process_cmd(
+			hw_mgr->cre_dev_intf[i]->hw_priv,
+			CRE_HW_CLK_UPDATE,
+			&clk_upd_cmd, sizeof(clk_upd_cmd));
+	}
+
+	return 0;
+}
+
+static int cam_cre_mgr_cre_clk_remove(struct cam_cre_hw_mgr *hw_mgr, int ctx_id)
+{
+	struct cam_cre_ctx *ctx_data = NULL;
+	struct cam_cre_clk_info *hw_mgr_clk_info;
+
+	ctx_data = &hw_mgr->ctx[ctx_id];
+	hw_mgr_clk_info = &hw_mgr->clk_info;
+
+	if (hw_mgr_clk_info->base_clk >= ctx_data->clk_info.base_clk)
+		hw_mgr_clk_info->base_clk -= ctx_data->clk_info.base_clk;
+
+	/* reset clock info */
+	ctx_data->clk_info.curr_fc = 0;
+	ctx_data->clk_info.base_clk = 0;
+	return 0;
+}
+
+
/*
 * Update the clock rate and/or bandwidth vote for the request at @idx.
 * Returns 0 on success or a negative errno from the failing step.
 */
static int cam_cre_mgr_cre_clk_update(struct cam_cre_hw_mgr *hw_mgr,
	struct cam_cre_ctx *ctx_data, int idx)
{
	int rc = 0, bw_rc;

	if (cam_cre_check_clk_update(hw_mgr, ctx_data, idx))
		rc = cam_cre_mgr_update_clk_rate(hw_mgr, ctx_data);

	/*
	 * Do not bitwise-OR error codes (the original used rc |= ...):
	 * OR-ing two distinct negative errnos yields a meaningless value.
	 * Prefer reporting the BW failure, else the clk failure.
	 */
	if (cam_cre_check_bw_update(hw_mgr, ctx_data, idx)) {
		bw_rc = cam_cre_update_cpas_vote(hw_mgr, ctx_data);
		if (bw_rc)
			rc = bw_rc;
	}

	return rc;
}
+
+int32_t cam_cre_hw_mgr_cb(void *irq_data, int32_t result_size, void *data)
+{
+	int32_t rc = 0;
+	unsigned long flags;
+	struct cam_cre_hw_mgr *hw_mgr = data;
+	struct crm_workq_task *task;
+	struct cre_msg_work_data *task_data;
+	struct cam_cre_irq_data *local_irq_data = irq_data;
+
+	if (!data) {
+		CAM_ERR(CAM_CRE, "irq cb data is NULL");
+		return rc;
+	}
+
+	spin_lock_irqsave(&hw_mgr->hw_mgr_lock, flags);
+	task = cam_req_mgr_workq_get_task(cre_hw_mgr->msg_work);
+	if (!task) {
+		CAM_ERR(CAM_CRE, "no empty task");
+		spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags);
+		return -ENOMEM;
+	}
+
+	task_data = (struct cre_msg_work_data *)task->payload;
+	task_data->data = hw_mgr;
+	task_data->irq_data = *local_irq_data;
+	task_data->type = CRE_WORKQ_TASK_MSG_TYPE;
+	task->process_cb = cam_cre_mgr_process_msg;
+	rc = cam_req_mgr_workq_enqueue_task(task, cre_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags);
+
+	return rc;
+}
+
+static int cam_cre_mgr_process_io_cfg(struct cam_cre_hw_mgr *hw_mgr,
+	struct cam_packet *packet,
+	struct cam_cre_ctx *ctx_data,
+	uint32_t req_idx,
+	struct cam_hw_prepare_update_args *prep_arg)
+{
+	int i, j = 0, k = 0, l, rc = 0;
+	struct cre_io_buf *io_buf;
+	int32_t sync_in_obj[CRE_MAX_IN_RES];
+	int32_t merged_sync_in_obj;
+	struct cam_cre_request *cre_request;
+
+	prep_arg->pf_data->packet = packet;
+
+	rc = cam_cre_mgr_process_cmd_io_buf_req(hw_mgr, packet, ctx_data,
+		req_idx);
+	if (rc) {
+		CAM_ERR(CAM_CRE, "Process CRE cmd io request is failed: %d",
+			rc);
+		goto end;
+	}
+
+	cre_request = ctx_data->req_list[req_idx];
+	prep_arg->num_out_map_entries = 0;
+	prep_arg->num_in_map_entries = 0;
+
+	CAM_DBG(CAM_CRE, "E: req_idx = %u %x", req_idx, packet);
+
+	for (i = 0; i < cre_request->num_batch; i++) {
+		for (l = 0; l < cre_request->num_io_bufs[i]; l++) {
+			io_buf = cre_request->io_buf[i][l];
+			if (io_buf->direction == CAM_BUF_INPUT) {
+				if (io_buf->fence != -1) {
+					if (j < CRE_MAX_IN_RES) {
+						sync_in_obj[j++] =
+							io_buf->fence;
+						prep_arg->num_in_map_entries++;
+					} else {
+						CAM_ERR(CAM_CRE,
+						"reached max in_res %d %d",
+						io_buf->resource_type,
+						cre_request->request_id);
+					}
+				} else {
+					CAM_ERR(CAM_CRE, "Invalid fence %d %d",
+						io_buf->resource_type,
+						cre_request->request_id);
+				}
+			} else {
+				if (io_buf->fence != -1) {
+					prep_arg->out_map_entries[k].sync_id =
+						io_buf->fence;
+					k++;
+					prep_arg->num_out_map_entries++;
+				}
+			}
+			CAM_DBG(CAM_REQ,
+				"ctx_id: %u req_id: %llu dir[%d] %u, fence: %d",
+				ctx_data->ctx_id, packet->header.request_id, i,
+				io_buf->direction, io_buf->fence);
+			CAM_DBG(CAM_REQ, "rsc_type = %u fmt = %d",
+				io_buf->resource_type,
+				io_buf->format);
+		}
+	}
+
+	if (prep_arg->num_in_map_entries > 1 &&
+		prep_arg->num_in_map_entries <= CRE_MAX_IN_RES)
+		prep_arg->num_in_map_entries =
+			cam_common_util_remove_duplicate_arr(
+			sync_in_obj, prep_arg->num_in_map_entries);
+
+	if (prep_arg->num_in_map_entries > 1 &&
+		prep_arg->num_in_map_entries <= CRE_MAX_IN_RES) {
+		rc = cam_sync_merge(&sync_in_obj[0],
+			prep_arg->num_in_map_entries, &merged_sync_in_obj);
+		if (rc) {
+			prep_arg->num_out_map_entries = 0;
+			prep_arg->num_in_map_entries = 0;
+			return rc;
+		}
+
+		cre_request->in_resource = merged_sync_in_obj;
+
+		prep_arg->in_map_entries[0].sync_id = merged_sync_in_obj;
+		prep_arg->num_in_map_entries = 1;
+		CAM_DBG(CAM_REQ, "ctx_id: %u req_id: %llu Merged Sync obj: %d",
+			ctx_data->ctx_id, packet->header.request_id,
+			merged_sync_in_obj);
+	} else if (prep_arg->num_in_map_entries == 1) {
+		prep_arg->in_map_entries[0].sync_id = sync_in_obj[0];
+		prep_arg->num_in_map_entries = 1;
+		cre_request->in_resource = 0;
+		CAM_DBG(CAM_CRE, "fence = %d", sync_in_obj[0]);
+	} else {
+		CAM_DBG(CAM_CRE, "Invalid count of input fences, count: %d",
+			prep_arg->num_in_map_entries);
+		prep_arg->num_in_map_entries = 0;
+		cre_request->in_resource = 0;
+		rc = -EINVAL;
+	}
+end:
+	return rc;
+}
+
+static bool cam_cre_mgr_is_valid_inconfig(struct cam_packet *packet)
+{
+	int i, num_in_map_entries = 0;
+	bool in_config_valid = false;
+	struct cam_buf_io_cfg *io_cfg_ptr = NULL;
+
+	io_cfg_ptr = (struct cam_buf_io_cfg *) ((uint32_t *) &packet->payload +
+					packet->io_configs_offset/4);
+
+	for (i = 0 ; i < packet->num_io_configs; i++)
+		if (io_cfg_ptr[i].direction == CAM_BUF_INPUT)
+			num_in_map_entries++;
+
+	if (num_in_map_entries <= CRE_MAX_IN_RES) {
+		in_config_valid = true;
+	} else {
+		CAM_ERR(CAM_CRE, "In config entries(%u) more than allowed(%u)",
+				num_in_map_entries, CRE_MAX_IN_RES);
+	}
+
+	CAM_DBG(CAM_CRE, "number of in_config info: %u %u %u %u",
+			packet->num_io_configs, CRE_MAX_IO_BUFS,
+			num_in_map_entries, CRE_MAX_IN_RES);
+
+	return in_config_valid;
+}
+
+static bool cam_cre_mgr_is_valid_outconfig(struct cam_packet *packet)
+{
+	int i, num_out_map_entries = 0;
+	bool out_config_valid = false;
+	struct cam_buf_io_cfg *io_cfg_ptr = NULL;
+
+	io_cfg_ptr = (struct cam_buf_io_cfg *) ((uint32_t *) &packet->payload +
+					packet->io_configs_offset/4);
+
+	for (i = 0 ; i < packet->num_io_configs; i++)
+		if (io_cfg_ptr[i].direction == CAM_BUF_OUTPUT)
+			num_out_map_entries++;
+
+	if (num_out_map_entries <= CRE_MAX_OUT_RES) {
+		out_config_valid = true;
+	} else {
+		CAM_ERR(CAM_CRE, "Out config entries(%u) more than allowed(%u)",
+				num_out_map_entries, CRE_MAX_OUT_RES);
+	}
+
+	CAM_DBG(CAM_CRE, "number of out_config info: %u %u %u %u",
+			packet->num_io_configs, CRE_MAX_IO_BUFS,
+			num_out_map_entries, CRE_MAX_OUT_RES);
+
+	return out_config_valid;
+}
+
+static int cam_cre_mgr_pkt_validation(struct cam_packet *packet)
+{
+	if ((packet->header.op_code & 0xff) !=
+		CAM_CRE_OPCODE_CONFIG) {
+		CAM_ERR(CAM_CRE, "Invalid Opcode in pkt: %d",
+			packet->header.op_code & 0xff);
+		return -EINVAL;
+	}
+
+	if (packet->num_io_configs > CRE_MAX_IO_BUFS) {
+		CAM_ERR(CAM_CRE, "Invalid number of io configs: %d %d",
+			CRE_MAX_IO_BUFS, packet->num_io_configs);
+		return -EINVAL;
+	}
+
+	if (packet->num_cmd_buf > CRE_PACKET_MAX_CMD_BUFS) {
+		CAM_ERR(CAM_CRE, "Invalid number of cmd buffers: %d %d",
+			CRE_PACKET_MAX_CMD_BUFS, packet->num_cmd_buf);
+		return -EINVAL;
+	}
+
+	if (!cam_cre_mgr_is_valid_inconfig(packet) ||
+		!cam_cre_mgr_is_valid_outconfig(packet)) {
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_CRE, "number of cmd/patch info: %u %u %u %u",
+			packet->num_cmd_buf,
+			packet->num_io_configs, CRE_MAX_IO_BUFS,
+			packet->num_patches);
+	return 0;
+}
+
+static int cam_cre_validate_acquire_res_info(
+	struct cam_cre_acquire_dev_info *cre_acquire)
+{
+	int i;
+
+	if (cre_acquire->num_out_res > CRE_MAX_OUT_RES) {
+		CAM_ERR(CAM_CRE, "num of out resources exceeding : %u",
+			cre_acquire->num_out_res);
+		return -EINVAL;
+	}
+
+	if (cre_acquire->num_in_res > CRE_MAX_IN_RES) {
+		CAM_ERR(CAM_CRE, "num of in resources exceeding : %u",
+			cre_acquire->num_in_res);
+		return -EINVAL;
+	}
+
+	if (cre_acquire->dev_type >= CRE_DEV_MAX) {
+		CAM_ERR(CAM_CRE, "Invalid device type: %d",
+			cre_acquire->dev_type);
+		return -EFAULT;
+	}
+
+	/*
+	 * TODO: Confirm this with CRE HW folks
+	 * Reffering CRE HPG supported input formats are
+	 * CAM_FORMAT_MIPI_RAW_10
+	 * CAM_FORMAT_MIPI_RAW_12
+	 * CAM_FORMAT_MIPI_RAW_14
+	 * CAM_FORMAT_MIPI_RAW_16
+	 * CAM_FORMAT_MIPI_RAW_20
+	 */
+	for (i = 0; i < cre_acquire->num_in_res; i++) {
+		if ((cre_acquire->in_res[i].format <
+			CAM_FORMAT_MIPI_RAW_10) ||
+			(cre_acquire->in_res[i].format >
+			 CAM_FORMAT_MIPI_RAW_20)) {
+			CAM_ERR(CAM_CRE, "Invalid Input format");
+			return -EINVAL;
+		}
+	}
+
+	/*
+	 * TODO: Confirm this with CRE HW folks
+	 * Reffering CRE HPG supported output formats are
+	 * CAM_FORMAT_PLAIN16_8
+	 * CAM_FORMAT_PLAIN16_10
+	 * CAM_FORMAT_PLAIN16_12
+	 * CAM_FORMAT_PLAIN16_14
+	 * CAM_FORMAT_PLAIN16_16
+	 * CAM_FORMAT_PLAIN32_20
+	 */
+	for (i = 0; i < cre_acquire->num_out_res; i++) {
+		if ((cre_acquire->out_res[i].format <
+			CAM_FORMAT_PLAIN16_8) ||
+			(cre_acquire->out_res[i].format >
+			 CAM_FORMAT_PLAIN32_20)) {
+			CAM_ERR(CAM_CRE, "Invalid output format");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int cam_cre_get_acquire_info(struct cam_cre_hw_mgr *hw_mgr,
+	struct cam_hw_acquire_args *args,
+	struct cam_cre_ctx *ctx)
+{
+	int i = 0;
+
+	if (args->num_acq > CRE_DEV_MAX) {
+		CAM_ERR(CAM_CRE, "Invalid number of resources: %d",
+			args->num_acq);
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&ctx->cre_acquire,
+		(void __user *)args->acquire_info,
+		sizeof(struct cam_cre_acquire_dev_info))) {
+		CAM_ERR(CAM_CRE, "Failed in acquire");
+		return -EFAULT;
+	}
+
+	if (cam_cre_validate_acquire_res_info(&ctx->cre_acquire))
+		return -EINVAL;
+
+	CAM_DBG(CAM_CRE, "top: %u %s %u %u %u",
+		ctx->cre_acquire.dev_type,
+		ctx->cre_acquire.dev_name,
+		ctx->cre_acquire.secure_mode,
+		ctx->cre_acquire.num_in_res, ctx->cre_acquire.num_out_res);
+
+	for (i = 0; i < ctx->cre_acquire.num_in_res; i++) {
+		CAM_DBG(CAM_CRE, "IN: %u %u %u %u",
+		ctx->cre_acquire.in_res[i].res_id,
+		ctx->cre_acquire.in_res[i].width,
+		ctx->cre_acquire.in_res[i].height,
+		ctx->cre_acquire.in_res[i].format);
+	}
+
+	for (i = 0; i < ctx->cre_acquire.num_out_res; i++) {
+		CAM_DBG(CAM_CRE, "OUT: %u %u %u %u",
+		ctx->cre_acquire.out_res[i].res_id,
+		ctx->cre_acquire.out_res[i].width,
+		ctx->cre_acquire.out_res[i].height,
+		ctx->cre_acquire.out_res[i].format);
+	}
+
+	return 0;
+}
+
+static int cam_cre_get_free_ctx(struct cam_cre_hw_mgr *hw_mgr)
+{
+	int i;
+
+	i = find_first_zero_bit(hw_mgr->ctx_bitmap, hw_mgr->ctx_bits);
+	if (i >= CRE_CTX_MAX || i < 0) {
+		CAM_ERR(CAM_CRE, "Invalid ctx id = %d", i);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->ctx[i].ctx_mutex);
+	if (hw_mgr->ctx[i].ctx_state != CRE_CTX_STATE_FREE) {
+		CAM_ERR(CAM_CRE, "Invalid ctx %d state %d",
+			i, hw_mgr->ctx[i].ctx_state);
+		mutex_unlock(&hw_mgr->ctx[i].ctx_mutex);
+		return -EINVAL;
+	}
+	set_bit(i, hw_mgr->ctx_bitmap);
+	mutex_unlock(&hw_mgr->ctx[i].ctx_mutex);
+
+	return i;
+}
+
+
+static int cam_cre_put_free_ctx(struct cam_cre_hw_mgr *hw_mgr, uint32_t ctx_id)
+{
+	if (ctx_id >= CRE_CTX_MAX) {
+		CAM_ERR(CAM_CRE, "Invalid ctx_id: %d", ctx_id);
+		return 0;
+	}
+
+	hw_mgr->ctx[ctx_id].ctx_state = CRE_CTX_STATE_FREE;
+	clear_bit(ctx_id, hw_mgr->ctx_bitmap);
+
+	return 0;
+}
+
+static int cam_cre_mgr_get_hw_caps(void *hw_priv, void *hw_caps_args)
+{
+	struct cam_cre_hw_mgr *hw_mgr;
+	struct cam_query_cap_cmd *query_cap = hw_caps_args;
+	struct cam_cre_hw_ver hw_ver;
+	int rc = 0, i;
+
+	if (!hw_priv || !hw_caps_args) {
+		CAM_ERR(CAM_CRE, "Invalid args: %x %x", hw_priv, hw_caps_args);
+		return -EINVAL;
+	}
+
+	hw_mgr = hw_priv;
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	if (copy_from_user(&hw_mgr->cre_caps,
+		u64_to_user_ptr(query_cap->caps_handle),
+		sizeof(struct cam_cre_query_cap_cmd))) {
+		CAM_ERR(CAM_CRE, "copy_from_user failed: size = %d",
+			sizeof(struct cam_cre_query_cap_cmd));
+		rc = -EFAULT;
+		goto end;
+	}
+
+	for (i = 0; i < hw_mgr->num_cre; i++) {
+		rc = hw_mgr->cre_dev_intf[i]->hw_ops.get_hw_caps(
+			hw_mgr->cre_dev_intf[i]->hw_priv,
+			&hw_ver, sizeof(hw_ver));
+		if (rc)
+			goto end;
+
+		hw_mgr->cre_caps.dev_ver[i] = hw_ver;
+	}
+
+	hw_mgr->cre_caps.dev_iommu_handle.non_secure = hw_mgr->iommu_hdl;
+	hw_mgr->cre_caps.dev_iommu_handle.secure = hw_mgr->iommu_sec_hdl;
+
+	CAM_DBG(CAM_CRE, "iommu sec %d iommu ns %d",
+		hw_mgr->cre_caps.dev_iommu_handle.secure,
+		hw_mgr->cre_caps.dev_iommu_handle.non_secure);
+
+	if (copy_to_user(u64_to_user_ptr(query_cap->caps_handle),
+		&hw_mgr->cre_caps, sizeof(struct cam_cre_query_cap_cmd))) {
+		CAM_ERR(CAM_CRE, "copy_to_user failed: size = %d",
+			sizeof(struct cam_cre_query_cap_cmd));
+		rc = -EFAULT;
+	}
+
+end:
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	return rc;
+}
+
+static int cam_cre_mgr_acquire_hw(void *hw_priv, void *hw_acquire_args)
+{
+	int rc = 0, i;
+	int ctx_id;
+	struct cam_cre_hw_mgr *hw_mgr = hw_priv;
+	struct cam_cre_ctx *ctx;
+	struct cam_hw_acquire_args *args = hw_acquire_args;
+	struct cam_cre_dev_acquire cre_dev_acquire;
+	struct cam_cre_dev_release cre_dev_release;
+	struct cam_cre_dev_init init;
+	struct cam_cre_dev_clk_update clk_update;
+	struct cam_cre_dev_bw_update *bw_update;
+	struct cam_cre_set_irq_cb irq_cb;
+	struct cam_hw_info *dev = NULL;
+	struct cam_hw_soc_info *soc_info = NULL;
+	int32_t idx;
+
+	if ((!hw_priv) || (!hw_acquire_args)) {
+		CAM_ERR(CAM_CRE, "Invalid args: %x %x",
+			hw_priv, hw_acquire_args);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	ctx_id = cam_cre_get_free_ctx(hw_mgr);
+	if (ctx_id < 0) {
+		CAM_ERR(CAM_CRE, "No free ctx");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return ctx_id;
+	}
+
+	ctx = &hw_mgr->ctx[ctx_id];
+	ctx->ctx_id = ctx_id;
+	mutex_lock(&ctx->ctx_mutex);
+	rc = cam_cre_get_acquire_info(hw_mgr, args, ctx);
+	if (rc < 0) {
+		CAM_ERR(CAM_CRE, "get_acquire info failed: %d", rc);
+		goto end;
+	}
+
+	if (!hw_mgr->cre_ctx_cnt) {
+		for (i = 0; i < cre_hw_mgr->num_cre; i++) {
+			rc = hw_mgr->cre_dev_intf[i]->hw_ops.init(
+				hw_mgr->cre_dev_intf[i]->hw_priv, &init,
+				sizeof(init));
+			if (rc) {
+				CAM_ERR(CAM_CRE, "CRE Dev init failed: %d", rc);
+				goto end;
+			}
+		}
+
+		/* Install IRQ CB */
+		irq_cb.cre_hw_mgr_cb = cam_cre_hw_mgr_cb;
+		irq_cb.data = hw_mgr;
+		for (i = 0; i < cre_hw_mgr->num_cre; i++) {
+			rc = hw_mgr->cre_dev_intf[i]->hw_ops.process_cmd(
+				hw_mgr->cre_dev_intf[i]->hw_priv,
+				CRE_HW_SET_IRQ_CB,
+				&irq_cb, sizeof(irq_cb));
+			if (rc) {
+				CAM_ERR(CAM_CRE, "CRE Dev init failed: %d", rc);
+				goto cre_irq_set_failed;
+			}
+		}
+
+		dev = (struct cam_hw_info *)hw_mgr->cre_dev_intf[0]->hw_priv;
+		soc_info = &dev->soc_info;
+		idx = soc_info->src_clk_idx;
+
+		hw_mgr->clk_info.base_clk =
+			soc_info->clk_rate[CAM_TURBO_VOTE][idx];
+		hw_mgr->clk_info.threshold = 5;
+		hw_mgr->clk_info.over_clked = 0;
+
+		for (i = 0; i < CAM_CRE_MAX_PER_PATH_VOTES; i++) {
+			hw_mgr->clk_info.axi_path[i].camnoc_bw = 0;
+			hw_mgr->clk_info.axi_path[i].mnoc_ab_bw = 0;
+			hw_mgr->clk_info.axi_path[i].mnoc_ib_bw = 0;
+			hw_mgr->clk_info.axi_path[i].ddr_ab_bw = 0;
+			hw_mgr->clk_info.axi_path[i].ddr_ib_bw = 0;
+		}
+	}
+
+	cre_dev_acquire.ctx_id = ctx_id;
+	cre_dev_acquire.cre_acquire = &ctx->cre_acquire;
+
+	for (i = 0; i < cre_hw_mgr->num_cre; i++) {
+		rc = hw_mgr->cre_dev_intf[i]->hw_ops.process_cmd(
+			hw_mgr->cre_dev_intf[i]->hw_priv, CRE_HW_ACQUIRE,
+			&cre_dev_acquire, sizeof(cre_dev_acquire));
+		if (rc) {
+			CAM_ERR(CAM_CRE, "CRE Dev acquire failed: %d", rc);
+			goto cre_dev_acquire_failed;
+		}
+	}
+
+	ctx->cre_top = cre_dev_acquire.cre_top;
+
+	for (i = 0; i < cre_hw_mgr->num_cre; i++) {
+		dev = (struct cam_hw_info *)hw_mgr->cre_dev_intf[i]->hw_priv;
+		soc_info = &dev->soc_info;
+		idx = soc_info->src_clk_idx;
+		clk_update.clk_rate = soc_info->clk_rate[CAM_TURBO_VOTE][idx];
+		hw_mgr->clk_info.curr_clk =
+			soc_info->clk_rate[CAM_TURBO_VOTE][idx];
+
+		rc = hw_mgr->cre_dev_intf[i]->hw_ops.process_cmd(
+			hw_mgr->cre_dev_intf[i]->hw_priv, CRE_HW_CLK_UPDATE,
+			&clk_update, sizeof(clk_update));
+		if (rc) {
+			CAM_ERR(CAM_CRE, "CRE Dev clk update failed: %d", rc);
+			goto cre_clk_update_failed;
+		}
+	}
+
+	bw_update = kzalloc(sizeof(struct cam_cre_dev_bw_update), GFP_KERNEL);
+	if (!bw_update) {
+		CAM_ERR(CAM_ISP, "Out of memory");
+		goto cre_clk_update_failed;
+	}
+	bw_update->ahb_vote_valid = false;
+	for (i = 0; i < cre_hw_mgr->num_cre; i++) {
+		bw_update->axi_vote.num_paths = 1;
+		bw_update->axi_vote_valid = true;
+		bw_update->axi_vote.axi_path[0].camnoc_bw = 600000000;
+		bw_update->axi_vote.axi_path[0].mnoc_ab_bw = 600000000;
+		bw_update->axi_vote.axi_path[0].mnoc_ib_bw = 600000000;
+		bw_update->axi_vote.axi_path[0].ddr_ab_bw = 600000000;
+		bw_update->axi_vote.axi_path[0].ddr_ib_bw = 600000000;
+		bw_update->axi_vote.axi_path[0].transac_type =
+			CAM_AXI_TRANSACTION_WRITE;
+		bw_update->axi_vote.axi_path[0].path_data_type =
+			CAM_AXI_PATH_DATA_ALL;
+		rc = hw_mgr->cre_dev_intf[i]->hw_ops.process_cmd(
+			hw_mgr->cre_dev_intf[i]->hw_priv, CRE_HW_BW_UPDATE,
+			bw_update, sizeof(*bw_update));
+		if (rc) {
+			CAM_ERR(CAM_CRE, "CRE Dev clk update failed: %d", rc);
+			goto free_bw_update;
+		}
+	}
+
+	cam_cre_device_timer_start(hw_mgr);
+	hw_mgr->cre_ctx_cnt++;
+	ctx->context_priv = args->context_data;
+	args->ctxt_to_hw_map = ctx;
+	ctx->ctxt_event_cb = args->event_cb;
+	cam_cre_ctx_clk_info_init(ctx);
+	ctx->ctx_state = CRE_CTX_STATE_ACQUIRED;
+	cam_free_clear(bw_update);
+	bw_update = NULL;
+
+	mutex_unlock(&ctx->ctx_mutex);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	CAM_INFO(CAM_CRE, "CRE: %d acquire succesfull rc %d", ctx_id, rc);
+	return rc;
+
+free_bw_update:
+	cam_free_clear(bw_update);
+	bw_update = NULL;
+cre_clk_update_failed:
+	cre_dev_release.ctx_id = ctx_id;
+	for (i = 0; i < cre_hw_mgr->num_cre; i++) {
+		if (hw_mgr->cre_dev_intf[i]->hw_ops.process_cmd(
+			hw_mgr->cre_dev_intf[i]->hw_priv, CRE_HW_RELEASE,
+			&cre_dev_release, sizeof(cre_dev_release)))
+			CAM_ERR(CAM_CRE, "CRE Dev release failed");
+	}
+cre_dev_acquire_failed:
+	if (!hw_mgr->cre_ctx_cnt) {
+		irq_cb.cre_hw_mgr_cb = NULL;
+		irq_cb.data = hw_mgr;
+		for (i = 0; i < cre_hw_mgr->num_cre; i++) {
+			if (hw_mgr->cre_dev_intf[i]->hw_ops.process_cmd(
+				hw_mgr->cre_dev_intf[i]->hw_priv,
+				CRE_HW_SET_IRQ_CB,
+				&irq_cb, sizeof(irq_cb)))
+				CAM_ERR(CAM_CRE,
+					"CRE IRQ de register failed");
+		}
+	}
+cre_irq_set_failed:
+	if (!hw_mgr->cre_ctx_cnt) {
+		for (i = 0; i < cre_hw_mgr->num_cre; i++) {
+			if (hw_mgr->cre_dev_intf[i]->hw_ops.deinit(
+				hw_mgr->cre_dev_intf[i]->hw_priv, NULL, 0))
+				CAM_ERR(CAM_CRE, "CRE deinit fail");
+			if (hw_mgr->cre_dev_intf[i]->hw_ops.stop(
+				hw_mgr->cre_dev_intf[i]->hw_priv,
+				NULL, 0))
+				CAM_ERR(CAM_CRE, "CRE stop fail");
+		}
+	}
+end:
+	args->ctxt_to_hw_map = NULL;
+	cam_cre_put_free_ctx(hw_mgr, ctx_id);
+	mutex_unlock(&ctx->ctx_mutex);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	return rc;
+}
+
+/*
+ * cam_cre_mgr_release_ctx() - Tear down one acquired CRE context slot.
+ * @hw_mgr: CRE hw manager
+ * @ctx_id: index of the context slot to release
+ *
+ * Sends CRE_HW_RELEASE to every CRE device, frees all request entries
+ * still queued on the slot, removes the context's clock vote and returns
+ * the slot to the free pool. Caller is expected to hold
+ * hw_mgr->hw_mgr_mutex: cre_ctx_cnt is decremented here with only the
+ * per-context mutex held.
+ *
+ * Return: 0 on success or when the slot is not in ACQUIRED state,
+ * -EINVAL for an out-of-range ctx_id.
+ */
+static int cam_cre_mgr_release_ctx(struct cam_cre_hw_mgr *hw_mgr, int ctx_id)
+{
+	int i = 0, rc = 0;
+	struct cam_cre_dev_release cre_dev_release;
+
+	if (ctx_id >= CRE_CTX_MAX) {
+		CAM_ERR(CAM_CRE, "ctx_id is wrong: %d", ctx_id);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->ctx[ctx_id].ctx_mutex);
+	/* Releasing a slot that was never acquired is not an error */
+	if (hw_mgr->ctx[ctx_id].ctx_state !=
+		CRE_CTX_STATE_ACQUIRED) {
+		mutex_unlock(&hw_mgr->ctx[ctx_id].ctx_mutex);
+		CAM_DBG(CAM_CRE, "ctx id: %d not in right state: %d",
+			ctx_id, hw_mgr->ctx[ctx_id].ctx_state);
+		return 0;
+	}
+
+	hw_mgr->ctx[ctx_id].ctx_state = CRE_CTX_STATE_RELEASE;
+
+	/* Notify every CRE core; failures are logged but do not abort */
+	for (i = 0; i < cre_hw_mgr->num_cre; i++) {
+		cre_dev_release.ctx_id = ctx_id;
+		rc = hw_mgr->cre_dev_intf[i]->hw_ops.process_cmd(
+			hw_mgr->cre_dev_intf[i]->hw_priv, CRE_HW_RELEASE,
+			&cre_dev_release, sizeof(cre_dev_release));
+		if (rc)
+			CAM_ERR(CAM_CRE, "CRE Dev release failed: %d", rc);
+	}
+
+	/* Drop any requests still queued on this context */
+	for (i = 0; i < CAM_CTX_REQ_MAX; i++) {
+		if (!hw_mgr->ctx[ctx_id].req_list[i])
+			continue;
+		cam_cre_free_io_config(hw_mgr->ctx[ctx_id].req_list[i]);
+		cam_free_clear(hw_mgr->ctx[ctx_id].req_list[i]);
+		hw_mgr->ctx[ctx_id].req_list[i] = NULL;
+		clear_bit(i, hw_mgr->ctx[ctx_id].bitmap);
+	}
+
+	hw_mgr->ctx[ctx_id].req_cnt = 0;
+	hw_mgr->ctx[ctx_id].last_flush_req = 0;
+	cam_cre_put_free_ctx(hw_mgr, ctx_id);
+
+	rc = cam_cre_mgr_cre_clk_remove(hw_mgr, ctx_id);
+	if (rc)
+		CAM_ERR(CAM_CRE, "CRE clk update failed: %d", rc);
+
+	hw_mgr->cre_ctx_cnt--;
+	mutex_unlock(&hw_mgr->ctx[ctx_id].ctx_mutex);
+	CAM_DBG(CAM_CRE, "X: ctx_id = %d", ctx_id);
+
+	return 0;
+}
+
+/*
+ * cam_cre_mgr_release_hw() - hw-mgr "release" entry point.
+ * @hw_priv:         struct cam_cre_hw_mgr pointer
+ * @hw_release_args: struct cam_hw_release_args from the context layer
+ *
+ * Releases the context mapped in ctxt_to_hw_map and, when this was the
+ * last active context, deregisters IRQ callbacks, deinits and stops all
+ * CRE devices and stops the idle timer.
+ *
+ * NOTE(review): the ACQUIRED-state check below drops ctx_mutex before
+ * cam_cre_mgr_release_ctx() re-takes it; release_ctx revalidates the
+ * state itself, so the window is benign. Also note rc from release_ctx
+ * can be overwritten by the later IRQ/deinit/stop calls.
+ */
+static int cam_cre_mgr_release_hw(void *hw_priv, void *hw_release_args)
+{
+	int i, rc = 0;
+	int ctx_id = 0;
+	struct cam_hw_release_args *release_hw = hw_release_args;
+	struct cam_cre_hw_mgr *hw_mgr = hw_priv;
+	struct cam_cre_ctx *ctx_data = NULL;
+	struct cam_cre_set_irq_cb irq_cb;
+	struct cam_hw_intf *dev_intf;
+
+	if (!release_hw || !hw_mgr) {
+		CAM_ERR(CAM_CRE, "Invalid args: %pK %pK", release_hw, hw_mgr);
+		return -EINVAL;
+	}
+
+	ctx_data = release_hw->ctxt_to_hw_map;
+	if (!ctx_data) {
+		CAM_ERR(CAM_CRE, "NULL ctx data");
+		return -EINVAL;
+	}
+
+	ctx_id = ctx_data->ctx_id;
+	if (ctx_id < 0 || ctx_id >= CRE_CTX_MAX) {
+		CAM_ERR(CAM_CRE, "Invalid ctx id: %d", ctx_id);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->ctx[ctx_id].ctx_mutex);
+	if (hw_mgr->ctx[ctx_id].ctx_state != CRE_CTX_STATE_ACQUIRED) {
+		CAM_DBG(CAM_CRE, "ctx is not in use: %d", ctx_id);
+		mutex_unlock(&hw_mgr->ctx[ctx_id].ctx_mutex);
+		return -EINVAL;
+	}
+	mutex_unlock(&hw_mgr->ctx[ctx_id].ctx_mutex);
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	rc = cam_cre_mgr_release_ctx(hw_mgr, ctx_id);
+	if (!hw_mgr->cre_ctx_cnt) {
+		CAM_DBG(CAM_CRE, "Last Release");
+		/* No contexts left: detach IRQ callbacks first ... */
+		for (i = 0; i < cre_hw_mgr->num_cre; i++) {
+			dev_intf = hw_mgr->cre_dev_intf[i];
+			irq_cb.cre_hw_mgr_cb = NULL;
+			irq_cb.data = NULL;
+			rc = dev_intf->hw_ops.process_cmd(
+				hw_mgr->cre_dev_intf[i]->hw_priv,
+				CRE_HW_SET_IRQ_CB,
+				&irq_cb, sizeof(irq_cb));
+			if (rc)
+				CAM_ERR(CAM_CRE, "IRQ dereg failed: %d", rc);
+		}
+		/* ... then deinit every CRE core */
+		for (i = 0; i < cre_hw_mgr->num_cre; i++) {
+			dev_intf = hw_mgr->cre_dev_intf[i];
+			rc = dev_intf->hw_ops.deinit(
+				hw_mgr->cre_dev_intf[i]->hw_priv,
+				NULL, 0);
+			if (rc)
+				CAM_ERR(CAM_CRE, "deinit failed: %d", rc);
+		}
+		cam_cre_device_timer_stop(hw_mgr);
+	}
+
+	rc = cam_cre_mgr_remove_bw(hw_mgr, ctx_id);
+	if (rc)
+		CAM_ERR(CAM_CRE, "CRE remove bw failed: %d", rc);
+
+	/* Stop hw only after the bandwidth vote has been removed */
+	if (!hw_mgr->cre_ctx_cnt) {
+		for (i = 0; i < cre_hw_mgr->num_cre; i++) {
+			dev_intf = hw_mgr->cre_dev_intf[i];
+			rc = dev_intf->hw_ops.stop(
+				hw_mgr->cre_dev_intf[i]->hw_priv,
+				NULL, 0);
+			if (rc)
+				CAM_ERR(CAM_CRE, "stop failed: %d", rc);
+		}
+	}
+
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	CAM_DBG(CAM_CRE, "Release done for ctx_id %d", ctx_id);
+	return rc;
+}
+
+/*
+ * cam_cre_packet_generic_blob_handler() - per-blob callback for generic
+ * command buffers.
+ * @user_data: struct cre_cmd_generic_blob (ctx + request index)
+ * @blob_type: CAM_CRE_CMD_GENERIC_BLOB_* identifier
+ * @blob_size: size of blob_data in bytes (validated below)
+ * @blob_data: raw blob payload from the umd packet
+ *
+ * Currently only handles the v2 clock/bandwidth vote blob: validates the
+ * declared size against num_paths (with integer-overflow checks, since
+ * num_paths comes from untrusted userspace) and caches the vote in the
+ * request's clk_info structures.
+ *
+ * Return: 0 on success or unknown blob type, -EINVAL on malformed blob.
+ */
+static int cam_cre_packet_generic_blob_handler(void *user_data,
+	uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data)
+{
+	struct cam_cre_clk_bw_request *clk_info;
+	struct cam_cre_clk_bw_req_internal_v2 *clk_info_v2;
+	struct cre_clk_bw_request_v2 *soc_req;
+
+	struct cre_cmd_generic_blob *blob;
+	struct cam_cre_ctx *ctx_data;
+	uint32_t index;
+	size_t clk_update_size = 0;
+	int rc = 0;
+
+	if (!blob_data || (blob_size == 0)) {
+		CAM_ERR(CAM_CRE, "Invalid blob info %pK %d", blob_data,
+		blob_size);
+		return -EINVAL;
+	}
+
+	blob = (struct cre_cmd_generic_blob *)user_data;
+	ctx_data = blob->ctx;
+	index = blob->req_idx;
+
+	switch (blob_type) {
+	case CAM_CRE_CMD_GENERIC_BLOB_CLK_V2:
+		if (blob_size < sizeof(struct cre_clk_bw_request_v2)) {
+			CAM_ERR(CAM_CRE, "Mismatch blob size %d expected %lu",
+				blob_size,
+				sizeof(struct cre_clk_bw_request_v2));
+			return -EINVAL;
+		}
+
+		soc_req = (struct cre_clk_bw_request_v2 *)blob_data;
+		if (soc_req->num_paths > CAM_CRE_MAX_PER_PATH_VOTES) {
+			CAM_ERR(CAM_CRE, "Invalid num paths: %d",
+				soc_req->num_paths);
+			return -EINVAL;
+		}
+
+		/* Check for integer overflow */
+		if (soc_req->num_paths != 1) {
+			if (sizeof(struct cam_axi_per_path_bw_vote) >
+				((UINT_MAX -
+				sizeof(struct cre_clk_bw_request_v2)) /
+				(soc_req->num_paths - 1))) {
+				CAM_ERR(CAM_CRE,
+					"Size exceeds limit paths:%u size per path:%lu",
+					soc_req->num_paths - 1,
+					sizeof(
+					struct cam_axi_per_path_bw_vote));
+			return -EINVAL;
+			}
+		}
+
+		/* v2 blob carries num_paths - 1 extra per-path votes inline */
+		clk_update_size = sizeof(struct cre_clk_bw_request_v2) +
+			((soc_req->num_paths - 1) *
+			sizeof(struct cam_axi_per_path_bw_vote));
+		if (blob_size < clk_update_size) {
+			CAM_ERR(CAM_CRE, "Invalid blob size: %u",
+				blob_size);
+			return -EINVAL;
+		}
+
+		clk_info = &ctx_data->req_list[index]->clk_info;
+		clk_info_v2 = &ctx_data->req_list[index]->clk_info_v2;
+
+		/*
+		 * NOTE(review): clk_update_size is sized for the v2 layout
+		 * but the destination is the v1 clk_info struct — confirm
+		 * cam_cre_clk_bw_request is large enough to hold the max
+		 * v2 payload, or whether clk_info_v2 was the intended dest.
+		 */
+		memcpy(clk_info, soc_req, clk_update_size);
+
+		/* Use v2 structure for clk fields */
+		clk_info->budget_ns = clk_info_v2->budget_ns;
+		clk_info->frame_cycles = clk_info_v2->frame_cycles;
+		clk_info->rt_flag = clk_info_v2->rt_flag;
+
+		CAM_DBG(CAM_CRE, "budget=%llu, frame_cycle=%llu, rt_flag=%d",
+			clk_info_v2->budget_ns, clk_info_v2->frame_cycles,
+			clk_info_v2->rt_flag);
+		break;
+
+	default:
+		CAM_WARN(CAM_CRE, "Invalid blob type %d", blob_type);
+		break;
+	}
+	return rc;
+}
+
+/*
+ * cam_cre_process_generic_cmd_buffer() - Run the generic blob handler on
+ * every generic-blob command descriptor in the packet.
+ * @packet:      validated umd packet
+ * @ctx_data:    context owning the request
+ * @index:       request slot index the blobs apply to
+ * @io_buf_addr: optional io-buffer address cookie passed to the handler
+ *
+ * Return: 0 on success; last non-zero rc from blob processing otherwise
+ * (processing continues across descriptors even after a failure).
+ */
+static int cam_cre_process_generic_cmd_buffer(
+	struct cam_packet *packet,
+	struct cam_cre_ctx *ctx_data,
+	int32_t index,
+	uint64_t *io_buf_addr)
+{
+	int i, rc = 0;
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	struct cre_cmd_generic_blob cmd_generic_blob;
+
+	cmd_generic_blob.ctx = ctx_data;
+	cmd_generic_blob.req_idx = index;
+	cmd_generic_blob.io_buf_addr = io_buf_addr;
+
+	/* cmd_buf_offset is in bytes relative to payload */
+	cmd_desc = (struct cam_cmd_buf_desc *)
+		((uint32_t *) &packet->payload + packet->cmd_buf_offset/4);
+
+	for (i = 0; i < packet->num_cmd_buf; i++) {
+		if (!cmd_desc[i].length)
+			continue;
+
+		/* Only generic-blob descriptors are handled here */
+		if (cmd_desc[i].meta_data != CAM_CRE_CMD_META_GENERIC_BLOB)
+			continue;
+
+		rc = cam_packet_util_process_generic_cmd_buffer(&cmd_desc[i],
+			cam_cre_packet_generic_blob_handler,
+			&cmd_generic_blob);
+		if (rc)
+			CAM_ERR(CAM_CRE, "Failed in processing blobs %d", rc);
+	}
+
+	return rc;
+}
+
+/*
+ * cam_cre_mgr_prepare_hw_update() - hw-mgr "prepare" entry point.
+ * @hw_priv:                struct cam_cre_hw_mgr pointer
+ * @hw_prepare_update_args: struct cam_hw_prepare_update_args
+ *
+ * Validates the umd packet, processes address patches, allocates a
+ * request slot, builds the IO config and CRE register blob for it and
+ * runs any generic command blobs. On success the request is published
+ * via prepare_args->priv and its slot bit is set.
+ *
+ * Return: 0 on success, negative errno on validation/allocation failure.
+ */
+static int cam_cre_mgr_prepare_hw_update(void *hw_priv,
+	void *hw_prepare_update_args)
+{
+	int rc = 0;
+	struct cam_packet *packet = NULL;
+	struct cam_cre_hw_mgr *hw_mgr = hw_priv;
+	struct cam_hw_prepare_update_args *prepare_args =
+		hw_prepare_update_args;
+	struct cam_cre_ctx *ctx_data = NULL;
+	uint32_t request_idx = 0;
+	struct cam_cre_request *cre_req;
+	struct timespec64 ts;
+
+	if ((!prepare_args) || (!hw_mgr) || (!prepare_args->packet)) {
+		/* %pK (not %x) for pointers: avoids truncation and leaks */
+		CAM_ERR(CAM_CRE, "Invalid args: %pK %pK",
+			prepare_args, hw_mgr);
+		return -EINVAL;
+	}
+
+	ctx_data = prepare_args->ctxt_to_hw_map;
+	if (!ctx_data) {
+		CAM_ERR(CAM_CRE, "Invalid Context");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx_data->ctx_mutex);
+	if (ctx_data->ctx_state != CRE_CTX_STATE_ACQUIRED) {
+		mutex_unlock(&ctx_data->ctx_mutex);
+		CAM_ERR(CAM_CRE, "ctx id %u is not acquired state: %d",
+			ctx_data->ctx_id, ctx_data->ctx_state);
+		return -EINVAL;
+	}
+
+	packet = prepare_args->packet;
+	rc = cam_packet_util_validate_packet(packet, prepare_args->remain_len);
+	if (rc) {
+		mutex_unlock(&ctx_data->ctx_mutex);
+		CAM_ERR(CAM_CRE,
+			"packet validation failed: %d req_id: %llu ctx: %d",
+			rc, packet->header.request_id, ctx_data->ctx_id);
+		return rc;
+	}
+
+	rc = cam_cre_mgr_pkt_validation(packet);
+	if (rc) {
+		mutex_unlock(&ctx_data->ctx_mutex);
+		CAM_ERR(CAM_CRE,
+			"cre packet validation failed: %d req_id: %llu ctx: %d",
+			rc, packet->header.request_id, ctx_data->ctx_id);
+		return -EINVAL;
+	}
+
+	rc = cam_packet_util_process_patches(packet, hw_mgr->iommu_hdl,
+			hw_mgr->iommu_sec_hdl);
+	if (rc) {
+		mutex_unlock(&ctx_data->ctx_mutex);
+		CAM_ERR(CAM_CRE, "Patch processing failed %d", rc);
+		return rc;
+	}
+
+	/* Grab a free request slot; request_idx is unsigned, so only the
+	 * upper bound needs checking.
+	 */
+	request_idx  = find_first_zero_bit(ctx_data->bitmap, ctx_data->bits);
+	if (request_idx >= CAM_CTX_REQ_MAX) {
+		mutex_unlock(&ctx_data->ctx_mutex);
+		CAM_ERR(CAM_CRE, "Invalid ctx req slot = %d", request_idx);
+		return -EINVAL;
+	}
+
+	ctx_data->req_list[request_idx] =
+		kzalloc(sizeof(struct cam_cre_request), GFP_KERNEL);
+	if (!ctx_data->req_list[request_idx]) {
+		CAM_ERR(CAM_CRE, "mem allocation failed ctx:%d req_idx:%d",
+			ctx_data->ctx_id, request_idx);
+		rc = -ENOMEM;
+		goto req_mem_alloc_failed;
+	}
+
+	cre_req = ctx_data->req_list[request_idx];
+
+	rc = cam_cre_mgr_process_io_cfg(hw_mgr, packet, ctx_data,
+			request_idx, prepare_args);
+	if (rc) {
+		CAM_ERR(CAM_CRE,
+			"IO cfg processing failed: %d ctx: %d req_id:%llu",
+			rc, ctx_data->ctx_id, packet->header.request_id);
+		goto end;
+	}
+
+	rc = cam_cre_mgr_create_cre_reg_buf(hw_mgr, packet, prepare_args,
+		ctx_data, request_idx);
+	if (rc) {
+		CAM_ERR(CAM_CRE,
+			"create kmd buf failed: %d ctx: %d request_id:%llu",
+			rc, ctx_data->ctx_id, packet->header.request_id);
+		goto end;
+	}
+
+	rc = cam_cre_process_generic_cmd_buffer(packet, ctx_data,
+		request_idx, NULL);
+	if (rc) {
+		CAM_ERR(CAM_CRE, "Failed: %d ctx: %d req_id: %llu req_idx: %d",
+			rc, ctx_data->ctx_id, packet->header.request_id,
+			request_idx);
+		goto end;
+	}
+
+	prepare_args->num_hw_update_entries = 1;
+	prepare_args->priv = ctx_data->req_list[request_idx];
+	prepare_args->pf_data->packet = packet;
+	cre_req->hang_data.packet = packet;
+	ktime_get_boottime_ts64(&ts);
+	/* Cast before multiply so 32-bit time_t cannot overflow */
+	ctx_data->last_req_time = (uint64_t)((uint64_t)ts.tv_sec *
+		1000000000 + ts.tv_nsec);
+	CAM_DBG(CAM_REQ, "req_id= %llu ctx_id= %d lrt=%llu",
+		packet->header.request_id, ctx_data->ctx_id,
+		ctx_data->last_req_time);
+	set_bit(request_idx, ctx_data->bitmap);
+	mutex_unlock(&ctx_data->ctx_mutex);
+
+	CAM_DBG(CAM_REQ, "Prepare Hw update Successful request_id: %llu  ctx: %d",
+		packet->header.request_id, ctx_data->ctx_id);
+	return rc;
+
+end:
+	cam_free_clear((void *)ctx_data->req_list[request_idx]);
+	ctx_data->req_list[request_idx] = NULL;
+req_mem_alloc_failed:
+	clear_bit(request_idx, ctx_data->bitmap);
+	mutex_unlock(&ctx_data->ctx_mutex);
+	return rc;
+}
+
+/*
+ * cam_cre_mgr_enqueue_config() - Queue a configured request onto the CRE
+ * command workqueue for asynchronous submission.
+ * @hw_mgr:      CRE hw manager
+ * @ctx_data:    context the request belongs to
+ * @config_args: config args whose priv holds the cam_cre_request
+ *
+ * Return: 0 on success, -ENOMEM when no workq task is free, or the
+ * enqueue rc.
+ */
+static int cam_cre_mgr_enqueue_config(struct cam_cre_hw_mgr *hw_mgr,
+	struct cam_cre_ctx *ctx_data,
+	struct cam_hw_config_args *config_args)
+{
+	int rc = 0;
+	uint64_t request_id = 0;
+	struct crm_workq_task *task;
+	struct cre_cmd_work_data *task_data;
+	struct cam_cre_request *cre_req = NULL;
+
+	cre_req = config_args->priv;
+	request_id = config_args->request_id;
+
+	CAM_DBG(CAM_CRE, "req_id = %lld %pK", request_id, config_args->priv);
+
+	task = cam_req_mgr_workq_get_task(cre_hw_mgr->cmd_work);
+	if (!task) {
+		CAM_ERR(CAM_CRE, "no empty task");
+		return -ENOMEM;
+	}
+
+	/* The worker resolves the request from hw_mgr + req_idx */
+	task_data = (struct cre_cmd_work_data *)task->payload;
+	task_data->data = (void *)hw_mgr;
+	task_data->req_idx = cre_req->req_idx;
+	task_data->type = CRE_WORKQ_TASK_CMD_TYPE;
+	task->process_cb = cam_cre_mgr_process_cmd;
+	rc = cam_req_mgr_workq_enqueue_task(task, ctx_data,
+		CRM_TASK_PRIORITY_0);
+
+	return rc;
+}
+
+/*
+ * cam_cre_mgr_config_hw() - hw-mgr "config" entry point.
+ * @hw_priv:        struct cam_cre_hw_mgr pointer
+ * @hw_config_args: struct cam_hw_config_args from the context layer
+ *
+ * Updates the clock vote for the request, stamps its submit time and
+ * enqueues it on the CRE command workqueue. Lock order is
+ * hw_mgr_mutex -> ctx_mutex and must match the rest of the file.
+ *
+ * Return: 0 on success, negative errno on bad args/state or enqueue
+ * failure (the request is then unwound via the config-err handler).
+ */
+static int cam_cre_mgr_config_hw(void *hw_priv, void *hw_config_args)
+{
+	int rc = 0;
+	struct cam_cre_hw_mgr *hw_mgr = hw_priv;
+	struct cam_hw_config_args *config_args = hw_config_args;
+	struct cam_cre_ctx *ctx_data = NULL;
+	struct cam_cre_request *cre_req = NULL;
+
+	CAM_DBG(CAM_CRE, "E");
+	if (!hw_mgr || !config_args) {
+		CAM_ERR(CAM_CRE, "Invalid arguments %pK %pK",
+			hw_mgr, config_args);
+		return -EINVAL;
+	}
+
+	if (!config_args->num_hw_update_entries) {
+		CAM_ERR(CAM_CRE, "No hw update enteries are available");
+		return -EINVAL;
+	}
+
+	ctx_data = config_args->ctxt_to_hw_map;
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	mutex_lock(&ctx_data->ctx_mutex);
+	if (ctx_data->ctx_state != CRE_CTX_STATE_ACQUIRED) {
+		mutex_unlock(&ctx_data->ctx_mutex);
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		CAM_ERR(CAM_CRE, "ctx id :%u is not in use",
+			ctx_data->ctx_id);
+		return -EINVAL;
+	}
+
+	/* priv was set by prepare_hw_update to the request slot */
+	cre_req = config_args->priv;
+
+	cam_cre_mgr_cre_clk_update(hw_mgr, ctx_data, cre_req->req_idx);
+	/* submit_timestamp feeds the request-timeout check in hw_dump */
+	ctx_data->req_list[cre_req->req_idx]->submit_timestamp = ktime_get();
+
+	if (cre_req->request_id <= ctx_data->last_flush_req)
+		CAM_WARN(CAM_CRE,
+			"Anomaly submitting flushed req %llu [last_flush %llu] in ctx %u",
+			cre_req->request_id, ctx_data->last_flush_req,
+			ctx_data->ctx_id);
+
+	rc = cam_cre_mgr_enqueue_config(hw_mgr, ctx_data, config_args);
+	if (rc)
+		goto config_err;
+
+	CAM_DBG(CAM_REQ, "req_id %llu, ctx_id %u io config",
+		cre_req->request_id, ctx_data->ctx_id);
+
+	mutex_unlock(&ctx_data->ctx_mutex);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	return rc;
+config_err:
+	cam_cre_mgr_handle_config_err(config_args, ctx_data);
+	mutex_unlock(&ctx_data->ctx_mutex);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	return rc;
+}
+
+/*
+ * cam_cre_mgr_print_io_bufs() - Page-fault debug helper: dump every IO
+ * buffer in the packet and flag the one matching the faulting buffer.
+ * @pf_buf_info: handle/fd of the faulted buffer reported by SMMU
+ * @mem_found:   out flag, set true when the faulting fd matches an
+ *               IO config entry (may be NULL)
+ *
+ * NOTE(review): buffers are resolved with the iommu_hdl/sec_mmu_hdl
+ * parameters but the final patch dump uses the global cre_hw_mgr
+ * handles — confirm these are always the same handles.
+ */
+static void cam_cre_mgr_print_io_bufs(struct cam_packet *packet,
+	int32_t iommu_hdl, int32_t sec_mmu_hdl, uint32_t pf_buf_info,
+	bool *mem_found)
+{
+	dma_addr_t   iova_addr;
+	size_t     src_buf_size;
+	int        i;
+	int        j;
+	int        rc = 0;
+	int32_t    mmu_hdl;
+
+	struct cam_buf_io_cfg  *io_cfg = NULL;
+
+	if (mem_found)
+		*mem_found = false;
+
+	/* io_configs_offset is in bytes relative to payload */
+	io_cfg = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload +
+		packet->io_configs_offset / 4);
+
+	for (i = 0; i < packet->num_io_configs; i++) {
+		for (j = 0; j < CAM_PACKET_MAX_PLANES; j++) {
+			/* planes are packed; first zero handle ends the list */
+			if (!io_cfg[i].mem_handle[j])
+				break;
+
+			if (GET_FD_FROM_HANDLE(io_cfg[i].mem_handle[j]) ==
+				GET_FD_FROM_HANDLE(pf_buf_info)) {
+				CAM_INFO(CAM_CRE,
+					"Found PF at port: %d mem %x fd: %x",
+					io_cfg[i].resource_type,
+					io_cfg[i].mem_handle[j],
+					pf_buf_info);
+				if (mem_found)
+					*mem_found = true;
+			}
+
+			CAM_INFO(CAM_CRE, "port: %d f: %d format: %d dir %d",
+				io_cfg[i].resource_type,
+				io_cfg[i].fence,
+				io_cfg[i].format,
+				io_cfg[i].direction);
+
+			/* Secure buffers map through the secure MMU handle */
+			mmu_hdl = cam_mem_is_secure_buf(
+				io_cfg[i].mem_handle[j]) ? sec_mmu_hdl :
+				iommu_hdl;
+			rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[j],
+				mmu_hdl, &iova_addr, &src_buf_size);
+			if (rc < 0) {
+				CAM_ERR(CAM_UTIL,
+					"get src buf address fail rc %d mem %x",
+					rc, io_cfg[i].mem_handle[j]);
+				continue;
+			}
+			/* CRE addresses are 32-bit; flag anything above 4GB */
+			if ((iova_addr & 0xFFFFFFFF) != iova_addr) {
+				CAM_ERR(CAM_CRE, "Invalid mapped address");
+				rc = -EINVAL;
+				continue;
+			}
+
+			CAM_INFO(CAM_CRE,
+				"pln %d dir %d w %d h %d s %u sh %u sz %d addr 0x%x off 0x%x memh %x",
+				j, io_cfg[i].direction,
+				io_cfg[i].planes[j].width,
+				io_cfg[i].planes[j].height,
+				io_cfg[i].planes[j].plane_stride,
+				io_cfg[i].planes[j].slice_height,
+				(int32_t)src_buf_size,
+				(unsigned int)iova_addr,
+				io_cfg[i].offsets[j],
+				io_cfg[i].mem_handle[j]);
+
+			iova_addr += io_cfg[i].offsets[j];
+
+		}
+	}
+	cam_packet_dump_patch_info(packet, cre_hw_mgr->iommu_hdl,
+		cre_hw_mgr->iommu_sec_hdl);
+}
+
+/*
+ * cam_cre_mgr_cmd() - hw-mgr "cmd" entry point; dispatches generic
+ * hw-mgr commands. Only page-fault info dump is supported.
+ */
+static int cam_cre_mgr_cmd(void *hw_mgr_priv, void *cmd_args)
+{
+	struct cam_cre_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_cmd_args *hw_cmd_args = cmd_args;
+	int rc = 0;
+
+	if (!hw_mgr_priv || !cmd_args) {
+		CAM_ERR(CAM_CRE, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	switch (hw_cmd_args->cmd_type) {
+	case CAM_HW_MGR_CMD_DUMP_PF_INFO: {
+		struct cam_hw_cmd_pf_args *pf_args = &hw_cmd_args->u.pf_args;
+
+		cam_cre_mgr_print_io_bufs(
+			pf_args->pf_data.packet,
+			hw_mgr->iommu_hdl,
+			hw_mgr->iommu_sec_hdl,
+			pf_args->buf_info,
+			pf_args->mem_found);
+		break;
+	}
+	default:
+		CAM_ERR(CAM_CRE, "Invalid cmd");
+	}
+
+	return rc;
+}
+
+/*
+ * cam_cre_mgr_flush_req() - Flush the single pending request named in
+ * flush_args->flush_req_pending[0] from the context's request list.
+ * Caller holds ctx_data->ctx_mutex and has verified num_req_pending.
+ */
+static int cam_cre_mgr_flush_req(struct cam_cre_ctx *ctx_data,
+	struct cam_hw_flush_args *flush_args)
+{
+	struct cam_cre_request *req;
+	int64_t target_req_id;
+	int i;
+
+	target_req_id = *(int64_t *)flush_args->flush_req_pending[0];
+	for (i = 0; i < CAM_CTX_REQ_MAX; i++) {
+		req = ctx_data->req_list[i];
+		if (!req || req->request_id != target_req_id)
+			continue;
+
+		req->request_id = 0;
+		cam_cre_free_io_config(req);
+		cam_free_clear(req);
+		ctx_data->req_list[i] = NULL;
+		clear_bit(i, ctx_data->bitmap);
+	}
+
+	return 0;
+}
+
+/*
+ * cam_cre_mgr_flush_all() - Reset the CRE hw and drop every request
+ * queued on the context. Takes ctx_data->ctx_mutex itself.
+ */
+static int cam_cre_mgr_flush_all(struct cam_cre_ctx *ctx_data,
+	struct cam_hw_flush_args *flush_args)
+{
+	struct cam_cre_request *req;
+	int rc;
+	int slot;
+
+	mutex_lock(&ctx_data->ctx_mutex);
+	rc = cam_cre_mgr_reset_hw();
+
+	for (slot = 0; slot < CAM_CTX_REQ_MAX; slot++) {
+		req = ctx_data->req_list[slot];
+		if (!req)
+			continue;
+
+		req->request_id = 0;
+		cam_cre_free_io_config(req);
+		cam_free_clear(req);
+		ctx_data->req_list[slot] = NULL;
+		clear_bit(slot, ctx_data->bitmap);
+	}
+	mutex_unlock(&ctx_data->ctx_mutex);
+
+	return rc;
+}
+
+/*
+ * cam_cre_mgr_hw_dump() - hw-mgr "dump" entry point, invoked when the
+ * context layer suspects a stuck request.
+ * @hw_priv:      struct cam_cre_hw_mgr pointer
+ * @hw_dump_args: struct cam_hw_dump_args naming the request
+ *
+ * Looks the request up in the context, compares its age against
+ * CRE_REQUEST_TIMEOUT and only logs an error when the request has been
+ * with the hw longer than that threshold.
+ *
+ * Return: 0 always (missing request or young request is not an error),
+ * -EINVAL for bad arguments.
+ */
+static int cam_cre_mgr_hw_dump(void *hw_priv, void *hw_dump_args)
+{
+	struct cam_cre_ctx *ctx_data;
+	struct cam_cre_hw_mgr *hw_mgr = hw_priv;
+	struct cam_hw_dump_args  *dump_args;
+	int idx;
+	ktime_t cur_time;
+	struct timespec64 cur_ts, req_ts;
+	uint64_t diff;
+
+	if ((!hw_priv) || (!hw_dump_args)) {
+		CAM_ERR(CAM_CRE, "Invalid params %pK %pK",
+			hw_priv, hw_dump_args);
+		return -EINVAL;
+	}
+
+	dump_args = (struct cam_hw_dump_args *)hw_dump_args;
+	ctx_data = dump_args->ctxt_to_hw_map;
+
+	if (!ctx_data) {
+		CAM_ERR(CAM_CRE, "Invalid context");
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	mutex_lock(&ctx_data->ctx_mutex);
+
+	CAM_INFO(CAM_CRE, "Req %lld", dump_args->request_id);
+	for (idx = 0; idx < CAM_CTX_REQ_MAX; idx++) {
+		if (!ctx_data->req_list[idx])
+			continue;
+
+		if (ctx_data->req_list[idx]->request_id ==
+			dump_args->request_id)
+			break;
+	}
+
+	/* no matching request found */
+	if (idx == CAM_CTX_REQ_MAX) {
+		mutex_unlock(&ctx_data->ctx_mutex);
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return 0;
+	}
+
+	cur_time = ktime_get();
+	diff = ktime_us_delta(cur_time,
+			ctx_data->req_list[idx]->submit_timestamp);
+	cur_ts = ktime_to_timespec64(cur_time);
+	req_ts = ktime_to_timespec64(ctx_data->req_list[idx]->submit_timestamp);
+
+	/* diff is in us; CRE_REQUEST_TIMEOUT is in ms */
+	if (diff < (CRE_REQUEST_TIMEOUT * 1000)) {
+		CAM_INFO(CAM_CRE, "No Error req %llu %ld:%06ld %ld:%06ld",
+			dump_args->request_id,
+			req_ts.tv_sec,
+			req_ts.tv_nsec/NSEC_PER_USEC,
+			cur_ts.tv_sec,
+			cur_ts.tv_nsec/NSEC_PER_USEC,
+		mutex_unlock(&ctx_data->ctx_mutex);
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return 0;
+	}
+
+	CAM_ERR(CAM_CRE, "Error req %llu %ld:%06ld %ld:%06ld",
+		dump_args->request_id,
+		req_ts.tv_sec,
+		req_ts.tv_nsec/NSEC_PER_USEC,
+		cur_ts.tv_sec,
+		cur_ts.tv_nsec/NSEC_PER_USEC);
+
+	mutex_unlock(&ctx_data->ctx_mutex);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	return 0;
+}
+
+/*
+ * cam_cre_mgr_hw_flush() - hw-mgr "flush" entry point.
+ * @hw_priv:       hw-mgr private (unused beyond NULL check; the global
+ *                 cre_hw_mgr is used instead)
+ * @hw_flush_args: struct cam_hw_flush_args
+ *
+ * CAM_FLUSH_TYPE_ALL resets the hw and drops all requests (under
+ * hw_mgr_mutex; flush_all takes ctx_mutex itself). CAM_FLUSH_TYPE_REQ
+ * flushes a single pending request under ctx_mutex only; flushing an
+ * active request is rejected. Lock choice per flush type is deliberate —
+ * do not reorder.
+ *
+ * Return: 0 on success, -EINVAL on bad args or unsupported flush.
+ */
+static int cam_cre_mgr_hw_flush(void *hw_priv, void *hw_flush_args)
+{
+	struct cam_hw_flush_args *flush_args = hw_flush_args;
+	struct cam_cre_ctx *ctx_data;
+	struct cam_cre_hw_mgr *hw_mgr = cre_hw_mgr;
+
+	if ((!hw_priv) || (!hw_flush_args)) {
+		CAM_ERR(CAM_CRE, "Input params are Null");
+		return -EINVAL;
+	}
+
+	ctx_data = flush_args->ctxt_to_hw_map;
+	if (!ctx_data) {
+		CAM_ERR(CAM_CRE, "Ctx data is NULL");
+		return -EINVAL;
+	}
+
+	/* Range check up front; the switch default is a second backstop */
+	if ((flush_args->flush_type >= CAM_FLUSH_TYPE_MAX) ||
+		(flush_args->flush_type < CAM_FLUSH_TYPE_REQ)) {
+		CAM_ERR(CAM_CRE, "Invalid flush type: %d",
+			flush_args->flush_type);
+		return -EINVAL;
+	}
+
+	switch (flush_args->flush_type) {
+	case CAM_FLUSH_TYPE_ALL:
+		mutex_lock(&hw_mgr->hw_mgr_mutex);
+		ctx_data->last_flush_req = flush_args->last_flush_req;
+
+		CAM_DBG(CAM_REQ, "ctx_id %d Flush type %d last_flush_req %u",
+				ctx_data->ctx_id, flush_args->flush_type,
+				ctx_data->last_flush_req);
+
+		cam_cre_mgr_flush_all(ctx_data, flush_args);
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		break;
+	case CAM_FLUSH_TYPE_REQ:
+		mutex_lock(&ctx_data->ctx_mutex);
+		if (flush_args->num_req_active) {
+			CAM_ERR(CAM_CRE, "Flush request is not supported");
+			mutex_unlock(&ctx_data->ctx_mutex);
+			return -EINVAL;
+		}
+		if (flush_args->num_req_pending)
+			cam_cre_mgr_flush_req(ctx_data, flush_args);
+		mutex_unlock(&ctx_data->ctx_mutex);
+		break;
+	default:
+		CAM_ERR(CAM_CRE, "Invalid flush type: %d",
+				flush_args->flush_type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * cam_cre_mgr_alloc_devs() - Allocate the hw-intf pointer array for the
+ * number of CRE devices declared in the device tree ("num-cre").
+ *
+ * Return: 0 on success, -EINVAL if the DT property is missing,
+ * -ENOMEM on allocation failure.
+ */
+static int cam_cre_mgr_alloc_devs(struct device_node *of_node)
+{
+	int rc;
+	uint32_t num_dev;
+
+	rc = of_property_read_u32(of_node, "num-cre", &num_dev);
+	if (rc) {
+		CAM_ERR(CAM_CRE, "getting num of cre failed: %d", rc);
+		return -EINVAL;
+	}
+
+	/* kcalloc checks num_dev * size for overflow, unlike kzalloc */
+	cre_hw_mgr->devices[CRE_DEV_CRE] = kcalloc(num_dev,
+		sizeof(struct cam_hw_intf *), GFP_KERNEL);
+	if (!cre_hw_mgr->devices[CRE_DEV_CRE])
+		return -ENOMEM;
+
+	return 0;
+}
+
+/*
+ * cam_cre_mgr_init_devs() - Resolve every "compat-hw-name" child node to
+ * its platform device and cache the hw interfaces in cre_hw_mgr.
+ *
+ * Return: 0 on success, negative errno on any lookup failure (the
+ * device array allocated by cam_cre_mgr_alloc_devs is freed on failure).
+ */
+static int cam_cre_mgr_init_devs(struct device_node *of_node)
+{
+	int rc = 0;
+	int count, i;
+	const char *name = NULL;
+	struct device_node *child_node = NULL;
+	struct platform_device *child_pdev = NULL;
+	struct cam_hw_intf *child_dev_intf = NULL;
+	struct cam_hw_info *cre_dev;
+	struct cam_hw_soc_info *soc_info = NULL;
+
+	rc = cam_cre_mgr_alloc_devs(of_node);
+	if (rc)
+		return rc;
+
+	/* of_property_count_strings() returns negative on error, so a
+	 * plain !count test would miss a missing/bad property.
+	 */
+	count = of_property_count_strings(of_node, "compat-hw-name");
+	if (count <= 0) {
+		CAM_ERR(CAM_CRE, "no compat hw found in dev tree, cnt = %d",
+			count);
+		rc = -EINVAL;
+		goto compat_hw_name_failed;
+	}
+
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node, "compat-hw-name",
+			i, &name);
+		if (rc) {
+			CAM_ERR(CAM_CRE, "getting dev object name failed");
+			goto compat_hw_name_failed;
+		}
+
+		child_node = of_find_node_by_name(NULL, name);
+		if (!child_node) {
+			CAM_ERR(CAM_CRE, "Cannot find node in dtsi %s", name);
+			rc = -ENODEV;
+			goto compat_hw_name_failed;
+		}
+
+		child_pdev = of_find_device_by_node(child_node);
+		if (!child_pdev) {
+			CAM_ERR(CAM_CRE, "failed to find device on bus %s",
+				child_node->name);
+			rc = -ENODEV;
+			of_node_put(child_node);
+			goto compat_hw_name_failed;
+		}
+
+		child_dev_intf = (struct cam_hw_intf *)platform_get_drvdata(
+			child_pdev);
+		if (!child_dev_intf) {
+			CAM_ERR(CAM_CRE, "no child device");
+			/* was falling through with rc == 0 (spurious success) */
+			rc = -ENODEV;
+			of_node_put(child_node);
+			goto compat_hw_name_failed;
+		}
+		cre_hw_mgr->devices[child_dev_intf->hw_type]
+			[child_dev_intf->hw_idx] = child_dev_intf;
+
+		if (!child_dev_intf->hw_ops.process_cmd) {
+			/* was leaking the node ref and returning rc == 0 */
+			rc = -EINVAL;
+			of_node_put(child_node);
+			goto compat_hw_name_failed;
+		}
+
+		of_node_put(child_node);
+	}
+
+	cre_hw_mgr->num_cre = count;
+	/* Cache per-device interfaces; soc_info deref sanity-checks hw_priv */
+	for (i = 0; i < count; i++) {
+		cre_hw_mgr->cre_dev_intf[i] =
+			cre_hw_mgr->devices[CRE_DEV_CRE][i];
+		cre_dev = cre_hw_mgr->cre_dev_intf[i]->hw_priv;
+		soc_info = &cre_dev->soc_info;
+	}
+
+	return 0;
+compat_hw_name_failed:
+	kfree(cre_hw_mgr->devices[CRE_DEV_CRE]);
+	cre_hw_mgr->devices[CRE_DEV_CRE] = NULL;
+	return rc;
+}
+
+/* work_struct trampoline for the CRE command workqueue */
+static void cam_req_mgr_process_cre_command_queue(struct work_struct *w)
+{
+	cam_req_mgr_process_workq(w);
+}
+/* work_struct trampoline for the CRE message (IRQ) workqueue */
+static void cam_req_mgr_process_cre_msg_queue(struct work_struct *w)
+{
+	cam_req_mgr_process_workq(w);
+}
+
+/* work_struct trampoline for the CRE timer workqueue */
+static void cam_req_mgr_process_cre_timer_queue(struct work_struct *w)
+{
+	cam_req_mgr_process_workq(w);
+}
+
+/*
+ * cam_cre_mgr_create_wq() - Create the command, message and timer
+ * workqueues plus their per-task payload arrays and wire each task's
+ * payload pointer.
+ *
+ * On any failure the goto chain unwinds everything created so far, in
+ * reverse order of creation.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int cam_cre_mgr_create_wq(void)
+{
+
+	int rc;
+	int i;
+
+	rc = cam_req_mgr_workq_create("cre_command_queue", CRE_WORKQ_NUM_TASK,
+		&cre_hw_mgr->cmd_work, CRM_WORKQ_USAGE_NON_IRQ,
+		0, cam_req_mgr_process_cre_command_queue);
+	if (rc) {
+		CAM_ERR(CAM_CRE, "unable to create a command worker");
+		goto cmd_work_failed;
+	}
+
+	rc = cam_req_mgr_workq_create("cre_message_queue", CRE_WORKQ_NUM_TASK,
+		&cre_hw_mgr->msg_work, CRM_WORKQ_USAGE_IRQ, 0,
+		cam_req_mgr_process_cre_msg_queue);
+	if (rc) {
+		CAM_ERR(CAM_CRE, "unable to create a message worker");
+		goto msg_work_failed;
+	}
+
+	rc = cam_req_mgr_workq_create("cre_timer_queue", CRE_WORKQ_NUM_TASK,
+		&cre_hw_mgr->timer_work, CRM_WORKQ_USAGE_IRQ, 0,
+		cam_req_mgr_process_cre_timer_queue);
+	if (rc) {
+		CAM_ERR(CAM_CRE, "unable to create a timer worker");
+		goto timer_work_failed;
+	}
+
+	/* One payload slot per workq task, per queue */
+	cre_hw_mgr->cmd_work_data =
+		kzalloc(sizeof(struct cre_cmd_work_data) * CRE_WORKQ_NUM_TASK,
+		GFP_KERNEL);
+	if (!cre_hw_mgr->cmd_work_data) {
+		rc = -ENOMEM;
+		goto cmd_work_data_failed;
+	}
+
+	cre_hw_mgr->msg_work_data =
+		kzalloc(sizeof(struct cre_msg_work_data) * CRE_WORKQ_NUM_TASK,
+		GFP_KERNEL);
+	if (!cre_hw_mgr->msg_work_data) {
+		rc = -ENOMEM;
+		goto msg_work_data_failed;
+	}
+
+	cre_hw_mgr->timer_work_data =
+		kzalloc(sizeof(struct cre_clk_work_data) * CRE_WORKQ_NUM_TASK,
+		GFP_KERNEL);
+	if (!cre_hw_mgr->timer_work_data) {
+		rc = -ENOMEM;
+		goto timer_work_data_failed;
+	}
+
+	/* Attach payload storage to every pre-allocated task */
+	for (i = 0; i < CRE_WORKQ_NUM_TASK; i++)
+		cre_hw_mgr->msg_work->task.pool[i].payload =
+				&cre_hw_mgr->msg_work_data[i];
+
+	for (i = 0; i < CRE_WORKQ_NUM_TASK; i++)
+		cre_hw_mgr->cmd_work->task.pool[i].payload =
+				&cre_hw_mgr->cmd_work_data[i];
+
+	for (i = 0; i < CRE_WORKQ_NUM_TASK; i++)
+		cre_hw_mgr->timer_work->task.pool[i].payload =
+				&cre_hw_mgr->timer_work_data[i];
+	return 0;
+
+timer_work_data_failed:
+	kfree(cre_hw_mgr->msg_work_data);
+msg_work_data_failed:
+	kfree(cre_hw_mgr->cmd_work_data);
+cmd_work_data_failed:
+	cam_req_mgr_workq_destroy(&cre_hw_mgr->timer_work);
+timer_work_failed:
+	cam_req_mgr_workq_destroy(&cre_hw_mgr->msg_work);
+msg_work_failed:
+	cam_req_mgr_workq_destroy(&cre_hw_mgr->cmd_work);
+cmd_work_failed:
+	return rc;
+}
+
+/*
+ * cam_cre_create_debug_fs() - Create the camera_cre debugfs directory
+ * and its frame/request dump toggles.
+ *
+ * NOTE(review): debugfs_create_dir() reports failure via ERR_PTR (not
+ * NULL) and debugfs_create_bool() returns void on kernels >= 5.5 —
+ * confirm these NULL checks match the target kernel's debugfs API.
+ *
+ * Return: 0 on success, -ENOMEM on any debugfs failure (the directory
+ * is removed recursively on partial failure).
+ */
+static int cam_cre_create_debug_fs(void)
+{
+	cre_hw_mgr->dentry = debugfs_create_dir("camera_cre",
+		NULL);
+
+	if (!cre_hw_mgr->dentry) {
+		CAM_ERR(CAM_CRE, "failed to create dentry");
+		return -ENOMEM;
+	}
+
+	if (!debugfs_create_bool("frame_dump_enable",
+		0644,
+		cre_hw_mgr->dentry,
+		&cre_hw_mgr->frame_dump_enable)) {
+		CAM_ERR(CAM_CRE,
+			"failed to create dump_enable_debug");
+		goto err;
+	}
+
+	if (!debugfs_create_bool("dump_req_data_enable",
+		0644,
+		cre_hw_mgr->dentry,
+		&cre_hw_mgr->dump_req_data_enable)) {
+		CAM_ERR(CAM_CRE,
+			"failed to create dump_enable_debug");
+		goto err;
+	}
+
+	return 0;
+err:
+	debugfs_remove_recursive(cre_hw_mgr->dentry);
+	return -ENOMEM;
+}
+
+/*
+ * cam_cre_hw_mgr_init() - Allocate and initialize the global CRE hw
+ * manager and publish its callbacks through hw_mgr_intf.
+ * @of_node:    CRE hw-mgr device-tree node
+ * @hw_mgr_hdl: out: cast-to struct cam_hw_mgr_intf to fill in
+ * @iommu_hdl:  optional out: non-secure iommu handle
+ *
+ * Sets up per-context request bitmaps and mutexes, resolves the CRE
+ * devices from DT, allocates the context bitmap, acquires SMMU handles
+ * and creates the workqueues. All failures unwind in reverse order via
+ * the goto chain (dev_init_failed intentionally falls through into the
+ * per-context cleanup loop).
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int cam_cre_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl,
+	int *iommu_hdl)
+{
+	int i, rc = 0, j;
+	struct cam_hw_mgr_intf *hw_mgr_intf;
+
+	if (!of_node || !hw_mgr_hdl) {
+		CAM_ERR(CAM_CRE, "Invalid args of_node %pK hw_mgr %pK",
+			of_node, hw_mgr_hdl);
+		return -EINVAL;
+	}
+	hw_mgr_intf = (struct cam_hw_mgr_intf *)hw_mgr_hdl;
+
+	cre_hw_mgr = kzalloc(sizeof(struct cam_cre_hw_mgr), GFP_KERNEL);
+	if (!cre_hw_mgr) {
+		/* %zu for sizeof (size_t); %d is a format mismatch */
+		CAM_ERR(CAM_CRE, "Unable to allocate mem for: size = %zu",
+			sizeof(struct cam_cre_hw_mgr));
+		return -ENOMEM;
+	}
+
+	/* Publish hw-mgr ops; start/stop/read/write are not supported */
+	hw_mgr_intf->hw_mgr_priv = cre_hw_mgr;
+	hw_mgr_intf->hw_get_caps = cam_cre_mgr_get_hw_caps;
+	hw_mgr_intf->hw_acquire = cam_cre_mgr_acquire_hw;
+	hw_mgr_intf->hw_release = cam_cre_mgr_release_hw;
+	hw_mgr_intf->hw_start   = NULL;
+	hw_mgr_intf->hw_stop    = NULL;
+	hw_mgr_intf->hw_prepare_update = cam_cre_mgr_prepare_hw_update;
+	hw_mgr_intf->hw_config_stream_settings = NULL;
+	hw_mgr_intf->hw_config = cam_cre_mgr_config_hw;
+	hw_mgr_intf->hw_read   = NULL;
+	hw_mgr_intf->hw_write  = NULL;
+	hw_mgr_intf->hw_cmd = cam_cre_mgr_cmd;
+	hw_mgr_intf->hw_flush = cam_cre_mgr_hw_flush;
+	hw_mgr_intf->hw_dump = cam_cre_mgr_hw_dump;
+
+	cre_hw_mgr->secure_mode = false;
+	mutex_init(&cre_hw_mgr->hw_mgr_mutex);
+	spin_lock_init(&cre_hw_mgr->hw_mgr_lock);
+
+	/* Per-context request-slot bitmap and mutex */
+	for (i = 0; i < CRE_CTX_MAX; i++) {
+		cre_hw_mgr->ctx[i].bitmap_size =
+			BITS_TO_LONGS(CAM_CTX_REQ_MAX) *
+			sizeof(long);
+		cre_hw_mgr->ctx[i].bitmap = kzalloc(
+			cre_hw_mgr->ctx[i].bitmap_size, GFP_KERNEL);
+		if (!cre_hw_mgr->ctx[i].bitmap) {
+			CAM_ERR(CAM_CRE, "bitmap allocation failed: size = %d",
+				cre_hw_mgr->ctx[i].bitmap_size);
+			rc = -ENOMEM;
+			goto cre_ctx_bitmap_failed;
+		}
+		cre_hw_mgr->ctx[i].bits = cre_hw_mgr->ctx[i].bitmap_size *
+			BITS_PER_BYTE;
+		mutex_init(&cre_hw_mgr->ctx[i].ctx_mutex);
+	}
+
+	rc = cam_cre_mgr_init_devs(of_node);
+	if (rc)
+		goto dev_init_failed;
+
+	cre_hw_mgr->ctx_bitmap_size =
+		BITS_TO_LONGS(CRE_CTX_MAX) * sizeof(long);
+	cre_hw_mgr->ctx_bitmap = kzalloc(cre_hw_mgr->ctx_bitmap_size,
+		GFP_KERNEL);
+	if (!cre_hw_mgr->ctx_bitmap) {
+		rc = -ENOMEM;
+		goto ctx_bitmap_alloc_failed;
+	}
+
+	cre_hw_mgr->ctx_bits = cre_hw_mgr->ctx_bitmap_size *
+		BITS_PER_BYTE;
+
+	rc = cam_smmu_get_handle("cre", &cre_hw_mgr->iommu_hdl);
+	if (rc) {
+		CAM_ERR(CAM_CRE, "get mmu handle failed: %d", rc);
+		goto cre_get_hdl_failed;
+	}
+
+	rc = cam_smmu_get_handle("cam-secure", &cre_hw_mgr->iommu_sec_hdl);
+	if (rc) {
+		CAM_ERR(CAM_CRE, "get secure mmu handle failed: %d", rc);
+		goto secure_hdl_failed;
+	}
+
+	rc = cam_cre_mgr_create_wq();
+	if (rc)
+		goto cre_wq_create_failed;
+
+	/* debugfs is best-effort; init succeeds without it */
+	cam_cre_create_debug_fs();
+
+	if (iommu_hdl)
+		*iommu_hdl = cre_hw_mgr->iommu_hdl;
+
+	return rc;
+
+cre_wq_create_failed:
+	cam_smmu_destroy_handle(cre_hw_mgr->iommu_sec_hdl);
+	cre_hw_mgr->iommu_sec_hdl = -1;
+secure_hdl_failed:
+	cam_smmu_destroy_handle(cre_hw_mgr->iommu_hdl);
+	cre_hw_mgr->iommu_hdl = -1;
+cre_get_hdl_failed:
+	cam_free_clear(cre_hw_mgr->ctx_bitmap);
+	cre_hw_mgr->ctx_bitmap = NULL;
+	cre_hw_mgr->ctx_bitmap_size = 0;
+	cre_hw_mgr->ctx_bits = 0;
+ctx_bitmap_alloc_failed:
+	cam_free_clear(cre_hw_mgr->devices[CRE_DEV_CRE]);
+	cre_hw_mgr->devices[CRE_DEV_CRE] = NULL;
+dev_init_failed:
+cre_ctx_bitmap_failed:
+	mutex_destroy(&cre_hw_mgr->hw_mgr_mutex);
+	/* Unwind only the contexts fully initialized before failure */
+	for (j = i - 1; j >= 0; j--) {
+		mutex_destroy(&cre_hw_mgr->ctx[j].ctx_mutex);
+		cam_free_clear(cre_hw_mgr->ctx[j].bitmap);
+		cre_hw_mgr->ctx[j].bitmap = NULL;
+		cre_hw_mgr->ctx[j].bitmap_size = 0;
+		cre_hw_mgr->ctx[j].bits = 0;
+	}
+	cam_free_clear(cre_hw_mgr);
+	cre_hw_mgr = NULL;
+
+	return rc;
+}

+ 447 - 0
drivers/cam_cre/cam_cre_hw_mgr/cam_cre_hw_mgr.h

@@ -0,0 +1,447 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_CRE_HW_MGR_H
+#define CAM_CRE_HW_MGR_H
+
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <media/cam_cre.h>
+
+#include "cam_cre_hw_intf.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_hw_intf.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_mem_mgr.h"
+#include "cam_context.h"
+#include "cre_top.h"
+
+#define CRE_CTX_MAX                  32
+#define CAM_FRAME_CMD_MAX            20
+
+#define CRE_WORKQ_NUM_TASK           100
+#define CRE_WORKQ_TASK_CMD_TYPE      1
+#define CRE_WORKQ_TASK_MSG_TYPE      2
+
+#define CRE_PACKET_SIZE              0
+#define CRE_PACKET_TYPE              1
+#define CRE_PACKET_OPCODE            2
+
+#define CRE_PACKET_MAX_CMD_BUFS      4
+
+#define CRE_FRAME_PROCESS_SUCCESS    0
+#define CRE_FRAME_PROCESS_FAILURE    1
+
+#define CRE_CTX_STATE_FREE           0
+#define CRE_CTX_STATE_IN_USE         1
+#define CRE_CTX_STATE_ACQUIRED       2
+#define CRE_CTX_STATE_RELEASE        3
+
+#define CRE_MAX_IN_RES               2
+#define CRE_MAX_OUT_RES              2
+#define CRE_MAX_IO_BUFS              3
+
+#define CAM_CRE_BW_CONFIG_UNKNOWN    0
+#define CAM_CRE_BW_CONFIG_V2         2
+
+#define CRE_DEV_MAX                  1
+#define CLK_HW_CRE                   0x0
+#define CLK_HW_MAX                   0x1
+
+#define CRE_DEVICE_IDLE_TIMEOUT      400
+#define CRE_REQUEST_TIMEOUT          200
+
+#define CAM_CRE_HW_CFG_Q_MAX         50
+
+#define CAM_CRE_MAX_PER_PATH_VOTES   6
+#define CAM_CRE_MAX_REG_SET          32
+/*
+ * Response time threshold in ms beyond which a request is not expected
+ * to be with CRE hw
+ */
+#define CAM_CRE_RESPONSE_TIME_THRESHOLD   100000
+
+/*
+ * struct cam_cre_irq_data
+ *
+ * @error:          IRQ error
+ * @top_irq_status: CRE top irq status
+ * @wr_buf_done:    write engine buf done
+ */
+struct cam_cre_irq_data {
+	uint32_t error;
+	uint32_t top_irq_status;
+	uint32_t wr_buf_done;
+};
+
+
+/**
+ * struct cam_cre_hw_intf_data - CRE hw intf data
+ *
+ * @Brief:        cre hw intf pointer and pid list data
+ *
+ * @hw_intf:      cre hw intf pointer
+ * @num_hw_pid:   Number of pids for this hw
+ * @hw_pid:       cre hw pid values
+ *
+ */
+struct cam_cre_hw_intf_data {
+	struct cam_hw_intf  *hw_intf;
+	uint32_t             num_hw_pid;
+	uint32_t             hw_pid[CRE_DEV_MAX];
+};
+
+/**
+ * struct cam_ctx_clk_info
+ * @curr_fc: Context latest request frame cycles
+ * @rt_flag: Flag to indicate real time request
+ * @base_clk: Base clock to process the request
+ * @reserved: Reserved field
+ * @clk_rate: Supported clock rates for the context
+ * @num_paths: Number of valid AXI paths
+ * @axi_path: ctx based per path bw vote
+ */
+struct cam_ctx_clk_info {
+	uint32_t curr_fc;
+	uint32_t rt_flag;
+	uint32_t base_clk;
+	uint32_t reserved;
+	int32_t clk_rate[CAM_MAX_VOTE];
+	uint32_t num_paths;
+	struct cam_axi_per_path_bw_vote axi_path[CAM_CRE_MAX_PER_PATH_VOTES];
+};
+
+/**
+ * struct cre_cmd_generic_blob
+ * @ctx: Current context info
+ * @req_idx: Index used for request
+ * @io_buf_addr: pointer to io buffer address
+ */
+struct cre_cmd_generic_blob {
+	struct cam_cre_ctx *ctx;
+	uint32_t req_idx;
+	uint64_t *io_buf_addr;
+};
+
+/**
+ * struct cam_cre_clk_info
+ * @base_clk: Base clock to process request
+ * @curr_clk: Current clock of hardware
+ * @threshold: Threshold for overclk count
+ * @over_clked: Over clock count
+ * @num_paths: Number of AXI vote paths
+ * @axi_path: Current per path bw vote info
+ * @hw_type: CRE device type
+ * @watch_dog: watchdog timer handle
+ * @watch_dog_reset_counter: Counter for watch dog reset
+ */
+struct cam_cre_clk_info {
+	uint32_t base_clk;
+	uint32_t curr_clk;
+	uint32_t threshold;
+	uint32_t over_clked;
+	uint32_t num_paths;
+	struct cam_axi_per_path_bw_vote axi_path[CAM_CRE_MAX_PER_PATH_VOTES];
+	uint32_t hw_type;
+	struct cam_req_mgr_timer *watch_dog;
+	uint32_t watch_dog_reset_counter;
+};
+
+/**
+ * struct cre_cmd_work_data
+ *
+ * @type:       Type of work data
+ * @data:       Private data
+ * @req_idx:    Request Idx
+ */
+struct cre_cmd_work_data {
+	uint32_t type;
+	void *data;
+	int64_t req_idx;
+};
+
+/**
+ * struct cre_msg_work_data
+ *
+ * @type:       Type of work data
+ * @data:       Private data
+ * @irq_data:   IRQ data reported by the ISR
+ */
+struct cre_msg_work_data {
+	uint32_t type;
+	void *data;
+	struct cam_cre_irq_data irq_data;
+};
+
+/**
+ * struct cre_clk_work_data
+ *
+ * @type: Type of work data
+ * @data: Private data
+ */
+struct cre_clk_work_data {
+	uint32_t type;
+	void *data;
+};
+
+/**
+ * struct cre_debug_buffer
+ *
+ * @cpu_addr:         CPU address
+ * @iova_addr:        IOVA address
+ * @len:              Buffer length
+ * @size:             Buffer Size
+ * @offset:	      buffer offset
+ */
+struct cre_debug_buffer {
+	uintptr_t cpu_addr;
+	dma_addr_t iova_addr;
+	size_t len;
+	uint32_t size;
+	uint32_t offset;
+};
+
+/**
+ * struct plane_info - Per-plane buffer description
+ *
+ * @cpu_addr:   CPU address of the plane
+ * @iova_addr:  IOVA address of the plane
+ * @width:      Plane width
+ * @height:     Plane height
+ * @stride:     Plane stride
+ * @format:     Plane format
+ * @alignment:  Plane alignment
+ * @offset:     Offset of the plane within the buffer
+ * @x_init:     X init value (TODO confirm exact HW semantics)
+ * @len:        Mapped length of the plane
+ */
+struct plane_info {
+	uintptr_t  cpu_addr;
+	dma_addr_t iova_addr;
+	uint32_t   width;
+	uint32_t   height;
+	uint32_t   stride;
+	uint32_t   format;
+	uint32_t   alignment;
+	uint32_t   offset;
+	uint32_t   x_init;
+	size_t     len;
+};
+
+/**
+ * struct cre_io_buf
+ *
+ * @direction:     Direction of a buffer
+ * @resource_type: Resource type of IO Buffer
+ * @format:        Format
+ * @fence:         Fence
+ * @num_planes:    Number of planes
+ * @p_info:        Per-plane buffer info
+ */
+struct cre_io_buf {
+	uint32_t direction;
+	uint32_t resource_type;
+	uint32_t format;
+	uint32_t fence;
+	uint32_t num_planes;
+	struct   plane_info p_info[CAM_CRE_MAX_PLANES];
+};
+
+/* One register (offset, value) write */
+struct cre_reg_set {
+	uint32_t offset;
+	uint32_t value;
+};
+
+/* Collected read/write register programming queued for one request */
+struct cre_reg_buffer {
+	uint32_t num_rd_reg_set;
+	uint32_t num_wr_reg_set;
+	struct cre_reg_set rd_reg_set[CAM_CRE_MAX_REG_SET];
+	struct cre_reg_set wr_reg_set[CAM_CRE_MAX_REG_SET];
+};
+
+/**
+ * struct cam_cre_clk_bw_request
+ * @budget_ns: Time required to process frame
+ * @frame_cycles: Frame cycles needed to process the frame
+ * @rt_flag: Flag to indicate real time stream
+ * @uncompressed_bw: Bandwidth required to process frame
+ * @compressed_bw: Compressed bandwidth to process frame
+ */
+struct cam_cre_clk_bw_request {
+	uint64_t budget_ns;
+	uint32_t frame_cycles;
+	uint32_t rt_flag;
+	uint64_t uncompressed_bw;
+	uint64_t compressed_bw;
+};
+
+/**
+ * struct cam_cre_clk_bw_req_internal_v2
+ * @budget_ns: Time required to process frame
+ * @frame_cycles: Frame cycles needed to process the frame
+ * @rt_flag: Flag to indicate real time stream
+ * @reserved: Reserved for future use
+ * @num_paths: Number of paths for per path bw vote
+ * @axi_path: Per path vote info for CRE
+ */
+struct cam_cre_clk_bw_req_internal_v2 {
+	uint64_t budget_ns;
+	uint32_t frame_cycles;
+	uint32_t rt_flag;
+	uint32_t reserved;
+	uint32_t num_paths;
+	struct cam_axi_per_path_bw_vote axi_path[CAM_CRE_MAX_PER_PATH_VOTES];
+};
+
+/**
+ * struct cam_cre_request
+ *
+ * @request_id:          Request Id
+ * @req_idx:             Index in request list
+ * @state:               Request state
+ * @num_batch:           Number of batches
+ * @num_frame_bufs:      Number of frame buffers
+ * @num_pass_bufs:       Number of pass Buffers
+ * @num_io_bufs:         Number of IO Buffers
+ * @in_resource:         Input resource
+ * @cre_reg_buf:         Register (offset, value) sets for the request
+ * @cre_debug_buf:       Debug buffer
+ * @io_buf:              IO config info of a request
+ * @clk_info:            Clock Info V1
+ * @clk_info_v2:         Clock Info V2
+ * @hang_data:           Debug data for HW error
+ * @submit_timestamp:    Submit timestamp to hw
+ */
+struct cam_cre_request {
+	uint64_t  request_id;
+	uint32_t  req_idx;
+	uint32_t  state;
+	uint32_t  num_batch;
+	uint32_t  num_frame_bufs;
+	uint32_t  num_pass_bufs;
+	uint32_t  num_io_bufs[CRE_MAX_BATCH_SIZE];
+	uint32_t  in_resource;
+	struct    cre_reg_buffer cre_reg_buf;
+	struct    cre_debug_buffer cre_debug_buf;
+	struct    cre_io_buf *io_buf[CRE_MAX_BATCH_SIZE][CRE_MAX_IO_BUFS];
+	struct    cam_cre_clk_bw_request clk_info;
+	struct    cam_cre_clk_bw_req_internal_v2 clk_info_v2;
+	struct    cam_hw_mgr_dump_pf_data hang_data;
+	ktime_t   submit_timestamp;
+};
+
+/**
+ * struct cam_cre_ctx
+ *
+ * @context_priv:    Private data of context
+ * @bitmap:          Context bit map
+ * @bitmap_size:     Context bit map size
+ * @bits:            Context bit map bits
+ * @ctx_id:          Context ID
+ * @ctx_state:       State of a context
+ * @req_cnt:         Requests count
+ * @ctx_mutex:       Mutex for context
+ * @acquire_dev_cmd: Cam acquire command
+ * @cre_acquire:     CRE acquire command
+ * @ctxt_event_cb:   Callback of a context
+ * @req_list:        Request List
+ * @active_req:      Request currently in flight (TODO confirm lifecycle)
+ * @last_req_time:   Timestamp of last request
+ * @req_watch_dog:   Watchdog for requests
+ * @req_watch_dog_reset_counter: Request reset counter
+ * @clk_info:        CRE Ctx clock info
+ * @clk_watch_dog:   Clock watchdog
+ * @cre_top:         CRE top layer handle
+ * @clk_watch_dog_reset_counter: Reset counter
+ * @last_flush_req: last flush req for this ctx
+ */
+struct cam_cre_ctx {
+	void *context_priv;
+	size_t bitmap_size;
+	void *bitmap;
+	size_t bits;
+	uint32_t ctx_id;
+	uint32_t ctx_state;
+	uint32_t req_cnt;
+	struct mutex ctx_mutex;
+	struct cam_acquire_dev_cmd acquire_dev_cmd;
+	struct cam_cre_acquire_dev_info cre_acquire;
+	cam_hw_event_cb_func ctxt_event_cb;
+	struct cam_cre_request *req_list[CAM_CTX_REQ_MAX];
+	struct cam_cre_request *active_req;
+	uint64_t last_req_time;
+	struct cam_req_mgr_timer *req_watch_dog;
+	uint32_t req_watch_dog_reset_counter;
+	struct cam_ctx_clk_info clk_info;
+	struct cam_req_mgr_timer *clk_watch_dog;
+	struct cre_top *cre_top;
+	uint32_t clk_watch_dog_reset_counter;
+	uint64_t last_flush_req;
+};
+
+/**
+ * struct cam_cre_hw_mgr
+ *
+ * @cren_cnt:          CRE device count (TODO confirm exact semantics)
+ * @cre_ctx_cnt:       Open context count
+ * @hw_mgr_mutex:      Mutex for HW manager
+ * @hw_mgr_lock:       Spinlock for HW manager
+ * @iommu_hdl:         CRE Handle
+ * @iommu_sec_hdl:     CRE Handle for secure
+ * @num_cre:           Number of CRE
+ * @secure_mode:       Secure mode flag of CRE operation
+ * @ctx_bitmap:        Context bit map
+ * @ctx_bitmap_size:   Context bit map size
+ * @ctx_bits:          Context bit map bits
+ * @ctx:               CRE context
+ * @devices:           CRE devices
+ * @cre_caps:          CRE capabilities
+ * @cmd_work:          Command work
+ * @msg_work:          Message work
+ * @timer_work:        Timer work
+ * @cmd_work_data:     Command work data
+ * @msg_work_data:     Message work data
+ * @timer_work_data:   Timer work data
+ * @cre_dev_intf:      CRE device interface
+ * @reg_map:           Register map for each CRE device and base
+ * @clk_info:          CRE clock Info for HW manager
+ * @dentry:            Pointer to CRE debugfs directory
+ * @frame_dump_enable: CRE frame setting dump enablement
+ * @dump_req_data_enable: CRE hang dump enablement
+ */
+struct cam_cre_hw_mgr {
+	int32_t       cren_cnt;
+	uint32_t      cre_ctx_cnt;
+	struct mutex  hw_mgr_mutex;
+	spinlock_t    hw_mgr_lock;
+	int32_t       iommu_hdl;
+	int32_t       iommu_sec_hdl;
+	uint32_t      num_cre;
+	bool          secure_mode;
+	void    *ctx_bitmap;
+	size_t   ctx_bitmap_size;
+	size_t   ctx_bits;
+	struct   cam_cre_ctx  ctx[CRE_CTX_MAX];
+	struct   cam_hw_intf  **devices[CRE_DEV_MAX];
+	struct   cam_cre_query_cap_cmd cre_caps;
+
+	struct cam_req_mgr_core_workq *cmd_work;
+	struct cam_req_mgr_core_workq *msg_work;
+	struct cam_req_mgr_core_workq *timer_work;
+	struct cre_cmd_work_data *cmd_work_data;
+	struct cre_msg_work_data *msg_work_data;
+	struct cre_clk_work_data *timer_work_data;
+	struct cam_hw_intf *cre_dev_intf[CRE_DEV_MAX];
+	struct cam_soc_reg_map *reg_map[CRE_DEV_MAX][CRE_BASE_MAX];
+	struct cam_cre_clk_info clk_info;
+	struct dentry *dentry;
+	bool   frame_dump_enable;
+	bool   dump_req_data_enable;
+};
+
+/**
+ * struct cam_cre_hw_ctx_data
+ *
+ * @context_priv: Context private data, cam_context from
+ *     acquire.
+ * @ctx_mutex: Mutex for context
+ * @cre_dev_acquire_info: Acquire device info
+ * @ctxt_event_cb: Context callback function
+ * @in_use: Flag for context usage
+ * @wait_complete: Completion info
+ * @last_flush_req: req id which was flushed last.
+ */
+struct cam_cre_hw_ctx_data {
+	void *context_priv;
+	struct mutex ctx_mutex;
+	struct cam_cre_acquire_dev_info cre_dev_acquire_info;
+	cam_hw_event_cb_func ctxt_event_cb;
+	bool in_use;
+	struct completion wait_complete;
+	uint64_t last_flush_req;
+};
+#endif /* CAM_CRE_HW_MGR_H */

+ 619 - 0
drivers/cam_cre/cam_cre_hw_mgr/cre_hw/bus_rd/cre_bus_rd.c

@@ -0,0 +1,619 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/iopoll.h>
+#include <media/cam_cre.h>
+#include "cam_io_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cre_core.h"
+#include "cre_soc.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+#include "cre_hw.h"
+#include "cre_dev_intf.h"
+#include "cre_bus_rd.h"
+
+static struct cre_bus_rd *bus_rd;
+
+/*
+ * Append one (offset, value) pair to the request's read register set.
+ * NOTE(review): no bounds check against CAM_CRE_MAX_REG_SET — confirm
+ * callers can never queue more than 32 entries per request.
+ */
+#define update_cre_reg_set(cre_reg_buf, off, val) \
+	do {                                           \
+		cre_reg_buf->rd_reg_set[cre_reg_buf->num_rd_reg_set].offset = (off); \
+		cre_reg_buf->rd_reg_set[cre_reg_buf->num_rd_reg_set].value = (val); \
+		cre_reg_buf->num_rd_reg_set++; \
+	} while (0)
+
+/* Free the per-context bus RD bookkeeping allocated at acquire time. */
+static int cam_cre_bus_rd_release(struct cam_cre_hw *cam_cre_hw_info,
+	int32_t ctx_id, void *data)
+{
+	if (ctx_id < 0 || ctx_id >= CRE_CTX_MAX) {
+		CAM_ERR(CAM_CRE, "Invalid data: %d", ctx_id);
+		return -EINVAL;
+	}
+
+	/* vfree(NULL) is a no-op, so a double release is harmless */
+	vfree(bus_rd->bus_rd_ctx[ctx_id]);
+	bus_rd->bus_rd_ctx[ctx_id] = NULL;
+
+	return 0;
+}
+
+/*
+ * Check whether read-master @rm_id is consumed by any input buffer of
+ * batch @batch_idx in @cre_request.
+ *
+ * Returns true/false, or -EINVAL for an out-of-range batch index.
+ */
+static int cam_cre_bus_is_rm_enabled(
+	struct cam_cre_request *cre_request,
+	uint32_t batch_idx,
+	uint32_t rm_id)
+{
+	int i, k;
+	struct cre_io_buf *io_buf;
+	struct cre_bus_in_port_to_rm *in_port_to_rm;
+
+	if (batch_idx >= CRE_MAX_BATCH_SIZE) {
+		CAM_ERR(CAM_CRE, "Invalid batch idx: %d", batch_idx);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < cre_request->num_io_bufs[batch_idx]; i++) {
+		io_buf = cre_request->io_buf[batch_idx][i];
+		if (io_buf->direction != CAM_BUF_INPUT)
+			continue;
+		/* resource_type assumed 1-based and within CRE_MAX_IN_RES
+		 * — TODO confirm it is validated upstream
+		 */
+		in_port_to_rm =
+			&bus_rd->in_port_to_rm[io_buf->resource_type - 1];
+		for (k = 0; k < io_buf->num_planes; k++) {
+			if (rm_id ==
+				in_port_to_rm->rm_port_id[k])
+				return true;
+		}
+	}
+
+	return false;
+}
+
+/*
+ * Queue the read-master register programming for one input buffer
+ * (io_buf[batch_idx][io_idx]) into @cre_reg_buf: security cfg, client
+ * enable, CCIF meta, image address, width/height, stride, unpacker and
+ * debug config for every plane.
+ *
+ * Returns @cre_reg_buf cast to uint32_t * on success, NULL on bad input.
+ */
+static uint32_t *cam_cre_bus_rd_update(struct cam_cre_hw *cam_cre_hw_info,
+	int32_t ctx_id, struct cre_reg_buffer *cre_reg_buf, int batch_idx,
+	int io_idx, struct cam_cre_dev_prepare_req *prepare)
+{
+	int k;
+	uint32_t req_idx, temp;
+	uint32_t rm_id;
+	struct cam_cre_ctx *ctx_data;
+	struct cam_cre_request *cre_request;
+	struct cre_io_buf *io_buf;
+	struct cam_cre_bus_rd_reg *rd_reg;
+	struct cam_cre_bus_rd_client_reg *rd_reg_client;
+	struct cam_cre_bus_rd_reg_val *rd_reg_val;
+	struct cam_cre_bus_rd_client_reg_val *rd_res_val_client;
+	struct cre_bus_in_port_to_rm *in_port_to_rm;
+	struct cre_bus_rd_io_port_info *io_port_info;
+
+	/* %pK for the pointer (was %x); also bound ctx_id before indexing */
+	if (ctx_id < 0 || ctx_id >= CRE_CTX_MAX || !prepare) {
+		CAM_ERR(CAM_CRE, "Invalid data: %d %pK", ctx_id, prepare);
+		return NULL;
+	}
+
+	if (batch_idx >= CRE_MAX_BATCH_SIZE) {
+		CAM_ERR(CAM_CRE, "Invalid batch idx: %d", batch_idx);
+		return NULL;
+	}
+
+	if (io_idx >= CRE_MAX_IO_BUFS) {
+		CAM_ERR(CAM_CRE, "Invalid IO idx: %d", io_idx);
+		return NULL;
+	}
+
+	ctx_data = prepare->ctx_data;
+	req_idx = prepare->req_idx;
+
+	cre_request = ctx_data->req_list[req_idx];
+	CAM_DBG(CAM_CRE, "req_idx = %d req_id = %lld",
+		req_idx, cre_request->request_id);
+	io_port_info = &bus_rd->bus_rd_ctx[ctx_id]->io_port_info;
+	rd_reg = cam_cre_hw_info->bus_rd_reg_offset;
+	rd_reg_val = cam_cre_hw_info->bus_rd_reg_val;
+	io_buf = cre_request->io_buf[batch_idx][io_idx];
+
+	CAM_DBG(CAM_CRE,
+		"req_idx = %d req_id = %lld rsc %d",
+		req_idx, cre_request->request_id,
+		io_buf->resource_type);
+	CAM_DBG(CAM_CRE, "batch:%d iobuf:%d direction:%d",
+		batch_idx, io_idx, io_buf->direction);
+
+	in_port_to_rm =
+		&bus_rd->in_port_to_rm[io_buf->resource_type - 1];
+
+	/* security cfg is bus level; queue it once per buffer, not per plane */
+	update_cre_reg_set(cre_reg_buf,
+		rd_reg->offset + rd_reg->security_cfg,
+		ctx_data->cre_acquire.secure_mode & 0x1);
+
+	for (k = 0; k < io_buf->num_planes; k++) {
+		/* per-plane read-master client registers */
+		rm_id = in_port_to_rm->rm_port_id[k];
+		rd_reg_client = &rd_reg->rd_clients[rm_id];
+		rd_res_val_client = &rd_reg_val->rd_clients[rm_id];
+
+		/* enable client */
+		update_cre_reg_set(cre_reg_buf,
+			rd_reg->offset + rd_reg_client->core_cfg,
+			1);
+
+		/* ccif meta data */
+		temp = 0;
+		update_cre_reg_set(cre_reg_buf,
+			(rd_reg->offset + rd_reg_client->ccif_meta_data),
+			temp);
+
+		/*
+		 * Address of the Image.
+		 * NOTE(review): iova_addr is dma_addr_t but the reg-set value
+		 * field is 32-bit — confirm the IOVA always fits.
+		 */
+		update_cre_reg_set(cre_reg_buf,
+			rd_reg->offset + rd_reg_client->img_addr,
+			io_buf->p_info[k].iova_addr);
+
+		/* Buffer size */
+		update_cre_reg_set(cre_reg_buf,
+			rd_reg->offset + rd_reg_client->rd_width,
+			io_buf->p_info[k].width);
+		update_cre_reg_set(cre_reg_buf,
+			rd_reg->offset + rd_reg_client->rd_height,
+			io_buf->p_info[k].height);
+
+		/* stride */
+		update_cre_reg_set(cre_reg_buf,
+			rd_reg->offset + rd_reg_client->rd_stride,
+			io_buf->p_info[k].stride);
+
+		/* unpacker cfg : Mode and alignment */
+		temp = 0;
+		temp |= (io_buf->p_info[k].format &
+			rd_res_val_client->mode_mask) <<
+			rd_res_val_client->mode_shift;
+		temp |= (io_buf->p_info[k].alignment &
+			rd_res_val_client->alignment_mask) <<
+			rd_res_val_client->alignment_shift;
+		update_cre_reg_set(cre_reg_buf,
+			rd_reg->offset + rd_reg_client->unpacker_cfg,
+			temp);
+
+		/* latency buffer allocation */
+		update_cre_reg_set(cre_reg_buf,
+			rd_reg->offset + rd_reg_client->latency_buf_allocation,
+			io_port_info->latency_buf_size);
+
+		/* Enable Debug cfg */
+		update_cre_reg_set(cre_reg_buf,
+			rd_reg->offset + rd_reg_client->debug_status_cfg,
+			0xFFFF);
+	}
+
+	return (uint32_t *)cre_reg_buf;
+}
+
+/*
+ * Queue a register write that disables read-master @rm_idx
+ * (core_cfg = 0) for the given batch.
+ *
+ * Returns @cre_reg_buf cast to uint32_t * on success, NULL on bad input.
+ */
+static uint32_t *cam_cre_bus_rm_disable(struct cam_cre_hw *cam_cre_hw_info,
+	int32_t ctx_id, struct cam_cre_dev_prepare_req *prepare,
+	int batch_idx, int rm_idx,
+	struct cre_reg_buffer *cre_reg_buf)
+{
+	struct cam_cre_bus_rd_reg *rd_reg;
+	struct cam_cre_bus_rd_client_reg *rd_reg_client;
+
+	/* %pK for the pointer (was %x); also bound ctx_id */
+	if (ctx_id < 0 || ctx_id >= CRE_CTX_MAX || !prepare) {
+		CAM_ERR(CAM_CRE, "Invalid data: %d %pK", ctx_id, prepare);
+		return NULL;
+	}
+
+	if (batch_idx >= CRE_MAX_BATCH_SIZE) {
+		CAM_ERR(CAM_CRE, "Invalid batch idx: %d", batch_idx);
+		return NULL;
+	}
+
+	if (rm_idx >= CAM_CRE_INPUT_IMAGES_MAX) {
+		CAM_ERR(CAM_CRE, "Invalid read client: %d", rm_idx);
+		return NULL;
+	}
+
+	rd_reg = cam_cre_hw_info->bus_rd_reg_offset;
+	rd_reg_client = &rd_reg->rd_clients[rm_idx];
+
+	/* Core cfg: disable the client */
+	update_cre_reg_set(cre_reg_buf,
+		rd_reg->offset + rd_reg_client->core_cfg,
+		0);
+
+	return (uint32_t *)cre_reg_buf;
+}
+
+/*
+ * Build the complete RD register set for a request: program every input
+ * buffer, disable unused read-masters, then queue the GO command.
+ *
+ * Returns 0 on success, negative errno on failure.
+ */
+static int cam_cre_bus_rd_prepare(struct cam_cre_hw *cam_cre_hw_info,
+	int32_t ctx_id, void *data)
+{
+	int rc = 0;
+	int i, j;
+	uint32_t req_idx;
+	int is_rm_enabled;
+	struct cam_cre_dev_prepare_req *prepare;
+	struct cam_cre_ctx *ctx_data;
+	struct cam_cre_request *cre_request;
+	struct cre_io_buf *io_buf;
+	struct cam_cre_bus_rd_reg *rd_reg;
+	struct cam_cre_bus_rd_reg_val *rd_reg_val;
+	struct cre_reg_buffer *cre_reg_buf;
+	uint32_t *ret;
+	int temp;
+
+	/* %pK for the pointer (was %x); also bound ctx_id */
+	if (ctx_id < 0 || ctx_id >= CRE_CTX_MAX || !data) {
+		CAM_ERR(CAM_CRE, "Invalid data: %d %pK", ctx_id, data);
+		return -EINVAL;
+	}
+	prepare = data;
+
+	ctx_data = prepare->ctx_data;
+	req_idx = prepare->req_idx;
+
+	cre_request = ctx_data->req_list[req_idx];
+	cre_reg_buf = &cre_request->cre_reg_buf;
+
+	CAM_DBG(CAM_CRE, "req_idx = %d req_id = %lld",
+		req_idx, cre_request->request_id);
+	rd_reg = cam_cre_hw_info->bus_rd_reg_offset;
+	rd_reg_val = cam_cre_hw_info->bus_rd_reg_val;
+
+	/* Program every input buffer of every batch */
+	for (i = 0; i < cre_request->num_batch; i++) {
+		for (j = 0; j < cre_request->num_io_bufs[i]; j++) {
+			io_buf = cre_request->io_buf[i][j];
+			if (io_buf->direction != CAM_BUF_INPUT)
+				continue;
+
+			CAM_DBG(CAM_CRE, "batch:%d iobuf:%d direction:%d",
+				i, j, io_buf->direction);
+
+			ret = cam_cre_bus_rd_update(cam_cre_hw_info,
+				ctx_id, cre_reg_buf, i, j, prepare);
+			if (!ret) {
+				rc = -EINVAL;
+				goto end;
+			}
+		}
+	}
+
+	/* Disable RMs which are not enabled */
+	for (i = 0; i < cre_request->num_batch; i++) {
+		for (j = 0; j < rd_reg_val->num_clients; j++) {
+			is_rm_enabled = cam_cre_bus_is_rm_enabled(
+				cre_request, i, j);
+			if (is_rm_enabled < 0) {
+				rc = -EINVAL;
+				goto end;
+			}
+			if (is_rm_enabled)
+				continue;
+
+			ret = cam_cre_bus_rm_disable(cam_cre_hw_info,
+				ctx_id, prepare, i, j,
+				cre_reg_buf);
+			if (!ret) {
+				rc = -EINVAL;
+				goto end;
+			}
+		}
+	}
+
+	/* Go command */
+	temp = 0;
+	temp |= rd_reg_val->go_cmd;
+	temp |= rd_reg_val->static_prg & rd_reg_val->static_prg_mask;
+	update_cre_reg_set(cre_reg_buf,
+		rd_reg->offset + rd_reg->input_if_cmd,
+		temp);
+end:
+	/* propagate failures instead of always returning 0 */
+	return rc;
+}
+
+/* Map an input port id to its in_port_to_rm table index, -EINVAL if absent */
+static int cam_cre_bus_rd_in_port_idx(uint32_t input_port_id)
+{
+	int idx;
+
+	for (idx = 0; idx < CRE_MAX_IN_RES; idx++) {
+		if (bus_rd->in_port_to_rm[idx].input_port_id ==
+			input_port_id)
+			return idx;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * Allocate per-context bus RD state and record the acquired input port
+ * configuration. On failure the per-context allocation is released so
+ * it is not leaked (release is only called after a successful acquire).
+ */
+static int cam_cre_bus_rd_acquire(struct cam_cre_hw *cam_cre_hw_info,
+	int32_t ctx_id, void *data)
+{
+	int rc = 0, i;
+	struct cam_cre_acquire_dev_info *in_acquire;
+	struct cre_bus_rd_ctx *bus_rd_ctx;
+	struct cre_bus_in_port_to_rm *in_port_to_rm;
+	struct cam_cre_bus_rd_reg_val *bus_rd_reg_val;
+	int in_port_idx;
+
+	/* %pK for pointers (was %x) */
+	if (ctx_id < 0 || !data || !cam_cre_hw_info || ctx_id >= CRE_CTX_MAX) {
+		CAM_ERR(CAM_CRE, "Invalid data: %d %pK %pK",
+			ctx_id, data, cam_cre_hw_info);
+		return -EINVAL;
+	}
+
+	bus_rd->bus_rd_ctx[ctx_id] = vzalloc(sizeof(struct cre_bus_rd_ctx));
+	if (!bus_rd->bus_rd_ctx[ctx_id]) {
+		CAM_ERR(CAM_CRE, "Out of memory");
+		return -ENOMEM;
+	}
+
+	bus_rd->bus_rd_ctx[ctx_id]->cre_acquire = data;
+	in_acquire = data;
+	bus_rd_ctx = bus_rd->bus_rd_ctx[ctx_id];
+	bus_rd_ctx->num_in_ports = in_acquire->num_in_res;
+	bus_rd_ctx->security_flag = in_acquire->secure_mode;
+	bus_rd_reg_val = cam_cre_hw_info->bus_rd_reg_val;
+
+	for (i = 0; i < in_acquire->num_in_res; i++) {
+		if (!in_acquire->in_res[i].width)
+			continue;
+
+		CAM_DBG(CAM_CRE, "i = %d format = %u width = %x height = %x",
+			i, in_acquire->in_res[i].format,
+			in_acquire->in_res[i].width,
+			in_acquire->in_res[i].height);
+
+		/*
+		 * NOTE(review): lookup uses the positional id (i + 1), not
+		 * in_res[i].res_id — confirm the two always coincide.
+		 */
+		in_port_idx = cam_cre_bus_rd_in_port_idx(i + 1);
+		if (in_port_idx < 0) {
+			CAM_ERR(CAM_CRE, "Invalid in_port_idx: %d", i + 1);
+			rc = -EINVAL;
+			goto end;
+		}
+
+		in_port_to_rm = &bus_rd->in_port_to_rm[in_port_idx];
+
+		if (!in_port_to_rm->num_rm) {
+			CAM_ERR(CAM_CRE, "Invalid format for Input port");
+			rc = -EINVAL;
+			goto end;
+		}
+
+		bus_rd_ctx->io_port_info.input_port_id[i] =
+			in_acquire->in_res[i].res_id;
+		bus_rd_ctx->io_port_info.input_format_type[i] =
+			in_acquire->in_res[i].format;
+
+		CAM_DBG(CAM_CRE, "i:%d port_id = %u format %u",
+			i, bus_rd_ctx->io_port_info.input_port_id[i],
+			bus_rd_ctx->io_port_info.input_format_type[i]);
+	}
+
+end:
+	if (rc) {
+		/* don't leak the per-context allocation on failure */
+		vfree(bus_rd->bus_rd_ctx[ctx_id]);
+		bus_rd->bus_rd_ctx[ctx_id] = NULL;
+	}
+	return rc;
+}
+
+/*
+ * Write all queued RD (offset, value) pairs to the hardware.
+ * NOTE(review): data and cam_cre_hw_info are dereferenced without NULL
+ * checks, unlike the other handlers — confirm callers guarantee them.
+ */
+static int cam_cre_bus_rd_reg_set_update(struct cam_cre_hw *cam_cre_hw_info,
+	int32_t ctx_id, void *data)
+{
+	int i;
+	uint32_t num_reg_set;
+	struct cre_reg_set *rd_reg_set;
+	struct cam_cre_dev_reg_set_update *reg_set_upd_cmd =
+		(struct cam_cre_dev_reg_set_update *)data;
+
+	num_reg_set = reg_set_upd_cmd->cre_reg_buf.num_rd_reg_set;
+	rd_reg_set = reg_set_upd_cmd->cre_reg_buf.rd_reg_set;
+
+	for (i = 0; i < num_reg_set; i++) {
+		cam_io_w_mb(rd_reg_set[i].value,
+			cam_cre_hw_info->bus_rd_reg_offset->base + rd_reg_set[i].offset);
+	}
+	return 0;
+}
+
+/*
+ * One-time bus RD init: latch the RD register base from the core info
+ * and unmask the RD interrupts.
+ */
+static int cam_cre_bus_rd_init(struct cam_cre_hw *cam_cre_hw_info,
+	int32_t ctx_id, void *data)
+{
+	struct cam_cre_bus_rd_reg_val *bus_rd_reg_val;
+	struct cam_cre_bus_rd_reg *bus_rd_reg;
+	struct cam_cre_dev_init *dev_init = data;
+
+	/* dev_init is dereferenced below, so reject NULL data as well */
+	if (!cam_cre_hw_info || !data) {
+		CAM_ERR(CAM_CRE, "Invalid cam_cre_hw_info");
+		return -EINVAL;
+	}
+
+	bus_rd_reg_val = cam_cre_hw_info->bus_rd_reg_val;
+	bus_rd_reg = cam_cre_hw_info->bus_rd_reg_offset;
+	bus_rd_reg->base =
+		dev_init->core_info->cre_hw_info->cre_hw->bus_rd_reg_offset->base;
+
+	/* enable interrupt mask */
+	cam_io_w_mb(bus_rd_reg_val->irq_mask,
+		cam_cre_hw_info->bus_rd_reg_offset->base + bus_rd_reg->irq_mask);
+
+	return 0;
+}
+
+/*
+ * Probe-time setup: allocate the module-wide bus_rd state and build the
+ * input-port -> read-master mapping from the HW client table.
+ */
+static int cam_cre_bus_rd_probe(struct cam_cre_hw *cam_cre_hw_info,
+	int32_t ctx_id, void *data)
+{
+	int i, k, rm_idx;
+	struct cam_cre_bus_rd_reg_val *bus_rd_reg_val;
+	struct cam_cre_bus_rd_reg *bus_rd_reg;
+	struct cre_bus_in_port_to_rm *in_port_to_rm;
+	uint32_t input_port_idx;
+
+	if (!cam_cre_hw_info) {
+		CAM_ERR(CAM_CRE, "Invalid cam_cre_hw_info");
+		return -EINVAL;
+	}
+
+	bus_rd = kzalloc(sizeof(struct cre_bus_rd), GFP_KERNEL);
+	if (!bus_rd) {
+		CAM_ERR(CAM_CRE, "Out of memory");
+		return -ENOMEM;
+	}
+	bus_rd->cre_hw_info = cam_cre_hw_info;
+	bus_rd_reg_val = cam_cre_hw_info->bus_rd_reg_val;
+	bus_rd_reg = cam_cre_hw_info->bus_rd_reg_offset;
+
+	for (i = 0; i < bus_rd_reg_val->num_clients; i++) {
+		input_port_idx =
+			bus_rd_reg_val->rd_clients[i].input_port_id - 1;
+		/*
+		 * input_port_idx is unsigned: a zero or out-of-range port id
+		 * would underflow and index out of bounds — guard it.
+		 */
+		if (input_port_idx >= CRE_MAX_IN_RES) {
+			CAM_ERR(CAM_CRE, "Invalid input port id: %d",
+				bus_rd_reg_val->rd_clients[i].input_port_id);
+			kfree(bus_rd);
+			bus_rd = NULL;
+			return -EINVAL;
+		}
+		in_port_to_rm = &bus_rd->in_port_to_rm[input_port_idx];
+
+		rm_idx = in_port_to_rm->num_rm;
+		in_port_to_rm->input_port_id =
+			bus_rd_reg_val->rd_clients[i].input_port_id;
+		in_port_to_rm->rm_port_id[rm_idx] =
+			bus_rd_reg_val->rd_clients[i].rm_port_id;
+		in_port_to_rm->num_rm++;
+	}
+
+	for (i = 0; i < CRE_MAX_IN_RES; i++) {
+		in_port_to_rm = &bus_rd->in_port_to_rm[i];
+		CAM_DBG(CAM_CRE, "input port id = %d",
+			in_port_to_rm->input_port_id);
+		CAM_DBG(CAM_CRE, "num_rms = %d", in_port_to_rm->num_rm);
+		for (k = 0; k < in_port_to_rm->num_rm; k++)
+			CAM_DBG(CAM_CRE, "rm port id = %d",
+				in_port_to_rm->rm_port_id[k]);
+	}
+
+	return 0;
+}
+
+/*
+ * Bus RD interrupt handler: reads and clears the RD irq status and
+ * flags consumer violations back to the caller via @irq_data->error.
+ */
+static int cam_cre_bus_rd_isr(struct cam_cre_hw *cam_cre_hw_info,
+	int32_t ctx_id, void *data)
+{
+	uint32_t irq_status;
+	uint32_t violation_status;
+	uint32_t debug_status_0;
+	uint32_t debug_status_1;
+	struct cam_cre_bus_rd_reg *bus_rd_reg;
+	struct cam_cre_bus_rd_reg_val *bus_rd_reg_val;
+	struct cam_cre_irq_data *irq_data = data;
+
+	/* validate both inputs before the first dereference */
+	if (!cam_cre_hw_info || !irq_data) {
+		CAM_ERR(CAM_CRE, "Invalid cam_cre_hw_info");
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_CRE, "error 0x%x", irq_data->error);
+	bus_rd_reg = cam_cre_hw_info->bus_rd_reg_offset;
+	bus_rd_reg_val = cam_cre_hw_info->bus_rd_reg_val;
+
+	/* Read and Clear Top Interrupt status */
+	irq_status = cam_io_r_mb(bus_rd_reg->base + bus_rd_reg->irq_status);
+	cam_io_w_mb(irq_status,
+		bus_rd_reg->base + bus_rd_reg->irq_clear);
+
+	cam_io_w_mb(bus_rd_reg_val->irq_cmd_clear,
+		bus_rd_reg->base + bus_rd_reg->irq_cmd);
+
+	if (irq_status & bus_rd_reg_val->rup_done)
+		CAM_DBG(CAM_CRE, "CRE Read Bus RUP done");
+
+	if (irq_status & bus_rd_reg_val->rd_buf_done)
+		CAM_DBG(CAM_CRE, "CRE Read Bus Buff done");
+
+	if (irq_status & bus_rd_reg_val->cons_violation) {
+		irq_data->error = 1;
+		violation_status = cam_io_r_mb(bus_rd_reg->base +
+			bus_rd_reg->rd_clients[0].cons_violation_status);
+		debug_status_0 = cam_io_r_mb(bus_rd_reg->base +
+			bus_rd_reg->rd_clients[0].debug_status_0);
+		debug_status_1 = cam_io_r_mb(bus_rd_reg->base +
+			bus_rd_reg->rd_clients[0].debug_status_1);
+		CAM_DBG(CAM_CRE, "CRE Read Bus Violation");
+		CAM_DBG(CAM_CRE,
+			"violation status 0x%x debug status 0/1 0x%x/0x%x",
+			violation_status, debug_status_0, debug_status_1);
+	}
+
+	return 0;
+}
+
+/*
+ * Dispatch a bus RD command. PROBE/INIT/ACQUIRE/RELEASE/PREPARE/ISR/
+ * REG_SET_UPDATE are handled; the remaining known commands are
+ * deliberately accepted as no-ops (rc = 0); unknown ones return -EINVAL.
+ */
+int cam_cre_bus_rd_process(struct cam_cre_hw *cam_cre_hw_info,
+	int32_t ctx_id, uint32_t cmd_id, void *data)
+{
+	int rc = -EINVAL;
+
+	switch (cmd_id) {
+	case CRE_HW_PROBE:
+		CAM_DBG(CAM_CRE, "CRE_HW_PROBE: E");
+		rc = cam_cre_bus_rd_probe(cam_cre_hw_info, ctx_id, data);
+		CAM_DBG(CAM_CRE, "CRE_HW_PROBE: X");
+		break;
+	case CRE_HW_INIT:
+		CAM_DBG(CAM_CRE, "CRE_HW_INIT: E");
+		rc = cam_cre_bus_rd_init(cam_cre_hw_info, ctx_id, data);
+		CAM_DBG(CAM_CRE, "CRE_HW_INIT: X");
+		break;
+	case CRE_HW_ACQUIRE:
+		CAM_DBG(CAM_CRE, "CRE_HW_ACQUIRE: E");
+		rc = cam_cre_bus_rd_acquire(cam_cre_hw_info, ctx_id, data);
+		CAM_DBG(CAM_CRE, "CRE_HW_ACQUIRE: X");
+		break;
+	case CRE_HW_RELEASE:
+		CAM_DBG(CAM_CRE, "CRE_HW_RELEASE: E");
+		rc = cam_cre_bus_rd_release(cam_cre_hw_info, ctx_id, data);
+		CAM_DBG(CAM_CRE, "CRE_HW_RELEASE: X");
+		break;
+	case CRE_HW_PREPARE:
+		CAM_DBG(CAM_CRE, "CRE_HW_PREPARE: E");
+		rc = cam_cre_bus_rd_prepare(cam_cre_hw_info, ctx_id, data);
+		CAM_DBG(CAM_CRE, "CRE_HW_PREPARE: X");
+		break;
+	case CRE_HW_ISR:
+		rc = cam_cre_bus_rd_isr(cam_cre_hw_info, 0, data);
+		break;
+	case CRE_HW_REG_SET_UPDATE:
+		rc = cam_cre_bus_rd_reg_set_update(cam_cre_hw_info, 0, data);
+		break;
+	case CRE_HW_DEINIT:
+	case CRE_HW_START:
+	case CRE_HW_STOP:
+	case CRE_HW_FLUSH:
+	case CRE_HW_CLK_UPDATE:
+	case CRE_HW_BW_UPDATE:
+	case CRE_HW_RESET:
+	case CRE_HW_SET_IRQ_CB:
+		rc = 0;
+		CAM_DBG(CAM_CRE, "Unhandled cmds: %d", cmd_id);
+		break;
+	default:
+		CAM_ERR(CAM_CRE, "Unsupported cmd: %d", cmd_id);
+		break;
+	}
+
+	return rc;
+}

+ 98 - 0
drivers/cam_cre/cam_cre_hw_mgr/cre_hw/bus_rd/cre_bus_rd.h

@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CRE_BUS_RD_H
+#define CRE_BUS_RD_H
+
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <media/cam_cre.h>
+#include "cre_hw.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_hw_intf.h"
+#include "cam_soc_util.h"
+#include "cam_cre_hw_mgr.h"
+
+/**
+ * struct cre_bus_rd_io_port_info
+ *
+ * @pixel_pattern:      Pixel pattern
+ * @input_port_id:      Port Id
+ * @input_format_type:  Format type
+ * @latency_buf_size:   Latency buffer size
+ */
+struct cre_bus_rd_io_port_info {
+	uint32_t pixel_pattern[CRE_MAX_IN_RES];
+	uint32_t input_port_id[CRE_MAX_IN_RES];
+	uint32_t input_format_type[CRE_MAX_IN_RES];
+	uint32_t latency_buf_size;
+};
+
+/**
+ * struct cre_bus_rd_io_port_batch
+ *
+ * @num_batch:   Number of batches
+ * @io_port:     Per-batch bus RD IO port info
+ */
+struct cre_bus_rd_io_port_batch {
+	uint32_t num_batch;
+	struct cre_bus_rd_io_port_info io_port[CRE_MAX_BATCH_SIZE];
+};
+
+/**
+ * struct cre_bus_rd_rm
+ *
+ * @rm_port_id:  RM port ID
+ * @format_type: Format type
+ */
+struct cre_bus_rd_rm {
+	uint32_t rm_port_id;
+	uint32_t format_type;
+};
+
+/**
+ * struct cre_bus_rd_ctx
+ *
+ * @cre_acquire:    CRE acquire structure
+ * @security_flag:  security flag
+ * @num_in_ports:   Number of in ports
+ * @io_port_info:   IO port info
+ * @io_port_batch:  IO port info
+ */
+struct cre_bus_rd_ctx {
+	struct cam_cre_acquire_dev_info *cre_acquire;
+	bool security_flag;
+	uint32_t num_in_ports;
+	struct cre_bus_rd_io_port_info io_port_info;
+	struct cre_bus_rd_io_port_batch io_port_batch;
+};
+
+/**
+ * struct cre_bus_in_port_to_rm
+ *
+ * @input_port_id:  Input port ID
+ * @num_rm:         Number of RMs
+ * @rm_port_id:     RM port Id
+ */
+struct cre_bus_in_port_to_rm {
+	uint32_t input_port_id;
+	uint32_t num_rm;
+	uint32_t rm_port_id[CRE_MAX_IN_RES];
+};
+
+/**
+ * struct cre_bus_rd
+ *
+ * @cre_hw_info:     CRE hardware info
+ * @in_port_to_rm:   IO port to RM mapping
+ * @bus_rd_ctx:      RM context
+ * @reset_complete:  Completion for bus RD reset (TODO confirm signaller)
+ */
+struct cre_bus_rd {
+	struct cam_cre_hw *cre_hw_info;
+	struct cre_bus_in_port_to_rm in_port_to_rm[CRE_MAX_IN_RES];
+	struct cre_bus_rd_ctx *bus_rd_ctx[CRE_CTX_MAX];
+	struct completion reset_complete;
+};
+#endif /* CRE_BUS_RD_H */

+ 613 - 0
drivers/cam_cre/cam_cre_hw_mgr/cre_hw/bus_wr/cre_bus_wr.c

@@ -0,0 +1,613 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/iopoll.h>
+#include <media/cam_cre.h>
+#include "cam_io_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cre_core.h"
+#include "cre_soc.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+#include "cre_hw.h"
+#include "cre_dev_intf.h"
+#include "cre_bus_wr.h"
+
+/* Module-wide bus WR state; allocated once at CRE_HW_PROBE time. */
+static struct cre_bus_wr *wr_info;
+
+/*
+ * Append one (offset, value) pair to the request's WR register set.
+ * Multi-statement macro: cre_reg_buf is evaluated several times, so pass
+ * only a plain pointer expression with no side effects.
+ */
+#define update_cre_reg_set(cre_reg_buf, off, val) \
+	do {                                           \
+		cre_reg_buf->wr_reg_set[cre_reg_buf->num_wr_reg_set].offset = (off); \
+		cre_reg_buf->wr_reg_set[cre_reg_buf->num_wr_reg_set].value = (val); \
+		cre_reg_buf->num_wr_reg_set++; \
+	} while (0)
+
+/*
+ * Find the io-buffer index of the output buffer with resource type
+ * @output_port_id inside batch @batch_idx of @cre_request.
+ * Returns the index, or -EINVAL if not present / batch out of range.
+ */
+static int cam_cre_bus_en_port_idx(
+	struct cam_cre_request *cre_request,
+	uint32_t batch_idx,
+	uint32_t output_port_id)
+{
+	int idx;
+	struct cre_io_buf *buf;
+
+	if (batch_idx >= CRE_MAX_BATCH_SIZE) {
+		CAM_ERR(CAM_CRE, "Invalid batch idx: %d", batch_idx);
+		return -EINVAL;
+	}
+
+	for (idx = 0; idx < cre_request->num_io_bufs[batch_idx]; idx++) {
+		buf = cre_request->io_buf[batch_idx][idx];
+		if (buf->direction == CAM_BUF_OUTPUT &&
+			buf->resource_type == output_port_id)
+			return idx;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * Map an output port id to its slot in wr_info->out_port_to_wm[].
+ * Returns the slot index, or -EINVAL if no slot matches.
+ */
+static int cam_cre_bus_wr_out_port_idx(uint32_t output_port_id)
+{
+	int idx = 0;
+
+	while (idx < CRE_MAX_OUT_RES) {
+		if (wr_info->out_port_to_wm[idx].output_port_id ==
+			output_port_id)
+			return idx;
+		idx++;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * Flush every queued (offset, value) pair of the request's WR register
+ * set into the bus WR register block. Always returns 0.
+ */
+static int cam_cre_bus_wr_reg_set_update(struct cam_cre_hw *cam_cre_hw_info,
+	int32_t ctx_id, void *data)
+{
+	struct cam_cre_dev_reg_set_update *upd = data;
+	struct cre_reg_set *set = upd->cre_reg_buf.wr_reg_set;
+	uint32_t count = upd->cre_reg_buf.num_wr_reg_set;
+	uint32_t idx;
+
+	for (idx = 0; idx < count; idx++)
+		cam_io_w_mb(set[idx].value,
+			cam_cre_hw_info->bus_wr_reg_offset->base +
+			set[idx].offset);
+
+	return 0;
+}
+
+/* Free and clear the per-context bus WR state for @ctx_id. */
+static int cam_cre_bus_wr_release(struct cam_cre_hw *cam_cre_hw_info,
+	int32_t ctx_id, void *data)
+{
+	if (ctx_id < 0 || ctx_id >= CRE_CTX_MAX) {
+		CAM_ERR(CAM_CRE, "Invalid data: %d", ctx_id);
+		return -EINVAL;
+	}
+
+	/* vfree(NULL) is a no-op, so releasing an unacquired ctx is safe */
+	vfree(wr_info->bus_wr_ctx[ctx_id]);
+	wr_info->bus_wr_ctx[ctx_id] = NULL;
+
+	return 0;
+}
+
+/*
+ * cam_cre_bus_wr_update - queue WR client register writes for one output
+ * io buffer (batch @batch_idx, buffer @io_idx) into the request's
+ * register buffer.
+ *
+ * Per plane, this queues: client core cfg, image address, buffer size,
+ * x_init, stride, packer cfg and debug status cfg.
+ *
+ * Returns @cre_reg_buf on success, NULL on invalid arguments.
+ */
+static uint32_t *cam_cre_bus_wr_update(struct cam_cre_hw *cam_cre_hw_info,
+	int32_t ctx_id, struct cam_cre_dev_prepare_req *prepare,
+	int batch_idx, int io_idx,
+	struct cre_reg_buffer *cre_reg_buf)
+{
+	int k, out_port_idx;
+	uint32_t comb_idx = 0;
+	uint32_t req_idx;
+	uint32_t temp = 0;
+	uint32_t wm_port_id;
+	struct cam_cre_ctx *ctx_data;
+	struct cam_cre_request *cre_request;
+	struct cre_io_buf *io_buf;
+	struct cam_cre_bus_wr_reg *wr_reg;
+	struct cam_cre_bus_wr_client_reg *wr_reg_client;
+	struct cam_cre_bus_wr_reg_val *wr_reg_val;
+	struct cam_cre_bus_wr_client_reg_val *wr_res_val_client;
+	struct cre_bus_out_port_to_wm *out_port_to_wm;
+
+	/* Bound-check ctx_id on both ends; use %pK for kernel pointers */
+	if (ctx_id < 0 || ctx_id >= CRE_CTX_MAX || !prepare) {
+		CAM_ERR(CAM_CRE, "Invalid data: %d %pK", ctx_id, prepare);
+		return NULL;
+	}
+
+	if (batch_idx >= CRE_MAX_BATCH_SIZE) {
+		CAM_ERR(CAM_CRE, "Invalid batch idx: %d", batch_idx);
+		return NULL;
+	}
+
+	if (io_idx >= CRE_MAX_IO_BUFS) {
+		CAM_ERR(CAM_CRE, "Invalid IO idx: %d", io_idx);
+		return NULL;
+	}
+
+	ctx_data = prepare->ctx_data;
+	req_idx = prepare->req_idx;
+
+	cre_request = ctx_data->req_list[req_idx];
+	wr_reg = cam_cre_hw_info->bus_wr_reg_offset;
+	wr_reg_val = cam_cre_hw_info->bus_wr_reg_val;
+
+	/* Format string fixed: stray "offset = %d" had no matching argument */
+	CAM_DBG(CAM_CRE, "req_idx = %d req_id = %lld",
+		req_idx, cre_request->request_id);
+
+	io_buf = cre_request->io_buf[batch_idx][io_idx];
+	CAM_DBG(CAM_CRE, "batch = %d io buf num = %d dir = %d rsc %d",
+		batch_idx, io_idx, io_buf->direction, io_buf->resource_type);
+
+	out_port_idx =
+		cam_cre_bus_wr_out_port_idx(io_buf->resource_type);
+	if (out_port_idx < 0) {
+		CAM_ERR(CAM_CRE, "Invalid idx for rsc type: %d",
+			io_buf->resource_type);
+		return NULL;
+	}
+	out_port_to_wm = &wr_info->out_port_to_wm[out_port_idx];
+
+	for (k = 0; k < io_buf->num_planes; k++) {
+		CAM_DBG(CAM_CRE, "comb_idx = %d p_idx = %d",
+			comb_idx, k);
+		/* frame level info */
+		wm_port_id = out_port_to_wm->wm_port_id[k];
+		wr_reg_client = &wr_reg->wr_clients[wm_port_id];
+		wr_res_val_client = &wr_reg_val->wr_clients[wm_port_id];
+
+		/* Core cfg: enable, Mode */
+		temp = 0;
+		temp |= ((wr_res_val_client->mode &
+			wr_res_val_client->mode_mask) <<
+			wr_res_val_client->mode_shift);
+		update_cre_reg_set(cre_reg_buf,
+			wr_reg->offset + wr_reg_client->client_cfg,
+			temp);
+
+		/* Address of the Image */
+		update_cre_reg_set(cre_reg_buf,
+			wr_reg->offset + wr_reg_client->img_addr,
+			io_buf->p_info[k].iova_addr);
+
+		/* Buffer size: width in low bits, height shifted in */
+		temp = 0;
+		temp = io_buf->p_info[k].width;
+		temp |= (io_buf->p_info[k].height &
+				wr_res_val_client->height_mask) <<
+				wr_res_val_client->height_shift;
+		update_cre_reg_set(cre_reg_buf,
+			wr_reg->offset + wr_reg_client->img_cfg_0,
+			temp);
+
+		update_cre_reg_set(cre_reg_buf,
+			wr_reg->offset + wr_reg_client->img_cfg_1,
+			io_buf->p_info[k].x_init);
+
+		/* stride */
+		update_cre_reg_set(cre_reg_buf,
+			wr_reg->offset + wr_reg_client->img_cfg_2,
+			io_buf->p_info[k].stride);
+
+		/* pack cfg : Format and alignment */
+		temp = 0;
+		temp |= ((io_buf->p_info[k].format &
+			wr_res_val_client->format_mask) <<
+			wr_res_val_client->format_shift);
+		temp |= ((io_buf->p_info[k].alignment &
+			wr_res_val_client->alignment_mask) <<
+			wr_res_val_client->alignment_shift);
+		update_cre_reg_set(cre_reg_buf,
+			wr_reg->offset + wr_reg_client->packer_cfg,
+			temp);
+		/* Update debug status CFG */
+		temp = 0xFFFF;
+		update_cre_reg_set(cre_reg_buf,
+			wr_reg->offset + wr_reg_client->debug_status_cfg,
+			temp);
+	}
+
+	return (uint32_t *)cre_reg_buf;
+}
+
+/*
+ * cam_cre_bus_wm_disable - queue a zero core-cfg write for every WM of
+ * the out_port_to_wm slot @io_idx, disabling clients whose output port
+ * is not used by this request.
+ *
+ * Returns @cre_reg_buf on success, NULL on invalid arguments.
+ */
+static uint32_t *cam_cre_bus_wm_disable(struct cam_cre_hw *cam_cre_hw_info,
+	int32_t ctx_id, struct cam_cre_dev_prepare_req *prepare,
+	int batch_idx, int io_idx,
+	struct cre_reg_buffer *cre_reg_buf)
+{
+	int k;
+	uint32_t num_wm_ports;
+	uint32_t req_idx;
+	uint32_t wm_port_id;
+	struct cam_cre_bus_wr_reg *wr_reg;
+	struct cre_bus_out_port_to_wm *out_port_to_wm;
+	struct cam_cre_bus_wr_client_reg *wr_reg_client;
+
+	/* Bound-check ctx_id on both ends; use %pK for kernel pointers */
+	if (ctx_id < 0 || ctx_id >= CRE_CTX_MAX || !prepare) {
+		CAM_ERR(CAM_CRE, "Invalid data: %d %pK", ctx_id, prepare);
+		return NULL;
+	}
+
+	if (batch_idx >= CRE_MAX_BATCH_SIZE) {
+		CAM_ERR(CAM_CRE, "Invalid batch idx: %d", batch_idx);
+		return NULL;
+	}
+
+	/* io_idx indexes out_port_to_wm[], so bound it against that table */
+	if (io_idx < 0 || io_idx >= CRE_MAX_OUT_RES) {
+		CAM_ERR(CAM_CRE, "Invalid out port idx: %d", io_idx);
+		return NULL;
+	}
+
+	req_idx = prepare->req_idx;
+	wr_reg = cam_cre_hw_info->bus_wr_reg_offset;
+
+	CAM_DBG(CAM_CRE,
+		"req_idx = %d out_idx %d b %d",
+		req_idx, io_idx, batch_idx);
+
+	out_port_to_wm = &wr_info->out_port_to_wm[io_idx];
+	num_wm_ports = out_port_to_wm->num_wm;
+
+	for (k = 0; k < num_wm_ports; k++) {
+		/* frame level info */
+		wm_port_id = out_port_to_wm->wm_port_id[k];
+		wr_reg_client = &wr_reg->wr_clients[wm_port_id];
+
+		/* Core cfg = 0 disables the write client */
+		update_cre_reg_set(cre_reg_buf,
+			wr_reg->offset + wr_reg_client->client_cfg,
+			0);
+	}
+
+	return (uint32_t *)cre_reg_buf;
+}
+
+/*
+ * cam_cre_bus_wr_prepare - build the WR register set for a request:
+ * first queue writes for every output buffer, then disable the WM
+ * clients whose output ports the request does not use.
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+static int cam_cre_bus_wr_prepare(struct cam_cre_hw *cam_cre_hw_info,
+	int32_t ctx_id, void *data)
+{
+	int rc = 0;
+	int i, j = 0;
+	uint32_t req_idx;
+	struct cam_cre_dev_prepare_req *prepare;
+	struct cam_cre_ctx *ctx_data;
+	struct cam_cre_request *cre_request;
+	struct cre_io_buf *io_buf;
+	int io_buf_idx;
+	struct cre_reg_buffer *cre_reg_buf;
+	uint32_t *ret;
+
+	/* Bound-check ctx_id on both ends; use %pK for kernel pointers */
+	if (ctx_id < 0 || ctx_id >= CRE_CTX_MAX || !data) {
+		CAM_ERR(CAM_CRE, "Invalid data: %d %pK", ctx_id, data);
+		return -EINVAL;
+	}
+	prepare = data;
+	ctx_data = prepare->ctx_data;
+	req_idx = prepare->req_idx;
+
+	cre_request = ctx_data->req_list[req_idx];
+	cre_reg_buf = &cre_request->cre_reg_buf;
+
+	/* Format string fixed: stray "offset = %d" had no matching argument */
+	CAM_DBG(CAM_CRE, "req_idx = %d req_id = %lld",
+		req_idx, cre_request->request_id);
+
+	for (i = 0; i < cre_request->num_batch; i++) {
+		for (j = 0; j < cre_request->num_io_bufs[i]; j++) {
+			io_buf = cre_request->io_buf[i][j];
+			CAM_DBG(CAM_CRE, "batch = %d io buf num = %d dir = %d",
+				i, j, io_buf->direction);
+			if (io_buf->direction != CAM_BUF_OUTPUT)
+				continue;
+
+			ret = cam_cre_bus_wr_update(cam_cre_hw_info,
+				ctx_id, prepare, i, j,
+				cre_reg_buf);
+			if (!ret) {
+				rc = -EINVAL;
+				goto end;
+			}
+		}
+	}
+
+	/*
+	 * Disable WMs which are not enabled.
+	 * NOTE(review): assumes output port ids occupy the inclusive range
+	 * [CRE_MAX_IN_RES, CRE_MAX_OUT_RES] — confirm against cam_cre.h.
+	 */
+	for (i = 0; i < cre_request->num_batch; i++) {
+		for (j = CRE_MAX_IN_RES; j <= CRE_MAX_OUT_RES; j++) {
+			io_buf_idx = cam_cre_bus_en_port_idx(cre_request, i, j);
+			if (io_buf_idx >= 0)
+				continue;
+
+			io_buf_idx = cam_cre_bus_wr_out_port_idx(j);
+			if (io_buf_idx < 0) {
+				CAM_ERR(CAM_CRE, "Invalid idx for rsc type:%d",
+					j);
+				/* single exit path, consistent with above */
+				rc = io_buf_idx;
+				goto end;
+			}
+			ret = cam_cre_bus_wm_disable(cam_cre_hw_info,
+				ctx_id, prepare, i, io_buf_idx,
+				cre_reg_buf);
+			if (!ret) {
+				rc = -EINVAL;
+				goto end;
+			}
+		}
+	}
+
+end:
+	return rc;
+}
+
+/*
+ * cam_cre_bus_wr_acquire - allocate per-context bus WR state and map
+ * each requested output resource to a WM slot.
+ *
+ * On any failure the just-allocated context is freed again so a ctx
+ * that never successfully acquired does not leak until release.
+ */
+static int cam_cre_bus_wr_acquire(struct cam_cre_hw *cam_cre_hw_info,
+	int32_t ctx_id, void *data)
+{
+	int rc = 0, i;
+	struct cam_cre_acquire_dev_info *in_acquire;
+	struct cre_bus_wr_ctx *bus_wr_ctx;
+	struct cre_bus_out_port_to_wm *out_port_to_wr;
+	int out_port_idx;
+
+	/* %pK for kernel pointers instead of %x */
+	if (ctx_id < 0 || !data || ctx_id >= CRE_CTX_MAX) {
+		CAM_ERR(CAM_CRE, "Invalid data: %d %pK", ctx_id, data);
+		return -EINVAL;
+	}
+
+	wr_info->bus_wr_ctx[ctx_id] = vzalloc(sizeof(struct cre_bus_wr_ctx));
+	if (!wr_info->bus_wr_ctx[ctx_id]) {
+		CAM_ERR(CAM_CRE, "Out of memory");
+		return -ENOMEM;
+	}
+
+	wr_info->bus_wr_ctx[ctx_id]->cre_acquire = data;
+	in_acquire = data;
+	bus_wr_ctx = wr_info->bus_wr_ctx[ctx_id];
+	bus_wr_ctx->num_out_ports = in_acquire->num_out_res;
+	bus_wr_ctx->security_flag = in_acquire->secure_mode;
+
+	for (i = 0; i < in_acquire->num_out_res; i++) {
+		/* Skip holes in the output resource list */
+		if (!in_acquire->out_res[i].width)
+			continue;
+
+		CAM_DBG(CAM_CRE, "i = %d format = %u width = %x height = %x",
+			i, in_acquire->out_res[i].format,
+			in_acquire->out_res[i].width,
+			in_acquire->out_res[i].height);
+
+		out_port_idx =
+		cam_cre_bus_wr_out_port_idx(in_acquire->out_res[i].res_id);
+		if (out_port_idx < 0) {
+			CAM_DBG(CAM_CRE, "Invalid out_port_idx: %d",
+				in_acquire->out_res[i].res_id);
+			rc = -EINVAL;
+			goto end;
+		}
+		out_port_to_wr = &wr_info->out_port_to_wm[out_port_idx];
+		if (!out_port_to_wr->num_wm) {
+			CAM_DBG(CAM_CRE, "Invalid format for Input port");
+			rc = -EINVAL;
+			goto end;
+		}
+
+		bus_wr_ctx->io_port_info.output_port_id[i] =
+			in_acquire->out_res[i].res_id;
+		bus_wr_ctx->io_port_info.output_format_type[i] =
+			in_acquire->out_res[i].format;
+
+		CAM_DBG(CAM_CRE, "i:%d port_id = %u",
+			i, bus_wr_ctx->io_port_info.output_port_id[i]);
+	}
+
+end:
+	if (rc) {
+		/* Don't leak the context on a failed acquire */
+		vfree(wr_info->bus_wr_ctx[ctx_id]);
+		wr_info->bus_wr_ctx[ctx_id] = NULL;
+	}
+	return rc;
+}
+
+/*
+ * cam_cre_bus_wr_init - latch the WR register base from the core init
+ * data and program the two bus WR irq masks.
+ */
+static int cam_cre_bus_wr_init(struct cam_cre_hw *cam_cre_hw_info,
+	int32_t ctx_id, void *data)
+{
+	struct cam_cre_bus_wr_reg_val *bus_wr_reg_val;
+	struct cam_cre_bus_wr_reg *bus_wr_reg;
+	struct cam_cre_dev_init *dev_init = data;
+
+	/* data is dereferenced below, so validate it as well */
+	if (!cam_cre_hw_info || !dev_init) {
+		CAM_ERR(CAM_CRE, "Invalid cam_cre_hw_info");
+		return -EINVAL;
+	}
+
+	wr_info->cre_hw_info = cam_cre_hw_info;
+	bus_wr_reg_val = cam_cre_hw_info->bus_wr_reg_val;
+	bus_wr_reg = cam_cre_hw_info->bus_wr_reg_offset;
+	bus_wr_reg->base = dev_init->core_info->cre_hw_info->cre_bus_wr_base;
+
+	/* Enable the bus WR interrupts */
+	cam_io_w_mb(bus_wr_reg_val->irq_mask_0,
+		cam_cre_hw_info->bus_wr_reg_offset->base +
+		bus_wr_reg->irq_mask_0);
+	cam_io_w_mb(bus_wr_reg_val->irq_mask_1,
+		cam_cre_hw_info->bus_wr_reg_offset->base +
+		bus_wr_reg->irq_mask_1);
+
+	return 0;
+}
+
+/*
+ * cam_cre_bus_wr_probe - allocate module state and build the
+ * output-port-id -> WM-client mapping table from the per-SoC reg vals.
+ */
+static int cam_cre_bus_wr_probe(struct cam_cre_hw *cam_cre_hw_info,
+	int32_t ctx_id, void *data)
+{
+	int i, k;
+	struct cam_cre_bus_wr_reg_val *bus_wr_reg_val;
+	struct cre_bus_out_port_to_wm *out_port_to_wm;
+	uint32_t output_port_idx;
+	uint32_t wm_idx;
+
+	if (!cam_cre_hw_info) {
+		CAM_ERR(CAM_CRE, "Invalid cam_cre_hw_info");
+		return -EINVAL;
+	}
+	wr_info = kzalloc(sizeof(struct cre_bus_wr), GFP_KERNEL);
+	if (!wr_info) {
+		CAM_ERR(CAM_CRE, "Out of memory");
+		return -ENOMEM;
+	}
+
+	wr_info->cre_hw_info = cam_cre_hw_info;
+	bus_wr_reg_val = cam_cre_hw_info->bus_wr_reg_val;
+
+	for (i = 0; i < bus_wr_reg_val->num_clients; i++) {
+		output_port_idx =
+			bus_wr_reg_val->wr_clients[i].output_port_id - 1;
+		/*
+		 * Guard the table write: a port id of 0 would wrap the
+		 * unsigned index to UINT_MAX and corrupt memory.
+		 */
+		if (output_port_idx >= CRE_MAX_OUT_RES) {
+			CAM_ERR(CAM_CRE, "Invalid output port id: %d",
+				bus_wr_reg_val->wr_clients[i].output_port_id);
+			continue;
+		}
+		out_port_to_wm = &wr_info->out_port_to_wm[output_port_idx];
+		wm_idx = out_port_to_wm->num_wm;
+		out_port_to_wm->output_port_id =
+			bus_wr_reg_val->wr_clients[i].output_port_id;
+		out_port_to_wm->wm_port_id[wm_idx] =
+			bus_wr_reg_val->wr_clients[i].wm_port_id;
+		out_port_to_wm->num_wm++;
+	}
+
+	/* Dump the final mapping (indentation fixed for readability) */
+	for (i = 0; i < CRE_MAX_OUT_RES; i++) {
+		out_port_to_wm = &wr_info->out_port_to_wm[i];
+		CAM_DBG(CAM_CRE, "output port id = %d",
+			out_port_to_wm->output_port_id);
+		CAM_DBG(CAM_CRE, "num_wms = %d",
+			out_port_to_wm->num_wm);
+		for (k = 0; k < out_port_to_wm->num_wm; k++) {
+			CAM_DBG(CAM_CRE, "wm port id = %d",
+				out_port_to_wm->wm_port_id[k]);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * cam_cre_bus_wr_isr - read and clear the bus WR irq status, flag
+ * violations as errors and propagate client buf-done to irq_data.
+ */
+static int cam_cre_bus_wr_isr(struct cam_cre_hw *cam_cre_hw_info,
+	int32_t ctx_id, void *data)
+{
+	uint32_t irq_status_0, irq_status_1;
+	struct cam_cre_bus_wr_reg *bus_wr_reg;
+	struct cam_cre_bus_wr_reg_val *bus_wr_reg_val;
+	struct cam_cre_irq_data *irq_data = data;
+	uint32_t debug_status_0;
+	uint32_t debug_status_1;
+	uint32_t img_violation_status;
+	uint32_t violation_status;
+
+	if (!cam_cre_hw_info || !irq_data) {
+		CAM_ERR(CAM_CRE, "Invalid cam_cre_hw_info");
+		return -EINVAL;
+	}
+
+	bus_wr_reg = cam_cre_hw_info->bus_wr_reg_offset;
+	bus_wr_reg_val = cam_cre_hw_info->bus_wr_reg_val;
+
+	/* Read and Clear Top Interrupt status */
+	irq_status_0 = cam_io_r_mb(bus_wr_reg->base + bus_wr_reg->irq_status_0);
+	irq_status_1 = cam_io_r_mb(bus_wr_reg->base + bus_wr_reg->irq_status_1);
+	cam_io_w_mb(irq_status_0,
+		bus_wr_reg->base + bus_wr_reg->irq_clear_0);
+	cam_io_w_mb(irq_status_1,
+		bus_wr_reg->base + bus_wr_reg->irq_clear_1);
+
+	/* Latch the clear via the irq command register */
+	cam_io_w_mb(bus_wr_reg_val->irq_cmd_clear,
+		bus_wr_reg->base + bus_wr_reg->irq_cmd);
+
+	if (irq_status_0 & bus_wr_reg_val->cons_violation) {
+		irq_data->error = 1;
+		CAM_ERR(CAM_CRE, "cre bus wr cons_violation");
+	}
+
+	if ((irq_status_0 & bus_wr_reg_val->violation) ||
+		(irq_status_0 & bus_wr_reg_val->img_size_violation)) {
+		irq_data->error = 1;
+		img_violation_status = cam_io_r_mb(bus_wr_reg->base +
+			bus_wr_reg->image_size_violation_status);
+		violation_status = cam_io_r_mb(bus_wr_reg->base +
+			bus_wr_reg->violation_status);
+
+		debug_status_0 = cam_io_r_mb(bus_wr_reg->base +
+			bus_wr_reg->wr_clients[0].debug_status_0);
+		debug_status_1 = cam_io_r_mb(bus_wr_reg->base +
+			bus_wr_reg->wr_clients[0].debug_status_1);
+		CAM_ERR(CAM_CRE,
+			"violation status 0x%x 0x%x debug status 0/1 0x%x/0x%x",
+			violation_status, img_violation_status,
+			debug_status_0, debug_status_1);
+	}
+
+	if (irq_status_1 & bus_wr_reg_val->client_buf_done) {
+		/*
+		 * Propagate buf-done: cam_cre_irq() only invokes the hw mgr
+		 * callback when irq_data->wr_buf_done (or error) is set.
+		 */
+		irq_data->wr_buf_done = 1;
+		CAM_INFO(CAM_CRE, "Client 0 Buff done");
+	}
+
+	return 0;
+}
+
+/*
+ * cam_cre_bus_wr_process - bus WR command dispatcher called by the CRE
+ * core layer. Unknown commands log an error; unhandled-but-known
+ * commands return 0 by design.
+ * NOTE(review): CRE_HW_REG_SET_UPDATE and CRE_HW_ISR are forwarded with
+ * ctx 0 rather than @ctx_id — confirm the single-context assumption.
+ */
+int cam_cre_bus_wr_process(struct cam_cre_hw *cam_cre_hw_info,
+	int32_t ctx_id, uint32_t cmd_id, void *data)
+{
+	int rc = 0;
+
+	switch (cmd_id) {
+	case CRE_HW_PROBE:
+		CAM_DBG(CAM_CRE, "CRE_HW_PROBE: E");
+		rc = cam_cre_bus_wr_probe(cam_cre_hw_info, ctx_id, data);
+		CAM_DBG(CAM_CRE, "CRE_HW_PROBE: X");
+		break;
+	case CRE_HW_INIT:
+		CAM_DBG(CAM_CRE, "CRE_HW_INIT: E");
+		rc = cam_cre_bus_wr_init(cam_cre_hw_info, ctx_id, data);
+		CAM_DBG(CAM_CRE, "CRE_HW_INIT: X");
+		break;
+	case CRE_HW_ACQUIRE:
+		CAM_DBG(CAM_CRE, "CRE_HW_ACQUIRE: E");
+		rc = cam_cre_bus_wr_acquire(cam_cre_hw_info, ctx_id, data);
+		CAM_DBG(CAM_CRE, "CRE_HW_ACQUIRE: X");
+		break;
+	case CRE_HW_RELEASE:
+		CAM_DBG(CAM_CRE, "CRE_HW_RELEASE: E");
+		rc = cam_cre_bus_wr_release(cam_cre_hw_info, ctx_id, data);
+		CAM_DBG(CAM_CRE, "CRE_HW_RELEASE: X");
+		break;
+	case CRE_HW_PREPARE:
+		CAM_DBG(CAM_CRE, "CRE_HW_PREPARE: E");
+		rc = cam_cre_bus_wr_prepare(cam_cre_hw_info, ctx_id, data);
+		CAM_DBG(CAM_CRE, "CRE_HW_PREPARE: X");
+		break;
+	case CRE_HW_REG_SET_UPDATE:
+		rc = cam_cre_bus_wr_reg_set_update(cam_cre_hw_info, 0, data);
+		break;
+	case CRE_HW_DEINIT:
+	case CRE_HW_START:
+	case CRE_HW_STOP:
+	case CRE_HW_FLUSH:
+	case CRE_HW_CLK_UPDATE:
+	case CRE_HW_BW_UPDATE:
+	case CRE_HW_RESET:
+	case CRE_HW_SET_IRQ_CB:
+		/* No bus WR action needed for these commands */
+		rc = 0;
+		CAM_DBG(CAM_CRE, "Unhandled cmds: %d", cmd_id);
+		break;
+	case CRE_HW_ISR:
+		rc = cam_cre_bus_wr_isr(cam_cre_hw_info, 0, data);
+		break;
+	default:
+		CAM_ERR(CAM_CRE, "Unsupported cmd: %d", cmd_id);
+		break;
+	}
+
+	return rc;
+}

+ 98 - 0
drivers/cam_cre/cam_cre_hw_mgr/cre_hw/bus_wr/cre_bus_wr.h

@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CRE_BUS_WR_H
+#define CRE_BUS_WR_H
+
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <media/cam_cre.h>
+#include "cre_hw.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_hw_intf.h"
+#include "cam_soc_util.h"
+#include "cam_cre_hw_mgr.h"
+
+/**
+ * struct cre_bus_wr_io_port_info
+ *
+ * @num_frames_cmds:     Number of frame commands
+ * @go_cmd_addr:         GO command address
+ * @go_cmd_len:          GO command length
+ * @output_port_id:      Output port ids, one per output resource
+ * @output_format_type:  Output format types, one per output resource
+ */
+struct cre_bus_wr_io_port_info {
+	uint32_t  num_frames_cmds;
+	uint32_t *go_cmd_addr;
+	uint32_t  go_cmd_len;
+	uint32_t  output_port_id[CRE_MAX_OUT_RES];
+	uint32_t  output_format_type[CRE_MAX_OUT_RES];
+};
+
+/**
+ * struct cre_bus_wr_io_port_batch
+ *
+ * @num_batch: Number of batches
+ * @io_port:   Per-batch WR IO port info
+ */
+struct cre_bus_wr_io_port_batch {
+	uint32_t num_batch;
+	struct cre_bus_wr_io_port_info io_port[CRE_MAX_BATCH_SIZE];
+};
+
+/**
+ * struct cre_bus_wr_wm
+ *
+ * @wm_port_id:  WM port ID
+ * @format_type: Format type
+ */
+struct cre_bus_wr_wm {
+	uint32_t wm_port_id;
+	uint32_t format_type;
+};
+
+/**
+ * struct cre_bus_out_port_to_wm
+ *
+ * @output_port_id: Output port ID
+ * @num_wm:         Number of WMs
+ * @wm_port_id:     WM port Id
+ */
+struct cre_bus_out_port_to_wm {
+	uint32_t output_port_id;
+	uint32_t num_wm;
+	uint32_t wm_port_id[CRE_MAX_OUT_RES];
+};
+
+/**
+ * struct cre_bus_wr_ctx - per-context bus WR state
+ *
+ * @cre_acquire:       CRE acquire structure
+ * @security_flag:     security flag
+ * @num_out_ports:     Number of out ports
+ * @io_port_info:      IO port info
+ * @io_port_batch:     Per-batch IO port info
+ */
+struct cre_bus_wr_ctx {
+	struct cam_cre_acquire_dev_info *cre_acquire;
+	bool security_flag;
+	uint32_t num_out_ports;
+	struct cre_bus_wr_io_port_info io_port_info;
+	struct cre_bus_wr_io_port_batch io_port_batch;
+};
+
+/**
+ * struct cre_bus_wr - module-wide bus WR state
+ *
+ * @cre_hw_info:    CRE hardware info
+ * @out_port_to_wm: IO port to WM mapping
+ * @bus_wr_ctx:     WM context, one slot per CRE context
+ */
+struct cre_bus_wr {
+	struct cam_cre_hw *cre_hw_info;
+	struct cre_bus_out_port_to_wm out_port_to_wm[CRE_MAX_OUT_RES];
+	struct cre_bus_wr_ctx *bus_wr_ctx[CRE_CTX_MAX];
+};
+
+#endif /* CRE_BUS_WR_H */

+ 580 - 0
drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_core.c

@@ -0,0 +1,580 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+
+#include "cam_io_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cre_core.h"
+#include "cre_soc.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+#include "cam_cre_hw_intf.h"
+#include "cam_cre_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+#include "cre_dev_intf.h"
+#include "cam_compat.h"
+#include "cre_bus_wr.h"
+#include "cre_bus_rd.h"
+
+#define CAM_CRE_RESET_TIMEOUT msecs_to_jiffies(500)
+
+/* Top IRQ status bits routed to the FE (bus RD) and WE (bus WR) blocks */
+#define CAM_CRE_FE_IRQ 0x4
+#define CAM_CRE_WE_IRQ 0x2
+
+/*
+ * IRQ scratch data filled in hard-irq context by cam_cre_irq().
+ * NOTE(review): non-static and unguarded; appears to assume a single CRE
+ * instance — confirm before supporting multiple devices.
+ */
+struct cam_cre_irq_data irq_data;
+
+/*
+ * cam_cre_caps_vote - apply any valid AHB/AXI CPAS votes.
+ *
+ * Fixed: the original overwrote the AHB-vote failure rc with the result
+ * of the AXI call, silently losing the first error. Each vote is now
+ * checked individually.
+ */
+static int cam_cre_caps_vote(struct cam_cre_device_core_info *core_info,
+	struct cam_cre_dev_bw_update *cpas_vote)
+{
+	int rc = 0;
+
+	if (cpas_vote->ahb_vote_valid) {
+		rc = cam_cpas_update_ahb_vote(core_info->cpas_handle,
+			&cpas_vote->ahb_vote);
+		if (rc) {
+			CAM_ERR(CAM_CRE, "cpas vote is failed: %d", rc);
+			return rc;
+		}
+	}
+
+	if (cpas_vote->axi_vote_valid) {
+		rc = cam_cpas_update_axi_vote(core_info->cpas_handle,
+			&cpas_vote->axi_vote);
+		if (rc)
+			CAM_ERR(CAM_CRE, "cpas vote is failed: %d", rc);
+	}
+
+	return rc;
+}
+
+/*
+ * cam_cre_get_hw_caps - report the CRE major/minor hardware version,
+ * decoded from the cached hw_version via the top register masks.
+ */
+int cam_cre_get_hw_caps(void *hw_priv, void *get_hw_cap_args,
+	uint32_t arg_size)
+{
+	struct cam_hw_info *cre_dev = hw_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_cre_device_core_info *core_info = NULL;
+	struct cam_cre_hw_ver *cre_hw_ver;
+	struct cam_cre_top_reg_val *top_reg_val;
+
+	if (!hw_priv) {
+		CAM_ERR(CAM_CRE, "Invalid cam_dev_info");
+		return -EINVAL;
+	}
+
+	soc_info = &cre_dev->soc_info;
+	core_info = (struct cam_cre_device_core_info *)cre_dev->core_info;
+
+	/* %pK for kernel pointers instead of %x */
+	if ((!soc_info) || (!core_info)) {
+		CAM_ERR(CAM_CRE, "soc_info = %pK core_info = %pK",
+			soc_info, core_info);
+		return -EINVAL;
+	}
+
+	if (!get_hw_cap_args) {
+		CAM_ERR(CAM_CRE, "Invalid caps");
+		return -EINVAL;
+	}
+
+	top_reg_val = core_info->cre_hw_info->cre_hw->top_reg_val;
+	cre_hw_ver = get_hw_cap_args;
+	cre_hw_ver->hw_ver.major =
+		(core_info->hw_version & top_reg_val->major_mask) >>
+		top_reg_val->major_shift;
+	cre_hw_ver->hw_ver.minor =
+		(core_info->hw_version & top_reg_val->minor_mask) >>
+		top_reg_val->minor_shift;
+
+	return 0;
+}
+
+/*
+ * cam_cre_dev_process_init - init top, bus RD and bus WR in order,
+ * unwinding already-initialized modules on failure.
+ *
+ * Fixed: the cleanup calls no longer overwrite the original failure rc,
+ * and the misleading indentation of the error checks is corrected.
+ */
+static int cam_cre_dev_process_init(struct cam_cre_hw *cre_hw,
+	void *cmd_args)
+{
+	int rc = 0;
+
+	rc = cam_cre_top_process(cre_hw, 0, CRE_HW_INIT, cmd_args);
+	if (rc)
+		goto top_init_fail;
+
+	rc = cam_cre_bus_rd_process(cre_hw, 0, CRE_HW_INIT, cmd_args);
+	if (rc)
+		goto bus_rd_init_fail;
+
+	rc = cam_cre_bus_wr_process(cre_hw, 0, CRE_HW_INIT, cmd_args);
+	if (rc)
+		goto bus_wr_init_fail;
+
+	return rc;
+
+bus_wr_init_fail:
+	/* Best-effort unwind; preserve the original failure code in rc */
+	cam_cre_bus_rd_process(cre_hw, 0, CRE_HW_DEINIT, NULL);
+bus_rd_init_fail:
+	cam_cre_top_process(cre_hw, 0, CRE_HW_DEINIT, NULL);
+top_init_fail:
+	return rc;
+}
+
+/* Thin wrapper kept for symmetry with the other cam_cre_process_* entry points. */
+static int cam_cre_process_init(struct cam_cre_hw *cre_hw,
+	void *cmd_args)
+{
+	return cam_cre_dev_process_init(cre_hw, cmd_args);
+}
+
+/*
+ * cam_cre_init_hw - power up CRE: start CPAS with default AHB/AXI votes,
+ * enable SoC resources (clocks/regulators) and run module init.
+ * Unwinds in reverse order on failure.
+ */
+int cam_cre_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *cre_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_cre_device_core_info *core_info = NULL;
+	struct cam_cre_cpas_vote *cpas_vote;
+	int rc = 0;
+	struct cam_cre_dev_init *init;
+	struct cam_cre_hw *cre_hw;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_CRE, "Invalid cam_dev_info");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	soc_info = &cre_dev->soc_info;
+	core_info = (struct cam_cre_device_core_info *)cre_dev->core_info;
+	if ((!soc_info) || (!core_info)) {
+		CAM_ERR(CAM_CRE, "soc_info = %pK core_info = %pK",
+			soc_info, core_info);
+		rc = -EINVAL;
+		goto end;
+	}
+	cre_hw = core_info->cre_hw_info->cre_hw;
+
+	cpas_vote = kzalloc(sizeof(struct cam_cre_cpas_vote), GFP_KERNEL);
+	if (!cpas_vote) {
+		/* Fixed log tag: this is the CRE driver, not ISP */
+		CAM_ERR(CAM_CRE, "Out of memory");
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	/* Default SVS AHB vote and a single all-data write AXI path */
+	cpas_vote->ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	cpas_vote->ahb_vote.vote.level = CAM_SVS_VOTE;
+	cpas_vote->axi_vote.num_paths = 1;
+	cpas_vote->axi_vote.axi_path[0].path_data_type =
+		CAM_AXI_PATH_DATA_ALL;
+	cpas_vote->axi_vote.axi_path[0].transac_type =
+		CAM_AXI_TRANSACTION_WRITE;
+	cpas_vote->axi_vote.axi_path[0].camnoc_bw =
+		CAM_CPAS_DEFAULT_AXI_BW;
+	cpas_vote->axi_vote.axi_path[0].mnoc_ab_bw =
+		CAM_CPAS_DEFAULT_AXI_BW;
+	cpas_vote->axi_vote.axi_path[0].mnoc_ib_bw =
+		CAM_CPAS_DEFAULT_AXI_BW;
+	cpas_vote->axi_vote.axi_path[0].ddr_ab_bw =
+		CAM_CPAS_DEFAULT_AXI_BW;
+	cpas_vote->axi_vote.axi_path[0].ddr_ib_bw =
+		CAM_CPAS_DEFAULT_AXI_BW;
+
+	rc = cam_cpas_start(core_info->cpas_handle,
+		&cpas_vote->ahb_vote, &cpas_vote->axi_vote);
+	if (rc) {
+		CAM_ERR(CAM_CRE, "cpass start failed: %d", rc);
+		goto free_cpas_vote;
+	}
+	core_info->cpas_start = true;
+
+	rc = cam_cre_enable_soc_resources(soc_info);
+	if (rc)
+		goto enable_soc_resource_failed;
+	else
+		core_info->clk_enable = true;
+
+	init = init_hw_args;
+
+	init->core_info = core_info;
+	rc = cam_cre_process_init(cre_hw, init_hw_args);
+	if (rc)
+		goto process_init_failed;
+	else
+		goto free_cpas_vote;
+
+process_init_failed:
+	if (cam_cre_disable_soc_resources(soc_info))
+		CAM_ERR(CAM_CRE, "disable soc resource failed");
+enable_soc_resource_failed:
+	if (cam_cpas_stop(core_info->cpas_handle))
+		CAM_ERR(CAM_CRE, "cpas stop is failed");
+	else
+		core_info->cpas_start = false;
+free_cpas_vote:
+	/* cpas_vote is only needed for the duration of cam_cpas_start() */
+	cam_free_clear((void *)cpas_vote);
+	cpas_vote = NULL;
+end:
+	return rc;
+}
+
+/*
+ * cam_cre_deinit_hw - disable CRE SoC resources (clocks/regulators).
+ * The clk_enable flag is cleared even if the disable call fails.
+ */
+int cam_cre_deinit_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *cre_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_cre_device_core_info *core_info = NULL;
+	int rc = 0;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_CRE, "Invalid cam_dev_info");
+		return -EINVAL;
+	}
+
+	soc_info = &cre_dev->soc_info;
+	core_info = (struct cam_cre_device_core_info *)cre_dev->core_info;
+	if ((!soc_info) || (!core_info)) {
+		CAM_ERR(CAM_CRE, "soc_info = %pK core_info = %pK",
+			soc_info, core_info);
+		return -EINVAL;
+	}
+
+	rc = cam_cre_disable_soc_resources(soc_info);
+	if (rc)
+		CAM_ERR(CAM_CRE, "soc disable is failed : %d", rc);
+	core_info->clk_enable = false;
+
+	return rc;
+}
+
+/* Forward a debug-register dump request to the top block. */
+static int cam_cre_dev_process_dump_debug_reg(struct cam_cre_hw *cre_hw)
+{
+	return cam_cre_top_process(cre_hw, -1, CRE_HW_DUMP_DEBUG, NULL);
+}
+
+/* Forward a reset request to the top block; cmd_args is unused here. */
+static int cam_cre_dev_process_reset(struct cam_cre_hw *cre_hw, void *cmd_args)
+{
+	return cam_cre_top_process(cre_hw, -1, CRE_HW_RESET, NULL);
+}
+
+/*
+ * cam_cre_dev_process_release - release the ctx in top, bus RD and bus
+ * WR; all three are attempted regardless of individual failures.
+ * NOTE(review): OR-ing negative errnos in rc yields a meaningless value
+ * when multiple modules fail — callers can only treat rc as zero /
+ * non-zero.
+ */
+static int cam_cre_dev_process_release(struct cam_cre_hw *cre_hw, void *cmd_args)
+{
+	int rc = 0;
+	struct cam_cre_dev_release *cre_dev_release;
+
+	cre_dev_release = cmd_args;
+	rc = cam_cre_top_process(cre_hw, cre_dev_release->ctx_id,
+		CRE_HW_RELEASE, NULL);
+
+	rc |= cam_cre_bus_rd_process(cre_hw, cre_dev_release->ctx_id,
+		CRE_HW_RELEASE, NULL);
+
+	rc |= cam_cre_bus_wr_process(cre_hw, cre_dev_release->ctx_id,
+		CRE_HW_RELEASE, NULL);
+
+	return rc;
+}
+
+/*
+ * cam_cre_dev_process_acquire - acquire the ctx in top, bus RD and bus
+ * WR in order, releasing already-acquired modules on failure.
+ */
+static int cam_cre_dev_process_acquire(struct cam_cre_hw *cre_hw, void *cmd_args)
+{
+	int rc = 0;
+	struct cam_cre_dev_acquire *cre_dev_acquire;
+
+	if (!cmd_args || !cre_hw) {
+		CAM_ERR(CAM_CRE, "Invalid arguments: %pK %pK",
+		cmd_args, cre_hw);
+		return -EINVAL;
+	}
+
+	cre_dev_acquire = cmd_args;
+	rc = cam_cre_top_process(cre_hw, cre_dev_acquire->ctx_id,
+		CRE_HW_ACQUIRE, cre_dev_acquire);
+	if (rc)
+		goto top_acquire_fail;
+
+	rc = cam_cre_bus_rd_process(cre_hw, cre_dev_acquire->ctx_id,
+		CRE_HW_ACQUIRE, cre_dev_acquire->cre_acquire);
+	if (rc)
+		goto bus_rd_acquire_fail;
+
+	rc = cam_cre_bus_wr_process(cre_hw, cre_dev_acquire->ctx_id,
+		CRE_HW_ACQUIRE, cre_dev_acquire->cre_acquire);
+	if (rc)
+		goto bus_wr_acquire_fail;
+
+	return 0;
+
+	/* Unwind in reverse order; rc keeps the original failure code */
+bus_wr_acquire_fail:
+	cam_cre_bus_rd_process(cre_hw, cre_dev_acquire->ctx_id,
+		CRE_HW_RELEASE, cre_dev_acquire->cre_acquire);
+bus_rd_acquire_fail:
+	cam_cre_top_process(cre_hw, cre_dev_acquire->ctx_id,
+		CRE_HW_RELEASE, cre_dev_acquire->cre_acquire);
+top_acquire_fail:
+	return rc;
+}
+
+/*
+ * Apply a queued register set to top, bus RD and bus WR, stopping at
+ * the first failure.
+ */
+static int cam_cre_dev_process_reg_set_update(struct cam_cre_hw *cre_hw, void *cmd_args)
+{
+	struct cam_cre_dev_reg_set_update *upd = cmd_args;
+	int rc;
+
+	rc = cam_cre_top_process(cre_hw, 0, CRE_HW_REG_SET_UPDATE, upd);
+	if (rc)
+		return rc;
+
+	rc = cam_cre_bus_rd_process(cre_hw, 0, CRE_HW_REG_SET_UPDATE, upd);
+	if (rc)
+		return rc;
+
+	return cam_cre_bus_wr_process(cre_hw, 0, CRE_HW_REG_SET_UPDATE, upd);
+}
+
+/*
+ * Run CRE_HW_PREPARE through top, bus RD and bus WR for the request's
+ * context, stopping at the first failure.
+ */
+static int cam_cre_dev_process_prepare(struct cam_cre_hw *cre_hw, void *cmd_args)
+{
+	struct cam_cre_dev_prepare_req *req = cmd_args;
+	int32_t ctx_id = req->ctx_data->ctx_id;
+	int rc;
+
+	rc = cam_cre_top_process(cre_hw, ctx_id, CRE_HW_PREPARE, req);
+	if (rc)
+		return rc;
+
+	rc = cam_cre_bus_rd_process(cre_hw, ctx_id, CRE_HW_PREPARE, req);
+	if (rc)
+		return rc;
+
+	return cam_cre_bus_wr_process(cre_hw, ctx_id, CRE_HW_PREPARE, req);
+}
+
+/*
+ * cam_cre_dev_process_probe - probe top, bus RD and bus WR modules.
+ * NOTE(review): individual probe return codes are discarded and 0 is
+ * always returned — confirm whether probe failures should propagate.
+ */
+static int cam_cre_dev_process_probe(struct cam_cre_hw *cre_hw,
+	void *cmd_args)
+{
+	cam_cre_top_process(cre_hw, -1, CRE_HW_PROBE, NULL);
+	cam_cre_bus_rd_process(cre_hw, -1, CRE_HW_PROBE, NULL);
+	cam_cre_bus_wr_process(cre_hw, -1, CRE_HW_PROBE, NULL);
+
+	return 0;
+}
+
+/*
+ * Thin one-line wrappers between cam_cre_process_cmd() and the
+ * cam_cre_dev_process_* implementations, kept for naming symmetry.
+ */
+static int cam_cre_process_probe(struct cam_cre_hw *cre_hw,
+	void *cmd_args)
+{
+	return cam_cre_dev_process_probe(cre_hw, cmd_args);
+}
+
+static int cam_cre_process_dump_debug_reg(struct cam_cre_hw *cre_hw)
+{
+	return cam_cre_dev_process_dump_debug_reg(cre_hw);
+}
+
+static int cam_cre_process_reset(struct cam_cre_hw *cre_hw,
+	void *cmd_args)
+{
+	return cam_cre_dev_process_reset(cre_hw, cmd_args);
+}
+
+static int cam_cre_process_release(struct cam_cre_hw *cre_hw,
+	void *cmd_args)
+{
+	return cam_cre_dev_process_release(cre_hw, cmd_args);
+}
+
+static int cam_cre_process_acquire(struct cam_cre_hw *cre_hw,
+	void *cmd_args)
+{
+	return cam_cre_dev_process_acquire(cre_hw, cmd_args);
+}
+
+static int cam_cre_process_prepare(struct cam_cre_hw *cre_hw,
+	void *cmd_args)
+{
+	return cam_cre_dev_process_prepare(cre_hw, cmd_args);
+}
+
+static int cam_cre_process_reg_set_update(struct cam_cre_hw *cre_hw,
+	void *cmd_args)
+{
+	return cam_cre_dev_process_reg_set_update(cre_hw, cmd_args);
+}
+
+/*
+ * cam_cre_process_cmd - top-level CRE command dispatcher invoked by the
+ * hw mgr. Validates device_priv, core_info and the cached cre_hw before
+ * dispatching; unknown commands are silently ignored.
+ */
+int cam_cre_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_hw_info *cre_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_cre_device_core_info *core_info = NULL;
+	struct cam_cre_hw *cre_hw;
+	unsigned long flags;
+
+	/* %pK for kernel pointers instead of %x */
+	if (!device_priv) {
+		CAM_ERR(CAM_CRE, "Invalid args %pK for cmd %u",
+			device_priv, cmd_type);
+		return -EINVAL;
+	}
+
+	soc_info = &cre_dev->soc_info;
+	core_info = (struct cam_cre_device_core_info *)cre_dev->core_info;
+	if ((!soc_info) || (!core_info)) {
+		CAM_ERR(CAM_CRE, "soc_info = %pK core_info = %pK",
+			soc_info, core_info);
+		return -EINVAL;
+	}
+
+	cre_hw = core_info->cre_hw_info->cre_hw;
+	if (!cre_hw) {
+		CAM_ERR(CAM_CRE, "Invalid cre hw info");
+		return -EINVAL;
+	}
+
+	switch (cmd_type) {
+	case CRE_HW_PROBE:
+		rc = cam_cre_process_probe(cre_hw, cmd_args);
+		break;
+	case CRE_HW_ACQUIRE:
+		rc = cam_cre_process_acquire(cre_hw, cmd_args);
+		break;
+	case CRE_HW_RELEASE:
+		rc = cam_cre_process_release(cre_hw, cmd_args);
+		break;
+	case CRE_HW_PREPARE:
+		rc = cam_cre_process_prepare(cre_hw, cmd_args);
+		break;
+	case CRE_HW_START:
+		break;
+	case CRE_HW_STOP:
+		break;
+	case CRE_HW_FLUSH:
+		break;
+	case CRE_HW_RESET:
+		rc = cam_cre_process_reset(cre_hw, cmd_args);
+		break;
+	case CRE_HW_CLK_UPDATE: {
+		struct cam_cre_dev_clk_update *clk_upd_cmd =
+			(struct cam_cre_dev_clk_update *)cmd_args;
+
+		/* Lazily re-enable clocks before changing the rate */
+		if (!core_info->clk_enable) {
+			rc = cam_soc_util_clk_enable_default(soc_info,
+				CAM_SVS_VOTE);
+			if (rc) {
+				CAM_ERR(CAM_CRE, "Clock enable is failed");
+				return rc;
+			}
+			core_info->clk_enable = true;
+		}
+
+		rc = cam_cre_update_clk_rate(soc_info, clk_upd_cmd->clk_rate);
+		if (rc)
+			CAM_ERR(CAM_CRE, "Failed to update clk: %d", rc);
+		}
+		break;
+	case CRE_HW_CLK_DISABLE: {
+		if (core_info->clk_enable)
+			cam_soc_util_clk_disable_default(soc_info);
+
+		core_info->clk_enable = false;
+		}
+		break;
+	case CRE_HW_BW_UPDATE: {
+		struct cam_cre_dev_bw_update *cpas_vote = cmd_args;
+
+		if (!cmd_args)
+			return -EINVAL;
+
+		rc = cam_cre_caps_vote(core_info, cpas_vote);
+		if (rc)
+			CAM_ERR(CAM_CRE, "failed to update bw: %d", rc);
+		}
+		break;
+	case CRE_HW_SET_IRQ_CB: {
+		struct cam_cre_set_irq_cb *irq_cb = cmd_args;
+
+		if (!cmd_args) {
+			CAM_ERR(CAM_CRE, "cmd args NULL");
+			return -EINVAL;
+		}
+
+		/* hw_lock serializes against cam_cre_irq() reading the cb */
+		spin_lock_irqsave(&cre_dev->hw_lock, flags);
+		core_info->irq_cb.cre_hw_mgr_cb = irq_cb->cre_hw_mgr_cb;
+		core_info->irq_cb.data = irq_cb->data;
+		spin_unlock_irqrestore(&cre_dev->hw_lock, flags);
+		}
+		break;
+	case CRE_HW_REG_SET_UPDATE:
+		rc = cam_cre_process_reg_set_update(cre_hw, cmd_args);
+		break;
+	case CRE_HW_DUMP_DEBUG:
+		rc = cam_cre_process_dump_debug_reg(cre_hw);
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
+
+/*
+ * cam_cre_irq - CRE hard-irq handler. Runs the top ISR first, then
+ * routes FE/WE bits to the bus RD/WR ISRs, and finally invokes the hw
+ * mgr callback on error or write-buf-done.
+ */
+irqreturn_t cam_cre_irq(int irq_num, void *data)
+{
+	struct cam_hw_info *cre_dev = data;
+	struct cam_cre_device_core_info *core_info = NULL;
+	struct cam_cre_hw *cre_hw;
+
+	if (!data) {
+		CAM_ERR(CAM_CRE, "Invalid cam_dev_info or query_cap args");
+		return IRQ_HANDLED;
+	}
+
+	core_info = (struct cam_cre_device_core_info *)cre_dev->core_info;
+	cre_hw = core_info->cre_hw_info->cre_hw;
+
+	irq_data.error = 0;
+
+	cam_cre_top_process(cre_hw, 0, CRE_HW_ISR, &irq_data);
+
+	/* Route sub-block interrupts based on the top status bits */
+	if (irq_data.top_irq_status & CAM_CRE_FE_IRQ)
+		cam_cre_bus_rd_process(cre_hw, 0, CRE_HW_ISR, &irq_data);
+	if (irq_data.top_irq_status & CAM_CRE_WE_IRQ)
+		cam_cre_bus_wr_process(cre_hw, 0, CRE_HW_ISR, &irq_data);
+
+	spin_lock(&cre_dev->hw_lock);
+	if (core_info->irq_cb.cre_hw_mgr_cb && core_info->irq_cb.data)
+		if (irq_data.error ||
+			((irq_data.top_irq_status & CAM_CRE_WE_IRQ) &&
+			 irq_data.wr_buf_done))
+			/*
+			 * NOTE(review): sizeof(struct cam_hw_info) is passed
+			 * as the payload size although the payload is
+			 * &irq_data — looks like it should be
+			 * sizeof(struct cam_cre_irq_data); confirm against
+			 * the hw mgr callback.
+			 */
+			core_info->irq_cb.cre_hw_mgr_cb(&irq_data,
+				sizeof(struct cam_hw_info),
+				core_info->irq_cb.data);
+	spin_unlock(&cre_dev->hw_lock);
+
+	return IRQ_HANDLED;
+}

+ 84 - 0
drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_core.h

@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_CRE_CORE_H
+#define CAM_CRE_CORE_H
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+#include <media/cam_cre.h>
+#include "cam_cpas_api.h"
+#include "cre_hw.h"
+#include "cam_cre_hw_intf.h"
+
+/**
+ * struct cam_cre_cpas_vote
+ * @ahb_vote: AHB vote info
+ * @axi_vote: AXI vote info
+ * @ahb_vote_valid: Flag for ahb vote data
+ * @axi_vote_valid: flag for axi vote data
+ */
+struct cam_cre_cpas_vote {
+	struct cam_ahb_vote ahb_vote;
+	struct cam_axi_vote axi_vote;
+	uint32_t ahb_vote_valid;
+	uint32_t axi_vote_valid;
+};
+
+
+/**
+ * struct cam_cre_device_hw_info - register bases and tables for one CRE
+ * @cre_hw:          register offset/value tables for the detected HW version
+ * @hw_idx:          hardware instance index
+ * @cre_top_base:    mapped TOP register base (from soc_info reg_map)
+ * @cre_qos_base:    mapped QoS register base
+ * @cre_pp_base:     mapped PP register base
+ * @cre_bus_rd_base: mapped bus-read register base
+ * @cre_bus_wr_base: mapped bus-write register base
+ * @reserved:        reserved for future use
+ */
+struct cam_cre_device_hw_info {
+	struct cam_cre_hw *cre_hw;
+	uint32_t hw_idx;
+	void *cre_top_base;
+	void *cre_qos_base;
+	void *cre_pp_base;
+	void *cre_bus_rd_base;
+	void *cre_bus_wr_base;
+	uint32_t reserved;
+};
+
+/**
+ * struct cam_cre_device_core_info - per-device core state for CRE
+ * @cre_hw_info:  register bases/tables for this instance
+ * @hw_version:   raw value read from the TOP hw_version register
+ * @hw_idx:       hardware instance index
+ * @hw_type:      device type (CRE_DEV_CRE)
+ * @cpas_handle:  CPAS client handle from cam_cpas_register_client()
+ * @cpas_start:   (per name) whether CPAS voting is active — verify in cre_core.c
+ * @clk_enable:   (per name) whether the core clock is enabled — verify in cre_core.c
+ * @irq_cb:       HW-manager IRQ callback; written under hw_lock in process_cmd
+ */
+struct cam_cre_device_core_info {
+	struct   cam_cre_device_hw_info *cre_hw_info;
+	uint32_t hw_version;
+	uint32_t hw_idx;
+	uint32_t hw_type;
+	uint32_t cpas_handle;
+	bool     cpas_start;
+	bool     clk_enable;
+	struct   cam_cre_set_irq_cb irq_cb;
+};
+
+int cam_cre_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_cre_deinit_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_cre_start_hw(void *device_priv,
+	void *start_hw_args, uint32_t arg_size);
+int cam_cre_stop_hw(void *device_priv,
+	void *stop_hw_args, uint32_t arg_size);
+int cam_cre_reset_hw(void *device_priv,
+	void *reset_hw_args, uint32_t arg_size);
+int cam_cre_flush_hw(void *device_priv,
+	void *flush_args, uint32_t arg_size);
+int cam_cre_get_hw_caps(void *device_priv,
+	void *get_hw_cap_args, uint32_t arg_size);
+int cam_cre_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size);
+irqreturn_t cam_cre_irq(int irq_num, void *data);
+
+/**
+ * @brief : API to register CRE hw to platform framework.
+ * @return Zero on success, or a negative error code on failure.
+ */
+int cam_cre_init_module(void);
+
+/**
+ * @brief : API to remove CRE Hw from platform framework.
+ */
+void cam_cre_exit_module(void);
+#endif /* CAM_CRE_CORE_H */

+ 321 - 0
drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_dev.c

@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include <linux/timer.h>
+
+#include "cam_node.h"
+#include "cre_core.h"
+#include "cre_soc.h"
+#include "cam_hw.h"
+#include "cre_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_io_util.h"
+#include "cam_cre_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+#include "cre_hw_100.h"
+#include "cre_dev_intf.h"
+#include "cam_smmu_api.h"
+#include "camera_main.h"
+
+static struct cam_cre_hw_intf_data cam_cre_dev_list[CRE_DEV_MAX];
+static struct cam_cre_device_hw_info cre_hw_info;
+static struct cam_cre_soc_private cre_soc_info;
+
+static char cre_dev_name[8];
+
+static struct cre_hw_version_reg cre_hw_version_reg = {
+	.hw_ver = 0x0,
+};
+
+/*
+ * cam_cre_init_hw_version - read the HW revision and wire the matching
+ * register tables into core_info.
+ *
+ * Returns 0 on success, -EINVAL for an unsupported HW version. On the
+ * unsupported path the register tables are left untouched (the original
+ * code fell through and patched the v1.0.0 tables anyway).
+ */
+static int cam_cre_init_hw_version(struct cam_hw_soc_info *soc_info,
+	struct cam_cre_device_core_info *core_info)
+{
+	int rc = 0;
+
+	/* %pK, not %x: these are pointers, not 32-bit integers */
+	CAM_DBG(CAM_CRE, "soc_info = %pK core_info = %pK",
+		soc_info, core_info);
+	CAM_DBG(CAM_CRE, "TOP: %pK RD: %pK WR: %pK",
+		soc_info->reg_map[CRE_TOP_BASE].mem_base,
+		soc_info->reg_map[CRE_BUS_RD].mem_base,
+		soc_info->reg_map[CRE_BUS_WR].mem_base);
+
+	core_info->cre_hw_info->cre_top_base =
+		soc_info->reg_map[CRE_TOP_BASE].mem_base;
+	core_info->cre_hw_info->cre_bus_rd_base =
+		soc_info->reg_map[CRE_BUS_RD].mem_base;
+	core_info->cre_hw_info->cre_bus_wr_base =
+		soc_info->reg_map[CRE_BUS_WR].mem_base;
+
+	core_info->hw_version = cam_io_r_mb(
+			core_info->cre_hw_info->cre_top_base +
+			cre_hw_version_reg.hw_ver);
+
+	switch (core_info->hw_version) {
+	case CRE_HW_VER_1_0_0:
+		core_info->cre_hw_info->cre_hw = &cre_hw_100;
+		break;
+	default:
+		CAM_ERR(CAM_CRE, "Unsupported version : %u",
+			core_info->hw_version);
+		/* Bail out before patching tables for an unknown revision */
+		return -EINVAL;
+	}
+
+	cre_hw_100.top_reg_offset->base = core_info->cre_hw_info->cre_top_base;
+	cre_hw_100.bus_rd_reg_offset->base = core_info->cre_hw_info->cre_bus_rd_base;
+	cre_hw_100.bus_wr_reg_offset->base = core_info->cre_hw_info->cre_bus_wr_base;
+
+	return rc;
+}
+
+/*
+ * cam_cre_register_cpas - register this CRE instance as a CPAS client.
+ * @soc_info:  SOC info providing the backing platform device
+ * @core_info: core state; receives the CPAS client handle on success
+ * @hw_idx:    hardware instance index (CPAS cell index)
+ *
+ * Returns 0 on success or the negative error from CPAS.
+ */
+int cam_cre_register_cpas(struct cam_hw_soc_info *soc_info,
+	struct cam_cre_device_core_info *core_info,
+	uint32_t hw_idx)
+{
+	/*
+	 * Zero-initialize: the struct is passed wholesale to the CPAS API,
+	 * so any field not set below must not carry stack garbage.
+	 */
+	struct cam_cpas_register_params cpas_register_params = {0};
+	int rc;
+
+	cpas_register_params.dev = &soc_info->pdev->dev;
+	memcpy(cpas_register_params.identifier, "cre", sizeof("cre"));
+	cpas_register_params.cell_index = hw_idx;
+
+	rc = cam_cpas_register_client(&cpas_register_params);
+	if (rc < 0) {
+		CAM_ERR(CAM_CRE, "failed: %d", rc);
+		return rc;
+	}
+	core_info->cpas_handle = cpas_register_params.client_handle;
+
+	return rc;
+}
+
+/*
+ * cam_cre_component_bind - component-framework bind: allocate the HW
+ * interface/info, map resources, detect the HW version, and publish the
+ * instance in cam_cre_dev_list[].
+ *
+ * Fixes over the original: cell-index read is checked and bounds-checked
+ * before it indexes cam_cre_dev_list[]; cam_cpas_start() failure is
+ * handled; the init_hw_version failure path now powers the block back
+ * down instead of leaking enabled SoC resources and an active CPAS vote.
+ */
+static int cam_cre_component_bind(struct device *dev,
+	struct device *master_dev, void *data)
+{
+	struct cam_hw_intf                *cre_dev_intf = NULL;
+	struct cam_hw_info                *cre_dev = NULL;
+	const struct of_device_id         *match_dev = NULL;
+	struct cam_cre_device_core_info   *core_info = NULL;
+	struct cam_cre_dev_probe           cre_probe;
+	struct cam_cre_cpas_vote           cpas_vote;
+	struct cam_cre_soc_private        *soc_private;
+	int i;
+	uint32_t hw_idx;
+	int rc = 0;
+
+	struct platform_device *pdev = to_platform_device(dev);
+
+	/*
+	 * Read and validate the instance index up front: hw_idx indexes
+	 * cam_cre_dev_list[] below, so an unchecked DT read would leave it
+	 * uninitialized and an out-of-range value would corrupt memory.
+	 */
+	rc = of_property_read_u32(pdev->dev.of_node, "cell-index", &hw_idx);
+	if (rc) {
+		CAM_ERR(CAM_CRE, "failed to read cell-index rc: %d", rc);
+		return rc;
+	}
+	if (hw_idx >= CRE_DEV_MAX) {
+		CAM_ERR(CAM_CRE, "invalid cell-index: %u", hw_idx);
+		return -EINVAL;
+	}
+
+	cre_dev_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!cre_dev_intf)
+		return -ENOMEM;
+
+	cre_dev_intf->hw_idx = hw_idx;
+	cre_dev_intf->hw_type = CRE_DEV_CRE;
+	cre_dev = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!cre_dev) {
+		rc = -ENOMEM;
+		goto cre_dev_alloc_failed;
+	}
+
+	memset(cre_dev_name, 0, sizeof(cre_dev_name));
+	snprintf(cre_dev_name, sizeof(cre_dev_name),
+		"cre%1u", cre_dev_intf->hw_idx);
+
+	cre_dev->soc_info.pdev = pdev;
+	cre_dev->soc_info.dev = &pdev->dev;
+	cre_dev->soc_info.dev_name = cre_dev_name;
+	cre_dev_intf->hw_priv = cre_dev;
+	cre_dev_intf->hw_ops.init = cam_cre_init_hw;
+	cre_dev_intf->hw_ops.deinit = cam_cre_deinit_hw;
+	cre_dev_intf->hw_ops.get_hw_caps = cam_cre_get_hw_caps;
+	cre_dev_intf->hw_ops.process_cmd = cam_cre_process_cmd;
+
+	CAM_DBG(CAM_CRE, "type %d index %d",
+		cre_dev_intf->hw_type,
+		cre_dev_intf->hw_idx);
+
+	/* hw_idx already validated against CRE_DEV_MAX above */
+	cam_cre_dev_list[cre_dev_intf->hw_idx].hw_intf = cre_dev_intf;
+
+	platform_set_drvdata(pdev, cre_dev_intf);
+
+	cre_dev->core_info = kzalloc(sizeof(struct cam_cre_device_core_info),
+		GFP_KERNEL);
+	if (!cre_dev->core_info) {
+		rc = -ENOMEM;
+		goto cre_core_alloc_failed;
+	}
+	core_info = (struct cam_cre_device_core_info *)cre_dev->core_info;
+	core_info->cre_hw_info = &cre_hw_info;
+	cre_dev->soc_info.soc_private = &cre_soc_info;
+
+	match_dev = of_match_device(pdev->dev.driver->of_match_table,
+		&pdev->dev);
+	if (!match_dev) {
+		rc = -EINVAL;
+		CAM_DBG(CAM_CRE, "No cre hardware info");
+		goto cre_match_dev_failed;
+	}
+
+	rc = cam_cre_init_soc_resources(&cre_dev->soc_info, cam_cre_irq,
+		cre_dev);
+	if (rc < 0) {
+		CAM_ERR(CAM_CRE, "failed to init_soc");
+		goto init_soc_failed;
+	}
+	core_info->hw_type = CRE_DEV_CRE;
+	core_info->hw_idx = hw_idx;
+	rc = cam_cre_register_cpas(&cre_dev->soc_info,
+		core_info, cre_dev_intf->hw_idx);
+	if (rc < 0)
+		goto register_cpas_failed;
+
+	rc = cam_cre_enable_soc_resources(&cre_dev->soc_info);
+	if (rc < 0) {
+		CAM_ERR(CAM_CRE, "enable soc resorce failed: %d", rc);
+		goto enable_soc_failed;
+	}
+	cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	cpas_vote.ahb_vote.vote.level = CAM_SVS_VOTE;
+	cpas_vote.axi_vote.num_paths = 1;
+	cpas_vote.axi_vote.axi_path[0].path_data_type =
+		CAM_AXI_PATH_DATA_CRE_WR_OUT;
+	cpas_vote.axi_vote.axi_path[0].transac_type =
+		CAM_AXI_TRANSACTION_WRITE;
+	cpas_vote.axi_vote.axi_path[0].camnoc_bw =
+		CAM_CPAS_DEFAULT_AXI_BW;
+	cpas_vote.axi_vote.axi_path[0].mnoc_ab_bw =
+		CAM_CPAS_DEFAULT_AXI_BW;
+	cpas_vote.axi_vote.axi_path[0].mnoc_ib_bw =
+		CAM_CPAS_DEFAULT_AXI_BW;
+	cpas_vote.axi_vote.axi_path[0].ddr_ab_bw =
+		CAM_CPAS_DEFAULT_AXI_BW;
+	cpas_vote.axi_vote.axi_path[0].ddr_ib_bw =
+		CAM_CPAS_DEFAULT_AXI_BW;
+
+	rc = cam_cpas_start(core_info->cpas_handle,
+		&cpas_vote.ahb_vote, &cpas_vote.axi_vote);
+	if (rc) {
+		CAM_ERR(CAM_CRE, "cpas start failed: %d", rc);
+		goto cpas_start_failed;
+	}
+
+	rc = cam_cre_init_hw_version(&cre_dev->soc_info, cre_dev->core_info);
+	if (rc)
+		goto init_hw_failure;
+
+	cam_cre_disable_soc_resources(&cre_dev->soc_info);
+	cam_cpas_stop(core_info->cpas_handle);
+	cre_dev->hw_state = CAM_HW_STATE_POWER_DOWN;
+
+	cam_cre_process_cmd(cre_dev, CRE_HW_PROBE,
+		&cre_probe, sizeof(cre_probe));
+	mutex_init(&cre_dev->hw_mutex);
+	spin_lock_init(&cre_dev->hw_lock);
+	init_completion(&cre_dev->hw_complete);
+
+	CAM_DBG(CAM_CRE, "CRE:%d component bound successfully",
+		cre_dev_intf->hw_idx);
+	soc_private = cre_dev->soc_info.soc_private;
+	cam_cre_dev_list[cre_dev_intf->hw_idx].num_hw_pid =
+		soc_private->num_pid;
+
+	for (i = 0; i < soc_private->num_pid; i++)
+		cam_cre_dev_list[cre_dev_intf->hw_idx].hw_pid[i] =
+			soc_private->pid[i];
+
+	return rc;
+
+init_hw_failure:
+	/* undo the probe-time power-up before tearing everything down */
+	cam_cpas_stop(core_info->cpas_handle);
+cpas_start_failed:
+	cam_cre_disable_soc_resources(&cre_dev->soc_info);
+enable_soc_failed:
+	/*
+	 * NOTE(review): the CPAS client stays registered on failure —
+	 * confirm whether cam_cpas_unregister_client() belongs here.
+	 */
+register_cpas_failed:
+init_soc_failed:
+cre_match_dev_failed:
+	kfree(cre_dev->core_info);
+	cre_dev->core_info = NULL;
+cre_core_alloc_failed:
+	cam_cre_dev_list[hw_idx].hw_intf = NULL;
+	kfree(cre_dev);
+cre_dev_alloc_failed:
+	kfree(cre_dev_intf);
+	return rc;
+}
+
+/* Component-framework unbind hook; currently log-only. */
+static void cam_cre_component_unbind(struct device *dev,
+	struct device *master_dev, void *data)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+
+	/*
+	 * NOTE(review): the intf/hw-info/core-info allocations made in bind
+	 * are not freed here — confirm whether teardown happens elsewhere or
+	 * this leaks on unbind.
+	 */
+	CAM_DBG(CAM_CRE, "Unbinding component: %s", pdev->name);
+}
+
+/*
+ * cam_cre_hw_init - hand out the published interface data for a CRE
+ * instance.
+ * @cre_hw_intf_data: out-pointer; set to the dev-list entry or NULL
+ * @hw_idx:           instance index, must be < CRE_DEV_MAX
+ *
+ * Returns 0 on success, -EINVAL for bad arguments (previously an
+ * out-of-range hw_idx read past the end of cam_cre_dev_list[]),
+ * -ENODEV when the instance never bound.
+ */
+int cam_cre_hw_init(struct cam_cre_hw_intf_data **cre_hw_intf_data,
+		uint32_t hw_idx)
+{
+	if (!cre_hw_intf_data || hw_idx >= CRE_DEV_MAX) {
+		CAM_ERR(CAM_CRE, "inval param: hw_idx %u", hw_idx);
+		if (cre_hw_intf_data)
+			*cre_hw_intf_data = NULL;
+		return -EINVAL;
+	}
+
+	if (!cam_cre_dev_list[hw_idx].hw_intf) {
+		CAM_ERR(CAM_CRE, "inval param");
+		*cre_hw_intf_data = NULL;
+		return -ENODEV;
+	}
+
+	*cre_hw_intf_data = &cam_cre_dev_list[hw_idx];
+	return 0;
+}
+
+/* conventional qualifier order: "static const", not "const static" */
+static const struct component_ops cam_cre_component_ops = {
+	.bind = cam_cre_component_bind,
+	.unbind = cam_cre_component_unbind,
+};
+
+/*
+ * cam_cre_probe - platform probe: defer real initialization to
+ * cam_cre_component_bind() via the component framework.
+ */
+int cam_cre_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+
+	CAM_DBG(CAM_CRE, "Adding CRE component");
+	rc = component_add(&pdev->dev, &cam_cre_component_ops);
+	if (rc)
+		CAM_ERR(CAM_CRE, "failed to add component rc: %d", rc);
+
+	return rc;
+}
+
+/* Device-tree match table; .data points at the HW-version register layout */
+static const struct of_device_id cam_cre_dt_match[] = {
+	{
+		.compatible = "qcom,cre",
+		.data = &cre_hw_version_reg,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, cam_cre_dt_match);
+
+/* CRE platform driver; binding is driven by the component framework */
+struct platform_driver cam_cre_driver = {
+	.probe = cam_cre_probe,
+	.driver = {
+		.name = "cre",
+		.of_match_table = cam_cre_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+/* Register the CRE platform driver; returns 0 or a negative errno. */
+int cam_cre_init_module(void)
+{
+	return platform_driver_register(&cam_cre_driver);
+}
+
+/* Unregister the CRE platform driver. */
+void cam_cre_exit_module(void)
+{
+	platform_driver_unregister(&cam_cre_driver);
+}
+
+MODULE_DESCRIPTION("CAM CRE driver");
+MODULE_LICENSE("GPL v2");

+ 135 - 0
drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_dev_intf.h

@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+#ifndef CAM_CRE_DEV_INTF_H
+#define CAM_CRE_DEV_INTF_H
+
+#include <media/cam_cre.h>
+#include "cam_cre_hw_mgr.h"
+#include "cam_cpas_api.h"
+#include "cre_top.h"
+
+
+#define CRE_HW_INIT            0x1
+#define CRE_HW_DEINIT          0x2
+#define CRE_HW_ACQUIRE         0x3
+#define CRE_HW_RELEASE         0x4
+#define CRE_HW_START           0x5
+#define CRE_HW_STOP            0x6
+#define CRE_HW_FLUSH           0x7
+#define CRE_HW_PREPARE         0x8
+#define CRE_HW_ISR             0x9
+#define CRE_HW_PROBE           0xA
+#define CRE_HW_CLK_UPDATE      0xB
+#define CRE_HW_BW_UPDATE       0xC
+#define CRE_HW_RESET           0xD
+#define CRE_HW_SET_IRQ_CB      0xE
+#define CRE_HW_CLK_DISABLE     0xF
+#define CRE_HW_CLK_ENABLE      0x10
+#define CRE_HW_DUMP_DEBUG      0x11
+#define CRE_HW_REG_SET_UPDATE  0x12
+
+/**
+ * struct cam_cre_dev_probe
+ *
+ * @reserved: Reserved field for future use
+ */
+struct cam_cre_dev_probe {
+	uint32_t reserved;
+};
+
+/**
+ * struct cam_cre_dev_init
+ *
+ * @core_info: CRE core info
+ */
+struct cam_cre_dev_init {
+	struct cam_cre_device_core_info *core_info;
+};
+
+/**
+ * struct cam_cre_dev_clk_update
+ *
+ * @clk_rate: Clock rate
+ */
+struct cam_cre_dev_clk_update {
+	uint32_t clk_rate;
+};
+
+/**
+ * struct cam_cre_dev_reg_set_update
+ *
+ * @cre_reg_buf: register/value buffer to program (see struct cre_reg_buffer)
+ */
+struct cam_cre_dev_reg_set_update {
+	struct cre_reg_buffer cre_reg_buf;
+};
+
+/**
+ * struct cam_cre_dev_bw_update
+ *
+ * @ahb_vote:       AHB vote info
+ * @axi_vote:       AXI vote info
+ * @ahb_vote_valid: Flag for ahb vote
+ * @axi_vote_valid: Flag for axi vote
+ */
+struct cam_cre_dev_bw_update {
+	struct cam_ahb_vote ahb_vote;
+	struct cam_axi_vote axi_vote;
+	uint32_t ahb_vote_valid;
+	uint32_t axi_vote_valid;
+};
+
+/**
+ * struct cam_cre_dev_acquire
+ *
+ * @ctx_id:      Context id
+ * @cre_top:     CRE top state for this acquire (was missing from kernel-doc)
+ * @cre_acquire: CRE acquire info
+ * @bus_wr_ctx:  Bus Write context
+ * @bus_rd_ctx:  Bus Read context
+ */
+struct cam_cre_dev_acquire {
+	uint32_t ctx_id;
+	struct cre_top *cre_top;
+	struct cam_cre_acquire_dev_info *cre_acquire;
+	struct cre_bus_wr_ctx *bus_wr_ctx;
+	struct cre_bus_rd_ctx *bus_rd_ctx;
+};
+
+/**
+ * struct cam_cre_dev_release
+ *
+ * @ctx_id:      Context id
+ * @bus_wr_ctx:  Bus Write context
+ * @bus_rd_ctx:  Bus Read context
+ */
+struct cam_cre_dev_release {
+	uint32_t ctx_id;
+	struct cre_bus_wr_ctx *bus_wr_ctx;
+	struct cre_bus_rd_ctx *bus_rd_ctx;
+};
+
+/**
+ * struct cam_cre_dev_prepare_req
+ *
+ * @hw_mgr:         CRE hardware manager
+ * @packet:         Packet
+ * @prepare_args:   Prepare request args
+ * @ctx_data:       Context data
+ * @frame_process:  Frame process command
+ * @req_idx:        Request Index
+ */
+struct cam_cre_dev_prepare_req {
+	struct cam_cre_hw_mgr *hw_mgr;
+	struct cam_packet *packet;
+	struct cam_hw_prepare_update_args *prepare_args;
+	struct cam_cre_ctx *ctx_data;
+	uint32_t req_idx;
+};
+
+int cam_cre_top_process(struct cam_cre_hw *cam_cre_hw_info,
+	int32_t ctx_id, uint32_t cmd_id, void *data);
+
+int cam_cre_bus_rd_process(struct cam_cre_hw *cam_cre_hw_info,
+	int32_t ctx_id, uint32_t cmd_id, void *data);
+
+int cam_cre_bus_wr_process(struct cam_cre_hw *cam_cre_hw_info,
+	int32_t ctx_id, uint32_t cmd_id, void *data);
+
+#endif /* CAM_CRE_DEV_INTF_H */

+ 340 - 0
drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_hw.h

@@ -0,0 +1,340 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_CRE_HW_H
+#define CAM_CRE_HW_H
+
+#define CRE_HW_VER_1_0_0 0x10000000
+
+#define CRE_DEV_CRE  0
+#define CRE_DEV_MAX  1
+
+#define MAX_CRE_RD_CLIENTS   1
+#define MAX_CRE_WR_CLIENTS   1
+
+#define CRE_TOP_BASE     0x1
+#define CRE_QOS_BASE     0x2
+#define CRE_BUS_RD       0x3
+#define CRE_BUS_WR       0x4
+#define CRE_BASE_MAX     0x5
+
+#define CRE_WAIT_BUS_WR_RUP    0x1
+#define CRE_WAIT_BUS_WR_DONE   0x2
+#define CRE_WAIT_BUS_RD_DONE   0x3
+#define CRE_WAIT_IDLE_IRQ      0x4
+
+struct cam_cre_top_reg {
+	void *base;
+	uint32_t offset;
+	uint32_t hw_version;
+	uint32_t hw_cap;
+	uint32_t debug_0;
+	uint32_t debug_1;
+	uint32_t debug_cfg;
+	uint32_t testbus_ctrl;
+	uint32_t scratch_0;
+	uint32_t irq_status;
+	uint32_t irq_mask;
+	uint32_t irq_clear;
+	uint32_t irq_set;
+	uint32_t irq_cmd;
+	uint32_t reset_cmd;
+	uint32_t core_clk_cfg_ctrl_0;
+	uint32_t core_clk_cfg_ctrl_1;
+	uint32_t top_spare;
+};
+
+struct cam_cre_top_reg_val {
+	uint32_t hw_version;
+	uint32_t hw_cap;
+	uint32_t major_mask;
+	uint32_t major_shift;
+	uint32_t minor_mask;
+	uint32_t minor_shift;
+	uint32_t irq_mask;
+	uint32_t irq_set;
+	uint32_t irq_clear;
+	uint32_t irq_cmd_set;
+	uint32_t irq_cmd_clear;
+	uint32_t idle;
+	uint32_t fe_done;
+	uint32_t we_done;
+	uint32_t rst_done;
+	uint32_t sw_reset_cmd;
+	uint32_t hw_reset_cmd;
+	uint32_t core_clk_cfg_ctrl_0;
+	uint32_t core_clk_cfg_ctrl_1;
+	uint32_t top_override;
+	uint32_t we_override;
+	uint32_t fe_override;
+	uint32_t ahb_override;
+};
+
+struct cam_cre_bus_rd_client_reg {
+	uint32_t bus_ctrl;
+	uint32_t spare;
+	uint32_t cons_violation;
+	uint32_t cons_violation_status;
+	uint32_t core_cfg;
+	uint32_t ccif_meta_data;
+	uint32_t img_addr;
+	uint32_t rd_width;
+	uint32_t rd_height;
+	uint32_t rd_stride;
+	uint32_t unpacker_cfg;
+	uint32_t latency_buf_allocation;
+	uint32_t misr_cfg_0;
+	uint32_t misr_cfg_1;
+	uint32_t misr_rd_val;
+	uint32_t debug_status_cfg;
+	uint32_t debug_status_0;
+	uint32_t debug_status_1;
+	uint32_t read_buff_cfg;
+	uint32_t addr_cfg;
+};
+
+struct cam_cre_bus_rd_reg {
+	void *base;
+	uint32_t offset;
+	uint32_t hw_version;
+	uint32_t irq_mask;
+	uint32_t irq_clear;
+	uint32_t irq_cmd;
+	uint32_t irq_status;
+	uint32_t input_if_cmd;
+	uint32_t irq_set;
+	uint32_t misr_reset;
+	uint32_t security_cfg;
+	uint32_t iso_cfg;
+	uint32_t iso_seed;
+	uint32_t test_bus_ctrl;
+
+	uint32_t num_clients;
+	struct   cam_cre_bus_rd_client_reg rd_clients[MAX_CRE_RD_CLIENTS];
+};
+
+struct cam_cre_bus_rd_client_reg_val {
+	uint32_t client_en;
+	uint32_t ai_en;
+	uint32_t ai_en_mask;
+	uint32_t ai_en_shift;
+	uint32_t pix_pattern;
+	uint32_t pix_pattern_mask;
+	uint32_t pix_pattern_shift;
+	uint32_t stripe_location;
+	uint32_t stripe_location_mask;
+	uint32_t stripe_location_shift;
+	uint32_t img_addr;
+	uint32_t img_width;
+	uint32_t img_height;
+	uint32_t stride;
+	uint32_t alignment;
+	uint32_t alignment_mask;
+	uint32_t alignment_shift;
+	uint32_t mode;
+	uint32_t mode_mask;
+	uint32_t mode_shift;
+	uint32_t latency_buf_size;
+	uint32_t latency_buf_size_mask;
+	uint32_t misr_cfg_en;
+	uint32_t misr_cfg_en_mask;
+	uint32_t misr_cfg_samp_mode;
+	uint32_t misr_cfg_samp_mode_mask;
+	uint32_t misr_cfg_1;
+	uint32_t misr_rd_val;
+	uint32_t x_int;
+	uint32_t x_int_mask;
+	uint32_t byte_offset;
+	uint32_t byte_offset_mask;
+	uint32_t input_port_id;
+	uint32_t rm_port_id;
+};
+
+struct cam_cre_bus_rd_reg_val {
+	uint32_t hw_version;
+	uint32_t cgc_override;
+	uint32_t irq_mask;
+	uint32_t irq_status;
+	uint32_t irq_cmd_set;
+	uint32_t irq_cmd_clear;
+	uint32_t rup_done;
+	uint32_t rd_buf_done;
+	uint32_t cons_violation;
+	uint32_t static_prg;
+	uint32_t static_prg_mask;
+	uint32_t ica_en;
+	uint32_t ica_en_mask;
+	uint32_t go_cmd;
+	uint32_t go_cmd_mask;
+	uint32_t irq_set;
+	uint32_t irq_clear;
+	uint32_t misr_reset;
+	uint32_t security_cfg;
+	uint32_t iso_bpp_select;
+	uint32_t iso_bpp_select_mask;
+	uint32_t iso_pattern_select;
+	uint32_t iso_pattern_select_mask;
+	uint32_t iso_en;
+	uint32_t iso_en_mask;
+	uint32_t iso_seed;
+	uint32_t bus_ctrl;
+	uint32_t spare;
+	uint32_t num_clients;
+	struct   cam_cre_bus_rd_client_reg_val rd_clients[MAX_CRE_RD_CLIENTS];
+};
+
+struct cam_cre_bus_wr_client_reg {
+	uint32_t client_cfg;
+	uint32_t img_addr;
+	uint32_t img_cfg_0;
+	uint32_t img_cfg_1;
+	uint32_t img_cfg_2;
+	uint32_t bw_limit;
+	uint32_t packer_cfg;
+	uint32_t addr_cfg;
+	uint32_t debug_status_cfg;
+	uint32_t debug_status_0;
+	uint32_t debug_status_1;
+};
+
+struct cam_cre_bus_wr_reg {
+	void *base;
+	uint32_t offset;
+	uint32_t hw_version;
+	uint32_t cgc_override;
+	uint32_t irq_mask_0;
+	uint32_t irq_mask_1;
+	uint32_t irq_clear_0;
+	uint32_t irq_clear_1;
+	uint32_t irq_status_0;
+	uint32_t irq_status_1;
+	uint32_t irq_cmd;
+	uint32_t frame_header_cfg_0;
+	uint32_t local_frame_header_cfg_0;
+	uint32_t irq_set_0;
+	uint32_t irq_set_1;
+	uint32_t iso_cfg;
+	uint32_t violation_status;
+	uint32_t image_size_violation_status;
+	uint32_t perf_count_cfg_0;
+	uint32_t perf_count_cfg_1;
+	uint32_t perf_count_cfg_2;
+	uint32_t perf_count_cfg_3;
+	uint32_t perf_count_val_0;
+	uint32_t perf_count_val_1;
+	uint32_t perf_count_val_2;
+	uint32_t perf_count_val_3;
+	uint32_t perf_count_status;
+	uint32_t misr_cfg_0;
+	uint32_t misr_cfg_1;
+	uint32_t misr_rd_sel;
+	uint32_t misr_reset;
+	uint32_t misr_val;
+
+	uint32_t num_clients;
+	struct   cam_cre_bus_wr_client_reg wr_clients[MAX_CRE_WR_CLIENTS];
+};
+
+struct cam_cre_bus_wr_client_reg_val {
+	uint32_t client_en;
+	uint32_t client_en_mask;
+	uint32_t client_en_shift;
+	uint32_t auto_recovery_en;
+	uint32_t auto_recovery_en_mask;
+	uint32_t auto_recovery_en_shift;
+	uint32_t mode;
+	uint32_t mode_mask;
+	uint32_t mode_shift;
+	uint32_t img_addr;
+	uint32_t width;
+	uint32_t width_mask;
+	uint32_t width_shift;
+	uint32_t height;
+	uint32_t height_mask;
+	uint32_t height_shift;
+	uint32_t x_init;
+	uint32_t x_init_mask;
+	uint32_t stride;
+	uint32_t stride_mask;
+	uint32_t format;
+	uint32_t format_mask;
+	uint32_t format_shift;
+	uint32_t alignment;
+	uint32_t alignment_mask;
+	uint32_t alignment_shift;
+	uint32_t bw_limit_en;
+	uint32_t bw_limit_en_mask;
+	uint32_t bw_limit_counter;
+	uint32_t bw_limit_counter_mask;
+	uint32_t frame_header_addr;
+	uint32_t output_port_id;
+	uint32_t wm_port_id;
+};
+
+struct cam_cre_bus_wr_reg_val {
+	uint32_t hw_version;
+	uint32_t cgc_override;
+	uint32_t irq_mask_0;
+	uint32_t irq_set_0;
+	uint32_t irq_clear_0;
+	uint32_t irq_status_0;
+	uint32_t img_size_violation;
+	uint32_t violation;
+	uint32_t cons_violation;
+	uint32_t comp_buf_done;
+	uint32_t comp_rup_done;
+	uint32_t irq_mask_1;
+	uint32_t irq_set_1;
+	uint32_t irq_clear_1;
+	uint32_t irq_status_1;
+	uint32_t irq_cmd_set;
+	uint32_t irq_cmd_clear;
+	uint32_t client_buf_done;
+	uint32_t frame_header_cfg_0;
+	uint32_t local_frame_header_cfg_0;
+	uint32_t iso_en;
+	uint32_t iso_en_mask;
+	uint32_t violation_status;
+	uint32_t img_size_violation_status;
+	uint32_t misr_0_en;
+	uint32_t misr_0_en_mask;
+	uint32_t misr_0_samp_mode;
+	uint32_t misr_0_samp_mode_mask;
+	uint32_t misr_0_id;
+	uint32_t misr_0_id_mask;
+	uint32_t misr_rd_misr_sel;
+	uint32_t misr_rd_misr_sel_mask;
+	uint32_t misr_rd_word_sel;
+	uint32_t misr_rd_word_sel_mask;
+	uint32_t misr_reset;
+	uint32_t misr_val;
+
+	uint32_t num_clients;
+	struct cam_cre_bus_wr_client_reg_val wr_clients[MAX_CRE_WR_CLIENTS];
+};
+
+struct cam_cre_debug_register {
+	uint32_t offset;
+};
+
+/**
+ * struct cam_cre_hw - bundle of register-offset and register-value tables
+ * describing one CRE hardware version (see cre_hw_100.h for v1.0.0)
+ * @top_reg_offset:    TOP block register offsets
+ * @top_reg_val:       TOP block register values/masks
+ * @bus_rd_reg_offset: bus-read block register offsets
+ * @bus_rd_reg_val:    bus-read block register values/masks
+ * @bus_wr_reg_offset: bus-write block register offsets
+ * @bus_wr_reg_val:    bus-write block register values/masks
+ * @common:            common CRE data (type declared elsewhere)
+ */
+struct cam_cre_hw {
+	struct cam_cre_top_reg        *top_reg_offset;
+	struct cam_cre_top_reg_val    *top_reg_val;
+
+	struct cam_cre_bus_rd_reg     *bus_rd_reg_offset;
+	struct cam_cre_bus_rd_reg_val *bus_rd_reg_val;
+
+	struct cam_cre_bus_wr_reg     *bus_wr_reg_offset;
+	struct cam_cre_bus_wr_reg_val *bus_wr_reg_val;
+
+	struct cam_cre_common         *common;
+};
+
+struct cre_hw_version_reg {
+	uint32_t hw_ver;
+	uint32_t reserved;
+};
+
+#endif /* CAM_CRE_HW_H */

+ 239 - 0
drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_hw_100.h

@@ -0,0 +1,239 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_CRE_HW_100_H
+#define CAM_CRE_HW_100_H
+
+#include "cre_hw.h"
+
+#define CRE_BUS_RD_TYPE            0x1
+#define CRE_BUS_WR_TYPE            0x2
+
+static struct cam_cre_top_reg top_reg = {
+	.hw_version            =  0xFA000,
+	.hw_cap                =  0xFA004,
+	.debug_0               =  0xFA080,
+	.debug_1               =  0xFA084,
+	.debug_cfg             =  0xFA0DC,
+	.testbus_ctrl          =  0xFA1F4,
+	.scratch_0             =  0xFA1F8,
+	.irq_status            =  0xFA00C,
+	.irq_mask              =  0xFA010,
+	.irq_clear             =  0xFA014,
+	.irq_set               =  0xFA018,
+	.irq_cmd               =  0xFA01C,
+	.reset_cmd             =  0xFA008,
+	.core_clk_cfg_ctrl_0   =  0xFA020,
+	.core_clk_cfg_ctrl_1   =  0xFA024,
+	.top_spare             =  0xFA1FC,
+};
+
+/* static: defined in a header — external linkage would cause multiple
+ * definitions if this header is ever included by a second TU (top_reg
+ * and cre_hw_100 below are already static).
+ */
+static struct cam_cre_top_reg_val top_reg_value = {
+	.hw_version = 0x10000000,
+	.hw_cap = 0x4000,
+	.irq_mask = 0xf,
+	.irq_clear = 0xf,
+	.irq_set = 0xf,
+	.irq_cmd_set = 0xf,
+	.irq_cmd_clear = 0xf,
+	.idle = 0x8,
+	.fe_done = 0x4,
+	.we_done = 0x2,
+	.rst_done = 0x1,
+	.sw_reset_cmd = 0x2,
+	.hw_reset_cmd = 0x1,
+};
+
+/* static: header-defined table, internal linkage only */
+static struct cam_cre_bus_rd_reg bus_rd_reg = {
+	.hw_version     = 0xFA400,
+	.irq_mask       = 0xFA404,
+	.irq_clear      = 0xFA408,
+	.irq_cmd        = 0xFA40C,
+	.irq_status     = 0xFA410,
+	.input_if_cmd   = 0xFA414,
+	.irq_set        = 0xFA418,
+	.misr_reset     = 0xFA41C,
+	.security_cfg   = 0xFA420,
+	.iso_cfg        = 0xFA424,
+	.iso_seed       = 0xFA428,
+	.test_bus_ctrl  = 0xFA42C,
+	.num_clients    = 1,
+	.rd_clients[0]  = {
+		.core_cfg               = 0xFA450,
+		.ccif_meta_data         = 0xFA454,
+		.img_addr               = 0xFA458,
+		.rd_width               = 0xFA45C,
+		.rd_height              = 0xFA460,
+		.rd_stride              = 0xFA464,
+		.unpacker_cfg           = 0xFA468,
+		.latency_buf_allocation = 0xFA47C,
+		.misr_cfg_0             = 0xFA484,
+		.misr_cfg_1             = 0xFA488,
+		.misr_rd_val            = 0xFA48C,
+		.debug_status_cfg       = 0xFA490,
+		.debug_status_0         = 0xFA494,
+		.debug_status_1         = 0xFA498,
+		.read_buff_cfg          = 0xFA4A0,
+		.addr_cfg               = 0xFA4A4,
+		.spare                  = 0xFA430,
+		.cons_violation         = 0xFA434,
+	},
+};
+
+/* static: header-defined table, internal linkage only */
+static struct cam_cre_bus_wr_reg_val bus_wr_reg_value = {
+	.hw_version                   = 0x30000000,
+	.cgc_override                 = 0x1,
+	.irq_mask_0                   = 0xd0000101,
+	.irq_set_0                    = 0xd0000101,
+	.irq_clear_0                  = 0xd0000101,
+	.img_size_violation           = 0x80000000,
+	.violation                    = 0x40000000,
+	.cons_violation               = 0x10000000,
+	.comp_buf_done                = 0x100,
+	.comp_rup_done                = 0x1,
+	.irq_mask_1                   = 0x1,
+	.irq_set_1                    = 0x1,
+	.irq_clear_1                  = 0x1,
+	.irq_status_1                 = 0x1,
+	.irq_cmd_set                  = 0x10,
+	.irq_cmd_clear                = 0x1,
+	.client_buf_done              = 0x1,
+	.iso_en                       = 0x1,
+	.iso_en_mask                  = 0x1,
+	.misr_0_en                    = 0x1,
+	.misr_0_en_mask               = 0x1,
+	.misr_0_samp_mode             = 0x1,
+	.misr_0_samp_mode_mask        = 0x10000,
+	.misr_0_id_mask               = 0xff,
+	.misr_rd_misr_sel_mask        = 0xf,
+	.misr_rd_word_sel_mask        = 0x70,
+	.num_clients = 1,
+	.wr_clients[0] = {
+		.client_en                = 0x1,
+		.client_en_mask           = 0x1,
+		.client_en_shift          = 0x0,
+		.auto_recovery_en         = 0x1,
+		.auto_recovery_en_mask    = 0x1,
+		.auto_recovery_en_shift   = 0x4,
+		.mode_mask                = 0x3,
+		.mode_shift               = 0x10,
+		.width_mask               = 0xffff,
+		.width_shift              = 0x0,
+		.height_mask              = 0xffff,
+		.height_shift             = 0x10,
+		.x_init_mask              = 0xff,
+		.stride_mask              = 0xffffff,
+		.format_mask              = 0xf,
+		.format_shift             = 0x0,
+		.alignment_mask           = 0x1,
+		.alignment_shift          = 0x5,
+		.bw_limit_en              = 0x1,
+		.bw_limit_en_mask         = 0x1,
+		.bw_limit_counter_mask    = 0x1fe,
+		.output_port_id           = 1,
+		.wm_port_id               = 1,
+	},
+};
+
+/* static: header-defined table, internal linkage only */
+static struct cam_cre_bus_rd_reg_val bus_rd_reg_value = {
+	.hw_version              = 0x30000000,
+	.irq_mask                = 0x7,
+	.rd_buf_done             = 0x4,
+	.rup_done                = 0x2,
+	.cons_violation          = 0x1,
+	.irq_cmd_set             = 0x10,
+	.irq_cmd_clear           = 0x1,
+	.static_prg              = 0x8,
+	.static_prg_mask         = 0x8,
+	.ica_en                  = 0x1,
+	.ica_en_mask             = 0x2,
+	.go_cmd                  = 0x1,
+	.go_cmd_mask             = 0x1,
+	.irq_set                 = 0x7,
+	.irq_clear               = 0x7,
+	.misr_reset              = 0x1,
+	.security_cfg            = 0x1,
+	.iso_bpp_select_mask     = 0x60,
+	.iso_pattern_select_mask = 0x6,
+	.iso_en                  = 0x1,
+	.iso_en_mask             = 0x1,
+	.num_clients = 1,
+	.rd_clients[0] = {
+		.client_en               = 0x1,
+		.ai_en                   = 0x1,
+		.ai_en_mask              = 0x1000,
+		.ai_en_shift             = 0xc,
+		.pix_pattern_mask        = 0xfc,
+		.pix_pattern_shift       = 0x2,
+		.stripe_location_mask    = 0x3,
+		.stripe_location_shift   = 0x0,
+		.alignment_mask          = 0x1,
+		.alignment_shift         = 0x5,
+		.mode_mask               = 0x1f,
+		.mode_shift              = 0x0,
+		.latency_buf_size_mask   = 0xffff,
+		.misr_cfg_en_mask        = 0x4,
+		.misr_cfg_samp_mode_mask = 0x3,
+		.x_int_mask              = 0xffff,
+		.byte_offset_mask        = 0xff,
+		.rm_port_id              = 0x0,
+	},
+};
+
+/* static: header-defined table, internal linkage only */
+static struct cam_cre_bus_wr_reg bus_wr_reg = {
+	.hw_version                   = 0xFA700,
+	.cgc_override                 = 0xFA708,
+	.irq_mask_0                   = 0xFA718,
+	.irq_mask_1                   = 0xFA71C,
+	.irq_clear_0                  = 0xFA720,
+	.irq_clear_1                  = 0xFA724,
+	.irq_status_0                 = 0xFA728,
+	.irq_status_1                 = 0xFA72C,
+	.irq_cmd                      = 0xFA730,
+	.frame_header_cfg_0           = 0x0,
+	.local_frame_header_cfg_0     = 0xFA74C,
+	.irq_set_0                    = 0xFA750,
+	.irq_set_1                    = 0xFA754,
+	.iso_cfg                      = 0xFA75C,
+	.violation_status             = 0xFA764,
+	.image_size_violation_status  = 0xFA770,
+	.perf_count_cfg_0             = 0xFA774,
+	.perf_count_cfg_1             = 0xFA778,
+	.perf_count_cfg_2             = 0xFA77C,
+	.perf_count_cfg_3             = 0xFA780,
+	.perf_count_val_0             = 0xFA794,
+	.perf_count_val_1             = 0xFA798,
+	.perf_count_val_2             = 0xFA79C,
+	.perf_count_val_3             = 0xFA7A0,
+	.perf_count_status            = 0xFA7B4,
+	.misr_cfg_0                   = 0xFA7B8,
+	.misr_cfg_1                   = 0xFA7BC,
+	.misr_rd_sel                  = 0xFA7C8,
+	.misr_reset                   = 0xFA7CC,
+	.misr_val                     = 0xFA7D0,
+	.wr_clients[0]             = {
+		.client_cfg           = 0xFA900,
+		.img_addr             = 0xFA904,
+		.img_cfg_0            = 0xFA90C,
+		.img_cfg_1            = 0xFA910,
+		.img_cfg_2            = 0xFA914,
+		.bw_limit             = 0xFA918,
+		.packer_cfg           = 0xFA91C,
+		.addr_cfg             = 0xFA970,
+		.debug_status_cfg     = 0xFA984,
+		.debug_status_0       = 0xFA988,
+		.debug_status_1       = 0xFA98C,
+	},
+};
+
+static struct cam_cre_hw cre_hw_100 = {
+	.top_reg_offset    = &top_reg,
+	.top_reg_val       = &top_reg_value,
+	.bus_wr_reg_offset = &bus_wr_reg,
+	.bus_wr_reg_val    = &bus_wr_reg_value,
+	.bus_rd_reg_offset = &bus_rd_reg,
+	.bus_rd_reg_val    = &bus_rd_reg_value,
+};
+#endif // CAM_CRE_HW_100_H

+ 83 - 0
drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_soc.c

@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <media/cam_defs.h>
+#include <media/cam_cre.h>
+
+#include "cre_soc.h"
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+
+/*
+ * cam_cre_init_soc_resources - parse DT properties and claim platform
+ * resources for CRE.
+ * @soc_info:        SOC info to populate from DT
+ * @cre_irq_handler: IRQ handler installed for the CRE interrupt line
+ * @irq_data:        cookie passed back to the handler
+ *
+ * Returns 0 on success or the negative error from cam_soc_util.
+ */
+int cam_cre_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t cre_irq_handler, void *irq_data)
+{
+	int rc;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc)
+		return rc;
+
+	rc = cam_soc_util_request_platform_resource(soc_info,
+		cre_irq_handler,
+		irq_data);
+	if (rc)
+		CAM_ERR(CAM_CRE, "init soc failed %d", rc);
+
+	return rc;
+}
+
+/*
+ * cam_cre_enable_soc_resources - power up CRE platform resources at the
+ * SVS clock level (final argument presumably enables the IRQ — matches
+ * the cam_soc_util convention; confirm against its prototype).
+ */
+int cam_cre_enable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc;
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		CAM_SVS_VOTE, true);
+	if (rc)
+		CAM_ERR(CAM_CRE, "enable platform failed %d", rc);
+
+	return rc;
+}
+
+/* Power down CRE platform resources (inverse of the enable call above). */
+int cam_cre_disable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+	if (rc)
+		CAM_ERR(CAM_CRE, "disable platform failed %d", rc);
+
+	return rc;
+}
+
+/*
+ * cam_cre_update_clk_rate - clamp the requested core clock to the highest
+ * (TURBO) supported rate and apply it to the source clock.
+ * @soc_info: SOC info holding the clock tables
+ * @clk_rate: requested rate in Hz
+ *
+ * Returns the result of cam_soc_util_set_src_clk_rate(), or -EINVAL for
+ * a NULL soc_info. Fixes %d-for-uint32_t format specifiers and drops a
+ * duplicated debug print.
+ */
+int cam_cre_update_clk_rate(struct cam_hw_soc_info *soc_info,
+	uint32_t clk_rate)
+{
+	int32_t src_clk_idx;
+
+	if (!soc_info) {
+		CAM_ERR(CAM_CRE, "Invalid soc info");
+		return -EINVAL;
+	}
+
+	src_clk_idx = soc_info->src_clk_idx;
+
+	if (soc_info->clk_level_valid[CAM_TURBO_VOTE] &&
+		(soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx] != 0) &&
+		(clk_rate > soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx])) {
+		CAM_DBG(CAM_CRE, "clk_rate %u greater than max, reset to %d",
+			clk_rate,
+			soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx]);
+		clk_rate = soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx];
+	}
+
+	CAM_DBG(CAM_CRE, "clk_rate = %u src_clk_index = %d",
+		clk_rate, src_clk_idx);
+	return cam_soc_util_set_src_clk_rate(soc_info, clk_rate);
+}

+ 33 - 0
drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_soc.h

@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CRE_SOC_H_
+#define _CAM_CRE_SOC_H_
+
+#include "cam_soc_util.h"
+
+#define CAM_CRE_HW_MAX_NUM_PID 2
+
+/**
+ * struct cam_cre_soc_private
+ *
+ * @num_pid: CRE number of pids
+ * @pid:     CRE pid value list
+ */
+struct cam_cre_soc_private {
+	uint32_t num_pid;
+	uint32_t pid[CAM_CRE_HW_MAX_NUM_PID];
+};
+
+/* Parse DT properties and request CRE platform resources;
+ * cre_irq_handler is registered with irq_data as its cookie.
+ */
+int cam_cre_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t cre_irq_handler, void *irq_data);
+
+/* Enable CRE clocks/regulators at the CAM_SVS_VOTE clock level. */
+int cam_cre_enable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+/* Disable CRE clocks and regulators. */
+int cam_cre_disable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+/* Set the CRE source clock rate, clamped to the TURBO level. */
+int cam_cre_update_clk_rate(struct cam_hw_soc_info *soc_info,
+	uint32_t clk_rate);
+#endif /* _CAM_CRE_SOC_H_*/

+ 49 - 0
drivers/cam_cre/cam_cre_hw_mgr/cre_hw/include/cam_cre_hw_intf.h

@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_CRE_HW_INTF_H
+#define CAM_CRE_HW_INTF_H
+
+#include "cam_cpas_api.h"
+
+#define CAM_CRE_DEV_PER_TYPE_MAX     1
+
+#define CAM_CRE_CMD_BUF_MAX_SIZE     128
+#define CAM_CRE_MSG_BUF_MAX_SIZE     CAM_CRE_CMD_BUF_MAX_SIZE
+
+/* Default CRE clock vote in Hz */
+#define CRE_VOTE                     640000000
+
+#define CAM_CRE_HW_DUMP_TAG_MAX_LEN 32
+#define CAM_CRE_HW_DUMP_NUM_WORDS   5
+
+/**
+ * struct cam_cre_set_irq_cb - IRQ callback registration info
+ *
+ * @cre_hw_mgr_cb: hw manager callback invoked from the CRE IRQ path
+ * @data:          opaque token passed back to the callback
+ * @b_set_cb:      non-zero to register the callback, zero to clear it
+ */
+struct cam_cre_set_irq_cb {
+	int32_t (*cre_hw_mgr_cb)(void *irq_data,
+		 int32_t result_size, void *data);
+	void *data;
+	uint32_t b_set_cb;
+};
+
+/**
+ * struct cam_cre_hw_dump_args - arguments for a CRE hardware dump
+ *
+ * @request_id: request being dumped
+ * @cpu_addr:   CPU virtual address of the dump buffer
+ * @offset:     current write offset within the buffer
+ * @buf_len:    total length of the dump buffer
+ */
+struct cam_cre_hw_dump_args {
+	uint64_t  request_id;
+	uintptr_t cpu_addr;
+	size_t    offset;
+	size_t    buf_len;
+};
+
+/**
+ * struct cam_cre_hw_dump_header - header preceding each dump section
+ *
+ * @tag:       human-readable section tag
+ * @size:      payload size in bytes
+ * @word_size: size of each dumped word
+ */
+struct cam_cre_hw_dump_header {
+	uint8_t     tag[CAM_CRE_HW_DUMP_TAG_MAX_LEN];
+	uint64_t    size;
+	uint32_t    word_size;
+};
+
+/* Commands accepted by the CRE hw layer process_cmd interface */
+enum cam_cre_cmd_type {
+	CAM_CRE_CMD_CFG,
+	CAM_CRE_CMD_SET_IRQ_CB,
+	CAM_CRE_CMD_HW_DUMP,
+	CAM_CRE_CMD_RESET_HW,
+	CAM_CRE_CMD_MAX,
+};
+
+#endif

+ 18 - 0
drivers/cam_cre/cam_cre_hw_mgr/cre_hw/include/cam_cre_hw_mgr_intf.h

@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_CRE_HW_MGR_INTF_H
+#define CAM_CRE_HW_MGR_INTF_H
+
+#include <linux/of.h>
+#include <media/cam_cre.h>
+#include <media/cam_defs.h>
+
+/* Maximum number of concurrently acquired CRE contexts */
+#define CAM_CRE_CTX_MAX        16
+
+/* Initialize the CRE hw manager; on success the manager handle is
+ * returned through hw_mgr_hdl and the iommu handle through iommu_hdl.
+ */
+int cam_cre_hw_mgr_init(struct device_node *of_node,
+	uint64_t *hw_mgr_hdl, int *iommu_hdl);
+
+#endif /* CAM_CRE_HW_MGR_INTF_H */

+ 315 - 0
drivers/cam_cre/cam_cre_hw_mgr/cre_hw/top/cre_top.c

@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/iopoll.h>
+#include <linux/completion.h>
+#include <media/cam_cre.h>
+#include "cam_io_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cre_core.h"
+#include "cre_soc.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+#include "cre_hw.h"
+#include "cre_dev_intf.h"
+#include "cre_top.h"
+
+/* Single shared CRE top-block state (one CRE instance per SoC) */
+static struct cre_top cre_top_info;
+
+/*
+ * cam_cre_top_reset() - issue a CRE SW reset and wait for completion
+ * @cre_hw_info: CRE hardware register/bitfield description
+ * @ctx_id:      context id (unused by reset)
+ * @data:        unused
+ *
+ * Unmasks the top IRQs, writes the SW reset command and waits up to
+ * 60 ms for the reset-done interrupt.  If the IRQ is late, the status
+ * register is read directly under hw_lock and cleared by hand.
+ *
+ * NOTE(review): wait_for_completion_timeout() returns an unsigned
+ * long (0 on timeout, remaining jiffies otherwise), so the "rc < 0"
+ * arm below can never be taken — harmless but dead code.
+ *
+ * Return: 0 on success, -ETIMEDOUT if the reset never signalled.
+ */
+static int cam_cre_top_reset(struct cam_cre_hw *cre_hw_info,
+	int32_t ctx_id, void *data)
+{
+	int rc = 0;
+	struct cam_cre_top_reg *top_reg;
+	struct cam_cre_top_reg_val *top_reg_val;
+	uint32_t irq_mask, irq_status;
+	unsigned long flags;
+
+	if (!cre_hw_info) {
+		CAM_ERR(CAM_CRE, "Invalid cre_hw_info");
+		return -EINVAL;
+	}
+
+	top_reg = cre_hw_info->top_reg_offset;
+	top_reg_val = cre_hw_info->top_reg_val;
+
+	/* Serialize against other top-level hw operations */
+	mutex_lock(&cre_top_info.cre_hw_mutex);
+	reinit_completion(&cre_top_info.reset_complete);
+	reinit_completion(&cre_top_info.idle_done);
+
+	/* enable interrupt mask */
+	cam_io_w_mb(top_reg_val->irq_mask,
+		cre_hw_info->top_reg_offset->base + top_reg->irq_mask);
+
+	/* CRE SW RESET */
+	cam_io_w_mb(top_reg_val->sw_reset_cmd,
+		cre_hw_info->top_reg_offset->base + top_reg->reset_cmd);
+
+	rc = wait_for_completion_timeout(
+			&cre_top_info.reset_complete,
+			msecs_to_jiffies(60));
+
+	if (!rc || rc < 0) {
+		/* hw_lock keeps the ISR from clearing status underneath us */
+		spin_lock_irqsave(&cre_top_info.hw_lock, flags);
+		if (!completion_done(&cre_top_info.reset_complete)) {
+			CAM_DBG(CAM_CRE,
+				"IRQ delayed, checking the status registers");
+			irq_mask = cam_io_r_mb(cre_hw_info->top_reg_offset->base +
+				top_reg->irq_mask);
+			irq_status = cam_io_r_mb(cre_hw_info->top_reg_offset->base +
+				top_reg->irq_status);
+			if (irq_status & top_reg_val->rst_done) {
+				CAM_DBG(CAM_CRE, "cre reset done");
+				/* ack the status the ISR never handled */
+				cam_io_w_mb(irq_status,
+					top_reg->base + top_reg->irq_clear);
+				cam_io_w_mb(top_reg_val->irq_cmd_clear,
+					top_reg->base + top_reg->irq_cmd);
+			} else {
+				CAM_ERR(CAM_CRE,
+					"irq mask 0x%x irq status 0x%x",
+					irq_mask, irq_status);
+				rc = -ETIMEDOUT;
+			}
+		} else {
+			/* IRQ landed between timeout and lock; success */
+			rc = 0;
+		}
+		spin_unlock_irqrestore(&cre_top_info.hw_lock, flags);
+	} else {
+		rc = 0;
+	}
+
+	/* enable interrupt mask */
+	cam_io_w_mb(top_reg_val->irq_mask,
+		cre_hw_info->top_reg_offset->base + top_reg->irq_mask);
+
+	mutex_unlock(&cre_top_info.cre_hw_mutex);
+	return rc;
+}
+
+/**
+ * cam_cre_top_release() - drop the acquire info held for a context
+ * @cre_hw_info: CRE hardware info (unused)
+ * @ctx_id:      context id to release
+ * @data:        unused
+ *
+ * Return: 0 on success, -EINVAL for an out-of-range ctx_id.
+ */
+static int cam_cre_top_release(struct cam_cre_hw *cre_hw_info,
+	int32_t ctx_id, void *data)
+{
+	/* top_ctx has CAM_CRE_CTX_MAX entries: bound ctx_id on both sides */
+	if (ctx_id < 0 || ctx_id >= CAM_CRE_CTX_MAX) {
+		CAM_ERR(CAM_CRE, "Invalid data: %d", ctx_id);
+		return -EINVAL;
+	}
+
+	cre_top_info.top_ctx[ctx_id].cre_acquire = NULL;
+
+	return 0;
+}
+
+/**
+ * cam_cre_top_acquire() - bind acquire info to a context slot
+ * @cre_hw_info: CRE hardware info (unused)
+ * @ctx_id:      context id to acquire
+ * @data:        struct cam_cre_dev_acquire with the acquire request;
+ *               on success its cre_top pointer is filled in
+ *
+ * Return: 0 on success, -EINVAL on bad ctx_id or NULL data.
+ */
+static int cam_cre_top_acquire(struct cam_cre_hw *cre_hw_info,
+	int32_t ctx_id, void *data)
+{
+	struct cam_cre_dev_acquire *cre_dev_acquire = data;
+
+	/*
+	 * Bound ctx_id on both sides (top_ctx has CAM_CRE_CTX_MAX
+	 * entries) and print the pointer with %pK, not %x.
+	 */
+	if (ctx_id < 0 || ctx_id >= CAM_CRE_CTX_MAX || !data) {
+		CAM_ERR(CAM_CRE, "Invalid data: %d %pK", ctx_id, data);
+		return -EINVAL;
+	}
+
+	cre_top_info.top_ctx[ctx_id].cre_acquire = cre_dev_acquire->cre_acquire;
+	cre_dev_acquire->cre_top = &cre_top_info;
+
+	return 0;
+}
+
+/*
+ * cam_cre_top_init() - map the top register base and reset the core
+ * @cre_hw_info: CRE hardware register/bitfield description
+ * @ctx_id:      context id (unused by init)
+ * @data:        struct cam_cre_dev_init carrying the mapped io base
+ *
+ * Records the top register base from the init payload, initializes
+ * the top synchronization objects, then performs the same SW-reset
+ * and wait sequence as cam_cre_top_reset().
+ *
+ * NOTE(review): dev_init (data) is dereferenced without a NULL
+ * check — confirm all CRE_HW_INIT callers pass a valid payload.
+ * NOTE(review): mutex_init()/init_completion() run on every init;
+ * re-initializing a held mutex would be a bug — presumably init is
+ * called exactly once per probe, verify against the hw manager.
+ *
+ * Return: 0 on success, -ETIMEDOUT if the reset never signalled.
+ */
+static int cam_cre_top_init(struct cam_cre_hw *cre_hw_info,
+	int32_t ctx_id, void *data)
+{
+	int rc = 0;
+	struct cam_cre_top_reg *top_reg;
+	struct cam_cre_top_reg_val *top_reg_val;
+	struct cam_cre_dev_init *dev_init = data;
+	uint32_t irq_mask, irq_status;
+	unsigned long flags;
+
+	if (!cre_hw_info) {
+		CAM_ERR(CAM_CRE, "Invalid cre_hw_info");
+		return -EINVAL;
+	}
+
+	top_reg = cre_hw_info->top_reg_offset;
+	top_reg_val = cre_hw_info->top_reg_val;
+
+	/* latch the mapped top io base for all later register access */
+	top_reg->base = dev_init->core_info->cre_hw_info->cre_top_base;
+
+	mutex_init(&cre_top_info.cre_hw_mutex);
+	/* CRE SW RESET */
+	init_completion(&cre_top_info.reset_complete);
+	init_completion(&cre_top_info.idle_done);
+
+	/* enable interrupt mask */
+	cam_io_w_mb(top_reg_val->irq_mask,
+		cre_hw_info->top_reg_offset->base + top_reg->irq_mask);
+	cam_io_w_mb(top_reg_val->sw_reset_cmd,
+		cre_hw_info->top_reg_offset->base + top_reg->reset_cmd);
+
+	rc = wait_for_completion_timeout(
+			&cre_top_info.reset_complete,
+			msecs_to_jiffies(60));
+
+	/* rc is 0 on timeout; "rc < 0" is unreachable (unsigned return) */
+	if (!rc || rc < 0) {
+		spin_lock_irqsave(&cre_top_info.hw_lock, flags);
+		if (!completion_done(&cre_top_info.reset_complete)) {
+			CAM_DBG(CAM_CRE,
+				"IRQ delayed, checking the status registers");
+			irq_mask = cam_io_r_mb(cre_hw_info->top_reg_offset->base +
+				top_reg->irq_mask);
+			irq_status = cam_io_r_mb(cre_hw_info->top_reg_offset->base +
+				top_reg->irq_status);
+			if (irq_status & top_reg_val->rst_done) {
+				CAM_DBG(CAM_CRE, "cre reset done");
+				/* ack the status the ISR never handled */
+				cam_io_w_mb(irq_status,
+					top_reg->base + top_reg->irq_clear);
+				cam_io_w_mb(top_reg_val->irq_cmd_clear,
+					top_reg->base + top_reg->irq_cmd);
+			} else {
+				CAM_ERR(CAM_CRE,
+					"irq mask 0x%x irq status 0x%x",
+					irq_mask, irq_status);
+				rc = -ETIMEDOUT;
+			}
+		} else {
+			CAM_DBG(CAM_CRE, "reset done");
+			rc = 0;
+		}
+		spin_unlock_irqrestore(&cre_top_info.hw_lock, flags);
+	} else {
+		rc = 0;
+	}
+	/* enable interrupt mask */
+	cam_io_w_mb(top_reg_val->irq_mask,
+		cre_hw_info->top_reg_offset->base + top_reg->irq_mask);
+	return rc;
+}
+
+/**
+ * cam_cre_top_probe() - one-time top bookkeeping setup at probe
+ * @cre_hw_info: CRE hardware info to remember for later commands
+ * @ctx_id:      context id (unused at probe)
+ * @data:        unused
+ *
+ * Return: 0 on success, -EINVAL on NULL hw info.
+ */
+static int cam_cre_top_probe(struct cam_cre_hw *cre_hw_info,
+	int32_t ctx_id, void *data)
+{
+	if (!cre_hw_info) {
+		CAM_ERR(CAM_CRE, "Invalid cre_hw_info");
+		return -EINVAL;
+	}
+
+	cre_top_info.cre_hw_info = cre_hw_info;
+	spin_lock_init(&cre_top_info.hw_lock);
+
+	return 0;
+}
+
+/**
+ * cam_cre_top_isr() - handle a CRE top-level interrupt
+ * @cre_hw_info: CRE hardware register/bitfield description
+ * @ctx_id:      context id (unused in the ISR)
+ * @data:        struct cam_cre_irq_data to fill with the raw status
+ *
+ * Reads and clears the top IRQ status under hw_lock, completes the
+ * reset/idle waiters when their bits are set, and reports the raw
+ * status back through @data.
+ *
+ * Return: 0 on success, -EINVAL on NULL arguments.
+ */
+static int cam_cre_top_isr(struct cam_cre_hw *cre_hw_info,
+	int32_t ctx_id, void *data)
+{
+	uint32_t irq_status;
+	struct cam_cre_top_reg *top_reg;
+	struct cam_cre_top_reg_val *top_reg_val;
+	struct cam_cre_irq_data *irq_data = data;
+
+	/* irq_data is written below; reject NULL instead of oopsing */
+	if (!cre_hw_info || !irq_data) {
+		CAM_ERR(CAM_CRE, "Invalid args %pK %pK",
+			cre_hw_info, irq_data);
+		return -EINVAL;
+	}
+
+	top_reg = cre_hw_info->top_reg_offset;
+	top_reg_val = cre_hw_info->top_reg_val;
+
+	spin_lock(&cre_top_info.hw_lock);
+	/* Read and Clear Top Interrupt status */
+	irq_status = cam_io_r_mb(top_reg->base + top_reg->irq_status);
+	cam_io_w_mb(irq_status,
+		top_reg->base + top_reg->irq_clear);
+
+	cam_io_w_mb(top_reg_val->irq_cmd_clear,
+		top_reg->base + top_reg->irq_cmd);
+
+	if (irq_status & top_reg_val->rst_done) {
+		CAM_DBG(CAM_CRE, "cre reset done");
+		complete(&cre_top_info.reset_complete);
+	}
+
+	if (irq_status & top_reg_val->idle) {
+		CAM_DBG(CAM_CRE, "cre idle IRQ, can configure new settings");
+		complete(&cre_top_info.idle_done);
+	}
+
+	if (irq_status & top_reg_val->we_done)
+		CAM_DBG(CAM_CRE, "Received Write Engine IRQ");
+
+	if (irq_status & top_reg_val->fe_done)
+		CAM_DBG(CAM_CRE, "Received Fetch Engine IRQ");
+
+	irq_data->top_irq_status = irq_status;
+	spin_unlock(&cre_top_info.hw_lock);
+
+	return 0;
+}
+
+/**
+ * cam_cre_top_process() - dispatch a hw-layer command to the top block
+ * @cre_hw_info: CRE hardware info
+ * @ctx_id:      context id the command applies to
+ * @cmd_id:      one of the CRE_HW_* command codes
+ * @data:        command-specific payload
+ *
+ * Commands with no top-level action are accepted as no-ops.
+ *
+ * Return: 0 on success (or no-op), negative errno on failure.
+ */
+int cam_cre_top_process(struct cam_cre_hw *cre_hw_info,
+	int32_t ctx_id, uint32_t cmd_id, void *data)
+{
+	int rc = 0;
+
+	switch (cmd_id) {
+	case CRE_HW_PROBE:
+		CAM_DBG(CAM_CRE, "CRE_HW_PROBE: E");
+		rc = cam_cre_top_probe(cre_hw_info, ctx_id, data);
+		CAM_DBG(CAM_CRE, "CRE_HW_PROBE: X");
+		break;
+	case CRE_HW_INIT:
+		CAM_DBG(CAM_CRE, "CRE_HW_INIT: E");
+		rc = cam_cre_top_init(cre_hw_info, ctx_id, data);
+		CAM_DBG(CAM_CRE, "CRE_HW_INIT: X");
+		break;
+	case CRE_HW_ACQUIRE:
+		CAM_DBG(CAM_CRE, "CRE_HW_ACQUIRE: E");
+		rc = cam_cre_top_acquire(cre_hw_info, ctx_id, data);
+		CAM_DBG(CAM_CRE, "CRE_HW_ACQUIRE: X");
+		break;
+	case CRE_HW_RELEASE:
+		rc = cam_cre_top_release(cre_hw_info, ctx_id, data);
+		break;
+	case CRE_HW_ISR:
+		rc = cam_cre_top_isr(cre_hw_info, ctx_id, data);
+		break;
+	case CRE_HW_RESET:
+		rc = cam_cre_top_reset(cre_hw_info, ctx_id, NULL);
+		break;
+	case CRE_HW_DEINIT:
+	case CRE_HW_PREPARE:
+	case CRE_HW_REG_SET_UPDATE:
+	case CRE_HW_START:
+	case CRE_HW_STOP:
+	case CRE_HW_FLUSH:
+	default:
+		/* no top-level handling needed for these commands */
+		break;
+	}
+
+	return rc;
+}

+ 46 - 0
drivers/cam_cre/cam_cre_hw_mgr/cre_hw/top/cre_top.h

@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CRE_TOP_H
+#define CRE_TOP_H
+
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <media/cam_cre.h>
+#include "cre_hw.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_hw_intf.h"
+#include "cam_soc_util.h"
+#include "cam_context.h"
+#include "cam_cre_context.h"
+#include "cam_cre_hw_mgr.h"
+
+/**
+ * struct cre_top_ctx
+ *
+ * @cre_acquire: CRE acquire info
+ */
+struct cre_top_ctx {
+	struct cam_cre_acquire_dev_info *cre_acquire;
+};
+
+/**
+ * struct cre_top
+ *
+ * @cre_hw_info:    CRE hardware info
+ * @top_ctx:        Per-context CRE top state, indexed by ctx id
+ * @reset_complete: Signalled by the ISR when a SW reset completes
+ * @idle_done:      Signalled by the ISR when the core goes idle
+ * @cre_hw_mutex:   Serializes top-level hardware operations
+ * @hw_lock:        Protects IRQ status access between ISR and waiters
+ */
+struct cre_top {
+	struct cam_cre_hw *cre_hw_info;
+	struct cre_top_ctx top_ctx[CAM_CRE_CTX_MAX];
+	struct completion reset_complete;
+	struct completion idle_done;
+	struct mutex      cre_hw_mutex;
+	spinlock_t        hw_lock;
+};
+#endif /* CRE_TOP_H */

+ 1 - 0
drivers/cam_utils/cam_debug_util.h

@@ -40,6 +40,7 @@
 #define CAM_OPE        (1 << 28)
 #define CAM_IO_ACCESS  (1 << 29)
 #define CAM_SFE        (1 << 30)
+#define CAM_CRE        (1 << 31)
 
 /* Log level types */
 #define CAM_TYPE_TRACE      (1 << 0)

+ 13 - 0
drivers/camera_main.c

@@ -39,6 +39,9 @@
 #include "jpeg_enc_core.h"
 #include "cam_jpeg_dev.h"
 
+#include "cre_core.h"
+#include "cam_cre_dev.h"
+
 #include "cam_fd_hw_intf.h"
 #include "cam_fd_dev.h"
 
@@ -131,6 +134,11 @@ static const struct camera_submodule_component camera_ope[] = {
 #endif
 };
 
+static const struct camera_submodule_component camera_cre[] = {
+#ifdef CONFIG_SPECTRA_CRE
+	{&cam_cre_init_module, &cam_cre_exit_module},
+#endif
+};
 static const struct camera_submodule_component camera_jpeg[] = {
 #ifdef CONFIG_SPECTRA_JPEG
 	{&cam_jpeg_enc_init_module, &cam_jpeg_enc_exit_module},
@@ -207,6 +215,11 @@ static const struct camera_submodule submodule_table[] = {
 		.num_component = ARRAY_SIZE(camera_lrme),
 		.component = camera_lrme,
 	},
+	{
+		.name = "Camera CRE",
+		.num_component = ARRAY_SIZE(camera_cre),
+		.component = camera_cre,
+	},
 	{
 		.name = "Camera CUSTOM",
 		.num_component = ARRAY_SIZE(camera_custom),

+ 6 - 0
drivers/camera_main.h

@@ -51,6 +51,9 @@ extern struct platform_driver cam_icp_driver;
 extern struct platform_driver cam_ope_driver;
 extern struct platform_driver cam_ope_subdev_driver;
 #endif
+#ifdef CONFIG_SPECTRA_CRE
+extern struct platform_driver cam_cre_driver;
+#endif
 #ifdef CONFIG_SPECTRA_JPEG
 extern struct platform_driver cam_jpeg_enc_driver;
 extern struct platform_driver cam_jpeg_dma_driver;
@@ -123,6 +126,9 @@ static struct platform_driver *const cam_component_drivers[] = {
 	&cam_jpeg_dma_driver,
 	&jpeg_driver,
 #endif
+#ifdef CONFIG_SPECTRA_CRE
+	&cam_cre_driver,
+#endif
 #ifdef CONFIG_SPECTRA_FD
 	&cam_fd_hw_driver,
 	&cam_fd_driver,

+ 46 - 70
include/uapi/camera/media/cam_cre.h

@@ -13,7 +13,7 @@
 
 /* CRE HW TYPE */
 #define CAM_CRE_HW_TYPE_CRE       0x1
-#define CAM_CRE_HW_TYPE_MAX       0x2
+#define CAM_CRE_HW_TYPE_MAX       0x1
 
 /* packet opcode types */
 #define CAM_CRE_OPCODE_CONFIG     0x1
@@ -26,50 +26,17 @@
 #define CAM_CRE_OUTPUT_IMAGE      0x1
 #define CAM_CRE_OUTPUT_IMAGES_MAX (CAM_CRE_OUTPUT_IMAGE + 1)
 
+#define CAM_CRE_MAX_PLANES        0x2
+#define CRE_MAX_BATCH_SIZE        0x10
+
 /* definitions needed for cre aquire device */
 #define CAM_CRE_DEV_TYPE_NRT      0x1
 #define CAM_CRE_DEV_TYPE_RT       0x2
 #define CAM_CRE_DEV_TYPE_MAX      0x3
 
+#define CAM_CRE_CMD_META_GENERIC_BLOB     0x1
 /* Clock blob */
-#define CAM_CRE_CMD_GENERIC_BLOB_CLK      0x1
-
-#define CAM_CRE_MAX_PLANES                0x2
-#define CRE_MAX_BATCH_SIZE                0x10
-#define CAM_CRE_MAX_IO_BUFS               0x3
-
-#define CAM_CRE_ACQUIRE_FLAG_SECURE  0x1
-
-/**
- * struct cam_cre_io_buf_info - CRE IO buffers meta
- *
- * @direction:     Direction of a buffer of a port(Input/Output)
- * @res_id:        Resource ID
- * @num_planes:    Number of planes
- * @width:         Height of a plane buffer
- * @height:        Height of a plane buffer
- * @stride:        Plane stride
- * @packer_format: Format
- * @alignment:     Alignment
- * @reserved:      Reserved field 0
- * @reserved_1:    Reserved field 1
- * @reserved_2:    Reserved field 2
- *
- */
-struct cam_cre_io_buf_info {
-	__u32 direction;
-	__u32 res_id;
-	__u32 num_planes;
-	__u32 width;
-	__u32 height;
-	__u32 stride;
-	__u32 fence;
-	__u32 packer_format;
-	__u32 alignment;
-	__u32 reserved;
-	__u32 reserved_1;
-	__u32 reserved_2;
-};
+#define CAM_CRE_CMD_GENERIC_BLOB_CLK_V2   0x1
 
 /**
  * struct cam_cre_hw_ver - Device information for particular hw type
@@ -77,13 +44,9 @@ struct cam_cre_io_buf_info {
  * This is used to get device version info of CRE
  * from hardware and use this info in CAM_QUERY_CAP IOCTL
  *
- * @hw_type:   Hardware type
- * @reserved:  Reserved field
  * @hw_ver:    Major, minor and incr values of a device version
  */
 struct cam_cre_hw_ver {
-	__u32  hw_type;
-	__u32  reserved;
 	struct cam_hw_version hw_ver;
 };
 
@@ -103,29 +66,29 @@ struct cam_cre_query_cap_cmd {
 };
 
 /**
- * struct cam_cre_clk_bw_request
+ * struct cam_cre_io_buf_info - CRE IO buffers meta
  *
- * @budget_ns: Time required to process frame
- * @frame_cycles: Frame cycles needed to process the frame
- * @rt_flag: Flag to indicate real time stream
- * @uncompressed_bw: Bandwidth required to process frame
- * @compressed_bw: Compressed bandwidth to process frame
+ * @direction:     Direction of a buffer of a port(Input/Output)
+ * @res_id:        Resource ID
+ * @num_planes:    Number of planes
+ * @width:         Width of a plane buffer
+ * @height:        Height of a plane buffer
+ * @stride:        Plane stride
+ * @fence:         Fence handle for the buffer
+ * @format:        unpacker format for FE, packer format for WE
+ * @alignment:     Alignment
  * @reserved:      Reserved field 0
- * @reserved_1:    Reserved field 1
- * @reserved_2:    Reserved field 2
- * @num_path:      Number of AXI Paths
  */
-struct cam_cre_clk_bw_request {
-	__u64  budget_ns;
-	__u32  frame_cycles;
-	__u32  rt_flag;
-	__u64  uncompressed_bw;
-	__u64  compressed_bw;
-	__u32  num_paths;
-	__u32  reserved;
-	__u32  reserved_1;
-	__u32  reserved_2;
-	struct cam_axi_per_path_bw_vote axi_path[1];
+struct cam_cre_io_buf_info {
+	__u32 direction;
+	__u32 res_id;
+	__u32 num_planes;
+	__u32 width;
+	__u32 height;
+	__u32 stride;
+	__u32 fence;
+	__u32 format;
+	__u32 alignment;
+	__u32 reserved;
 };
 
 /**
@@ -133,28 +96,41 @@ struct cam_cre_clk_bw_request {
  *
  * @dev_type:      NRT/RT Acquire
  * @dev_name:      Device name (CRE)
- * @acquire_flag:  Tells if CRE will process the secure buff or not.
+ * @secure_mode:   Tells if CRE will process the secure buff or not.
  * @batch_size:    Batch size
  * @num_in_res:    Number of In resources
  * @num_out_res:   Number of Out resources
  * @reserved:      Reserved field 0
- * @reserved_1:    Reserved field 1
- * @reserved_2:    Reserved field 2
  * @in_res:        In resource info
 * @out_res:       Out resource info
  */
 struct cam_cre_acquire_dev_info {
-	__u32                      dev_type;
 	char                       dev_name[CAM_CRE_DEV_NAME_SIZE];
-	__u32                      acquire_flag;
+	__u32                      dev_type;
+	__u32                      secure_mode;
 	__u32                      batch_size;
 	__u32                      num_in_res;
 	__u32                      num_out_res;
 	__u32                      reserved;
-	__u32                      reserved_1;
-	__u32                      reserved_2;
 	struct cam_cre_io_buf_info in_res[CAM_CRE_INPUT_IMAGES_MAX];
 	struct cam_cre_io_buf_info out_res[CAM_CRE_OUTPUT_IMAGES_MAX];
 }__attribute__((__packed__));
 
+/**
+ * struct cre_clk_bw_request_v2
+ *
+ * @budget_ns: Time required to process frame
+ * @frame_cycles: Frame cycles needed to process the frame
+ * @rt_flag: Flag to indicate real time stream
+ * @reserved:      Reserved field 0
+ * @num_path:      Number of AXI Paths
+ */
+struct cre_clk_bw_request_v2 {
+	__u64  budget_ns;
+	__u32  frame_cycles;
+	__u32  rt_flag;
+	__u32  reserved;
+	__u32  num_paths;
+	struct cam_axi_per_path_bw_vote axi_path[1];
+};
 #endif /* __UAPI_CAM_CRE_H__ */