Browse code

msm: camera: ope: Add support to OPE driver

OPE is camera offline engine, support is added
to enable camera OPE hardware.

CRs-Fixed: 2594541
Change-Id: I65c69f5763d05abf265b645b09c95c55fb290182
Signed-off-by: Suresh Vankadara <[email protected]>
Signed-off-by: Ravikishore Pampana <[email protected]>
Signed-off-by: Trishansh Bhardwaj <[email protected]>
Trishansh Bhardwaj, 5 years ago
Parent
Current commit
12d9311463

+ 11 - 0
drivers/Makefile

@@ -172,6 +172,17 @@ camera-$(CONFIG_SPECTRA_CUSTOM) += \
 	cam_cust/cam_custom_dev.o \
 	cam_cust/cam_custom_context.o
 
# OPE (offline engine) objects; every continuation line keeps a space
# before the backslash for consistency (bus_rd line was missing it).
camera-$(CONFIG_SPECTRA_OPE) += \
	cam_ope/cam_ope_subdev.o \
	cam_ope/cam_ope_context.o \
	cam_ope/ope_hw_mgr/cam_ope_hw_mgr.o \
	cam_ope/ope_hw_mgr/ope_hw/ope_dev.o \
	cam_ope/ope_hw_mgr/ope_hw/ope_soc.o \
	cam_ope/ope_hw_mgr/ope_hw/ope_core.o \
	cam_ope/ope_hw_mgr/ope_hw/top/ope_top.o \
	cam_ope/ope_hw_mgr/ope_hw/bus_rd/ope_bus_rd.o \
	cam_ope/ope_hw_mgr/ope_hw/bus_wr/ope_bus_wr.o
+
 camera-y += camera_main.o
 
 obj-$(CONFIG_SPECTRA_CAMERA) += camera.o

+ 273 - 0
drivers/cam_ope/cam_ope_context.c

@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <media/cam_sync.h>
+#include <media/cam_defs.h>
+#include <media/cam_ope.h>
+#include "cam_sync_api.h"
+#include "cam_node.h"
+#include "cam_context.h"
+#include "cam_context_utils.h"
+#include "cam_ope_context.h"
+#include "cam_req_mgr_util.h"
+#include "cam_mem_mgr.h"
+#include "cam_trace.h"
+#include "cam_debug_util.h"
+#include "cam_packet_util.h"
+
+static const char ope_dev_name[] = "cam-ope";
+
+static int cam_ope_context_dump_active_request(void *data, unsigned long iova,
+	uint32_t buf_info)
+{
+	struct cam_context *ctx = (struct cam_context *)data;
+	struct cam_ctx_request          *req = NULL;
+	struct cam_ctx_request          *req_temp = NULL;
+	struct cam_hw_mgr_dump_pf_data  *pf_dbg_entry = NULL;
+	int rc = 0;
+	bool b_mem_found = false;
+
+	if (!ctx) {
+		CAM_ERR(CAM_OPE, "Invalid ctx");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state < CAM_CTX_ACQUIRED || ctx->state > CAM_CTX_ACTIVATED) {
+		CAM_ERR(CAM_ICP, "Invalid state icp ctx %d state %d",
+			ctx->ctx_id, ctx->state);
+		goto end;
+	}
+
+	CAM_INFO(CAM_OPE, "iommu fault for ope ctx %d state %d",
+		ctx->ctx_id, ctx->state);
+
+	list_for_each_entry_safe(req, req_temp,
+			&ctx->active_req_list, list) {
+		pf_dbg_entry = &(req->pf_data);
+		CAM_INFO(CAM_OPE, "req_id : %lld", req->request_id);
+
+		rc = cam_context_dump_pf_info_to_hw(ctx, pf_dbg_entry->packet,
+			iova, buf_info, &b_mem_found);
+		if (rc)
+			CAM_ERR(CAM_OPE, "Failed to dump pf info");
+
+		if (b_mem_found)
+			CAM_ERR(CAM_OPE, "Found page fault in req %lld %d",
+				req->request_id, rc);
+	}
+
+end:
+	mutex_unlock(&ctx->ctx_mutex);
+	return rc;
+}
+
+static int __cam_ope_acquire_dev_in_available(struct cam_context *ctx,
+	struct cam_acquire_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_acquire_dev_to_hw(ctx, cmd);
+	if (!rc) {
+		ctx->state = CAM_CTX_ACQUIRED;
+		trace_cam_context_state("OPE", ctx);
+	}
+
+	return rc;
+}
+
+static int __cam_ope_release_dev_in_acquired(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_release_dev_to_hw(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_OPE, "Unable to release device");
+
+	ctx->state = CAM_CTX_AVAILABLE;
+	trace_cam_context_state("OPE", ctx);
+	return rc;
+}
+
+static int __cam_ope_start_dev_in_acquired(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_start_dev_to_hw(ctx, cmd);
+	if (!rc) {
+		ctx->state = CAM_CTX_READY;
+		trace_cam_context_state("OPE", ctx);
+	}
+
+	return rc;
+}
+
+static int __cam_ope_flush_dev_in_ready(struct cam_context *ctx,
+	struct cam_flush_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_flush_dev_to_hw(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_OPE, "Failed to flush device");
+
+	return rc;
+}
+
+static int __cam_ope_config_dev_in_ready(struct cam_context *ctx,
+	struct cam_config_dev_cmd *cmd)
+{
+	int rc;
+	size_t len;
+	uintptr_t packet_addr;
+
+	rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle,
+		&packet_addr, &len);
+	if (rc) {
+		CAM_ERR(CAM_OPE, "[%s][%d] Can not get packet address",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -EINVAL;
+		return rc;
+	}
+
+	rc = cam_context_prepare_dev_to_hw(ctx, cmd);
+
+	if (rc)
+		CAM_ERR(CAM_OPE, "Failed to prepare device");
+
+	return rc;
+}
+
+static int __cam_ope_stop_dev_in_ready(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_stop_dev_to_hw(ctx);
+	if (rc)
+		CAM_ERR(CAM_OPE, "Failed to stop device");
+
+	ctx->state = CAM_CTX_ACQUIRED;
+	trace_cam_context_state("OPE", ctx);
+	return rc;
+}
+
+static int __cam_ope_release_dev_in_ready(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = __cam_ope_stop_dev_in_ready(ctx, NULL);
+	if (rc)
+		CAM_ERR(CAM_OPE, "Failed to stop device");
+
+	rc = __cam_ope_release_dev_in_acquired(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_OPE, "Failed to release device");
+
+	return rc;
+}
+
+static int __cam_ope_handle_buf_done_in_ready(void *ctx,
+	uint32_t evt_id, void *done)
+{
+	return cam_context_buf_done_from_hw(ctx, done, evt_id);
+}
+
/*
 * Per-state dispatch table for OPE contexts, indexed by cam_context state.
 * Only the ioctls listed for a state are legal in that state; everything
 * else is rejected by the generic context layer.  Page-fault dumping is
 * wired into every state that can own buffers (Acquired/Ready/Activated).
 */
static struct cam_ctx_ops
	cam_ope_ctx_state_machine[CAM_CTX_STATE_MAX] = {
	/* Uninit: nothing is permitted */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* Available: only acquire is permitted */
	{
		.ioctl_ops = {
			.acquire_dev = __cam_ope_acquire_dev_in_available,
		},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* Acquired: config/flush are allowed before start (offline engine) */
	{
		.ioctl_ops = {
			.release_dev = __cam_ope_release_dev_in_acquired,
			.start_dev = __cam_ope_start_dev_in_acquired,
			.config_dev = __cam_ope_config_dev_in_ready,
			.flush_dev = __cam_ope_flush_dev_in_ready,
		},
		.crm_ops = {},
		.irq_ops = __cam_ope_handle_buf_done_in_ready,
		.pagefault_ops = cam_ope_context_dump_active_request,
	},
	/* Ready: streaming; stop/release/config/flush permitted */
	{
		.ioctl_ops = {
			.stop_dev = __cam_ope_stop_dev_in_ready,
			.release_dev = __cam_ope_release_dev_in_ready,
			.config_dev = __cam_ope_config_dev_in_ready,
			.flush_dev = __cam_ope_flush_dev_in_ready,
		},
		.crm_ops = {},
		.irq_ops = __cam_ope_handle_buf_done_in_ready,
		.pagefault_ops = cam_ope_context_dump_active_request,
	},
	/* Activated: no ioctls wired here; only fault dumping */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
		.pagefault_ops = cam_ope_context_dump_active_request,
	},
};
+
+int cam_ope_context_init(struct cam_ope_context *ctx,
+	struct cam_hw_mgr_intf *hw_intf, uint32_t ctx_id)
+{
+	int rc;
+
+	if ((!ctx) || (!ctx->base) || (!hw_intf)) {
+		CAM_ERR(CAM_OPE, "Invalid params: %pK %pK", ctx, hw_intf);
+		rc = -EINVAL;
+		goto err;
+	}
+
+	rc = cam_context_init(ctx->base, ope_dev_name, CAM_OPE, ctx_id,
+		NULL, hw_intf, ctx->req_base, CAM_CTX_REQ_MAX);
+	if (rc) {
+		CAM_ERR(CAM_OPE, "Camera Context Base init failed");
+		goto err;
+	}
+
+	ctx->base->state_machine = cam_ope_ctx_state_machine;
+	ctx->base->ctx_priv = ctx;
+	ctx->ctxt_to_hw_map = NULL;
+
+err:
+	return rc;
+}
+
+int cam_ope_context_deinit(struct cam_ope_context *ctx)
+{
+	if ((!ctx) || (!ctx->base)) {
+		CAM_ERR(CAM_OPE, "Invalid params: %pK", ctx);
+		return -EINVAL;
+	}
+
+	cam_context_deinit(ctx->base);
+	memset(ctx, 0, sizeof(*ctx));
+
+	return 0;
+}
+
+

+ 44 - 0
drivers/cam_ope/cam_ope_context.h

@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_OPE_CONTEXT_H_
+#define _CAM_OPE_CONTEXT_H_
+
+#include "cam_context.h"
+
+#define OPE_CTX_MAX 32
+
/**
 * struct cam_ope_context - per-context OPE bookkeeping
 * @base:           underlying generic camera context this wrapper drives
 * @state_machine:  state machine for OPE context
 *                  (NOTE(review): the visible .c only installs the table on
 *                  @base->state_machine; this field looks unused — confirm)
 * @req_base:       backing storage for the base context's request pool
 * @state:          ope context state
 * @ctxt_to_hw_map: context to FW handle mapping
 */
struct cam_ope_context {
	struct cam_context *base;
	struct cam_ctx_ops *state_machine;
	struct cam_ctx_request req_base[CAM_CTX_REQ_MAX];
	uint32_t state;
	void *ctxt_to_hw_map;
};

/**
 * cam_ope_context_init() - OPE context init
 * @ctx:     Pointer to context (@ctx->base must already be set)
 * @hw_intf: Pointer to OPE hardware interface
 * @ctx_id:  ID for this context
 *
 * Return: 0 on success, negative errno on failure.
 */
int cam_ope_context_init(struct cam_ope_context *ctx,
	struct cam_hw_mgr_intf *hw_intf, uint32_t ctx_id);

/**
 * cam_ope_context_deinit() - OPE context deinit
 * @ctx: Pointer to context
 *
 * Return: 0 on success, -EINVAL on bad input.
 */
int cam_ope_context_deinit(struct cam_ope_context *ctx);
+
+#endif /* _CAM_OPE_CONTEXT_H_ */

+ 277 - 0
drivers/cam_ope/cam_ope_subdev.c

@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <media/cam_req_mgr.h>
+#include <media/cam_defs.h>
+#include <media/cam_ope.h>
+#include "cam_req_mgr_dev.h"
+#include "cam_subdev.h"
+#include "cam_node.h"
+#include "cam_context.h"
+#include "cam_ope_context.h"
+#include "cam_ope_hw_mgr_intf.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_debug_util.h"
+#include "cam_smmu_api.h"
+
+#define OPE_DEV_NAME        "cam-ope"
+
/*
 * struct cam_ope_subdev - driver-wide OPE subdevice state (single instance)
 * @sd:       v4l2/cam subdev wrapper registered with the media framework
 * @node:     cam_node handle (NOTE(review): not assigned anywhere in the
 *            visible probe path — verify before use in remove)
 * @ctx:      base contexts backing each OPE context
 * @ctx_ope:  OPE-specific wrappers, one per base context
 * @ope_lock: serializes open/close and open_cnt
 * @open_cnt: number of active userspace opens (at most 1 enforced)
 * @reserved: padding/reserved
 */
struct cam_ope_subdev {
	struct cam_subdev sd;
	struct cam_node *node;
	struct cam_context ctx[OPE_CTX_MAX];
	struct cam_ope_context ctx_ope[OPE_CTX_MAX];
	struct mutex ope_lock;
	int32_t open_cnt;
	int32_t reserved;
};

/* Single global instance; the OPE subdev is not multi-instantiable. */
static struct cam_ope_subdev g_ope_dev;
+
+static void cam_ope_dev_iommu_fault_handler(
+	struct iommu_domain *domain, struct device *dev, unsigned long iova,
+	int flags, void *token, uint32_t buf_info)
+{
+	int i = 0;
+	struct cam_node *node = NULL;
+
+	if (!token) {
+		CAM_ERR(CAM_OPE, "invalid token in page handler cb");
+		return;
+	}
+
+	node = (struct cam_node *)token;
+
+	for (i = 0; i < node->ctx_size; i++)
+		cam_context_dump_pf_info(&(node->ctx_list[i]), iova,
+			buf_info);
+}
+
+static int cam_ope_subdev_open(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_hw_mgr_intf *hw_mgr_intf = NULL;
+	struct cam_node *node = v4l2_get_subdevdata(sd);
+	int rc = 0;
+
+	mutex_lock(&g_ope_dev.ope_lock);
+	if (g_ope_dev.open_cnt >= 1) {
+		CAM_ERR(CAM_OPE, "OPE subdev is already opened");
+		rc = -EALREADY;
+		goto end;
+	}
+
+	if (!node) {
+		CAM_ERR(CAM_OPE, "Invalid args");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	hw_mgr_intf = &node->hw_mgr_intf;
+	rc = hw_mgr_intf->hw_open(hw_mgr_intf->hw_mgr_priv, NULL);
+	if (rc < 0) {
+		CAM_ERR(CAM_OPE, "OPE HW open failed: %d", rc);
+		goto end;
+	}
+	g_ope_dev.open_cnt++;
+	CAM_DBG(CAM_OPE, "OPE HW open success: %d", rc);
+end:
+	mutex_unlock(&g_ope_dev.ope_lock);
+	return rc;
+}
+
+static int cam_ope_subdev_close(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	int rc = 0;
+	struct cam_hw_mgr_intf *hw_mgr_intf = NULL;
+	struct cam_node *node = v4l2_get_subdevdata(sd);
+
+	mutex_lock(&g_ope_dev.ope_lock);
+	if (g_ope_dev.open_cnt <= 0) {
+		CAM_DBG(CAM_OPE, "OPE subdev is already closed");
+		rc = -EINVAL;
+		goto end;
+	}
+	g_ope_dev.open_cnt--;
+	if (!node) {
+		CAM_ERR(CAM_OPE, "Invalid args");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	hw_mgr_intf = &node->hw_mgr_intf;
+	if (!hw_mgr_intf) {
+		CAM_ERR(CAM_OPE, "hw_mgr_intf is not initialized");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	rc = cam_node_shutdown(node);
+	if (rc < 0) {
+		CAM_ERR(CAM_OPE, "HW close failed");
+		goto end;
+	}
+	CAM_DBG(CAM_OPE, "OPE HW close success: %d", rc);
+
+end:
+	mutex_unlock(&g_ope_dev.ope_lock);
+	return rc;
+}
+
/* v4l2 internal ops: open/close hooks that gate OPE HW power. */
const struct v4l2_subdev_internal_ops cam_ope_subdev_internal_ops = {
	.open = cam_ope_subdev_open,
	.close = cam_ope_subdev_close,
};
+
+static int cam_ope_subdev_probe(struct platform_device *pdev)
+{
+	int rc = 0, i = 0;
+	struct cam_node *node;
+	struct cam_hw_mgr_intf *hw_mgr_intf;
+	int iommu_hdl = -1;
+
+	CAM_DBG(CAM_OPE, "OPE subdev probe start");
+	if (!pdev) {
+		CAM_ERR(CAM_OPE, "pdev is NULL");
+		return -EINVAL;
+	}
+
+	g_ope_dev.sd.pdev = pdev;
+	g_ope_dev.sd.internal_ops = &cam_ope_subdev_internal_ops;
+	rc = cam_subdev_probe(&g_ope_dev.sd, pdev, OPE_DEV_NAME,
+		CAM_OPE_DEVICE_TYPE);
+	if (rc) {
+		CAM_ERR(CAM_OPE, "OPE cam_subdev_probe failed:%d", rc);
+		return rc;
+	}
+
+	node = (struct cam_node *) g_ope_dev.sd.token;
+
+	hw_mgr_intf = kzalloc(sizeof(*hw_mgr_intf), GFP_KERNEL);
+	if (!hw_mgr_intf) {
+		rc = -EINVAL;
+		goto hw_alloc_fail;
+	}
+
+	rc = cam_ope_hw_mgr_init(pdev->dev.of_node, (uint64_t *)hw_mgr_intf,
+		&iommu_hdl);
+	if (rc) {
+		CAM_ERR(CAM_OPE, "OPE HW manager init failed: %d", rc);
+		goto hw_init_fail;
+	}
+
+	for (i = 0; i < OPE_CTX_MAX; i++) {
+		g_ope_dev.ctx_ope[i].base = &g_ope_dev.ctx[i];
+		rc = cam_ope_context_init(&g_ope_dev.ctx_ope[i],
+			hw_mgr_intf, i);
+		if (rc) {
+			CAM_ERR(CAM_OPE, "OPE context init failed");
+			goto ctx_fail;
+		}
+	}
+
+	rc = cam_node_init(node, hw_mgr_intf, g_ope_dev.ctx,
+		OPE_CTX_MAX, OPE_DEV_NAME);
+	if (rc) {
+		CAM_ERR(CAM_OPE, "OPE node init failed");
+		goto ctx_fail;
+	}
+
+	cam_smmu_set_client_page_fault_handler(iommu_hdl,
+		cam_ope_dev_iommu_fault_handler, node);
+
+	g_ope_dev.open_cnt = 0;
+	mutex_init(&g_ope_dev.ope_lock);
+
+	CAM_DBG(CAM_OPE, "OPE subdev probe complete");
+
+	return rc;
+
+ctx_fail:
+	for (--i; i >= 0; i--)
+		cam_ope_context_deinit(&g_ope_dev.ctx_ope[i]);
+hw_init_fail:
+	kfree(hw_mgr_intf);
+hw_alloc_fail:
+	cam_subdev_remove(&g_ope_dev.sd);
+	return rc;
+}
+
/*
 * Platform remove: tears down contexts, the cam_node and the subdev.
 */
static int cam_ope_subdev_remove(struct platform_device *pdev)
{
	int i;
	struct v4l2_subdev *sd;
	struct cam_subdev *subdev;

	if (!pdev) {
		CAM_ERR(CAM_OPE, "pdev is NULL");
		return -ENODEV;
	}

	sd = platform_get_drvdata(pdev);
	if (!sd) {
		CAM_ERR(CAM_OPE, "V4l2 subdev is NULL");
		return -ENODEV;
	}

	subdev = v4l2_get_subdevdata(sd);
	if (!subdev) {
		CAM_ERR(CAM_OPE, "cam subdev is NULL");
		return -ENODEV;
	}

	for (i = 0; i < OPE_CTX_MAX; i++)
		cam_ope_context_deinit(&g_ope_dev.ctx_ope[i]);
	/*
	 * NOTE(review): g_ope_dev.node is never assigned in the visible probe
	 * path, so this is likely passing NULL — verify cam_node_deinit()
	 * tolerates NULL, or assign the node during probe.
	 */
	cam_node_deinit(g_ope_dev.node);
	cam_subdev_remove(&g_ope_dev.sd);
	mutex_destroy(&g_ope_dev.ope_lock);

	return 0;
}
+
/* Device-tree match: binds this driver to "qcom,cam-ope" nodes. */
static const struct of_device_id cam_ope_dt_match[] = {
	{.compatible = "qcom,cam-ope"},
	{}
};


/* suppress_bind_attrs: manual bind/unbind via sysfs is not supported. */
static struct platform_driver cam_ope_driver = {
	.probe = cam_ope_subdev_probe,
	.remove = cam_ope_subdev_remove,
	.driver = {
		.name = "cam_ope",
		.of_match_table = cam_ope_dt_match,
		.suppress_bind_attrs = true,
	},
};
+
/* Module entry: register the OPE platform driver. */
int cam_ope_subdev_init_module(void)
{
	return platform_driver_register(&cam_ope_driver);
}

/* Module exit: unregister the OPE platform driver. */
void cam_ope_subdev_exit_module(void)
{
	platform_driver_unregister(&cam_ope_driver);
}

MODULE_DESCRIPTION("MSM OPE driver");
MODULE_LICENSE("GPL v2");
+

+ 2248 - 0
drivers/cam_ope/ope_hw_mgr/cam_ope_hw_mgr.c

@@ -0,0 +1,2248 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/timer.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <media/cam_defs.h>
+#include <media/cam_ope.h>
+#include <media/cam_cpas.h>
+
+#include "cam_sync_api.h"
+#include "cam_packet_util.h"
+#include "cam_hw.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_ope_hw_mgr_intf.h"
+#include "cam_ope_hw_mgr.h"
+#include "ope_hw.h"
+#include "cam_smmu_api.h"
+#include "cam_mem_mgr.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_mem_mgr.h"
+#include "cam_debug_util.h"
+#include "cam_soc_util.h"
+#include "cam_trace.h"
+#include "cam_cpas_api.h"
+#include "cam_common_util.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_cdm_util.h"
+#include "cam_cdm.h"
+#include "ope_dev_intf.h"
+
+static struct cam_ope_hw_mgr *ope_hw_mgr;
+
+static int cam_ope_mgr_get_rsc_idx(struct cam_ope_ctx *ctx_data,
+	struct ope_io_buf_info *in_io_buf)
+{
+	int k = 0;
+	int rsc_idx = -EINVAL;
+
+	if (in_io_buf->direction == CAM_BUF_INPUT) {
+		for (k = 0; k < OPE_IN_RES_MAX; k++) {
+			if (ctx_data->ope_acquire.in_res[k].res_id ==
+				in_io_buf->resource_type)
+				break;
+		}
+		if (k == OPE_IN_RES_MAX) {
+			CAM_ERR(CAM_OPE, "Invalid res_id %d",
+				in_io_buf->resource_type);
+			goto end;
+		}
+		rsc_idx = k;
+	} else if (in_io_buf->direction == CAM_BUF_OUTPUT) {
+		for (k = 0; k < OPE_OUT_RES_MAX; k++) {
+			if (ctx_data->ope_acquire.out_res[k].res_id ==
+				in_io_buf->resource_type)
+				break;
+		}
+		if (k == OPE_OUT_RES_MAX) {
+			CAM_ERR(CAM_OPE, "Invalid res_id %d",
+				in_io_buf->resource_type);
+			goto end;
+		}
+		rsc_idx = k;
+	}
+
+end:
+	return rsc_idx;
+}
+
+static int cam_ope_mgr_process_cmd(void *priv, void *data)
+{
+	int rc;
+	struct ope_cmd_work_data *task_data = NULL;
+	struct cam_ope_ctx *ctx_data;
+	struct cam_cdm_bl_request *cdm_cmd;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_OPE, "Invalid params%pK %pK", data, priv);
+		return -EINVAL;
+	}
+
+	ctx_data = priv;
+	task_data = (struct ope_cmd_work_data *)data;
+	cdm_cmd = task_data->data;
+
+	CAM_DBG(CAM_OPE, "cam_cdm_submit_bls: handle = %u",
+		ctx_data->ope_cdm.cdm_handle);
+	rc = cam_cdm_submit_bls(ctx_data->ope_cdm.cdm_handle, cdm_cmd);
+
+	if (!rc)
+		ctx_data->req_cnt++;
+	else
+		CAM_ERR(CAM_OPE, "submit failed for %lld", cdm_cmd->cookie);
+
+	return rc;
+}
+
+static int cam_ope_mgr_reset_hw(void)
+{
+	struct cam_ope_hw_mgr *hw_mgr = ope_hw_mgr;
+	int i, rc = 0;
+
+	for (i = 0; i < ope_hw_mgr->num_ope; i++) {
+		rc = hw_mgr->ope_dev_intf[i]->hw_ops.process_cmd(
+			hw_mgr->ope_dev_intf[i]->hw_priv, OPE_HW_RESET,
+			NULL, 0);
+		if (rc) {
+			CAM_ERR(CAM_OPE, "OPE Reset failed: %d", rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int cam_ope_req_timer_modify(struct cam_ope_ctx *ctx_data,
+	int32_t expires)
+{
+	if (ctx_data->req_watch_dog) {
+		CAM_DBG(CAM_ICP, "stop timer : ctx_id = %d", ctx_data->ctx_id);
+		crm_timer_modify(ctx_data->req_watch_dog, expires);
+	}
+	return 0;
+}
+
+static int cam_ope_req_timer_stop(struct cam_ope_ctx *ctx_data)
+{
+	if (ctx_data->req_watch_dog) {
+		CAM_DBG(CAM_ICP, "stop timer : ctx_id = %d", ctx_data->ctx_id);
+		crm_timer_exit(&ctx_data->req_watch_dog);
+		ctx_data->req_watch_dog = NULL;
+	}
+	return 0;
+}
+
+static int cam_ope_req_timer_reset(struct cam_ope_ctx *ctx_data)
+{
+	if (ctx_data && ctx_data->req_watch_dog)
+		crm_timer_reset(ctx_data->req_watch_dog);
+
+	return 0;
+}
+
+
+static int cam_ope_mgr_reapply_config(struct cam_ope_hw_mgr *hw_mgr,
+	struct cam_ope_ctx *ctx_data,
+	struct cam_ope_request *ope_req)
+{
+	int rc = 0;
+	uint64_t request_id = 0;
+	struct crm_workq_task *task;
+	struct ope_cmd_work_data *task_data;
+
+	request_id = ope_req->request_id;
+	CAM_DBG(CAM_OPE, "reapply req_id = %lld", request_id);
+
+	task = cam_req_mgr_workq_get_task(ope_hw_mgr->cmd_work);
+	if (!task) {
+		CAM_ERR(CAM_OPE, "no empty task");
+		return -ENOMEM;
+	}
+
+	task_data = (struct ope_cmd_work_data *)task->payload;
+	task_data->data = (void *)ope_req->cdm_cmd;
+	task_data->req_id = request_id;
+	task_data->type = OPE_WORKQ_TASK_CMD_TYPE;
+	task->process_cb = cam_ope_mgr_process_cmd;
+	rc = cam_req_mgr_workq_enqueue_task(task, ctx_data,
+		CRM_TASK_PRIORITY_0);
+
+	return rc;
+}
+
+static bool cam_ope_is_pending_request(struct cam_ope_ctx *ctx_data)
+{
+	return !bitmap_empty(ctx_data->bitmap, CAM_CTX_REQ_MAX);
+}
+
+static int32_t cam_ope_process_request_timer(void *priv, void *data)
+{
+	struct ope_clk_work_data *task_data = (struct ope_clk_work_data *)data;
+	struct cam_ope_ctx *ctx_data = (struct cam_ope_ctx *)task_data->data;
+
+	if (cam_ope_is_pending_request(ctx_data)) {
+		CAM_DBG(CAM_OPE, "pending requests means, issue is with HW");
+		cam_cdm_handle_error(ctx_data->ope_cdm.cdm_handle);
+		cam_ope_req_timer_reset(ctx_data);
+	} else {
+		cam_ope_req_timer_modify(ctx_data, ~0);
+	}
+	return 0;
+}
+
/*
 * Watchdog timer callback (runs in timer/softirq context): cannot do the
 * recovery work here, so bounce it onto the timer workqueue.  The hw_mgr
 * spinlock is held irq-save around the task get+enqueue so the task cannot
 * be raced away between the two calls.
 */
static void cam_ope_req_timer_cb(struct timer_list *timer_data)
{
	unsigned long flags;
	struct crm_workq_task *task;
	struct ope_clk_work_data *task_data;
	/* Recover our cam_req_mgr_timer from the embedded sys_timer */
	struct cam_req_mgr_timer *timer =
	container_of(timer_data, struct cam_req_mgr_timer, sys_timer);

	spin_lock_irqsave(&ope_hw_mgr->hw_mgr_lock, flags);
	task = cam_req_mgr_workq_get_task(ope_hw_mgr->timer_work);
	if (!task) {
		CAM_ERR(CAM_OPE, "no empty task");
		spin_unlock_irqrestore(&ope_hw_mgr->hw_mgr_lock, flags);
		return;
	}

	/* timer->parent is the cam_ope_ctx the watchdog belongs to */
	task_data = (struct ope_clk_work_data *)task->payload;
	task_data->data = timer->parent;
	task_data->type = OPE_WORKQ_TASK_MSG_TYPE;
	task->process_cb = cam_ope_process_request_timer;
	cam_req_mgr_workq_enqueue_task(task, ope_hw_mgr,
		CRM_TASK_PRIORITY_0);
	spin_unlock_irqrestore(&ope_hw_mgr->hw_mgr_lock, flags);
}
+
+static int cam_ope_start_req_timer(struct cam_ope_ctx *ctx_data)
+{
+	int rc = 0;
+
+	rc = crm_timer_init(&ctx_data->req_watch_dog,
+		200, ctx_data, &cam_ope_req_timer_cb);
+	if (rc)
+		CAM_ERR(CAM_ICP, "Failed to start timer");
+
+	return rc;
+}
+
+static int cam_get_valid_ctx_id(void)
+{
+	struct cam_ope_hw_mgr *hw_mgr = ope_hw_mgr;
+	int i;
+
+
+	for (i = 0; i < OPE_CTX_MAX; i++) {
+		if (hw_mgr->ctx[i].ctx_state == OPE_CTX_STATE_ACQUIRED)
+			break;
+	}
+
+	return i;
+}
+
+
+static void cam_ope_ctx_cdm_callback(uint32_t handle, void *userdata,
+	enum cam_cdm_cb_status status, uint64_t cookie)
+{
+	int rc = 0;
+	struct cam_ope_ctx *ctx;
+	struct cam_ope_request *ope_req;
+	struct cam_hw_done_event_data buf_data;
+	bool flag = false;
+
+	if (!userdata) {
+		CAM_ERR(CAM_OPE, "Invalid ctx from CDM callback");
+		return;
+	}
+
+	CAM_DBG(CAM_FD, "CDM hdl=%x, udata=%pK, status=%d, cookie=%llu",
+		handle, userdata, status, cookie);
+
+	ctx = userdata;
+	ope_req = ctx->req_list[cookie];
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->ctx_state != OPE_CTX_STATE_ACQUIRED) {
+		CAM_DBG(CAM_OPE, "ctx %u is in %d state",
+			ctx->ctx_id, ctx->ctx_state);
+		mutex_unlock(&ctx->ctx_mutex);
+		return;
+	}
+
+	if (status == CAM_CDM_CB_STATUS_BL_SUCCESS) {
+		CAM_DBG(CAM_OPE,
+			"hdl=%x, udata=%pK, status=%d, cookie=%d  req_id=%llu ctx_id=%d",
+			handle, userdata, status, cookie,
+			ope_req->request_id, ctx->ctx_id);
+		cam_ope_req_timer_reset(ctx);
+	} else if (status == CAM_CDM_CB_STATUS_HW_RESUBMIT) {
+		CAM_INFO(CAM_OPE, "After reset of CDM and OPE, reapply req");
+		rc = cam_ope_mgr_reapply_config(ope_hw_mgr, ctx, ope_req);
+		if (!rc)
+			goto end;
+	} else {
+		CAM_ERR(CAM_OPE,
+			"CDM hdl=%x, udata=%pK, status=%d, cookie=%d req_id = %llu",
+			 handle, userdata, status, cookie, ope_req->request_id);
+		CAM_ERR(CAM_OPE, "Rst of CDM and OPE for error reqid = %lld",
+			ope_req->request_id);
+		rc = cam_ope_mgr_reset_hw();
+		flag = true;
+	}
+
+	ctx->req_cnt--;
+
+	buf_data.request_id = ope_req->request_id;
+	ope_req->request_id = 0;
+	kzfree(ctx->req_list[cookie]->cdm_cmd);
+	ctx->req_list[cookie]->cdm_cmd = NULL;
+	kzfree(ctx->req_list[cookie]);
+	ctx->req_list[cookie] = NULL;
+	clear_bit(cookie, ctx->bitmap);
+	ctx->ctxt_event_cb(ctx->context_priv, flag, &buf_data);
+
+end:
+	mutex_unlock(&ctx->ctx_mutex);
+}
+
+static int32_t cam_ope_mgr_process_msg(void *priv, void *data)
+{
+	struct ope_msg_work_data *task_data;
+	struct cam_ope_hw_mgr *hw_mgr;
+	struct cam_ope_ctx *ctx;
+	uint32_t irq_status;
+	int32_t ctx_id;
+	int rc = 0, i;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_OPE, "Invalid data");
+		return -EINVAL;
+	}
+
+	task_data = data;
+	hw_mgr = priv;
+	irq_status = task_data->irq_status;
+	ctx_id = cam_get_valid_ctx_id();
+	if (ctx_id < 0) {
+		CAM_ERR(CAM_OPE, "No valid context to handle error");
+		return ctx_id;
+	}
+
+	ctx = &hw_mgr->ctx[ctx_id];
+
+	/* Indicate about this error to CDM and reset OPE*/
+	rc = cam_cdm_handle_error(ctx->ope_cdm.cdm_handle);
+
+	for (i = 0; i < hw_mgr->num_ope; i++) {
+		rc = hw_mgr->ope_dev_intf[i]->hw_ops.process_cmd(
+			hw_mgr->ope_dev_intf[i]->hw_priv, OPE_HW_RESET,
+			NULL, 0);
+		if (rc)
+			CAM_ERR(CAM_OPE, "OPE Dev acquire failed: %d", rc);
+	}
+
+	return rc;
+}
+
+int32_t cam_ope_hw_mgr_cb(uint32_t irq_status, void *data)
+{
+	int32_t rc = 0;
+	unsigned long flags;
+	struct cam_ope_hw_mgr *hw_mgr = data;
+	struct crm_workq_task *task;
+	struct ope_msg_work_data *task_data;
+
+	if (!data) {
+		CAM_ERR(CAM_OPE, "irq cb data is NULL");
+		return rc;
+	}
+
+	spin_lock_irqsave(&hw_mgr->hw_mgr_lock, flags);
+	task = cam_req_mgr_workq_get_task(ope_hw_mgr->msg_work);
+	if (!task) {
+		CAM_ERR(CAM_OPE, "no empty task");
+		spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags);
+		return -ENOMEM;
+	}
+
+	task_data = (struct ope_msg_work_data *)task->payload;
+	task_data->data = hw_mgr;
+	task_data->irq_status = irq_status;
+	task_data->type = OPE_WORKQ_TASK_MSG_TYPE;
+	task->process_cb = cam_ope_mgr_process_msg;
+	rc = cam_req_mgr_workq_enqueue_task(task, ope_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags);
+
+	return rc;
+}
+
+static int cam_ope_mgr_create_kmd_buf(struct cam_ope_hw_mgr *hw_mgr,
+	struct cam_packet *packet,
+	struct cam_hw_prepare_update_args *prepare_args,
+	struct cam_ope_ctx *ctx_data, uint32_t req_idx,
+	uintptr_t   ope_cmd_buf_addr)
+{
+	int i, rc = 0;
+	struct cam_ope_dev_prepare_req prepare_req;
+
+	prepare_req.ctx_data = ctx_data;
+	prepare_req.hw_mgr = hw_mgr;
+	prepare_req.packet = packet;
+	prepare_req.prepare_args = prepare_args;
+	prepare_req.req_idx = req_idx;
+	prepare_req.kmd_buf_offset = 0;
+	prepare_req.frame_process =
+		(struct ope_frame_process *)ope_cmd_buf_addr;
+
+	for (i = 0; i < ope_hw_mgr->num_ope; i++)
+		rc = hw_mgr->ope_dev_intf[i]->hw_ops.process_cmd(
+			hw_mgr->ope_dev_intf[i]->hw_priv,
+			OPE_HW_PREPARE, &prepare_req, sizeof(prepare_req));
+		if (rc) {
+			CAM_ERR(CAM_OPE, "OPE Dev prepare failed: %d", rc);
+			goto end;
+		}
+
+end:
+	return rc;
+}
+
+static int cam_ope_mgr_process_io_cfg(struct cam_ope_hw_mgr *hw_mgr,
+	struct cam_packet *packet,
+	struct cam_hw_prepare_update_args *prep_args,
+	struct cam_ope_ctx *ctx_data, uint32_t req_idx)
+{
+
+	int i, j = 0, k = 0, l, rc = 0;
+	struct ope_io_buf *io_buf;
+	int32_t sync_in_obj[CAM_MAX_IN_RES];
+	int32_t merged_sync_in_obj;
+	struct cam_ope_request *ope_request;
+
+	ope_request = ctx_data->req_list[req_idx];
+	prep_args->num_out_map_entries = 0;
+	prep_args->num_in_map_entries = 0;
+
+	ope_request = ctx_data->req_list[req_idx];
+	CAM_DBG(CAM_OPE, "E: req_idx = %u %x", req_idx, packet);
+
+	for (i = 0; i < ope_request->num_batch; i++) {
+		for (l = 0; l < ope_request->num_io_bufs[i]; l++) {
+			io_buf = &ope_request->io_buf[i][l];
+			if (io_buf->direction == CAM_BUF_INPUT) {
+				if (io_buf->fence != -1) {
+					sync_in_obj[j++] = io_buf->fence;
+					prep_args->num_in_map_entries++;
+				} else {
+					CAM_ERR(CAM_OPE, "Invalid fence %d %d",
+						io_buf->resource_type,
+						ope_request->request_id);
+				}
+			} else {
+				if (io_buf->fence != -1) {
+					prep_args->out_map_entries[k].sync_id =
+						io_buf->fence;
+					k++;
+					prep_args->num_out_map_entries++;
+				} else {
+					CAM_ERR(CAM_OPE, "Invalid fence %d %d",
+						io_buf->resource_type,
+						ope_request->request_id);
+				}
+			}
+			CAM_DBG(CAM_REQ,
+				"ctx_id: %u req_id: %llu dir[%d] %u, fence: %d",
+				ctx_data->ctx_id, packet->header.request_id, i,
+				io_buf->direction, io_buf->fence);
+			CAM_DBG(CAM_REQ, "rsc_type = %u fmt = %d",
+				io_buf->resource_type,
+				io_buf->format);
+		}
+	}
+
+	if (prep_args->num_in_map_entries > 1)
+		prep_args->num_in_map_entries =
+			cam_common_util_remove_duplicate_arr(
+			sync_in_obj, prep_args->num_in_map_entries);
+
+	if (prep_args->num_in_map_entries > 1) {
+		rc = cam_sync_merge(&sync_in_obj[0],
+			prep_args->num_in_map_entries, &merged_sync_in_obj);
+		if (rc) {
+			prep_args->num_out_map_entries = 0;
+			prep_args->num_in_map_entries = 0;
+			return rc;
+		}
+
+		ope_request->in_resource = merged_sync_in_obj;
+
+		prep_args->in_map_entries[0].sync_id = merged_sync_in_obj;
+		prep_args->num_in_map_entries = 1;
+		CAM_DBG(CAM_REQ, "ctx_id: %u req_id: %llu Merged Sync obj: %d",
+			ctx_data->ctx_id, packet->header.request_id,
+			merged_sync_in_obj);
+	} else if (prep_args->num_in_map_entries == 1) {
+		prep_args->in_map_entries[0].sync_id = sync_in_obj[0];
+		prep_args->num_in_map_entries = 1;
+		ope_request->in_resource = 0;
+		CAM_DBG(CAM_OPE, "fence = %d", sync_in_obj[0]);
+	} else {
+		CAM_DBG(CAM_OPE, "No input fences");
+		prep_args->num_in_map_entries = 0;
+		ope_request->in_resource = 0;
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+static void cam_ope_mgr_print_stripe_info(uint32_t batch,
+	uint32_t io_buf, uint32_t plane, uint32_t stripe,
+	struct ope_stripe_io *stripe_info, uint64_t iova_addr)
+{
+	CAM_DBG(CAM_OPE, "b:%d io:%d p:%d s:%d: E",
+		batch, io_buf, plane, stripe);
+	CAM_DBG(CAM_OPE, "width: %d s_w: %u s_h: %u s_s: %u",
+		stripe_info->width, stripe_info->width,
+		stripe_info->height, stripe_info->stride);
+	CAM_DBG(CAM_OPE, "s_xinit = %u iova = %x s_loc = %u",
+		 stripe_info->s_location, stripe_info->x_init,
+		 iova_addr);
+	CAM_DBG(CAM_OPE, "s_off = %u s_format = %u s_len = %u",
+		stripe_info->offset, stripe_info->format,
+		stripe_info->len);
+	CAM_DBG(CAM_OPE, "s_align = %u s_pack = %u s_unpack = %u",
+		stripe_info->alignment, stripe_info->pack_format,
+		stripe_info->unpack_format);
+	CAM_DBG(CAM_OPE, "b:%d io:%d p:%d s:%d: E",
+		batch, io_buf, plane, stripe);
+}
+
+/*
+ * cam_ope_mgr_process_cmd_io_buf_req - translate the IO-buffer section of a
+ * user supplied frame process blob into per-request io/stripe descriptors.
+ *
+ * For every batch/io-buffer/plane/stripe the stripe iova is resolved through
+ * the hw manager's non-secure IOMMU handle and the alignment/packing
+ * parameters are taken from the acquire-time resource info that matches the
+ * buffer's resource type.
+ *
+ * @hw_mgr:             OPE hw manager
+ * @packet:             validated cam_packet (unused here beyond context)
+ * @ctx_data:           context owning the request
+ * @frame_process_addr: kernel va of the frame process blob
+ * @length:             blob length in bytes
+ * @req_idx:            slot in ctx_data->req_list to populate
+ *
+ * Returns 0 on success, negative errno on validation/mapping failure.
+ */
+static int cam_ope_mgr_process_cmd_io_buf_req(struct cam_ope_hw_mgr *hw_mgr,
+	struct cam_packet *packet, struct cam_ope_ctx *ctx_data,
+	uintptr_t frame_process_addr, size_t length, uint32_t req_idx)
+{
+	int rc = 0;
+	int i, j, k, l;
+	uint64_t iova_addr;
+	size_t len;
+	struct ope_frame_process *in_frame_process;
+	struct ope_frame_set *in_frame_set;
+	struct ope_io_buf_info *in_io_buf;
+	struct ope_stripe_info *in_stripe_info;
+	struct cam_ope_request *ope_request;
+	struct ope_io_buf *io_buf;
+	struct ope_stripe_io *stripe_info;
+	uint32_t alignment;
+	uint32_t rsc_idx;
+	uint32_t pack_format;
+	uint32_t unpack_format;
+	struct ope_in_res_info *in_res;
+	struct ope_out_res_info *out_res;
+
+	in_frame_process = (struct ope_frame_process *)frame_process_addr;
+
+	ope_request = ctx_data->req_list[req_idx];
+	ope_request->num_batch = in_frame_process->batch_size;
+
+	/* Debug dump of the incoming blob layout; no state is modified here */
+	for (i = 0; i < in_frame_process->batch_size; i++) {
+		in_frame_set = &in_frame_process->frame_set[i];
+		for (j = 0; j < in_frame_set->num_io_bufs; j++) {
+			in_io_buf = &in_frame_set->io_buf[j];
+			CAM_DBG(CAM_OPE, "i:%d j:%d dir: %x rsc: %u plane: %d",
+				i, j, in_io_buf->direction,
+				in_io_buf->resource_type,
+				in_io_buf->num_planes);
+			for (k = 0; k < in_io_buf->num_planes; k++) {
+				CAM_DBG(CAM_OPE, "i:%d j:%d k:%d numstripe: %d",
+					i, j, k, in_io_buf->num_stripes[k]);
+				CAM_DBG(CAM_OPE, "m_hdl: %d len: %d",
+					in_io_buf->mem_handle[k],
+					in_io_buf->length[k]);
+				for (l = 0; l < in_io_buf->num_stripes[k];
+					l++) {
+					in_stripe_info =
+						&in_io_buf->stripe_info[k][l];
+					CAM_DBG(CAM_OPE, "i:%d j:%d k:%d l:%d",
+						i, j, k, l);
+					CAM_DBG(CAM_OPE, "%d s_loc:%d w:%d",
+						in_stripe_info->x_init,
+						in_stripe_info->stripe_location,
+						in_stripe_info->width);
+					CAM_DBG(CAM_OPE,  "s_off: %d d_bus: %d",
+						in_stripe_info->offset,
+						in_stripe_info->disable_bus);
+				}
+			}
+		}
+	}
+
+	for (i = 0; i < ope_request->num_batch; i++) {
+		in_frame_set = &in_frame_process->frame_set[i];
+		ope_request->num_io_bufs[i] = in_frame_set->num_io_bufs;
+		if (in_frame_set->num_io_bufs > OPE_MAX_IO_BUFS) {
+			CAM_ERR(CAM_OPE, "Wrong number of io buffers: %d",
+				in_frame_set->num_io_bufs);
+			return -EINVAL;
+		}
+
+		for (j = 0; j < in_frame_set->num_io_bufs; j++) {
+			in_io_buf = &in_frame_set->io_buf[j];
+			io_buf = &ope_request->io_buf[i][j];
+			if (in_io_buf->num_planes > OPE_MAX_PLANES) {
+				CAM_ERR(CAM_OPE, "wrong number of planes: %u",
+					in_io_buf->num_planes);
+				return -EINVAL;
+			}
+
+			io_buf->num_planes = in_io_buf->num_planes;
+			io_buf->resource_type = in_io_buf->resource_type;
+			io_buf->direction = in_io_buf->direction;
+			io_buf->fence = in_io_buf->fence;
+			io_buf->format = in_io_buf->format;
+
+			rc = cam_ope_mgr_get_rsc_idx(ctx_data, in_io_buf);
+			if (rc < 0) {
+				CAM_ERR(CAM_OPE, "Invalid rsc idx = %d", rc);
+				return rc;
+			}
+			rsc_idx = rc;
+			if (in_io_buf->direction == CAM_BUF_INPUT) {
+				in_res =
+					&ctx_data->ope_acquire.in_res[rsc_idx];
+				alignment = in_res->alignment;
+				unpack_format = in_res->unpacker_format;
+				pack_format = 0;
+			} else if (in_io_buf->direction == CAM_BUF_OUTPUT) {
+				out_res =
+					&ctx_data->ope_acquire.out_res[rsc_idx];
+				alignment = out_res->alignment;
+				pack_format = out_res->packer_format;
+				unpack_format = 0;
+			} else {
+				/*
+				 * Fix: direction is user controlled; any
+				 * other value previously left alignment and
+				 * pack/unpack formats uninitialized yet they
+				 * were still consumed below.
+				 */
+				CAM_ERR(CAM_OPE, "Invalid direction: %d",
+					in_io_buf->direction);
+				return -EINVAL;
+			}
+
+			CAM_DBG(CAM_OPE, "i:%d j:%d dir:%d rsc type:%d fmt:%d",
+				i, j, io_buf->direction, io_buf->resource_type,
+				io_buf->format);
+			for (k = 0; k < in_io_buf->num_planes; k++) {
+				io_buf->num_stripes[k] =
+					in_io_buf->num_stripes[k];
+				rc = cam_mem_get_io_buf(
+					in_io_buf->mem_handle[k],
+					hw_mgr->iommu_hdl, &iova_addr, &len);
+				if (rc) {
+					CAM_ERR(CAM_OPE, "get buf failed: %d",
+						rc);
+					return -EINVAL;
+				}
+				if (len < in_io_buf->length[k]) {
+					CAM_ERR(CAM_OPE, "Invalid length");
+					return -EINVAL;
+				}
+				iova_addr += in_io_buf->plane_offset[k];
+				for (l = 0; l < in_io_buf->num_stripes[k];
+					l++) {
+					in_stripe_info =
+						&in_io_buf->stripe_info[k][l];
+					stripe_info = &io_buf->s_io[k][l];
+					stripe_info->offset =
+						in_stripe_info->offset;
+					stripe_info->format = in_io_buf->format;
+					stripe_info->s_location =
+						in_stripe_info->stripe_location;
+					stripe_info->iova_addr =
+						iova_addr + stripe_info->offset;
+					stripe_info->width =
+						in_stripe_info->width;
+					stripe_info->height =
+						in_io_buf->height[k];
+					stripe_info->stride =
+						in_io_buf->plane_stride[k];
+					stripe_info->x_init =
+						in_stripe_info->x_init;
+					stripe_info->len = len;
+					stripe_info->alignment = alignment;
+					stripe_info->pack_format = pack_format;
+					stripe_info->unpack_format =
+						unpack_format;
+					cam_ope_mgr_print_stripe_info(i, j,
+						k, l, stripe_info, iova_addr);
+				}
+			}
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * cam_ope_mgr_process_cmd_buf_req - walk the command buffers embedded in a
+ * frame process blob and latch the frame-scope KMD/debug command buffers and
+ * per-stripe command buffer counts onto the request at req_idx.
+ *
+ * A frame-scope command buffer with usage OPE_CMD_BUF_KMD is mandatory;
+ * the function fails with -EINVAL if none is found.
+ *
+ * Returns 0 on success, negative errno on malformed blob or mapping failure.
+ */
+static int cam_ope_mgr_process_cmd_buf_req(struct cam_ope_hw_mgr *hw_mgr,
+	struct cam_packet *packet, struct cam_ope_ctx *ctx_data,
+	uintptr_t frame_process_addr, size_t length, uint32_t req_idx)
+{
+	int rc = 0;
+	int i, j;
+	uint64_t iova_addr;
+	uint64_t iova_cdm_addr;
+	uintptr_t cpu_addr;
+	size_t len;
+	struct ope_frame_process *frame_process;
+	struct ope_cmd_buf_info *cmd_buf;
+	struct cam_ope_request *ope_request;
+	bool is_kmd_buf_valid = false;
+
+	frame_process = (struct ope_frame_process *)frame_process_addr;
+
+	/* batch_size and per-batch counts are user controlled; bound them */
+	if (frame_process->batch_size > OPE_MAX_BATCH_SIZE) {
+		CAM_ERR(CAM_OPE, "Invalid batch: %d",
+			frame_process->batch_size);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < frame_process->batch_size; i++) {
+		if (frame_process->num_cmd_bufs[i] > OPE_MAX_CMD_BUFS) {
+			CAM_ERR(CAM_OPE, "Invalid cmd bufs for batch %d %d",
+				i, frame_process->num_cmd_bufs[i]);
+			return -EINVAL;
+		}
+	}
+
+	CAM_DBG(CAM_OPE, "cmd buf for req id = %lld b_size = %d",
+		packet->header.request_id, frame_process->batch_size);
+
+	/* Debug dump only; no state is modified in this loop */
+	for (i = 0; i < frame_process->batch_size; i++) {
+		CAM_DBG(CAM_OPE, "batch: %d count %d", i,
+			frame_process->num_cmd_bufs[i]);
+		for (j = 0; j < frame_process->num_cmd_bufs[i]; j++) {
+			CAM_DBG(CAM_OPE, "batch: %d cmd_buf_idx :%d mem_hdl:%x",
+				i, j, frame_process->cmd_buf[i][j].mem_handle);
+			CAM_DBG(CAM_OPE, "size = %u scope = %d buf_type = %d",
+				frame_process->cmd_buf[i][j].size,
+				frame_process->cmd_buf[i][j].cmd_buf_scope,
+				frame_process->cmd_buf[i][j].type);
+			CAM_DBG(CAM_OPE, "usage = %d buffered = %d s_idx = %d",
+			frame_process->cmd_buf[i][j].cmd_buf_usage,
+			frame_process->cmd_buf[i][j].cmd_buf_buffered,
+			frame_process->cmd_buf[i][j].stripe_idx);
+		}
+	}
+
+	ope_request = ctx_data->req_list[req_idx];
+	ope_request->num_batch = frame_process->batch_size;
+
+	for (i = 0; i < frame_process->batch_size; i++) {
+		for (j = 0; j < frame_process->num_cmd_bufs[i]; j++) {
+			cmd_buf = &frame_process->cmd_buf[i][j];
+
+			switch (cmd_buf->cmd_buf_scope) {
+			case OPE_CMD_BUF_SCOPE_FRAME: {
+				/*
+				 * Map the buffer three ways: device iova,
+				 * CDM iova and kernel cpu va.
+				 */
+				rc = cam_mem_get_io_buf(cmd_buf->mem_handle,
+					hw_mgr->iommu_hdl, &iova_addr, &len);
+				if (rc) {
+					CAM_ERR(CAM_OPE, "get cmd buffailed %x",
+						hw_mgr->iommu_hdl);
+					goto end;
+				}
+				iova_addr = iova_addr + cmd_buf->offset;
+
+				rc = cam_mem_get_io_buf(cmd_buf->mem_handle,
+					hw_mgr->iommu_cdm_hdl,
+					&iova_cdm_addr, &len);
+				if (rc) {
+					CAM_ERR(CAM_OPE, "get cmd buffailed %x",
+						hw_mgr->iommu_hdl);
+					goto end;
+				}
+				iova_cdm_addr = iova_cdm_addr + cmd_buf->offset;
+
+				rc = cam_mem_get_cpu_buf(cmd_buf->mem_handle,
+					&cpu_addr, &len);
+				if (rc || !cpu_addr) {
+					CAM_ERR(CAM_OPE, "get cmd buffailed %x",
+						hw_mgr->iommu_hdl);
+					goto end;
+				}
+				cpu_addr = cpu_addr +
+					frame_process->cmd_buf[i][j].offset;
+				CAM_DBG(CAM_OPE, "Hdl %x size %d len %d off %d",
+					cmd_buf->mem_handle, cmd_buf->size,
+					cmd_buf->length,
+					cmd_buf->offset);
+				if (cmd_buf->cmd_buf_usage == OPE_CMD_BUF_KMD) {
+					ope_request->ope_kmd_buf.mem_handle =
+						cmd_buf->mem_handle;
+					ope_request->ope_kmd_buf.cpu_addr =
+						cpu_addr;
+					ope_request->ope_kmd_buf.iova_addr =
+						iova_addr;
+					ope_request->ope_kmd_buf.iova_cdm_addr =
+						iova_cdm_addr;
+					ope_request->ope_kmd_buf.len = len;
+					ope_request->ope_kmd_buf.size =
+						cmd_buf->size;
+					is_kmd_buf_valid = true;
+					CAM_DBG(CAM_OPE, "kbuf:%x io:%x cdm:%x",
+					ope_request->ope_kmd_buf.cpu_addr,
+					ope_request->ope_kmd_buf.iova_addr,
+					ope_request->ope_kmd_buf.iova_cdm_addr);
+					break;
+				} else if (cmd_buf->cmd_buf_usage ==
+					OPE_CMD_BUF_DEBUG) {
+					ope_request->ope_debug_buf.cpu_addr =
+						cpu_addr;
+					ope_request->ope_debug_buf.iova_addr =
+						iova_addr;
+					ope_request->ope_debug_buf.len =
+						len;
+					ope_request->ope_debug_buf.size =
+						cmd_buf->size;
+					CAM_DBG(CAM_OPE, "dbg buf = %x",
+					ope_request->ope_debug_buf.cpu_addr);
+					break;
+				}
+				break;
+			}
+			case OPE_CMD_BUF_SCOPE_STRIPE: {
+				uint32_t num_cmd_bufs = 0;
+				uint32_t s_idx = 0;
+
+				/*
+				 * NOTE(review): s_idx comes straight from the
+				 * user blob and is not bounds-checked here
+				 * before indexing num_stripe_cmd_bufs[i][] —
+				 * confirm an upper bound is enforced elsewhere.
+				 */
+				s_idx = cmd_buf->stripe_idx;
+				num_cmd_bufs =
+				ope_request->num_stripe_cmd_bufs[i][s_idx];
+
+				/* First cmd buf seen for this stripe */
+				if (!num_cmd_bufs)
+					ope_request->num_stripes[i]++;
+
+				ope_request->num_stripe_cmd_bufs[i][s_idx]++;
+				break;
+			}
+
+			default:
+				break;
+			}
+		}
+	}
+
+
+	for (i = 0; i < frame_process->batch_size; i++) {
+		CAM_DBG(CAM_OPE, "num of stripes for batch %d is %d",
+			i, ope_request->num_stripes[i]);
+		for (j = 0; j < ope_request->num_stripes[i]; j++) {
+			CAM_DBG(CAM_OPE, "cmd buffers for stripe: %d:%d is %d",
+				i, j, ope_request->num_stripe_cmd_bufs[i][j]);
+		}
+	}
+
+	/* A frame-scope KMD command buffer is mandatory for the request */
+	if (!is_kmd_buf_valid) {
+		CAM_DBG(CAM_OPE, "Invalid kmd buffer");
+		rc = -EINVAL;
+	}
+end:
+	return rc;
+}
+
+/*
+ * cam_ope_mgr_process_cmd_desc - locate the generic (non-blob) command
+ * buffer in the packet, map it to a kernel va and process its command-buf
+ * and io-buf sections into the request at req_idx.
+ *
+ * On success *ope_cmd_buf_addr holds the cpu va of the last matching
+ * command buffer; on any failure it is reset to 0.
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+static int cam_ope_mgr_process_cmd_desc(struct cam_ope_hw_mgr *hw_mgr,
+	struct cam_packet *packet, struct cam_ope_ctx *ctx_data,
+	uintptr_t *ope_cmd_buf_addr, uint32_t req_idx)
+{
+	int rc = 0;
+	int i;
+	int num_cmd_buf = 0;
+	size_t len;
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	uintptr_t cpu_addr = 0;
+	struct cam_ope_request *ope_request;
+
+	cmd_desc = (struct cam_cmd_buf_desc *)
+		((uint32_t *) &packet->payload + packet->cmd_buf_offset/4);
+
+	*ope_cmd_buf_addr = 0;
+	for (i = 0; i < packet->num_cmd_buf; i++, num_cmd_buf++) {
+		/* Only generic, non-blob command buffers are handled here */
+		if (cmd_desc[i].type != CAM_CMD_BUF_GENERIC ||
+			cmd_desc[i].meta_data == OPE_CMD_META_GENERIC_BLOB)
+			continue;
+
+		rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
+			&cpu_addr, &len);
+		if (rc || !cpu_addr) {
+			CAM_ERR(CAM_OPE, "get cmd buf failed %x",
+				hw_mgr->iommu_hdl);
+			/*
+			 * Fix: "num_cmd_buf = (num_cmd_buf > 0) ?
+			 * num_cmd_buf-- : 0" was unsequenced-modification UB
+			 * and never actually decremented.
+			 */
+			if (num_cmd_buf > 0)
+				num_cmd_buf--;
+			goto end;
+		}
+		if ((len <= cmd_desc[i].offset) ||
+			(cmd_desc[i].size < cmd_desc[i].length) ||
+			((len - cmd_desc[i].offset) <
+			cmd_desc[i].length)) {
+			CAM_ERR(CAM_OPE, "Invalid offset or length");
+			/* Fix: rc was still 0 here, so bad user input was
+			 * silently reported as success.
+			 */
+			rc = -EINVAL;
+			goto end;
+		}
+		cpu_addr = cpu_addr + cmd_desc[i].offset;
+		*ope_cmd_buf_addr = cpu_addr;
+	}
+
+	if (!cpu_addr) {
+		CAM_ERR(CAM_OPE, "invalid number of cmd buf");
+		*ope_cmd_buf_addr = 0;
+		return -EINVAL;
+	}
+
+	ope_request = ctx_data->req_list[req_idx];
+	ope_request->request_id = packet->header.request_id;
+	ope_request->req_idx = req_idx;
+
+	rc = cam_ope_mgr_process_cmd_buf_req(hw_mgr, packet, ctx_data,
+		cpu_addr, len, req_idx);
+	if (rc) {
+		CAM_ERR(CAM_OPE, "Process OPE cmd request is failed: %d", rc);
+		goto end;
+	}
+
+	rc = cam_ope_mgr_process_cmd_io_buf_req(hw_mgr, packet, ctx_data,
+		cpu_addr, len, req_idx);
+	if (rc) {
+		CAM_ERR(CAM_OPE, "Process OPE cmd io request is failed: %d",
+			rc);
+		goto end;
+	}
+
+	return rc;
+
+end:
+	*ope_cmd_buf_addr = 0;
+	return rc;
+}
+
+/*
+ * Count the CAM_BUF_INPUT io-config entries in the packet and verify the
+ * count does not exceed OPE_IN_RES_MAX.
+ */
+static bool cam_ope_mgr_is_valid_inconfig(struct cam_packet *packet)
+{
+	int idx, num_in_entries = 0;
+	struct cam_buf_io_cfg *io_cfg;
+
+	io_cfg = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload +
+			packet->io_configs_offset/4);
+
+	for (idx = 0; idx < packet->num_io_configs; idx++) {
+		if (io_cfg[idx].direction == CAM_BUF_INPUT)
+			num_in_entries++;
+	}
+
+	if (num_in_entries > OPE_IN_RES_MAX)
+		CAM_ERR(CAM_OPE, "In config entries(%u) more than allowed(%u)",
+				num_in_entries, OPE_IN_RES_MAX);
+
+	CAM_DBG(CAM_OPE, "number of in_config info: %u %u %u %u",
+			packet->num_io_configs, OPE_MAX_IO_BUFS,
+			num_in_entries, OPE_IN_RES_MAX);
+
+	return num_in_entries <= OPE_IN_RES_MAX;
+}
+
+/*
+ * Count the CAM_BUF_OUTPUT io-config entries in the packet and verify the
+ * count does not exceed OPE_OUT_RES_MAX.
+ */
+static bool cam_ope_mgr_is_valid_outconfig(struct cam_packet *packet)
+{
+	int idx, num_out_entries = 0;
+	struct cam_buf_io_cfg *io_cfg;
+
+	io_cfg = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload +
+			packet->io_configs_offset/4);
+
+	for (idx = 0; idx < packet->num_io_configs; idx++) {
+		if (io_cfg[idx].direction == CAM_BUF_OUTPUT)
+			num_out_entries++;
+	}
+
+	if (num_out_entries > OPE_OUT_RES_MAX)
+		CAM_ERR(CAM_OPE, "Out config entries(%u) more than allowed(%u)",
+				num_out_entries, OPE_OUT_RES_MAX);
+
+	CAM_DBG(CAM_OPE, "number of out_config info: %u %u %u %u",
+			packet->num_io_configs, OPE_MAX_IO_BUFS,
+			num_out_entries, OPE_OUT_RES_MAX);
+
+	return num_out_entries <= OPE_OUT_RES_MAX;
+}
+
+/*
+ * Validate top-level fields of an OPE config packet before any further
+ * processing: opcode, io-config and cmd-buf counts, and the in/out
+ * io-config split.
+ */
+static int cam_ope_mgr_pkt_validation(struct cam_packet *packet)
+{
+	uint32_t opcode = packet->header.op_code & 0xff;
+
+	if (opcode != OPE_OPCODE_CONFIG) {
+		CAM_ERR(CAM_OPE, "Invalid Opcode in pkt: %d", opcode);
+		return -EINVAL;
+	}
+
+	if (packet->num_io_configs > OPE_MAX_IO_BUFS) {
+		CAM_ERR(CAM_OPE, "Invalid number of io configs: %d %d",
+			OPE_MAX_IO_BUFS, packet->num_io_configs);
+		return -EINVAL;
+	}
+
+	if (packet->num_cmd_buf > OPE_PACKET_MAX_CMD_BUFS) {
+		CAM_ERR(CAM_OPE, "Invalid number of cmd buffers: %d %d",
+			OPE_PACKET_MAX_CMD_BUFS, packet->num_cmd_buf);
+		return -EINVAL;
+	}
+
+	if (!cam_ope_mgr_is_valid_inconfig(packet))
+		return -EINVAL;
+
+	if (!cam_ope_mgr_is_valid_outconfig(packet))
+		return -EINVAL;
+
+	CAM_DBG(CAM_OPE, "number of cmd/patch info: %u %u %u %u",
+			packet->num_cmd_buf,
+			packet->num_io_configs, OPE_MAX_IO_BUFS,
+			packet->num_patches);
+	return 0;
+}
+
+/*
+ * cam_ope_get_acquire_info - copy the acquire-dev info from userspace into
+ * the context and range-check every user controlled field (secure mode,
+ * resource counts, device and hw type).
+ *
+ * Returns 0 on success; -EINVAL/-EFAULT on bad arguments or copy failure.
+ */
+static int cam_ope_get_acquire_info(struct cam_ope_hw_mgr *hw_mgr,
+	struct cam_hw_acquire_args *args,
+	struct cam_ope_ctx *ctx)
+{
+	int i = 0;
+
+	if (args->num_acq > 1) {
+		CAM_ERR(CAM_OPE, "Invalid number of resources: %d",
+			args->num_acq);
+		return -EINVAL;
+	}
+
+	if (args->acquire_info_size <
+		sizeof(struct ope_acquire_dev_info)) {
+		CAM_ERR(CAM_OPE, "Invalid acquire size = %d",
+			args->acquire_info_size);
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&ctx->ope_acquire,
+		(void __user *)args->acquire_info,
+		sizeof(struct ope_acquire_dev_info))) {
+		CAM_ERR(CAM_OPE, "Failed in acquire");
+		return -EFAULT;
+	}
+
+	if (ctx->ope_acquire.secure_mode > CAM_SECURE_MODE_SECURE) {
+		CAM_ERR(CAM_OPE, "Invalid mode:%d",
+			ctx->ope_acquire.secure_mode);
+		return -EINVAL;
+	}
+
+	if (ctx->ope_acquire.num_out_res > OPE_OUT_RES_MAX) {
+		CAM_ERR(CAM_OPE, "num of out resources exceeding : %u",
+			ctx->ope_acquire.num_out_res);
+		return -EINVAL;
+	}
+
+	if (ctx->ope_acquire.num_in_res > OPE_IN_RES_MAX) {
+		CAM_ERR(CAM_OPE, "num of in resources exceeding : %u",
+			ctx->ope_acquire.num_in_res);
+		return -EINVAL;
+	}
+
+	/*
+	 * NOTE(review): the two range checks below return -EFAULT while the
+	 * checks above use -EINVAL for the same class of error — -EINVAL
+	 * looks intended; confirm no caller depends on -EFAULT here.
+	 */
+	if (ctx->ope_acquire.dev_type >= OPE_DEV_TYPE_MAX) {
+		CAM_ERR(CAM_OPE, "Invalid device type: %d",
+			ctx->ope_acquire.dev_type);
+		return -EFAULT;
+	}
+
+	if (ctx->ope_acquire.hw_type >= OPE_HW_TYPE_MAX) {
+		CAM_ERR(CAM_OPE, "Invalid HW type: %d",
+			ctx->ope_acquire.hw_type);
+		return -EFAULT;
+	}
+
+	/* Debug dump of the accepted acquire parameters */
+	CAM_DBG(CAM_OPE, "top: %u %u %s %u %u %u %u %u",
+		ctx->ope_acquire.hw_type, ctx->ope_acquire.dev_type,
+		ctx->ope_acquire.dev_name,
+		ctx->ope_acquire.nrt_stripes_for_arb,
+		ctx->ope_acquire.secure_mode, ctx->ope_acquire.batch_size,
+		ctx->ope_acquire.num_in_res, ctx->ope_acquire.num_out_res);
+
+	for (i = 0; i < ctx->ope_acquire.num_in_res; i++) {
+		CAM_DBG(CAM_OPE, "IN: %u %u %u %u %u %u %u %u",
+		ctx->ope_acquire.in_res[i].res_id,
+		ctx->ope_acquire.in_res[i].format,
+		ctx->ope_acquire.in_res[i].width,
+		ctx->ope_acquire.in_res[i].height,
+		ctx->ope_acquire.in_res[i].alignment,
+		ctx->ope_acquire.in_res[i].unpacker_format,
+		ctx->ope_acquire.in_res[i].max_stripe_size,
+		ctx->ope_acquire.in_res[i].fps);
+	}
+
+	for (i = 0; i < ctx->ope_acquire.num_out_res; i++) {
+		CAM_DBG(CAM_OPE, "OUT: %u %u %u %u %u %u %u %u",
+		ctx->ope_acquire.out_res[i].res_id,
+		ctx->ope_acquire.out_res[i].format,
+		ctx->ope_acquire.out_res[i].width,
+		ctx->ope_acquire.out_res[i].height,
+		ctx->ope_acquire.out_res[i].alignment,
+		ctx->ope_acquire.out_res[i].packer_format,
+		ctx->ope_acquire.out_res[i].subsample_period,
+		ctx->ope_acquire.out_res[i].subsample_pattern);
+	}
+
+	return 0;
+}
+
+/*
+ * Reserve the first free context slot in the hw manager's bitmap.
+ * Returns the slot index, or -EINVAL when none is available or the slot
+ * state disagrees with the bitmap.
+ */
+static int cam_ope_get_free_ctx(struct cam_ope_hw_mgr *hw_mgr)
+{
+	int slot;
+
+	slot = find_first_zero_bit(hw_mgr->ctx_bitmap, hw_mgr->ctx_bits);
+	if (slot < 0 || slot >= OPE_CTX_MAX) {
+		CAM_ERR(CAM_OPE, "Invalid ctx id = %d", slot);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->ctx[slot].ctx_mutex);
+	if (hw_mgr->ctx[slot].ctx_state == OPE_CTX_STATE_FREE) {
+		set_bit(slot, hw_mgr->ctx_bitmap);
+		mutex_unlock(&hw_mgr->ctx[slot].ctx_mutex);
+		return slot;
+	}
+
+	CAM_ERR(CAM_OPE, "Invalid ctx %d state %d",
+		slot, hw_mgr->ctx[slot].ctx_state);
+	mutex_unlock(&hw_mgr->ctx[slot].ctx_mutex);
+	return -EINVAL;
+}
+
+
+/*
+ * Return a context slot to the free pool: mark its state FREE and clear its
+ * bitmap bit. Out-of-range ids are logged and ignored; always returns 0.
+ */
+static int cam_ope_put_free_ctx(struct cam_ope_hw_mgr *hw_mgr, uint32_t ctx_id)
+{
+	if (ctx_id < OPE_CTX_MAX) {
+		hw_mgr->ctx[ctx_id].ctx_state = OPE_CTX_STATE_FREE;
+		clear_bit(ctx_id, hw_mgr->ctx_bitmap);
+	} else {
+		CAM_ERR(CAM_OPE, "Invalid ctx_id: %d", ctx_id);
+	}
+
+	return 0;
+}
+
+/*
+ * cam_ope_mgr_get_hw_caps - QUERY_CAP handler: collect per-device hw
+ * versions and the dev/CDM iommu handles and copy them back to userspace.
+ *
+ * Returns 0 on success, -EINVAL/-EFAULT or a device op error otherwise.
+ */
+static int cam_ope_mgr_get_hw_caps(void *hw_priv, void *hw_caps_args)
+{
+	struct cam_ope_hw_mgr *hw_mgr;
+	struct cam_query_cap_cmd *query_cap = hw_caps_args;
+	struct ope_hw_ver hw_ver;
+	int rc = 0, i;
+
+	if (!hw_priv || !hw_caps_args) {
+		/* Fix: %x with pointer args is a format mismatch; use %pK */
+		CAM_ERR(CAM_OPE, "Invalid args: %pK %pK",
+			hw_priv, hw_caps_args);
+		return -EINVAL;
+	}
+
+	hw_mgr = hw_priv;
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	if (copy_from_user(&hw_mgr->ope_caps,
+		u64_to_user_ptr(query_cap->caps_handle),
+		sizeof(struct ope_query_cap_cmd))) {
+		/* Fix: sizeof() is size_t; print with %zu, not %d */
+		CAM_ERR(CAM_OPE, "copy_from_user failed: size = %zu",
+			sizeof(struct ope_query_cap_cmd));
+		rc = -EFAULT;
+		goto end;
+	}
+
+	/* Query the hw version of every probed OPE device */
+	for (i = 0; i < hw_mgr->num_ope; i++) {
+		rc = hw_mgr->ope_dev_intf[i]->hw_ops.get_hw_caps(
+			hw_mgr->ope_dev_intf[i]->hw_priv,
+			&hw_ver, sizeof(hw_ver));
+		if (rc)
+			goto end;
+
+		hw_mgr->ope_caps.hw_ver[i] = hw_ver;
+	}
+
+	hw_mgr->ope_caps.dev_iommu_handle.non_secure = hw_mgr->iommu_hdl;
+	hw_mgr->ope_caps.dev_iommu_handle.secure = hw_mgr->iommu_sec_hdl;
+	hw_mgr->ope_caps.cdm_iommu_handle.non_secure = hw_mgr->iommu_cdm_hdl;
+	hw_mgr->ope_caps.cdm_iommu_handle.secure = hw_mgr->iommu_sec_cdm_hdl;
+	hw_mgr->ope_caps.num_ope = hw_mgr->num_ope;
+
+	CAM_DBG(CAM_OPE, "iommu sec %d iommu ns %d cdm s %d cdm ns %d",
+		hw_mgr->ope_caps.dev_iommu_handle.secure,
+		hw_mgr->ope_caps.dev_iommu_handle.non_secure,
+		hw_mgr->ope_caps.cdm_iommu_handle.secure,
+		hw_mgr->ope_caps.cdm_iommu_handle.non_secure);
+
+	if (copy_to_user(u64_to_user_ptr(query_cap->caps_handle),
+		&hw_mgr->ope_caps, sizeof(struct ope_query_cap_cmd))) {
+		CAM_ERR(CAM_OPE, "copy_to_user failed: size = %zu",
+			sizeof(struct ope_query_cap_cmd));
+		rc = -EFAULT;
+	}
+
+end:
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	return rc;
+}
+
+/*
+ * cam_ope_mgr_acquire_hw - acquire a context: reserve a ctx slot, copy the
+ * user acquire info, init the OPE devices on first acquire, acquire the
+ * devices and a virtual CDM, stream the CDM on and set initial clk/bw votes.
+ *
+ * The labelled error ladder at the bottom unwinds in strict reverse order
+ * of the setup above — keep the label order in sync with the setup order.
+ *
+ * NOTE(review): the function mixes the hw_mgr parameter with the file-scope
+ * ope_hw_mgr global (e.g. for num_ope/hfi_en/cmd_work) — presumably they
+ * refer to the same object; confirm.
+ */
+static int cam_ope_mgr_acquire_hw(void *hw_priv, void *hw_acquire_args)
+{
+	int rc = 0, i;
+	int ctx_id;
+	struct cam_ope_hw_mgr *hw_mgr = hw_priv;
+	struct cam_ope_ctx *ctx;
+	struct cam_hw_acquire_args *args = hw_acquire_args;
+	struct cam_ope_dev_acquire ope_dev_acquire;
+	struct cam_ope_dev_release ope_dev_release;
+	struct cam_cdm_acquire_data cdm_acquire;
+	struct cam_ope_dev_init init;
+	struct cam_ope_dev_clk_update clk_update;
+	struct cam_ope_dev_bw_update bw_update;
+	struct cam_ope_set_irq_cb irq_cb;
+
+	if ((!hw_priv) || (!hw_acquire_args)) {
+		CAM_ERR(CAM_OPE, "Invalid args: %x %x",
+			hw_priv, hw_acquire_args);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	ctx_id = cam_ope_get_free_ctx(hw_mgr);
+	if (ctx_id < 0) {
+		CAM_ERR(CAM_OPE, "No free ctx");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return ctx_id;
+	}
+
+	ctx = &hw_mgr->ctx[ctx_id];
+	ctx->ctx_id = ctx_id;
+	mutex_lock(&ctx->ctx_mutex);
+	/* Copy and validate the userspace acquire info into the ctx */
+	rc = cam_ope_get_acquire_info(hw_mgr, args, ctx);
+	if (rc < 0) {
+		CAM_ERR(CAM_OPE, "get_acquire info failed: %d", rc);
+		goto end;
+	}
+
+
+	/* First context: bring up all OPE devices and install the IRQ cb */
+	if (!hw_mgr->ope_ctx_cnt) {
+		for (i = 0; i < ope_hw_mgr->num_ope; i++) {
+			init.hfi_en = ope_hw_mgr->hfi_en;
+			rc = hw_mgr->ope_dev_intf[i]->hw_ops.init(
+				hw_mgr->ope_dev_intf[i]->hw_priv, &init,
+				sizeof(init));
+			if (rc) {
+				CAM_ERR(CAM_OPE, "OPE Dev init failed: %d", rc);
+				goto end;
+			}
+		}
+
+		/* Install IRQ CB */
+		irq_cb.ope_hw_mgr_cb = cam_ope_hw_mgr_cb;
+		irq_cb.data = hw_mgr;
+		for (i = 0; i < ope_hw_mgr->num_ope; i++) {
+			init.hfi_en = ope_hw_mgr->hfi_en;
+			rc = hw_mgr->ope_dev_intf[i]->hw_ops.process_cmd(
+				hw_mgr->ope_dev_intf[i]->hw_priv,
+				OPE_HW_SET_IRQ_CB,
+				&irq_cb, sizeof(irq_cb));
+			if (rc) {
+				CAM_ERR(CAM_OPE, "OPE Dev init failed: %d", rc);
+				goto ope_irq_set_failed;
+			}
+		}
+	}
+
+	ope_dev_acquire.ctx_id = ctx_id;
+	ope_dev_acquire.ope_acquire = &ctx->ope_acquire;
+
+	for (i = 0; i < ope_hw_mgr->num_ope; i++) {
+		rc = hw_mgr->ope_dev_intf[i]->hw_ops.process_cmd(
+			hw_mgr->ope_dev_intf[i]->hw_priv, OPE_HW_ACQUIRE,
+			&ope_dev_acquire, sizeof(ope_dev_acquire));
+		if (rc) {
+			CAM_ERR(CAM_OPE, "OPE Dev acquire failed: %d", rc);
+			goto ope_dev_acquire_failed;
+		}
+	}
+
+	/* RT clients get the high-priority CDM BL FIFO, NRT the low one */
+	memset(&cdm_acquire, 0, sizeof(cdm_acquire));
+	strlcpy(cdm_acquire.identifier, "ope", sizeof("ope"));
+	if (ctx->ope_acquire.dev_type == OPE_DEV_TYPE_OPE_RT)
+		cdm_acquire.priority = CAM_CDM_BL_FIFO_3;
+	else if (ctx->ope_acquire.dev_type ==
+		OPE_DEV_TYPE_OPE_NRT)
+		cdm_acquire.priority = CAM_CDM_BL_FIFO_0;
+	else
+		goto ope_dev_acquire_failed;
+
+	cdm_acquire.cell_index = 0;
+	cdm_acquire.handle = 0;
+	cdm_acquire.userdata = ctx;
+	cdm_acquire.cam_cdm_callback = cam_ope_ctx_cdm_callback;
+	cdm_acquire.id = CAM_CDM_VIRTUAL;
+	cdm_acquire.base_array_cnt = 1;
+	cdm_acquire.base_array[0] = hw_mgr->cdm_reg_map[OPE_DEV_OPE][0];
+
+	rc = cam_cdm_acquire(&cdm_acquire);
+	if (rc) {
+		CAM_ERR(CAM_OPE, "cdm_acquire is failed: %d", rc);
+		goto cdm_acquire_failed;
+	}
+
+	ctx->ope_cdm.cdm_ops = cdm_acquire.ops;
+	ctx->ope_cdm.cdm_handle = cdm_acquire.handle;
+
+	rc = cam_cdm_stream_on(cdm_acquire.handle);
+	if (rc) {
+		CAM_ERR(CAM_OPE, "cdm stream on failure: %d", rc);
+		goto cdm_stream_on_failure;
+	}
+
+	/* Initial fixed votes (600 MHz / 600 MB-scale bw) for all devices */
+	for (i = 0; i < ope_hw_mgr->num_ope; i++) {
+		clk_update.clk_rate = 600000000;
+		rc = hw_mgr->ope_dev_intf[i]->hw_ops.process_cmd(
+			hw_mgr->ope_dev_intf[i]->hw_priv, OPE_HW_CLK_UPDATE,
+			&clk_update, sizeof(clk_update));
+		if (rc) {
+			CAM_ERR(CAM_OPE, "OPE Dev clk update failed: %d", rc);
+			goto ope_clk_update_failed;
+		}
+	}
+
+	for (i = 0; i < ope_hw_mgr->num_ope; i++) {
+		bw_update.axi_vote.num_paths = 1;
+		bw_update.axi_vote_valid = true;
+		bw_update.axi_vote.axi_path[0].camnoc_bw = 600000000;
+		bw_update.axi_vote.axi_path[0].mnoc_ab_bw = 600000000;
+		bw_update.axi_vote.axi_path[0].mnoc_ib_bw = 600000000;
+		bw_update.axi_vote.axi_path[0].ddr_ab_bw = 600000000;
+		bw_update.axi_vote.axi_path[0].ddr_ib_bw = 600000000;
+		bw_update.axi_vote.axi_path[0].transac_type =
+			CAM_AXI_TRANSACTION_WRITE;
+		bw_update.axi_vote.axi_path[0].path_data_type =
+			CAM_AXI_PATH_DATA_ALL;
+		rc = hw_mgr->ope_dev_intf[i]->hw_ops.process_cmd(
+			hw_mgr->ope_dev_intf[i]->hw_priv, OPE_HW_BW_UPDATE,
+			&bw_update, sizeof(bw_update));
+		if (rc) {
+			CAM_ERR(CAM_OPE, "OPE Dev clk update failed: %d", rc);
+			goto ope_bw_update_failed;
+		}
+	}
+
+	/* Success: publish the ctx to the caller and mark it acquired */
+	cam_ope_start_req_timer(ctx);
+	hw_mgr->ope_ctx_cnt++;
+	ctx->context_priv = args->context_data;
+	args->ctxt_to_hw_map = ctx;
+	ctx->ctxt_event_cb = args->event_cb;
+	ctx->ctx_state = OPE_CTX_STATE_ACQUIRED;
+
+	mutex_unlock(&ctx->ctx_mutex);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	return rc;
+
+/*
+ * NOTE(review): after cam_cdm_stream_on() succeeded, the clk/bw failure
+ * paths below release the CDM without a matching cam_cdm_stream_off() —
+ * confirm whether cam_cdm_release() implies stream off.
+ */
+ope_clk_update_failed:
+ope_bw_update_failed:
+cdm_stream_on_failure:
+	cam_cdm_release(cdm_acquire.handle);
+	ctx->ope_cdm.cdm_ops = NULL;
+	ctx->ope_cdm.cdm_handle = 0;
+cdm_acquire_failed:
+	ope_dev_release.ctx_id = ctx_id;
+	for (i = 0; i < ope_hw_mgr->num_ope; i++) {
+		rc = hw_mgr->ope_dev_intf[i]->hw_ops.process_cmd(
+			hw_mgr->ope_dev_intf[i]->hw_priv, OPE_HW_RELEASE,
+			&ope_dev_release, sizeof(ope_dev_release));
+		if (rc)
+			CAM_ERR(CAM_OPE, "OPE Dev release failed: %d", rc);
+	}
+
+ope_dev_acquire_failed:
+	if (!hw_mgr->ope_ctx_cnt) {
+		irq_cb.ope_hw_mgr_cb = NULL;
+		irq_cb.data = hw_mgr;
+		for (i = 0; i < ope_hw_mgr->num_ope; i++) {
+			init.hfi_en = ope_hw_mgr->hfi_en;
+			rc = hw_mgr->ope_dev_intf[i]->hw_ops.process_cmd(
+				hw_mgr->ope_dev_intf[i]->hw_priv,
+				OPE_HW_SET_IRQ_CB,
+				&irq_cb, sizeof(irq_cb));
+			/*
+			 * NOTE(review): this error is logged unconditionally,
+			 * even when the deregistration call succeeds —
+			 * an "if (rc)" guard looks intended.
+			 */
+			CAM_ERR(CAM_OPE, "OPE IRQ de register failed");
+		}
+	}
+ope_irq_set_failed:
+	if (!hw_mgr->ope_ctx_cnt) {
+		for (i = 0; i < ope_hw_mgr->num_ope; i++) {
+			rc = hw_mgr->ope_dev_intf[i]->hw_ops.deinit(
+				hw_mgr->ope_dev_intf[i]->hw_priv, NULL, 0);
+			if (rc)
+				CAM_ERR(CAM_OPE, "OPE deinit fail: %d", rc);
+		}
+	}
+end:
+	cam_ope_put_free_ctx(hw_mgr, ctx_id);
+	mutex_unlock(&ctx->ctx_mutex);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	return rc;
+}
+
+/*
+ * cam_ope_mgr_release_ctx - tear down one context: release the devices,
+ * stream off and release its CDM, free all pending request slots and return
+ * the ctx to the free pool.
+ *
+ * NOTE(review): hw_mgr->ope_ctx_cnt is decremented here while only the
+ * per-ctx mutex is held; the visible caller (cam_ope_mgr_release_hw) holds
+ * hw_mgr_mutex around this call — confirm the same holds for all callers.
+ *
+ * Returns 0; ctx not in ACQUIRED state is treated as already released.
+ */
+static int cam_ope_mgr_release_ctx(struct cam_ope_hw_mgr *hw_mgr, int ctx_id)
+{
+	int i = 0, rc = 0;
+	struct cam_ope_dev_release ope_dev_release;
+
+	if (ctx_id >= OPE_CTX_MAX) {
+		CAM_ERR(CAM_OPE, "ctx_id is wrong: %d", ctx_id);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->ctx[ctx_id].ctx_mutex);
+	if (hw_mgr->ctx[ctx_id].ctx_state !=
+		OPE_CTX_STATE_ACQUIRED) {
+		mutex_unlock(&hw_mgr->ctx[ctx_id].ctx_mutex);
+		CAM_DBG(CAM_OPE, "ctx id: %d not in right state: %d",
+			ctx_id, hw_mgr->ctx[ctx_id].ctx_state);
+		return 0;
+	}
+
+	hw_mgr->ctx[ctx_id].ctx_state = OPE_CTX_STATE_RELEASE;
+
+	/* Release the ctx on every OPE device */
+	for (i = 0; i < ope_hw_mgr->num_ope; i++) {
+		ope_dev_release.ctx_id = ctx_id;
+		rc = hw_mgr->ope_dev_intf[i]->hw_ops.process_cmd(
+			hw_mgr->ope_dev_intf[i]->hw_priv, OPE_HW_RELEASE,
+			&ope_dev_release, sizeof(ope_dev_release));
+		if (rc)
+			CAM_ERR(CAM_OPE, "OPE Dev release failed: %d", rc);
+	}
+
+	rc = cam_cdm_stream_off(hw_mgr->ctx[ctx_id].ope_cdm.cdm_handle);
+	if (rc)
+		CAM_ERR(CAM_OPE, "OPE CDM streamoff failed: %d", rc);
+
+	rc = cam_cdm_release(hw_mgr->ctx[ctx_id].ope_cdm.cdm_handle);
+	if (rc)
+		CAM_ERR(CAM_OPE, "OPE CDM relase failed: %d", rc);
+
+
+	/* Drop any request slots that were still allocated on this ctx */
+	for (i = 0; i < CAM_CTX_REQ_MAX; i++) {
+		if (!hw_mgr->ctx[ctx_id].req_list[i])
+			continue;
+
+		if (hw_mgr->ctx[ctx_id].req_list[i]->cdm_cmd) {
+			kzfree(hw_mgr->ctx[ctx_id].req_list[i]->cdm_cmd);
+			hw_mgr->ctx[ctx_id].req_list[i]->cdm_cmd = NULL;
+		}
+		kzfree(hw_mgr->ctx[ctx_id].req_list[i]);
+		hw_mgr->ctx[ctx_id].req_list[i] = NULL;
+		clear_bit(i, hw_mgr->ctx[ctx_id].bitmap);
+	}
+
+	cam_ope_req_timer_stop(&hw_mgr->ctx[ctx_id]);
+	hw_mgr->ctx[ctx_id].ope_cdm.cdm_handle = 0;
+	hw_mgr->ctx[ctx_id].req_cnt = 0;
+	cam_ope_put_free_ctx(hw_mgr, ctx_id);
+	hw_mgr->ope_ctx_cnt--;
+	mutex_unlock(&hw_mgr->ctx[ctx_id].ctx_mutex);
+	CAM_DBG(CAM_OPE, "X: ctx_id = %d", ctx_id);
+
+	return 0;
+}
+
+/*
+ * cam_ope_mgr_release_hw - release the context mapped in hw_release_args;
+ * when this was the last acquired context, also deregister the IRQ
+ * callbacks and deinit all OPE devices.
+ *
+ * Returns 0 on success, negative errno on invalid args/state or when a
+ * device op fails (last failing op's code wins).
+ */
+static int cam_ope_mgr_release_hw(void *hw_priv, void *hw_release_args)
+{
+	int i, rc = 0;
+	int ctx_id = 0;
+	struct cam_hw_release_args *release_hw = hw_release_args;
+	struct cam_ope_hw_mgr *hw_mgr = hw_priv;
+	struct cam_ope_ctx *ctx_data = NULL;
+	struct cam_ope_set_irq_cb irq_cb;
+	struct cam_hw_intf *dev_intf;
+
+	if (!release_hw || !hw_mgr) {
+		CAM_ERR(CAM_OPE, "Invalid args: %pK %pK", release_hw, hw_mgr);
+		return -EINVAL;
+	}
+
+	ctx_data = release_hw->ctxt_to_hw_map;
+	if (!ctx_data) {
+		CAM_ERR(CAM_OPE, "NULL ctx data");
+		return -EINVAL;
+	}
+
+	ctx_id = ctx_data->ctx_id;
+	if (ctx_id < 0 || ctx_id >= OPE_CTX_MAX) {
+		CAM_ERR(CAM_OPE, "Invalid ctx id: %d", ctx_id);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->ctx[ctx_id].ctx_mutex);
+	if (hw_mgr->ctx[ctx_id].ctx_state != OPE_CTX_STATE_ACQUIRED) {
+		CAM_DBG(CAM_OPE, "ctx is not in use: %d", ctx_id);
+		mutex_unlock(&hw_mgr->ctx[ctx_id].ctx_mutex);
+		return -EINVAL;
+	}
+	mutex_unlock(&hw_mgr->ctx[ctx_id].ctx_mutex);
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	rc = cam_ope_mgr_release_ctx(hw_mgr, ctx_id);
+	/* Fix: the !ope_ctx_cnt condition was redundantly checked twice */
+	if (!hw_mgr->ope_ctx_cnt) {
+		CAM_DBG(CAM_OPE, "Last Release");
+		/* Deregister the IRQ callback on every device first ... */
+		for (i = 0; i < ope_hw_mgr->num_ope; i++) {
+			dev_intf = hw_mgr->ope_dev_intf[i];
+			irq_cb.ope_hw_mgr_cb = NULL;
+			irq_cb.data = NULL;
+			rc = dev_intf->hw_ops.process_cmd(
+				hw_mgr->ope_dev_intf[i]->hw_priv,
+				OPE_HW_SET_IRQ_CB,
+				&irq_cb, sizeof(irq_cb));
+			if (rc)
+				CAM_ERR(CAM_OPE, "IRQ dereg failed: %d",
+					rc);
+		}
+		/* ... then deinit every device */
+		for (i = 0; i < ope_hw_mgr->num_ope; i++) {
+			dev_intf = hw_mgr->ope_dev_intf[i];
+			rc = dev_intf->hw_ops.deinit(
+				hw_mgr->ope_dev_intf[i]->hw_priv,
+				NULL, 0);
+			if (rc)
+				CAM_ERR(CAM_OPE, "deinit failed: %d",
+					rc);
+		}
+	}
+
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	CAM_DBG(CAM_OPE, "Release done for ctx_id %d", ctx_id);
+	return rc;
+}
+
+/*
+ * cam_ope_mgr_prepare_hw_update - validate and translate a config packet
+ * into a request: validate the packet, apply patches, allocate a request
+ * slot and its CDM BL array, then process cmd descriptors, io configs and
+ * build the KMD buffer. On success the request's CDM command is published
+ * as the single hw update entry.
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+static int cam_ope_mgr_prepare_hw_update(void *hw_priv,
+	void *hw_prepare_update_args)
+{
+	int rc = 0;
+	struct cam_packet *packet = NULL;
+	struct cam_ope_hw_mgr *hw_mgr = hw_priv;
+	struct cam_hw_prepare_update_args *prepare_args =
+		hw_prepare_update_args;
+	struct cam_ope_ctx *ctx_data = NULL;
+	uintptr_t   ope_cmd_buf_addr;
+	uint32_t request_idx = 0;
+	struct cam_ope_request *ope_req;
+
+	if ((!prepare_args) || (!hw_mgr) || (!prepare_args->packet)) {
+		CAM_ERR(CAM_OPE, "Invalid args: %x %x",
+			prepare_args, hw_mgr);
+		return -EINVAL;
+	}
+
+	ctx_data = prepare_args->ctxt_to_hw_map;
+	if (!ctx_data) {
+		CAM_ERR(CAM_OPE, "Invalid Context");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx_data->ctx_mutex);
+	if (ctx_data->ctx_state != OPE_CTX_STATE_ACQUIRED) {
+		mutex_unlock(&ctx_data->ctx_mutex);
+		CAM_ERR(CAM_OPE, "ctx id %u is not acquired state: %d",
+			ctx_data->ctx_id, ctx_data->ctx_state);
+		return -EINVAL;
+	}
+
+	/* Generic packet sanity, then OPE-specific validation */
+	packet = prepare_args->packet;
+	rc = cam_packet_util_validate_packet(packet, prepare_args->remain_len);
+	if (rc) {
+		mutex_unlock(&ctx_data->ctx_mutex);
+		CAM_ERR(CAM_OPE, "packet validation is failed: %d", rc);
+		return rc;
+	}
+
+	rc = cam_ope_mgr_pkt_validation(packet);
+	if (rc) {
+		mutex_unlock(&ctx_data->ctx_mutex);
+		CAM_ERR(CAM_OPE, "ope packet validation is failed");
+		return -EINVAL;
+	}
+
+	/* Patch buffer addresses against the CDM iommu handles */
+	rc = cam_packet_util_process_patches(packet, hw_mgr->iommu_cdm_hdl,
+		hw_mgr->iommu_sec_cdm_hdl);
+	if (rc) {
+		mutex_unlock(&ctx_data->ctx_mutex);
+		CAM_ERR(CAM_OPE, "Patching is failed: %d", rc);
+		return -EINVAL;
+	}
+
+	/*
+	 * NOTE(review): request_idx is uint32_t, so the "< 0" arm below is
+	 * always false (dead code); the upper bound check is what matters.
+	 */
+	request_idx  = find_first_zero_bit(ctx_data->bitmap, ctx_data->bits);
+	if (request_idx >= CAM_CTX_REQ_MAX || request_idx < 0) {
+		mutex_unlock(&ctx_data->ctx_mutex);
+		CAM_ERR(CAM_OPE, "Invalid ctx req slot = %d", request_idx);
+		return -EINVAL;
+	}
+	set_bit(request_idx, ctx_data->bitmap);
+
+	ctx_data->req_list[request_idx] =
+		kzalloc(sizeof(struct cam_ope_request), GFP_KERNEL);
+	if (!ctx_data->req_list[request_idx]) {
+		rc = -ENOMEM;
+		mutex_unlock(&ctx_data->ctx_mutex);
+		goto req_mem_alloc_failed;
+	}
+
+	/* CDM BL request sized for the worst-case number of BLs */
+	ope_req = ctx_data->req_list[request_idx];
+	ope_req->cdm_cmd =
+		kzalloc(((sizeof(struct cam_cdm_bl_request)) +
+			((OPE_MAX_CDM_BLS - 1) *
+			sizeof(struct cam_cdm_bl_cmd))),
+			GFP_KERNEL);
+	if (!ope_req->cdm_cmd) {
+		rc = -ENOMEM;
+		mutex_unlock(&ctx_data->ctx_mutex);
+		goto req_cdm_mem_alloc_failed;
+	}
+
+	rc = cam_ope_mgr_process_cmd_desc(hw_mgr, packet,
+		ctx_data, &ope_cmd_buf_addr, request_idx);
+	if (rc) {
+		mutex_unlock(&ctx_data->ctx_mutex);
+		CAM_ERR(CAM_OPE, "cmd desc processing failed: %d", rc);
+		goto end;
+	}
+
+	rc = cam_ope_mgr_process_io_cfg(hw_mgr, packet, prepare_args,
+		ctx_data, request_idx);
+	if (rc) {
+		mutex_unlock(&ctx_data->ctx_mutex);
+		CAM_ERR(CAM_OPE, "IO cfg processing failed: %d", rc);
+		goto end;
+	}
+
+	rc = cam_ope_mgr_create_kmd_buf(hw_mgr, packet, prepare_args,
+		ctx_data, request_idx, ope_cmd_buf_addr);
+	if (rc) {
+		mutex_unlock(&ctx_data->ctx_mutex);
+		CAM_ERR(CAM_OPE, "cam_ope_mgr_create_kmd_buf failed: %d", rc);
+		goto end;
+	}
+
+	prepare_args->num_hw_update_entries = 1;
+	prepare_args->hw_update_entries[0].addr =
+		(uintptr_t)ctx_data->req_list[request_idx]->cdm_cmd;
+	prepare_args->priv = ctx_data->req_list[request_idx];
+
+	mutex_unlock(&ctx_data->ctx_mutex);
+
+	return rc;
+
+/*
+ * NOTE(review): the cleanup below runs after ctx_mutex was released in the
+ * failing branch — confirm no concurrent path can observe the half-freed
+ * req_list slot.
+ */
+end:
+	kzfree(ctx_data->req_list[request_idx]->cdm_cmd);
+	ctx_data->req_list[request_idx]->cdm_cmd = NULL;
+req_cdm_mem_alloc_failed:
+	kzfree(ctx_data->req_list[request_idx]);
+	ctx_data->req_list[request_idx] = NULL;
+req_mem_alloc_failed:
+	clear_bit(request_idx, ctx_data->bitmap);
+	return rc;
+}
+
+/*
+ * cam_ope_mgr_handle_config_err - report a failed config to the context
+ * layer and free the request slot it occupied.
+ *
+ * Always returns 0.
+ */
+static int cam_ope_mgr_handle_config_err(
+	struct cam_hw_config_args *config_args,
+	struct cam_ope_ctx *ctx_data)
+{
+	/* Fix: zero-init so fields not explicitly set below reach the
+	 * callback as 0 instead of stack garbage.
+	 */
+	struct cam_hw_done_event_data buf_data = {0};
+	struct cam_ope_request *ope_req;
+	uint32_t req_idx;
+
+	ope_req = config_args->priv;
+
+	buf_data.request_id = ope_req->request_id;
+	/* NOTE(review): 'false' is passed as the event id — confirm it maps
+	 * to the error/failed event in the context layer.
+	 */
+	ctx_data->ctxt_event_cb(ctx_data->context_priv, false, &buf_data);
+
+	/* Free the request slot and clear its bitmap bit */
+	req_idx = ope_req->req_idx;
+	ope_req->request_id = 0;
+	kzfree(ctx_data->req_list[req_idx]->cdm_cmd);
+	ctx_data->req_list[req_idx]->cdm_cmd = NULL;
+	kzfree(ctx_data->req_list[req_idx]);
+	ctx_data->req_list[req_idx] = NULL;
+	clear_bit(req_idx, ctx_data->bitmap);
+
+	return 0;
+}
+
+/*
+ * Queue a prepared config request onto the OPE command workqueue for
+ * asynchronous submission by cam_ope_mgr_process_cmd.
+ */
+static int cam_ope_mgr_enqueue_config(struct cam_ope_hw_mgr *hw_mgr,
+	struct cam_ope_ctx *ctx_data,
+	struct cam_hw_config_args *config_args)
+{
+	struct cam_ope_request *req = config_args->priv;
+	struct cam_hw_update_entry *entries = config_args->hw_update_entries;
+	uint64_t req_id = req->request_id;
+	struct crm_workq_task *wq_task;
+	struct ope_cmd_work_data *wq_data;
+
+	CAM_DBG(CAM_OPE, "req_id = %lld %pK", req_id, config_args->priv);
+
+	wq_task = cam_req_mgr_workq_get_task(ope_hw_mgr->cmd_work);
+	if (!wq_task) {
+		CAM_ERR(CAM_OPE, "no empty task");
+		return -ENOMEM;
+	}
+
+	wq_data = (struct ope_cmd_work_data *)wq_task->payload;
+	wq_data->data = (void *)entries->addr;
+	wq_data->req_id = req_id;
+	wq_data->type = OPE_WORKQ_TASK_CMD_TYPE;
+	wq_task->process_cb = cam_ope_mgr_process_cmd;
+
+	return cam_req_mgr_workq_enqueue_task(wq_task, ctx_data,
+		CRM_TASK_PRIORITY_0);
+}
+
+/*
+ * cam_ope_mgr_config_hw - queue a prepared request for HW configuration
+ * @hw_priv:        OPE hw mgr handle
+ * @hw_config_args: struct cam_hw_config_args; ->priv holds the request
+ *
+ * Validates the context state, stamps the CDM cookie with the request
+ * slot and enqueues the update entries on the command workqueue. On
+ * enqueue failure the request is torn down and a buf-done error is
+ * reported via cam_ope_mgr_handle_config_err().
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int cam_ope_mgr_config_hw(void *hw_priv, void *hw_config_args)
+{
+	int rc = 0;
+	struct cam_ope_hw_mgr *hw_mgr = hw_priv;
+	struct cam_hw_config_args *config_args = hw_config_args;
+	struct cam_ope_ctx *ctx_data = NULL;
+	struct cam_ope_request *ope_req = NULL;
+	struct cam_cdm_bl_request *cdm_cmd;
+
+	CAM_DBG(CAM_OPE, "E");
+	if (!hw_mgr || !config_args) {
+		CAM_ERR(CAM_OPE, "Invalid arguments %pK %pK",
+			hw_mgr, config_args);
+		return -EINVAL;
+	}
+
+	if (!config_args->num_hw_update_entries) {
+		/* fixed typo in log: "enteries" -> "entries" */
+		CAM_ERR(CAM_OPE, "No hw update entries are available");
+		return -EINVAL;
+	}
+
+	ctx_data = config_args->ctxt_to_hw_map;
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	mutex_lock(&ctx_data->ctx_mutex);
+	if (ctx_data->ctx_state != OPE_CTX_STATE_ACQUIRED) {
+		mutex_unlock(&ctx_data->ctx_mutex);
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		CAM_ERR(CAM_OPE, "ctx id :%u is not in use",
+			ctx_data->ctx_id);
+		return -EINVAL;
+	}
+
+	/* The CDM cookie carries the request slot back through the IRQ path */
+	ope_req = config_args->priv;
+	cdm_cmd = (struct cam_cdm_bl_request *)
+		config_args->hw_update_entries->addr;
+	cdm_cmd->cookie = ope_req->req_idx;
+
+	rc = cam_ope_mgr_enqueue_config(hw_mgr, ctx_data, config_args);
+	if (rc)
+		goto config_err;
+
+	CAM_DBG(CAM_OPE, "req_id %llu, io config", ope_req->request_id);
+
+	/* Re-arm the request watchdog (200 -- presumably ms; confirm
+	 * against cam_ope_req_timer_modify()'s unit)
+	 */
+	cam_ope_req_timer_modify(ctx_data, 200);
+	mutex_unlock(&ctx_data->ctx_mutex);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	return rc;
+config_err:
+	cam_ope_mgr_handle_config_err(config_args, ctx_data);
+	mutex_unlock(&ctx_data->ctx_mutex);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	return rc;
+}
+
+/*
+ * cam_ope_mgr_hw_open_u - userspace-initiated open of the OPE device
+ * @hw_priv:          OPE hw mgr handle
+ * @fw_download_args: unused
+ *
+ * Only a single open is allowed; a second open returns -EBUSY.
+ */
+static int cam_ope_mgr_hw_open_u(void *hw_priv, void *fw_download_args)
+{
+	struct cam_ope_hw_mgr *hw_mgr = hw_priv;
+
+	if (!hw_mgr) {
+		CAM_ERR(CAM_OPE, "Invalid args: %pK", hw_priv);
+		return -EINVAL;
+	}
+
+	if (hw_mgr->open_cnt) {
+		CAM_ERR(CAM_OPE, "Multiple opens are not supported");
+		return -EBUSY;
+	}
+
+	hw_mgr->open_cnt++;
+	return 0;
+}
+
+/*
+ * cam_ope_mgr_hw_close_u - userspace-initiated close of the OPE device
+ * @hw_priv:       OPE hw mgr handle
+ * @hw_close_args: unused
+ *
+ * Return: 0 on success, -EINVAL if the device is not open.
+ */
+static int cam_ope_mgr_hw_close_u(void *hw_priv, void *hw_close_args)
+{
+	struct cam_ope_hw_mgr *hw_mgr;
+	int rc = 0;
+
+	if (!hw_priv) {
+		CAM_ERR(CAM_OPE, "Invalid args: %pK", hw_priv);
+		return -EINVAL;
+	}
+
+	hw_mgr = hw_priv;
+	if (!hw_mgr->open_cnt) {
+		rc = -EINVAL;
+		CAM_ERR(CAM_OPE, "device is already closed");
+	} else {
+		hw_mgr->open_cnt--;
+	}
+
+	return rc;
+}
+
+/*
+ * cam_ope_mgr_flush_req - flush one pending request from a context
+ * @ctx_data:   context whose request list is scanned
+ * @flush_args: flush arguments; only flush_req_pending[0] is consulted
+ *
+ * Frees every request-list entry whose request_id matches the first
+ * pending request id and clears its slot. Always returns 0.
+ */
+static int cam_ope_mgr_flush_req(struct cam_ope_ctx *ctx_data,
+	struct cam_hw_flush_args *flush_args)
+{
+	int64_t target_id;
+	int slot;
+
+	target_id = *(int64_t *)flush_args->flush_req_pending[0];
+
+	for (slot = 0; slot < CAM_CTX_REQ_MAX; slot++) {
+		struct cam_ope_request *req = ctx_data->req_list[slot];
+
+		if (!req || req->request_id != target_id)
+			continue;
+
+		req->request_id = 0;
+		kzfree(req->cdm_cmd);
+		req->cdm_cmd = NULL;
+		kzfree(ctx_data->req_list[slot]);
+		ctx_data->req_list[slot] = NULL;
+		clear_bit(slot, ctx_data->bitmap);
+	}
+
+	return 0;
+}
+
+/*
+ * cam_ope_mgr_flush_all - flush every outstanding request on a context
+ * @ctx_data:   context whose requests are discarded
+ * @flush_args: unused at present
+ *
+ * Flushes the OPE CDM, issues a reset to every OPE core, then frees all
+ * requests still held in the context's request list and clears their
+ * bitmap slots.
+ *
+ * NOTE(review): the rc from cam_cdm_flush_hw() is overwritten by the
+ * reset loop below, so a CDM flush failure is silently dropped unless
+ * the last reset also fails -- confirm this is intended.
+ */
+static int cam_ope_mgr_flush_all(struct cam_ope_ctx *ctx_data,
+	struct cam_hw_flush_args *flush_args)
+{
+	int i, rc;
+	struct cam_ope_hw_mgr *hw_mgr = ope_hw_mgr;
+
+	rc = cam_cdm_flush_hw(ctx_data->ope_cdm.cdm_handle);
+
+	/* Reset each OPE core; log but continue on per-device failure */
+	for (i = 0; i < hw_mgr->num_ope; i++) {
+		rc = hw_mgr->ope_dev_intf[i]->hw_ops.process_cmd(
+			hw_mgr->ope_dev_intf[i]->hw_priv, OPE_HW_RESET,
+			NULL, 0);
+		if (rc)
+			CAM_ERR(CAM_OPE, "OPE Dev reset failed: %d", rc);
+	}
+
+	/* Drop every request still tracked by this context */
+	for (i = 0; i < CAM_CTX_REQ_MAX; i++) {
+		if (!ctx_data->req_list[i])
+			continue;
+
+		ctx_data->req_list[i]->request_id = 0;
+		kzfree(ctx_data->req_list[i]->cdm_cmd);
+		ctx_data->req_list[i]->cdm_cmd = NULL;
+		kzfree(ctx_data->req_list[i]);
+		ctx_data->req_list[i] = NULL;
+		clear_bit(i, ctx_data->bitmap);
+	}
+
+	return rc;
+}
+
+/*
+ * cam_ope_mgr_hw_flush - flush entry point for the OPE hw mgr
+ * @hw_priv:       OPE hw mgr handle
+ * @hw_flush_args: struct cam_hw_flush_args describing the flush
+ *
+ * Dispatches CAM_FLUSH_TYPE_ALL / CAM_FLUSH_TYPE_REQ under the context
+ * mutex. Flushing active requests is not supported.
+ *
+ * Return: 0 on success, -EINVAL on bad input or unsupported flush.
+ */
+static int cam_ope_mgr_hw_flush(void *hw_priv, void *hw_flush_args)
+{
+	struct cam_hw_flush_args *args = hw_flush_args;
+	struct cam_ope_ctx *ctx_data;
+
+	/* Guard clauses: both params, the context map and the type range */
+	if (!hw_priv || !hw_flush_args) {
+		CAM_ERR(CAM_OPE, "Input params are Null");
+		return -EINVAL;
+	}
+
+	ctx_data = args->ctxt_to_hw_map;
+	if (!ctx_data) {
+		CAM_ERR(CAM_OPE, "Ctx data is NULL");
+		return -EINVAL;
+	}
+
+	if ((args->flush_type < CAM_FLUSH_TYPE_REQ) ||
+		(args->flush_type >= CAM_FLUSH_TYPE_MAX)) {
+		CAM_ERR(CAM_OPE, "Invalid flush type: %d",
+			args->flush_type);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_REQ, "ctx_id %d Flush type %d",
+			ctx_data->ctx_id, args->flush_type);
+
+	switch (args->flush_type) {
+	case CAM_FLUSH_TYPE_ALL:
+		mutex_lock(&ctx_data->ctx_mutex);
+		cam_ope_mgr_flush_all(ctx_data, args);
+		mutex_unlock(&ctx_data->ctx_mutex);
+		break;
+	case CAM_FLUSH_TYPE_REQ:
+		mutex_lock(&ctx_data->ctx_mutex);
+		if (args->num_req_active) {
+			CAM_ERR(CAM_OPE, "Flush request is not supported");
+			mutex_unlock(&ctx_data->ctx_mutex);
+			return -EINVAL;
+		}
+		if (args->num_req_pending)
+			cam_ope_mgr_flush_req(ctx_data, args);
+		mutex_unlock(&ctx_data->ctx_mutex);
+		break;
+	default:
+		/* Unreachable: range was validated above */
+		CAM_ERR(CAM_OPE, "Invalid flush type: %d",
+				args->flush_type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * cam_ope_mgr_alloc_devs - allocate the OPE device interface table
+ * @of_node: OPE hw mgr device tree node
+ *
+ * Reads the "num-ope" property and allocates a zeroed array of
+ * cam_hw_intf pointers for the OPE devices.
+ *
+ * Return: 0 on success, -EINVAL for a missing/zero count, -ENOMEM on
+ * allocation failure.
+ */
+static int cam_ope_mgr_alloc_devs(struct device_node *of_node)
+{
+	int rc;
+	uint32_t num_dev;
+
+	rc = of_property_read_u32(of_node, "num-ope", &num_dev);
+	if (rc) {
+		CAM_ERR(CAM_OPE, "getting num of ope failed: %d", rc);
+		return -EINVAL;
+	}
+
+	/* A zero count would yield an unusable zero-length allocation */
+	if (!num_dev) {
+		CAM_ERR(CAM_OPE, "invalid num-ope: 0");
+		return -EINVAL;
+	}
+
+	/* kcalloc: overflow-safe n * size, zero-initialized */
+	ope_hw_mgr->devices[OPE_DEV_OPE] = kcalloc(num_dev,
+		sizeof(struct cam_hw_intf *), GFP_KERNEL);
+	if (!ope_hw_mgr->devices[OPE_DEV_OPE])
+		return -ENOMEM;
+
+	return 0;
+}
+
+/*
+ * cam_ope_mgr_init_devs - discover OPE child devices from the dev tree
+ * @of_node: OPE hw mgr device tree node
+ *
+ * Looks up each "compat-hw-name" child node, caches its cam_hw_intf in
+ * the device table, records the per-device CDM register map and reads
+ * the "hfi_en" flag. On any failure the device table is freed.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int cam_ope_mgr_init_devs(struct device_node *of_node)
+{
+	int rc = 0;
+	int count, i;
+	const char *name = NULL;
+	struct device_node *child_node = NULL;
+	struct platform_device *child_pdev = NULL;
+	struct cam_hw_intf *child_dev_intf = NULL;
+	struct cam_hw_info *ope_dev;
+	struct cam_hw_soc_info *soc_info = NULL;
+
+	rc = cam_ope_mgr_alloc_devs(of_node);
+	if (rc)
+		return rc;
+
+	/* of_property_count_strings() returns a negative errno on failure */
+	count = of_property_count_strings(of_node, "compat-hw-name");
+	if (count <= 0) {
+		CAM_ERR(CAM_OPE, "no compat hw found in dev tree, cnt = %d",
+			count);
+		rc = -EINVAL;
+		goto compat_hw_name_failed;
+	}
+
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node, "compat-hw-name",
+			i, &name);
+		if (rc) {
+			CAM_ERR(CAM_OPE, "getting dev object name failed");
+			goto compat_hw_name_failed;
+		}
+
+		child_node = of_find_node_by_name(NULL, name);
+		if (!child_node) {
+			CAM_ERR(CAM_OPE, "Cannot find node in dtsi %s", name);
+			rc = -ENODEV;
+			goto compat_hw_name_failed;
+		}
+
+		child_pdev = of_find_device_by_node(child_node);
+		if (!child_pdev) {
+			CAM_ERR(CAM_OPE, "failed to find device on bus %s",
+				child_node->name);
+			rc = -ENODEV;
+			of_node_put(child_node);
+			goto compat_hw_name_failed;
+		}
+
+		child_dev_intf = (struct cam_hw_intf *)platform_get_drvdata(
+			child_pdev);
+		if (!child_dev_intf) {
+			CAM_ERR(CAM_OPE, "no child device");
+			/* fix: rc previously stayed 0 here and the node
+			 * reference was leaked
+			 */
+			rc = -ENODEV;
+			of_node_put(child_node);
+			goto compat_hw_name_failed;
+		}
+		ope_hw_mgr->devices[child_dev_intf->hw_type]
+			[child_dev_intf->hw_idx] = child_dev_intf;
+
+		if (!child_dev_intf->hw_ops.process_cmd) {
+			/* fix: rc previously stayed 0 here and the node
+			 * reference was leaked
+			 */
+			rc = -EINVAL;
+			of_node_put(child_node);
+			goto compat_hw_name_failed;
+		}
+
+		of_node_put(child_node);
+	}
+
+	ope_hw_mgr->num_ope = count;
+	for (i = 0; i < count; i++) {
+		ope_hw_mgr->ope_dev_intf[i] =
+			ope_hw_mgr->devices[OPE_DEV_OPE][i];
+		ope_dev = ope_hw_mgr->ope_dev_intf[i]->hw_priv;
+		soc_info = &ope_dev->soc_info;
+		/* NOTE(review): cdm_reg_map is declared as
+		 * struct cam_soc_reg_map *, yet mem_base is stored here --
+		 * confirm the intended type
+		 */
+		ope_hw_mgr->cdm_reg_map[i][0] =
+			soc_info->reg_map[0].mem_base;
+	}
+
+	ope_hw_mgr->hfi_en = of_property_read_bool(of_node, "hfi_en");
+
+	return 0;
+compat_hw_name_failed:
+	kfree(ope_hw_mgr->devices[OPE_DEV_OPE]);
+	ope_hw_mgr->devices[OPE_DEV_OPE] = NULL;
+	return rc;
+}
+
+/*
+ * cam_ope_mgr_create_wq - create OPE command/message/timer workqueues
+ *
+ * Creates the three work queues, allocates their per-task payload
+ * arrays and wires each workq task's payload pointer to its slot. On
+ * failure everything created so far is torn down in reverse order.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int cam_ope_mgr_create_wq(void)
+{
+	int rc;
+	int i;
+
+	rc = cam_req_mgr_workq_create("ope_command_queue", OPE_WORKQ_NUM_TASK,
+		&ope_hw_mgr->cmd_work, CRM_WORKQ_USAGE_NON_IRQ,
+		0);
+	if (rc) {
+		CAM_ERR(CAM_OPE, "unable to create a command worker");
+		goto cmd_work_failed;
+	}
+
+	rc = cam_req_mgr_workq_create("ope_message_queue", OPE_WORKQ_NUM_TASK,
+		&ope_hw_mgr->msg_work, CRM_WORKQ_USAGE_IRQ, 0);
+	if (rc) {
+		CAM_ERR(CAM_OPE, "unable to create a message worker");
+		goto msg_work_failed;
+	}
+
+	rc = cam_req_mgr_workq_create("ope_timer_queue", OPE_WORKQ_NUM_TASK,
+		&ope_hw_mgr->timer_work, CRM_WORKQ_USAGE_IRQ, 0);
+	if (rc) {
+		CAM_ERR(CAM_OPE, "unable to create a timer worker");
+		goto timer_work_failed;
+	}
+
+	/* kcalloc: overflow-safe n * size, zero-initialized */
+	ope_hw_mgr->cmd_work_data = kcalloc(OPE_WORKQ_NUM_TASK,
+		sizeof(struct ope_cmd_work_data), GFP_KERNEL);
+	if (!ope_hw_mgr->cmd_work_data) {
+		rc = -ENOMEM;
+		goto cmd_work_data_failed;
+	}
+
+	ope_hw_mgr->msg_work_data = kcalloc(OPE_WORKQ_NUM_TASK,
+		sizeof(struct ope_msg_work_data), GFP_KERNEL);
+	if (!ope_hw_mgr->msg_work_data) {
+		rc = -ENOMEM;
+		goto msg_work_data_failed;
+	}
+
+	ope_hw_mgr->timer_work_data = kcalloc(OPE_WORKQ_NUM_TASK,
+		sizeof(struct ope_clk_work_data), GFP_KERNEL);
+	if (!ope_hw_mgr->timer_work_data) {
+		rc = -ENOMEM;
+		goto timer_work_data_failed;
+	}
+
+	/* Point every pooled task at its private payload slot */
+	for (i = 0; i < OPE_WORKQ_NUM_TASK; i++)
+		ope_hw_mgr->msg_work->task.pool[i].payload =
+				&ope_hw_mgr->msg_work_data[i];
+
+	for (i = 0; i < OPE_WORKQ_NUM_TASK; i++)
+		ope_hw_mgr->cmd_work->task.pool[i].payload =
+				&ope_hw_mgr->cmd_work_data[i];
+
+	for (i = 0; i < OPE_WORKQ_NUM_TASK; i++)
+		ope_hw_mgr->timer_work->task.pool[i].payload =
+				&ope_hw_mgr->timer_work_data[i];
+	return 0;
+
+timer_work_data_failed:
+	kfree(ope_hw_mgr->msg_work_data);
+msg_work_data_failed:
+	kfree(ope_hw_mgr->cmd_work_data);
+cmd_work_data_failed:
+	cam_req_mgr_workq_destroy(&ope_hw_mgr->timer_work);
+timer_work_failed:
+	cam_req_mgr_workq_destroy(&ope_hw_mgr->msg_work);
+msg_work_failed:
+	cam_req_mgr_workq_destroy(&ope_hw_mgr->cmd_work);
+cmd_work_failed:
+	return rc;
+}
+
+
+/*
+ * cam_ope_hw_mgr_init - initialize the OPE HW manager
+ * @of_node:    OPE hw mgr device tree node
+ * @hw_mgr_hdl: cam_hw_mgr_intf to populate with OPE callbacks
+ * @iommu_hdl:  optional out param, receives the non-secure SMMU handle
+ *
+ * Allocates the global OPE hw mgr, wires the hw_mgr_intf ops, sets up
+ * per-context bitmaps, discovers the OPE devices, acquires SMMU/CDM
+ * handles and creates the work queues. On failure all acquired
+ * resources are released in reverse order.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int cam_ope_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl,
+	int *iommu_hdl)
+{
+	int i, rc = 0, j;
+	struct cam_hw_mgr_intf *hw_mgr_intf;
+	struct cam_iommu_handle cdm_handles;
+
+	if (!of_node || !hw_mgr_hdl) {
+		CAM_ERR(CAM_OPE, "Invalid args of_node %pK hw_mgr %pK",
+			of_node, hw_mgr_hdl);
+		return -EINVAL;
+	}
+	hw_mgr_intf = (struct cam_hw_mgr_intf *)hw_mgr_hdl;
+
+	ope_hw_mgr = kzalloc(sizeof(struct cam_ope_hw_mgr), GFP_KERNEL);
+	if (!ope_hw_mgr) {
+		/* fix: sizeof yields size_t, use %zu instead of %d */
+		CAM_ERR(CAM_OPE, "Unable to allocate mem for: size = %zu",
+			sizeof(struct cam_ope_hw_mgr));
+		return -ENOMEM;
+	}
+
+	hw_mgr_intf->hw_mgr_priv = ope_hw_mgr;
+	hw_mgr_intf->hw_get_caps = cam_ope_mgr_get_hw_caps;
+	hw_mgr_intf->hw_acquire = cam_ope_mgr_acquire_hw;
+	hw_mgr_intf->hw_release = cam_ope_mgr_release_hw;
+	hw_mgr_intf->hw_start   = NULL;
+	hw_mgr_intf->hw_stop    = NULL;
+	hw_mgr_intf->hw_prepare_update = cam_ope_mgr_prepare_hw_update;
+	hw_mgr_intf->hw_config_stream_settings = NULL;
+	hw_mgr_intf->hw_config = cam_ope_mgr_config_hw;
+	hw_mgr_intf->hw_read   = NULL;
+	hw_mgr_intf->hw_write  = NULL;
+	hw_mgr_intf->hw_cmd = NULL;
+	hw_mgr_intf->hw_open = cam_ope_mgr_hw_open_u;
+	hw_mgr_intf->hw_close = cam_ope_mgr_hw_close_u;
+	hw_mgr_intf->hw_flush = cam_ope_mgr_hw_flush;
+
+	ope_hw_mgr->secure_mode = false;
+	mutex_init(&ope_hw_mgr->hw_mgr_mutex);
+	spin_lock_init(&ope_hw_mgr->hw_mgr_lock);
+
+	/* Per-context request bitmaps */
+	for (i = 0; i < OPE_CTX_MAX; i++) {
+		ope_hw_mgr->ctx[i].bitmap_size =
+			BITS_TO_LONGS(CAM_CTX_REQ_MAX) *
+			sizeof(long);
+		ope_hw_mgr->ctx[i].bitmap = kzalloc(
+			ope_hw_mgr->ctx[i].bitmap_size, GFP_KERNEL);
+		if (!ope_hw_mgr->ctx[i].bitmap) {
+			/* fix: bitmap_size is size_t, use %zu */
+			CAM_ERR(CAM_OPE, "bitmap allocation failed: size = %zu",
+				ope_hw_mgr->ctx[i].bitmap_size);
+			rc = -ENOMEM;
+			goto ope_ctx_bitmap_failed;
+		}
+		ope_hw_mgr->ctx[i].bits = ope_hw_mgr->ctx[i].bitmap_size *
+			BITS_PER_BYTE;
+		mutex_init(&ope_hw_mgr->ctx[i].ctx_mutex);
+	}
+
+	rc = cam_ope_mgr_init_devs(of_node);
+	if (rc)
+		goto dev_init_failed;
+
+	/* Context-allocation bitmap */
+	ope_hw_mgr->ctx_bitmap_size =
+		BITS_TO_LONGS(OPE_CTX_MAX) * sizeof(long);
+	ope_hw_mgr->ctx_bitmap = kzalloc(ope_hw_mgr->ctx_bitmap_size,
+		GFP_KERNEL);
+	if (!ope_hw_mgr->ctx_bitmap) {
+		rc = -ENOMEM;
+		goto ctx_bitmap_alloc_failed;
+	}
+
+	ope_hw_mgr->ctx_bits = ope_hw_mgr->ctx_bitmap_size *
+		BITS_PER_BYTE;
+
+	rc = cam_smmu_get_handle("ope", &ope_hw_mgr->iommu_hdl);
+	if (rc) {
+		CAM_ERR(CAM_OPE, "get mmu handle failed: %d", rc);
+		goto ope_get_hdl_failed;
+	}
+
+	rc = cam_smmu_get_handle("cam-secure", &ope_hw_mgr->iommu_sec_hdl);
+	if (rc) {
+		CAM_ERR(CAM_OPE, "get secure mmu handle failed: %d", rc);
+		goto secure_hdl_failed;
+	}
+
+	rc = cam_cdm_get_iommu_handle("ope", &cdm_handles);
+	if (rc) {
+		CAM_ERR(CAM_OPE, "ope cdm handle get is failed: %d", rc);
+		goto ope_cdm_hdl_failed;
+	}
+
+	ope_hw_mgr->iommu_cdm_hdl = cdm_handles.non_secure;
+	ope_hw_mgr->iommu_sec_cdm_hdl = cdm_handles.secure;
+	CAM_DBG(CAM_OPE, "iommu hdls %x %x cdm %x %x",
+		ope_hw_mgr->iommu_hdl, ope_hw_mgr->iommu_sec_hdl,
+		ope_hw_mgr->iommu_cdm_hdl,
+		ope_hw_mgr->iommu_sec_cdm_hdl);
+
+	rc = cam_ope_mgr_create_wq();
+	if (rc)
+		goto ope_wq_create_failed;
+
+	if (iommu_hdl)
+		*iommu_hdl = ope_hw_mgr->iommu_hdl;
+
+	return rc;
+
+ope_wq_create_failed:
+	ope_hw_mgr->iommu_cdm_hdl = -1;
+	ope_hw_mgr->iommu_sec_cdm_hdl = -1;
+ope_cdm_hdl_failed:
+	cam_smmu_destroy_handle(ope_hw_mgr->iommu_sec_hdl);
+	ope_hw_mgr->iommu_sec_hdl = -1;
+secure_hdl_failed:
+	cam_smmu_destroy_handle(ope_hw_mgr->iommu_hdl);
+	ope_hw_mgr->iommu_hdl = -1;
+ope_get_hdl_failed:
+	kzfree(ope_hw_mgr->ctx_bitmap);
+	ope_hw_mgr->ctx_bitmap = NULL;
+	ope_hw_mgr->ctx_bitmap_size = 0;
+	ope_hw_mgr->ctx_bits = 0;
+ctx_bitmap_alloc_failed:
+	kzfree(ope_hw_mgr->devices[OPE_DEV_OPE]);
+	ope_hw_mgr->devices[OPE_DEV_OPE] = NULL;
+dev_init_failed:
+ope_ctx_bitmap_failed:
+	mutex_destroy(&ope_hw_mgr->hw_mgr_mutex);
+	/* i == OPE_CTX_MAX when the ctx loop completed; unwind j = i-1..0 */
+	for (j = i - 1; j >= 0; j--) {
+		mutex_destroy(&ope_hw_mgr->ctx[j].ctx_mutex);
+		kzfree(ope_hw_mgr->ctx[j].bitmap);
+		ope_hw_mgr->ctx[j].bitmap = NULL;
+		ope_hw_mgr->ctx[j].bitmap_size = 0;
+		ope_hw_mgr->ctx[j].bits = 0;
+	}
+	kzfree(ope_hw_mgr);
+	ope_hw_mgr = NULL;
+
+	return rc;
+}
+

+ 399 - 0
drivers/cam_ope/ope_hw_mgr/cam_ope_hw_mgr.h

@@ -0,0 +1,399 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_OPE_HW_MGR_H
+#define CAM_OPE_HW_MGR_H
+
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <media/cam_ope.h>
+#include "ope_hw.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_hw_intf.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_mem_mgr.h"
+#include "cam_smmu_api.h"
+#include "cam_soc_util.h"
+#include "cam_req_mgr_timer.h"
+#include "cam_context.h"
+#include "ope_hw.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_req_mgr_timer.h"
+
+#define OPE_CTX_MAX               32
+#define CAM_FRAME_CMD_MAX         20
+
+
+#define OPE_WORKQ_NUM_TASK        100
+#define OPE_WORKQ_TASK_CMD_TYPE   1
+#define OPE_WORKQ_TASK_MSG_TYPE   2
+
+#define OPE_PACKET_SIZE           0
+#define OPE_PACKET_TYPE           1
+#define OPE_PACKET_OPCODE         2
+
+#define OPE_PACKET_MAX_CMD_BUFS   4
+
+#define OPE_MAX_OUTPUT_SUPPORTED  8
+#define OPE_MAX_INPUT_SUPPORTED   3
+
+#define OPE_FRAME_PROCESS_SUCCESS 0
+#define OPE_FRAME_PROCESS_FAILURE 1
+
+#define OPE_CTX_STATE_FREE        0
+#define OPE_CTX_STATE_IN_USE      1
+#define OPE_CTX_STATE_ACQUIRED    2
+#define OPE_CTX_STATE_RELEASE     3
+
+#define OPE_CMDS                  OPE_MAX_CMD_BUFS
+#define CAM_MAX_IN_RES            8
+
+#define OPE_MAX_CDM_BLS           16
+
+/**
+ * struct ope_cmd_work_data
+ *
+ * @type:       Type of work data
+ * @data:       Private data
+ * @req_id:     Request Id
+ */
+struct ope_cmd_work_data {
+	uint32_t type;
+	void *data;
+	int64_t req_id;
+};
+
+/**
+ * struct ope_msg_work_data
+ *
+ * @type:       Type of work data
+ * @data:       Private data
+ * @irq_status: IRQ status
+ */
+struct ope_msg_work_data {
+	uint32_t type;
+	void *data;
+	uint32_t irq_status;
+};
+
+/**
+ * struct ope_clk_work_data
+ *
+ * @type: Type of work data
+ * @data: Private data
+ */
+struct ope_clk_work_data {
+	uint32_t type;
+	void *data;
+};
+
+/**
+ * struct cdm_dmi_cmd
+ *
+ * @length:   Number of bytes in LUT
+ * @reserved: reserved bits
+ * @cmd:      Command ID (CDMCmd)
+ * @addr:     Address of the LUT in memory
+ * @DMIAddr:  Address of the target DMI config register
+ * @DMISel:   DMI identifier
+ */
+struct cdm_dmi_cmd {
+	unsigned int length   : 16;
+	unsigned int reserved : 8;
+	unsigned int cmd      : 8;
+	unsigned int addr;
+	unsigned int DMIAddr  : 24;
+	unsigned int DMISel   : 8;
+} __attribute__((__packed__));
+
+/**
+ * struct ope_debug_buffer
+ *
+ * @cpu_addr:         CPU address
+ * @iova_addr:        IOVA address
+ * @len:              Buffer length
+ * @size:             Buffer Size
+ */
+struct ope_debug_buffer {
+	uintptr_t cpu_addr;
+	dma_addr_t iova_addr;
+	size_t len;
+	uint32_t size;
+};
+
+/**
+ * struct ope_kmd_buffer
+ *
+ * @mem_handle:       Memory handle
+ * @cpu_addr:         CPU address
+ * @iova_addr:        IOVA address
+ * @iova_cdm_addr:    CDM IOVA address
+ * @len:              Buffer length
+ * @size:             Buffer Size
+ */
+struct ope_kmd_buffer {
+	uint32_t mem_handle;
+	uintptr_t cpu_addr;
+	dma_addr_t iova_addr;
+	dma_addr_t iova_cdm_addr;
+	size_t len;
+	uint32_t size;
+};
+
+/**
+ * struct ope_stripe_settings
+ *
+ * @cpu_addr:      CPU address
+ * @iova_addr:     IOVA address
+ * @len:           Buffer length
+ * @size:          Buffer Size
+ * @buf_type:      Direct/Indirect type
+ * @type_buffered: SB/DB types
+ */
+struct ope_stripe_settings {
+	uintptr_t cpu_addr;
+	dma_addr_t iova_addr;
+	size_t len;
+	uint32_t size;
+	uint32_t buf_type;
+	uint32_t type_buffered;
+};
+
+/**
+ * struct ope_pass_settings
+ *
+ * @cpu_addr:         CPU address
+ * @iova_addr:        IOVA address
+ * @len:              Buffer length
+ * @size:             Buffer Size
+ * @idx:              Pass Index
+ * @buf_type:         Direct/Indirect type
+ * @type_buffered:    SB/DB types
+ */
+struct ope_pass_settings {
+	uintptr_t cpu_addr;
+	dma_addr_t iova_addr;
+	size_t len;
+	uint32_t size;
+	uint32_t idx;
+	uint32_t buf_type;
+	uint32_t type_buffered;
+};
+
+/**
+ * struct ope_frame_settings
+ *
+ * @cpu_addr:         CPU address
+ * @iova_addr:        IOVA address
+ * @offset:           offset
+ * @len:              Buffer length
+ * @size:             Buffer Size
+ * @buf_type:         Direct/Indirect type
+ * @type_buffered:    SB/DB types
+ * @prefecth_disable: Disable prefetch
+ */
+struct ope_frame_settings {
+	uintptr_t cpu_addr;
+	dma_addr_t iova_addr;
+	uint32_t offset;
+	size_t len;
+	uint32_t size;
+	uint32_t buf_type;
+	uint32_t type_buffered;
+	uint32_t prefecth_disable;
+};
+
+/**
+ * struct ope_stripe_io
+ *
+ * @format:            Stripe format
+ * @s_location:        Stripe location
+ * @cpu_addr:          Stripe CPU address
+ * @iova_addr:         Stripe IOVA address
+ * @width:             Stripe width
+ * @height:            Stripe height
+ * @stride:            Stripe stride
+ * @unpack_format:     Unpack format
+ * @pack_format:       Packing format
+ * @alignment:         Stripe alignment
+ * @offset:            Stripe offset
+ * @x_init:            X_init
+ * @subsample_period:  Subsample period
+ * @subsample_pattern: Subsample pattern
+ * @len:               Stripe buffer length
+ * @disable_bus:       disable bus for the stripe
+ */
+struct ope_stripe_io {
+	uint32_t format;
+	uint32_t s_location;
+	uintptr_t cpu_addr;
+	dma_addr_t iova_addr;
+	uint32_t width;
+	uint32_t height;
+	uint32_t stride;
+	uint32_t unpack_format;
+	uint32_t pack_format;
+	uint32_t alignment;
+	uint32_t offset;
+	uint32_t x_init;
+	uint32_t subsample_period;
+	uint32_t subsample_pattern;
+	size_t len;
+	uint32_t disable_bus;
+};
+
+/**
+ * struct ope_io_buf
+ *
+ * @direction:     Direction of a buffer
+ * @resource_type: Resource type of IO Buffer
+ * @format:        Format
+ * @fence:         Fence
+ * @num_planes:    Number of planes
+ * @num_stripes:   Number of stripes
+ * @s_io:          Stripe info
+ */
+struct ope_io_buf {
+	uint32_t direction;
+	uint32_t resource_type;
+	uint32_t format;
+	uint32_t fence;
+	uint32_t num_planes;
+	uint32_t num_stripes[OPE_MAX_PLANES];
+	struct ope_stripe_io s_io[OPE_MAX_PLANES][OPE_MAX_STRIPES];
+};
+
+/**
+ * struct cam_ope_request
+ *
+ * @request_id:          Request Id
+ * @req_idx:             Index in request list
+ * @state:               Request state
+ * @num_batch:           Number of batches
+ * @num_cmd_bufs:        Number of command buffers
+ * @num_frame_bufs:      Number of frame buffers
+ * @num_pass_bufs:       Number of pass Buffers
+ * @num_stripes:         Number of Stripes
+ * @num_io_bufs:         Number of IO Buffers
+ * @in_resource:         Input resource
+ * @num_stripe_cmd_bufs: Command buffers per stripe
+ * @ope_kmd_buf:         KMD buffer for OPE programming
+ * @ope_debug_buf:       Debug buffer
+ * @io_buf:              IO config info of a request
+ * @cdm_cmd:             CDM command for OPE CDM
+ */
+struct cam_ope_request {
+	uint64_t request_id;
+	uint32_t req_idx;
+	uint32_t state;
+	uint32_t num_batch;
+	uint32_t num_cmd_bufs;
+	uint32_t num_frame_bufs;
+	uint32_t num_pass_bufs;
+	uint32_t num_stripes[OPE_MAX_BATCH_SIZE];
+	uint32_t num_io_bufs[OPE_MAX_BATCH_SIZE];
+	uint32_t in_resource;
+	uint8_t num_stripe_cmd_bufs[OPE_MAX_BATCH_SIZE][OPE_MAX_STRIPES];
+	struct ope_kmd_buffer ope_kmd_buf;
+	struct ope_debug_buffer ope_debug_buf;
+	struct ope_io_buf io_buf[OPE_MAX_BATCH_SIZE][OPE_MAX_IO_BUFS];
+	struct cam_cdm_bl_request *cdm_cmd;
+};
+
+/**
+ * struct cam_ope_cdm
+ *
+ * @cdm_handle: OPE CDM Handle
+ * @cdm_ops:    OPE CDM Operations
+ */
+struct cam_ope_cdm {
+	uint32_t cdm_handle;
+	struct cam_cdm_utils_ops *cdm_ops;
+};
+
+/**
+ * struct cam_ope_ctx
+ *
+ * @context_priv:    Private data of context
+ * @bitmap:          Context bit map
+ * @bitmap_size:     Context bit map size
+ * @bits:            Context bit map bits
+ * @ctx_id:          Context ID
+ * @ctx_state:       State of a context
+ * @req_cnt:         Requests count
+ * @ctx_mutex:       Mutex for context
+ * @acquire_dev_cmd: Cam acquire command
+ * @ope_acquire:     OPE acquire command
+ * @ctxt_event_cb:   Callback of a context
+ * @req_list:        Request List
+ * @ope_cdm:         OPE CDM info
+ * @req_watch_dog:   Watchdog for requests
+ */
+struct cam_ope_ctx {
+	void *context_priv;
+	size_t bitmap_size;
+	void *bitmap;
+	size_t bits;
+	uint32_t ctx_id;
+	uint32_t ctx_state;
+	uint32_t req_cnt;
+	struct mutex ctx_mutex;
+	struct cam_acquire_dev_cmd acquire_dev_cmd;
+	struct ope_acquire_dev_info ope_acquire;
+	cam_hw_event_cb_func ctxt_event_cb;
+	struct cam_ope_request *req_list[CAM_CTX_REQ_MAX];
+	struct cam_ope_cdm ope_cdm;
+	struct cam_req_mgr_timer *req_watch_dog;
+};
+
+/**
+ * struct cam_ope_hw_mgr
+ *
+ * @open_cnt:          OPE device open count
+ * @ope_ctx_cnt:       Open context count
+ * @hw_mgr_mutex:      Mutex for HW manager
+ * @hw_mgr_lock:       Spinlock for HW manager
+ * @hfi_en:            Flag for HFI
+ * @iommu_hdl:         OPE Handle
+ * @iommu_sec_hdl:     OPE Handle for secure
+ * @iommu_cdm_hdl:     CDM Handle
+ * @iommu_sec_cdm_hdl: CDM Handle for secure
+ * @num_ope:           Number of OPE
+ * @secure_mode:       Mode of OPE operation
+ * @ctx_bitmap:        Context bit map
+ * @ctx_bitmap_size:   Context bit map size
+ * @ctx_bits:          Context bit map bits
+ * @ctx:               OPE context
+ * @devices:           OPE devices
+ * @ope_caps:          OPE capabilities
+ * @cmd_work:          Command work
+ * @msg_work:          Message work
+ * @timer_work:        Timer work
+ * @cmd_work_data:     Command work data
+ * @msg_work_data:     Message work data
+ * @timer_work_data:   Timer work data
+ * @ope_dev_intf:      OPE device interface
+ * @cdm_reg_map:       OPE CDM register map
+ */
+struct cam_ope_hw_mgr {
+	int32_t             open_cnt;
+	uint32_t            ope_ctx_cnt;
+	struct mutex        hw_mgr_mutex;
+	spinlock_t          hw_mgr_lock;
+	bool                hfi_en;
+	int32_t             iommu_hdl;
+	int32_t             iommu_sec_hdl;
+	int32_t             iommu_cdm_hdl;
+	int32_t             iommu_sec_cdm_hdl;
+	uint32_t            num_ope;
+	bool                secure_mode;
+	void *ctx_bitmap;
+	size_t ctx_bitmap_size;
+	size_t ctx_bits;
+	struct cam_ope_ctx  ctx[OPE_CTX_MAX];
+	struct cam_hw_intf  **devices[OPE_DEV_MAX];
+	struct ope_query_cap_cmd ope_caps;
+
+	struct cam_req_mgr_core_workq *cmd_work;
+	struct cam_req_mgr_core_workq *msg_work;
+	struct cam_req_mgr_core_workq *timer_work;
+	struct ope_cmd_work_data *cmd_work_data;
+	struct ope_msg_work_data *msg_work_data;
+	struct ope_clk_work_data *timer_work_data;
+	struct cam_hw_intf *ope_dev_intf[OPE_DEV_MAX];
+	struct cam_soc_reg_map *cdm_reg_map[OPE_DEV_MAX][OPE_BASE_MAX];
+};
+
+#endif /* CAM_OPE_HW_MGR_H */

+ 16 - 0
drivers/cam_ope/ope_hw_mgr/cam_ope_hw_mgr_intf.h

@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_OPE_HW_MGR_INTF_H
+#define CAM_OPE_HW_MGR_INTF_H
+
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <media/cam_ope.h>
+
+/**
+ * cam_ope_hw_mgr_init() - initialize the OPE HW manager
+ * @of_node:    device tree node for the OPE HW manager
+ * @hw_mgr_hdl: handle filled in as a struct cam_hw_mgr_intf
+ * @iommu_hdl:  optional out param for the non-secure SMMU handle
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int cam_ope_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl,
+	int *iommu_hdl);
+
+#endif /* CAM_OPE_HW_MGR_INTF_H */

+ 693 - 0
drivers/cam_ope/ope_hw_mgr/ope_hw/bus_rd/ope_bus_rd.c

@@ -0,0 +1,693 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/iopoll.h>
+#include <media/cam_ope.h>
+#include "cam_io_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "ope_core.h"
+#include "ope_soc.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+#include "cam_cdm_util.h"
+#include "ope_hw.h"
+#include "ope_dev_intf.h"
+#include "ope_bus_rd.h"
+
+static struct ope_bus_rd *bus_rd;
+
+/* Look up the in_port_to_rm[] slot registered for @input_port_id. */
+static int cam_ope_bus_rd_in_port_idx(uint32_t input_port_id)
+{
+	int idx;
+
+	for (idx = 0; idx < OPE_IN_RES_MAX; idx++) {
+		if (bus_rd->in_port_to_rm[idx].input_port_id ==
+			input_port_id)
+			return idx;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * Map a frame format onto a read-combo class: YUV formats select
+ * BUS_RD_YUV, raw/plain formats select BUS_RD_BAYER, anything else
+ * yields -EINVAL.
+ */
+static int cam_ope_bus_rd_combo_idx(uint32_t format)
+{
+	int combo = -EINVAL;
+
+	switch (format) {
+	case CAM_FORMAT_YUV422:
+	case CAM_FORMAT_NV21:
+	case CAM_FORMAT_NV12:
+		combo = BUS_RD_YUV;
+		break;
+	case CAM_FORMAT_MIPI_RAW_6:
+	case CAM_FORMAT_MIPI_RAW_8:
+	case CAM_FORMAT_MIPI_RAW_10:
+	case CAM_FORMAT_MIPI_RAW_12:
+	case CAM_FORMAT_MIPI_RAW_14:
+	case CAM_FORMAT_MIPI_RAW_16:
+	case CAM_FORMAT_MIPI_RAW_20:
+	case CAM_FORMAT_QTI_RAW_8:
+	case CAM_FORMAT_QTI_RAW_10:
+	case CAM_FORMAT_QTI_RAW_12:
+	case CAM_FORMAT_QTI_RAW_14:
+	case CAM_FORMAT_PLAIN8:
+	case CAM_FORMAT_PLAIN16_8:
+	case CAM_FORMAT_PLAIN16_10:
+	case CAM_FORMAT_PLAIN16_12:
+	case CAM_FORMAT_PLAIN16_14:
+	case CAM_FORMAT_PLAIN16_16:
+	case CAM_FORMAT_PLAIN32_20:
+	case CAM_FORMAT_PLAIN64:
+	case CAM_FORMAT_PLAIN128:
+		combo = BUS_RD_BAYER;
+		break;
+	default:
+		/* unsupported format: combo stays -EINVAL */
+		break;
+	}
+
+	CAM_DBG(CAM_OPE, "Input format = %u rc = %d",
+		format, combo);
+	return combo;
+}
+
+/*
+ * cam_ope_bus_rd_update - program BUS RD client registers for one input
+ * IO buffer (all of its planes and stripes) of a request.
+ *
+ * For every stripe a CDM REG_RANDOM command is written into the KMD
+ * buffer and its offset/length/address are recorded in the per-batch
+ * io_port_cdm bookkeeping so the HW manager can submit them later.
+ *
+ * Returns the advanced kmd_buf write pointer, or NULL on invalid
+ * arguments.
+ */
+static uint32_t *cam_ope_bus_rd_update(struct ope_hw *ope_hw_info,
+	int32_t ctx_id, uint32_t *kmd_buf, int batch_idx,
+	int io_idx, struct cam_ope_dev_prepare_req *prepare)
+{
+	int k, l, m;
+	uint32_t idx;
+	int32_t combo_idx;
+	uint32_t req_idx, count = 0, temp;
+	uint32_t temp_reg[128] = {0};
+	uint32_t rm_id, header_size;
+	uint32_t rsc_type;
+	struct cam_hw_prepare_update_args *prepare_args;
+	struct cam_ope_ctx *ctx_data;
+	struct cam_ope_request *ope_request;
+	struct ope_io_buf *io_buf;
+	struct ope_stripe_io *stripe_io;
+	struct ope_bus_rd_ctx *bus_rd_ctx;
+	struct cam_ope_bus_rd_reg *rd_reg;
+	struct cam_ope_bus_rd_client_reg *rd_reg_client;
+	struct cam_ope_bus_rd_reg_val *rd_reg_val;
+	struct cam_ope_bus_rd_client_reg_val *rd_res_val_client;
+	struct ope_bus_in_port_to_rm *in_port_to_rm;
+	struct ope_bus_rd_io_port_cdm_info *io_port_cdm;
+	struct cam_cdm_utils_ops *cdm_ops;
+	struct ope_bus_rd_io_port_info *io_port_info;
+
+
+	if (ctx_id < 0 || !prepare) {
+		CAM_ERR(CAM_OPE, "Invalid data: %d %x", ctx_id, prepare);
+		return NULL;
+	}
+
+	if (batch_idx >= OPE_MAX_BATCH_SIZE) {
+		CAM_ERR(CAM_OPE, "Invalid batch idx: %d", batch_idx);
+		return NULL;
+	}
+
+	if (io_idx >= OPE_MAX_IO_BUFS) {
+		CAM_ERR(CAM_OPE, "Invalid IO idx: %d", io_idx);
+		return NULL;
+	}
+
+	prepare_args = prepare->prepare_args;
+	ctx_data = prepare->ctx_data;
+	req_idx = prepare->req_idx;
+	cdm_ops = ctx_data->ope_cdm.cdm_ops;
+
+	ope_request = ctx_data->req_list[req_idx];
+	CAM_DBG(CAM_OPE, "req_idx = %d req_id = %lld KMDbuf %x offset %d",
+		req_idx, ope_request->request_id,
+		kmd_buf, prepare->kmd_buf_offset);
+	bus_rd_ctx = &bus_rd->bus_rd_ctx[ctx_id];
+	io_port_info = &bus_rd_ctx->io_port_info;
+	rd_reg = ope_hw_info->bus_rd_reg;
+	rd_reg_val = ope_hw_info->bus_rd_reg_val;
+
+	io_buf = &ope_request->io_buf[batch_idx][io_idx];
+
+	CAM_DBG(CAM_OPE, "batch:%d iobuf:%d direction:%d",
+		batch_idx, io_idx, io_buf->direction);
+	io_port_cdm =
+	&bus_rd_ctx->io_port_cdm_batch.io_port_cdm[batch_idx];
+	/* NOTE(review): assumes resource_type is 1-based — confirm
+	 * against the acquire path that fills io_buf.
+	 */
+	in_port_to_rm =
+	&bus_rd->in_port_to_rm[io_buf->resource_type - 1];
+	combo_idx = cam_ope_bus_rd_combo_idx(io_buf->format);
+	if (combo_idx < 0) {
+		CAM_ERR(CAM_OPE, "Invalid combo_idx");
+		return NULL;
+	}
+
+	/* Walk every plane, then every stripe within the plane. */
+	for (k = 0; k < io_buf->num_planes; k++) {
+		for (l = 0; l < io_buf->num_stripes[k]; l++) {
+			stripe_io = &io_buf->s_io[k][l];
+			rsc_type = io_buf->resource_type - 1;
+			/* frame level info */
+			/* stripe level info */
+			rm_id = in_port_to_rm->rm_port_id[combo_idx][k];
+			rd_reg_client = &rd_reg->rd_clients[rm_id];
+			rd_res_val_client = &rd_reg_val->rd_clients[rm_id];
+
+			/* security cfg */
+			temp_reg[count++] = rd_reg->offset +
+				rd_reg->security_cfg;
+			temp_reg[count++] =
+				ctx_data->ope_acquire.secure_mode;
+
+			/* enable client */
+			temp_reg[count++] = rd_reg->offset +
+				rd_reg_client->core_cfg;
+			temp_reg[count++] = 1;
+
+			/* ccif meta data */
+			temp_reg[count++] = rd_reg->offset +
+				rd_reg_client->ccif_meta_data;
+			temp = 0;
+			temp |= stripe_io->s_location &
+				rd_res_val_client->stripe_location_mask;
+			temp |=	(io_port_info->pixel_pattern[rsc_type] &
+				rd_res_val_client->pix_pattern_mask) <<
+				rd_res_val_client->pix_pattern_shift;
+			temp_reg[count++] = temp;
+
+			/* Address of the Image */
+			temp_reg[count++] = rd_reg->offset +
+				rd_reg_client->img_addr;
+			temp_reg[count++] = stripe_io->iova_addr;
+
+			/* Buffer size */
+			temp_reg[count++] = rd_reg->offset +
+				rd_reg_client->img_cfg;
+			temp = 0;
+			temp = stripe_io->height;
+			temp |=
+			(stripe_io->width &
+				rd_res_val_client->img_width_mask) <<
+				rd_res_val_client->img_width_shift;
+			temp_reg[count++] = temp;
+
+			/* stride */
+			temp_reg[count++] = rd_reg->offset +
+				rd_reg_client->stride;
+			temp_reg[count++] = stripe_io->stride;
+
+			/* Unpack cfg : Mode and alignment */
+			temp_reg[count++] = rd_reg->offset +
+				rd_reg_client->unpack_cfg;
+			temp = 0;
+			temp |= (stripe_io->unpack_format &
+				rd_res_val_client->mode_mask) <<
+				rd_res_val_client->mode_shift;
+			temp |= (stripe_io->alignment &
+				rd_res_val_client->alignment_mask) <<
+				rd_res_val_client->alignment_shift;
+			temp_reg[count++] = temp;
+
+			/* latency buffer allocation */
+			temp_reg[count++] = rd_reg->offset +
+				rd_reg_client->latency_buf_allocation;
+			temp_reg[count++] = io_port_info->latency_buf_size;
+
+			/*
+			 * Emit the accumulated addr/value pairs as one
+			 * REG_RANDOM command and record its location.
+			 * count/2 is the number of register writes.
+			 */
+			header_size = cdm_ops->cdm_get_cmd_header_size(
+				CAM_CDM_CMD_REG_RANDOM);
+			idx = io_port_cdm->num_s_cmd_bufs[l];
+			io_port_cdm->s_cdm_info[l][idx].len = sizeof(temp) *
+				(count + header_size);
+			io_port_cdm->s_cdm_info[l][idx].offset =
+				prepare->kmd_buf_offset;
+			io_port_cdm->s_cdm_info[l][idx].addr = kmd_buf;
+			io_port_cdm->num_s_cmd_bufs[l]++;
+
+			kmd_buf = cdm_ops->cdm_write_regrandom(
+				kmd_buf, count/2, temp_reg);
+			prepare->kmd_buf_offset += ((count + header_size) *
+				sizeof(temp));
+			CAM_DBG(CAM_OPE, "b:%d io:%d p:%d s:%d",
+				batch_idx, io_idx, k, l);
+			for (m = 0; m < count; m++)
+				CAM_DBG(CAM_OPE, "%d:temp:%x",
+					m, temp_reg[m]);
+			CAM_DBG(CAM_OPE, "kmd_buf:%x offset:%d",
+			kmd_buf, prepare->kmd_buf_offset);
+			CAM_DBG(CAM_OPE, "%x count: %d size:%d",
+				 temp_reg, count, header_size);
+			CAM_DBG(CAM_OPE, "RD cmdbufs:%d off:%d",
+			io_port_cdm->num_s_cmd_bufs[l],
+			io_port_cdm->s_cdm_info[l][idx].offset);
+			CAM_DBG(CAM_OPE, "len:%d",
+			io_port_cdm->s_cdm_info[l][idx].len);
+			CAM_DBG(CAM_OPE, "b:%d io:%d p:%d s:%d",
+				batch_idx, io_idx, k, l);
+			count = 0;
+		}
+	}
+
+	return kmd_buf;
+}
+
+/*
+ * cam_ope_bus_rd_prepare - build the BUS RD CDM program for a request.
+ *
+ * Programs every input IO buffer of every batch via
+ * cam_ope_bus_rd_update(), then appends the GO command and publishes
+ * the per-batch CDM bookkeeping to the prepare request.
+ *
+ * Fix vs. original: io_port_cdm was tested with "if (!io_port_cdm)"
+ * after the loops but never initialized, so when no input buffer was
+ * processed the check read an indeterminate pointer (undefined
+ * behavior). It is now initialized to NULL.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int cam_ope_bus_rd_prepare(struct ope_hw *ope_hw_info,
+	int32_t ctx_id, void *data)
+{
+	int rc = 0;
+	int i, j;
+	int32_t combo_idx;
+	uint32_t req_idx, count = 0, temp;
+	uint32_t temp_reg[32] = {0};
+	uint32_t header_size;
+	uint32_t *kmd_buf;
+	struct cam_ope_dev_prepare_req *prepare;
+	struct cam_ope_ctx *ctx_data;
+	struct cam_ope_request *ope_request;
+	struct ope_io_buf *io_buf;
+	struct ope_bus_rd_ctx *bus_rd_ctx;
+	struct cam_ope_bus_rd_reg *rd_reg;
+	struct cam_ope_bus_rd_reg_val *rd_reg_val;
+	struct ope_bus_rd_io_port_cdm_batch *io_port_cdm_batch;
+	struct ope_bus_rd_io_port_cdm_info *io_port_cdm = NULL;
+	struct cam_cdm_utils_ops *cdm_ops;
+
+	if (ctx_id < 0 || !data) {
+		CAM_ERR(CAM_OPE, "Invalid data: %d %x", ctx_id, data);
+		return -EINVAL;
+	}
+	prepare = data;
+
+	ctx_data = prepare->ctx_data;
+	req_idx = prepare->req_idx;
+	cdm_ops = ctx_data->ope_cdm.cdm_ops;
+
+	ope_request = ctx_data->req_list[req_idx];
+	/*
+	 * NOTE(review): kmd_buf_offset is accumulated in bytes below but
+	 * added here to a uint32_t pointer (word-scaled) — confirm the
+	 * offset unit convention against the other OPE dev modules.
+	 */
+	kmd_buf = (uint32_t *)ope_request->ope_kmd_buf.cpu_addr +
+		prepare->kmd_buf_offset;
+	CAM_DBG(CAM_OPE, "req_idx = %d req_id = %lld",
+		req_idx, ope_request->request_id);
+	CAM_DBG(CAM_OPE, "KMD buf and offset = %x %d",
+		kmd_buf, prepare->kmd_buf_offset);
+	bus_rd_ctx = &bus_rd->bus_rd_ctx[ctx_id];
+	io_port_cdm_batch =
+		&bus_rd_ctx->io_port_cdm_batch;
+	memset(io_port_cdm_batch, 0,
+		sizeof(struct ope_bus_rd_io_port_cdm_batch));
+	rd_reg = ope_hw_info->bus_rd_reg;
+	rd_reg_val = ope_hw_info->bus_rd_reg_val;
+
+	/* Program each input buffer of each batch. */
+	for (i = 0; i < ope_request->num_batch; i++) {
+		for (j = 0; j < ope_request->num_io_bufs[i]; j++) {
+			io_buf = &ope_request->io_buf[i][j];
+			if (io_buf->direction != CAM_BUF_INPUT)
+				continue;
+
+			CAM_DBG(CAM_OPE, "batch:%d iobuf:%d direction:%d",
+				i, j, io_buf->direction);
+			io_port_cdm =
+			&bus_rd_ctx->io_port_cdm_batch.io_port_cdm[i];
+
+			/* Validate the format before programming. */
+			combo_idx = cam_ope_bus_rd_combo_idx(io_buf->format);
+			if (combo_idx < 0) {
+				CAM_ERR(CAM_OPE, "Invalid combo_idx");
+				return combo_idx;
+			}
+
+			kmd_buf = cam_ope_bus_rd_update(ope_hw_info,
+				ctx_id, kmd_buf, i, j, prepare);
+			if (!kmd_buf) {
+				rc = -EINVAL;
+				goto end;
+			}
+		}
+	}
+
+	/* No input buffer was processed: nothing to attach a GO cmd to. */
+	if (!io_port_cdm) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* Go command */
+	count = 0;
+	temp_reg[count++] = rd_reg->offset +
+		rd_reg->input_if_cmd;
+	temp = 0;
+	temp |= rd_reg_val->go_cmd;
+	temp_reg[count++] = temp;
+
+	header_size =
+	cdm_ops->cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM);
+	io_port_cdm->go_cmd_addr = kmd_buf;
+	io_port_cdm->go_cmd_len =
+		sizeof(temp) * (count + header_size);
+	io_port_cdm->go_cmd_offset =
+		prepare->kmd_buf_offset;
+	kmd_buf = cdm_ops->cdm_write_regrandom(
+		kmd_buf, count/2, temp_reg);
+	prepare->kmd_buf_offset +=
+		((count + header_size) * sizeof(temp));
+	CAM_DBG(CAM_OPE, "kmd_buf:%x,offset:%d",
+		kmd_buf, prepare->kmd_buf_offset);
+	CAM_DBG(CAM_OPE, "t_reg:%xcount: %d size:%d",
+		 temp_reg, count, header_size);
+	prepare->rd_cdm_batch = &bus_rd_ctx->io_port_cdm_batch;
+
+end:
+	return rc;
+}
+
+/*
+ * cam_ope_bus_rd_release - release per-context bus RD state.
+ *
+ * Clears the acquire handle and the per-port bookkeeping for @ctx_id.
+ *
+ * Fixes vs. original: num_in_ports was zeroed *before* the cleanup
+ * loop, so the loop never executed; and the loop indexed
+ * input_format_type/pixel_pattern with [i - 1], which underflows at
+ * i == 0. The loop now runs over the recorded port count using index
+ * i, and the count is reset afterwards. An upper-bound check on
+ * ctx_id is also added to protect the bus_rd_ctx[] access.
+ */
+static int cam_ope_bus_rd_release(struct ope_hw *ope_hw_info,
+	int32_t ctx_id, void *data)
+{
+	int i;
+	struct ope_bus_rd_ctx *bus_rd_ctx;
+
+	if (ctx_id < 0 || ctx_id >= OPE_CTX_MAX) {
+		CAM_ERR(CAM_OPE, "Invalid data: %d", ctx_id);
+		return -EINVAL;
+	}
+
+	bus_rd_ctx = &bus_rd->bus_rd_ctx[ctx_id];
+	bus_rd_ctx->ope_acquire = NULL;
+
+	for (i = 0; i < bus_rd_ctx->num_in_ports; i++) {
+		bus_rd_ctx->io_port_info.input_port_id[i] = 0;
+		bus_rd_ctx->io_port_info.input_format_type[i] = 0;
+		bus_rd_ctx->io_port_info.pixel_pattern[i] = 0;
+	}
+	bus_rd_ctx->num_in_ports = 0;
+
+	return 0;
+}
+
+/*
+ * cam_ope_bus_rd_acquire - record an acquire for this context and
+ * validate/capture per-input-port settings (format, pixel pattern,
+ * latency buffer size) into the context's io_port_info.
+ *
+ * Return: 0 on success, negative error code on invalid input.
+ */
+static int cam_ope_bus_rd_acquire(struct ope_hw *ope_hw_info,
+	int32_t ctx_id, void *data)
+{
+	int rc = 0, i;
+	struct ope_acquire_dev_info *in_acquire;
+	struct ope_bus_rd_ctx *bus_rd_ctx;
+	struct ope_bus_in_port_to_rm *in_port_to_rm;
+	struct cam_ope_bus_rd_reg_val *bus_rd_reg_val;
+	int combo_idx;
+	int in_port_idx;
+
+
+	if (ctx_id < 0 || !data || !ope_hw_info) {
+		CAM_ERR(CAM_OPE, "Invalid data: %d %x %x",
+			ctx_id, data, ope_hw_info);
+		return -EINVAL;
+	}
+
+	bus_rd->bus_rd_ctx[ctx_id].ope_acquire = data;
+	in_acquire = data;
+	bus_rd_ctx = &bus_rd->bus_rd_ctx[ctx_id];
+	bus_rd_ctx->num_in_ports = in_acquire->num_in_res;
+	bus_rd_ctx->security_flag = in_acquire->secure_mode;
+	bus_rd_reg_val = ope_hw_info->bus_rd_reg_val;
+
+	for (i = 0; i < in_acquire->num_in_res; i++) {
+		/* Skip resources the client did not populate. */
+		if (!in_acquire->in_res[i].width)
+			continue;
+
+		CAM_DBG(CAM_OPE, "i = %d format = %u width = %x height = %x",
+			i, in_acquire->in_res[i].format,
+			in_acquire->in_res[i].width,
+			in_acquire->in_res[i].height);
+		CAM_DBG(CAM_OPE, "pix_pattern:%u alignment:%u unpack_format:%u",
+			in_acquire->in_res[i].pixel_pattern,
+			in_acquire->in_res[i].alignment,
+			in_acquire->in_res[i].unpacker_format);
+		CAM_DBG(CAM_OPE, "max_stripe = %u fps = %u",
+			in_acquire->in_res[i].max_stripe_size,
+			in_acquire->in_res[i].fps);
+
+		/*
+		 * NOTE(review): the port id is derived from the loop
+		 * position (i + 1), not from in_res[i].res_id — confirm
+		 * the acquire payload guarantees positional ordering.
+		 */
+		in_port_idx = cam_ope_bus_rd_in_port_idx(i + 1);
+		if (in_port_idx < 0) {
+			CAM_ERR(CAM_OPE, "Invalid in_port_idx: %d", i + 1);
+			rc = -EINVAL;
+			goto end;
+		}
+
+		in_port_to_rm = &bus_rd->in_port_to_rm[in_port_idx];
+		combo_idx = cam_ope_bus_rd_combo_idx(
+			in_acquire->in_res[i].format);
+		if (combo_idx < 0) {
+			CAM_ERR(CAM_OPE, "Invalid format: %d",
+				in_acquire->in_res[i].format);
+			rc = -EINVAL;
+			goto end;
+		}
+
+		/* The port must have at least one RM for this combo. */
+		if (!in_port_to_rm->num_rm[combo_idx]) {
+			CAM_ERR(CAM_OPE, "Invalid format for Input port");
+			rc = -EINVAL;
+			goto end;
+		}
+
+		bus_rd_ctx->io_port_info.input_port_id[i] =
+			in_acquire->in_res[i].res_id;
+		bus_rd_ctx->io_port_info.input_format_type[i] =
+			in_acquire->in_res[i].format;
+		if (in_acquire->in_res[i].pixel_pattern >
+			PIXEL_PATTERN_CRYCBY) {
+			CAM_ERR(CAM_OPE, "Invalid pix pattern = %u",
+				in_acquire->in_res[i].pixel_pattern);
+			rc = -EINVAL;
+			goto end;
+		}
+
+		bus_rd_ctx->io_port_info.pixel_pattern[i] =
+			in_acquire->in_res[i].pixel_pattern;
+		bus_rd_ctx->io_port_info.latency_buf_size =
+			bus_rd_reg_val->latency_buf_size;
+
+		CAM_DBG(CAM_OPE, "i:%d port_id = %u format %u pix_pattern = %u",
+			i, bus_rd_ctx->io_port_info.input_port_id[i],
+			bus_rd_ctx->io_port_info.input_format_type[i],
+			bus_rd_ctx->io_port_info.pixel_pattern[i]);
+		CAM_DBG(CAM_OPE, "latency_buf_size = %u",
+			bus_rd_ctx->io_port_info.latency_buf_size);
+	}
+
+end:
+	return rc;
+}
+
+/*
+ * cam_ope_bus_rd_init - initialize the bus RD block: set the register
+ * base, unmask interrupts, issue a SW reset, and wait for the
+ * reset-done IRQ (signalled via bus_rd->reset_complete by the ISR).
+ *
+ * Fixes vs. original: wait_for_completion_timeout() returns an
+ * unsigned long (remaining jiffies, 0 on timeout); it was stored in a
+ * signed int with a dead "rc < 0" test. @data is now also validated
+ * before being dereferenced.
+ *
+ * Return: 0 on success, -ETIMEDOUT if reset-done is not seen,
+ * -EINVAL on bad arguments.
+ */
+static int cam_ope_bus_rd_init(struct ope_hw *ope_hw_info,
+	int32_t ctx_id, void *data)
+{
+	int rc = 0;
+	unsigned long rem_jiffies;
+	struct cam_ope_bus_rd_reg_val *bus_rd_reg_val;
+	struct cam_ope_bus_rd_reg *bus_rd_reg;
+	struct cam_ope_dev_init *dev_init = data;
+
+	if (!ope_hw_info || !data) {
+		CAM_ERR(CAM_OPE, "Invalid ope_hw_info");
+		return -EINVAL;
+	}
+
+	bus_rd_reg_val = ope_hw_info->bus_rd_reg_val;
+	bus_rd_reg = ope_hw_info->bus_rd_reg;
+	bus_rd_reg->base = dev_init->core_info->ope_hw_info->ope_bus_rd_base;
+
+	/* OPE SW RESET */
+	init_completion(&bus_rd->reset_complete);
+
+	/* enable interrupt mask so the reset-done IRQ can fire */
+	cam_io_w_mb(bus_rd_reg_val->irq_mask,
+		bus_rd_reg->base + bus_rd_reg->irq_mask);
+
+	cam_io_w_mb(bus_rd_reg_val->sw_reset,
+		bus_rd_reg->base + bus_rd_reg->sw_reset);
+
+	rem_jiffies = wait_for_completion_timeout(
+		&bus_rd->reset_complete, msecs_to_jiffies(30000));
+	if (!rem_jiffies) {
+		rc = -ETIMEDOUT;
+		CAM_ERR(CAM_OPE, "reset error result = %d", rc);
+	}
+
+	/* re-arm the interrupt mask after reset */
+	cam_io_w_mb(bus_rd_reg_val->irq_mask,
+		bus_rd_reg->base + bus_rd_reg->irq_mask);
+
+	return rc;
+}
+
+/*
+ * cam_ope_bus_rd_probe - allocate the bus RD bookkeeping and build the
+ * input-port-to-RM routing table from the register description.
+ *
+ * Fixes vs. original: the client's input_port_id was used as
+ * (id - 1) to index in_port_to_rm[] without a bounds check — an id of
+ * 0 would wrap the unsigned index to 0xffffffff. An unused local
+ * (bus_rd_reg) is also dropped.
+ *
+ * Return: 0 on success, -ENOMEM or -EINVAL on failure.
+ */
+static int cam_ope_bus_rd_probe(struct ope_hw *ope_hw_info,
+	int32_t ctx_id, void *data)
+{
+	int rc = 0, i, j, combo_idx, k;
+	struct cam_ope_bus_rd_reg_val *bus_rd_reg_val;
+	struct ope_bus_in_port_to_rm *in_port_to_rm;
+	uint32_t input_port_idx;
+	uint32_t rm_idx;
+
+	if (!ope_hw_info) {
+		CAM_ERR(CAM_OPE, "Invalid ope_hw_info");
+		return -EINVAL;
+	}
+	bus_rd = kzalloc(sizeof(struct ope_bus_rd), GFP_KERNEL);
+	if (!bus_rd) {
+		CAM_ERR(CAM_OPE, "Out of memory");
+		return -ENOMEM;
+	}
+	bus_rd->ope_hw_info = ope_hw_info;
+	bus_rd_reg_val = ope_hw_info->bus_rd_reg_val;
+
+	for (i = 0; i < bus_rd_reg_val->num_clients; i++) {
+		input_port_idx =
+			bus_rd_reg_val->rd_clients[i].input_port_id - 1;
+		/* guard against a malformed register table */
+		if (input_port_idx >= OPE_IN_RES_MAX) {
+			CAM_ERR(CAM_OPE, "Invalid input port id: %d",
+				bus_rd_reg_val->rd_clients[i].input_port_id);
+			continue;
+		}
+		in_port_to_rm = &bus_rd->in_port_to_rm[input_port_idx];
+		if (bus_rd_reg_val->rd_clients[i].format_type &
+			BUS_RD_COMBO_BAYER_MASK) {
+			combo_idx = BUS_RD_BAYER;
+			rm_idx = in_port_to_rm->num_rm[combo_idx];
+			in_port_to_rm->input_port_id =
+				bus_rd_reg_val->rd_clients[i].input_port_id;
+			in_port_to_rm->rm_port_id[combo_idx][rm_idx] =
+				bus_rd_reg_val->rd_clients[i].rm_port_id;
+			/* first RM of this combo opens a new combo slot */
+			if (!in_port_to_rm->num_rm[combo_idx])
+				in_port_to_rm->num_combos++;
+			in_port_to_rm->num_rm[combo_idx]++;
+		}
+		if (bus_rd_reg_val->rd_clients[i].format_type &
+			BUS_RD_COMBO_YUV_MASK) {
+			combo_idx = BUS_RD_YUV;
+			rm_idx = in_port_to_rm->num_rm[combo_idx];
+			in_port_to_rm->input_port_id =
+				bus_rd_reg_val->rd_clients[i].input_port_id;
+			in_port_to_rm->rm_port_id[combo_idx][rm_idx] =
+				bus_rd_reg_val->rd_clients[i].rm_port_id;
+			if (!in_port_to_rm->num_rm[combo_idx])
+				in_port_to_rm->num_combos++;
+			in_port_to_rm->num_rm[combo_idx]++;
+		}
+	}
+
+	/* dump the resulting routing table */
+	for (i = 0; i < OPE_IN_RES_MAX; i++) {
+		in_port_to_rm = &bus_rd->in_port_to_rm[i];
+		CAM_DBG(CAM_OPE, "input port id = %d num_combos = %d",
+			in_port_to_rm->input_port_id,
+			in_port_to_rm->num_combos);
+		for (j = 0; j < in_port_to_rm->num_combos; j++) {
+			CAM_DBG(CAM_OPE, "combo idx = %d num_rms = %d",
+				j, in_port_to_rm->num_rm[j]);
+			for (k = 0; k < in_port_to_rm->num_rm[j]; k++) {
+				CAM_DBG(CAM_OPE, "rm port id = %d",
+					in_port_to_rm->rm_port_id[j][k]);
+			}
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * cam_ope_bus_rd_isr - bus RD interrupt handler: read/clear the IRQ
+ * status, complete the reset completion on reset-done, and flag a CCIF
+ * violation back to the caller through @data.
+ *
+ * Fixes vs. original: "vioalation" typo in the error log, reset-done
+ * logged at error severity (it is the expected outcome of a SW reset),
+ * and @data dereferenced without a NULL check.
+ */
+static int cam_ope_bus_rd_isr(struct ope_hw *ope_hw_info,
+	int32_t ctx_id, void *data)
+{
+	uint32_t irq_status;
+	struct cam_ope_bus_rd_reg *bus_rd_reg;
+	struct cam_ope_bus_rd_reg_val *bus_rd_reg_val;
+	struct cam_ope_irq_data *irq_data = data;
+
+	if (!ope_hw_info || !irq_data) {
+		CAM_ERR(CAM_OPE, "Invalid ope_hw_info");
+		return -EINVAL;
+	}
+
+	bus_rd_reg = ope_hw_info->bus_rd_reg;
+	bus_rd_reg_val = ope_hw_info->bus_rd_reg_val;
+
+	/* Read and Clear Top Interrupt status */
+	irq_status = cam_io_r_mb(bus_rd_reg->base + bus_rd_reg->irq_status);
+	cam_io_w_mb(irq_status,
+		bus_rd_reg->base + bus_rd_reg->irq_clear);
+
+	/* latch the clear into the HW */
+	cam_io_w_mb(bus_rd_reg_val->irq_set_clear,
+		bus_rd_reg->base + bus_rd_reg->irq_cmd);
+
+	if (irq_status & bus_rd_reg_val->rst_done) {
+		complete(&bus_rd->reset_complete);
+		CAM_DBG(CAM_OPE, "ope bus rd reset done");
+	}
+
+	if ((irq_status & bus_rd_reg_val->violation) ==
+		bus_rd_reg_val->violation) {
+		irq_data->error = 1;
+		CAM_ERR(CAM_OPE, "ope bus rd CCIF violation");
+	}
+
+	return 0;
+}
+
+/*
+ * cam_ope_bus_rd_process - dispatch a HW-manager command id to the
+ * matching bus RD handler. Unknown ids return -EINVAL; ids this block
+ * does not need to act on return 0.
+ */
+int cam_ope_bus_rd_process(struct ope_hw *ope_hw_info,
+	int32_t ctx_id, uint32_t cmd_id, void *data)
+{
+	int ret = -EINVAL;
+
+	switch (cmd_id) {
+	case OPE_HW_PROBE:
+		CAM_DBG(CAM_OPE, "OPE_HW_PROBE: E");
+		ret = cam_ope_bus_rd_probe(ope_hw_info, ctx_id, data);
+		CAM_DBG(CAM_OPE, "OPE_HW_PROBE: X");
+		break;
+	case OPE_HW_INIT:
+		CAM_DBG(CAM_OPE, "OPE_HW_INIT: E");
+		ret = cam_ope_bus_rd_init(ope_hw_info, ctx_id, data);
+		CAM_DBG(CAM_OPE, "OPE_HW_INIT: X");
+		break;
+	case OPE_HW_ACQUIRE:
+		CAM_DBG(CAM_OPE, "OPE_HW_ACQUIRE: E");
+		ret = cam_ope_bus_rd_acquire(ope_hw_info, ctx_id, data);
+		CAM_DBG(CAM_OPE, "OPE_HW_ACQUIRE: X");
+		break;
+	case OPE_HW_RELEASE:
+		CAM_DBG(CAM_OPE, "OPE_HW_RELEASE: E");
+		ret = cam_ope_bus_rd_release(ope_hw_info, ctx_id, data);
+		CAM_DBG(CAM_OPE, "OPE_HW_RELEASE: X");
+		break;
+	case OPE_HW_PREPARE:
+		CAM_DBG(CAM_OPE, "OPE_HW_PREPARE: E");
+		ret = cam_ope_bus_rd_prepare(ope_hw_info, ctx_id, data);
+		CAM_DBG(CAM_OPE, "OPE_HW_PREPARE: X");
+		break;
+	case OPE_HW_ISR:
+		/* ISR path always uses context 0 */
+		ret = cam_ope_bus_rd_isr(ope_hw_info, 0, data);
+		break;
+	case OPE_HW_DEINIT:
+	case OPE_HW_START:
+	case OPE_HW_STOP:
+	case OPE_HW_FLUSH:
+	case OPE_HW_CLK_UPDATE:
+	case OPE_HW_BW_UPDATE:
+	case OPE_HW_RESET:
+	case OPE_HW_SET_IRQ_CB:
+		/* no bus RD action required for these */
+		ret = 0;
+		CAM_DBG(CAM_OPE, "Unhandled cmds: %d", cmd_id);
+		break;
+	default:
+		CAM_ERR(CAM_OPE, "Unsupported cmd: %d", cmd_id);
+		break;
+	}
+
+	return ret;
+}
+

+ 139 - 0
drivers/cam_ope/ope_hw_mgr/ope_hw/bus_rd/ope_bus_rd.h

@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef OPE_BUS_RD_H
+#define OPE_BUS_RD_H
+
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <media/cam_ope.h>
+#include "ope_hw.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_hw_intf.h"
+#include "cam_soc_util.h"
+#include "cam_context.h"
+#include "cam_ope_context.h"
+#include "cam_ope_hw_mgr.h"
+
+
+/**
+ * struct ope_bus_rd_cdm_info
+ *
+ * @offset: Offset of this CDM command within the KMD buffer
+ * @addr:   CPU address at which the CDM command was written
+ * @len:    Length of the CDM command in bytes
+ */
+struct ope_bus_rd_cdm_info {
+	uint32_t offset;
+	uint32_t *addr;
+	uint32_t len;
+};
+
+/**
+ * struct ope_bus_rd_io_port_cdm_info
+ *
+ * @num_frames_cmds: Number of frame commands
+ * @f_cdm_info:      Frame cdm info
+ * @num_stripes:     Number of stripes
+ * @num_s_cmd_bufs:  Number of stripe commands
+ * @s_cdm_info:      Stripe cdm info
+ * @go_cmd_offset:   GO command offset within the KMD buffer
+ * @go_cmd_addr:     GO command address
+ * @go_cmd_len:      GO command length
+ */
+struct ope_bus_rd_io_port_cdm_info {
+	uint32_t num_frames_cmds;
+	struct ope_bus_rd_cdm_info f_cdm_info[MAX_RD_CLIENTS];
+	uint32_t num_stripes;
+	uint32_t num_s_cmd_bufs[OPE_MAX_STRIPES];
+	struct ope_bus_rd_cdm_info s_cdm_info[OPE_MAX_STRIPES][MAX_RD_CLIENTS];
+	uint32_t go_cmd_offset;
+	uint32_t *go_cmd_addr;
+	uint32_t go_cmd_len;
+};
+
+/**
+ * struct ope_bus_rd_io_port_cdm_batch
+ *
+ * @num_batch:   Number of batches
+ * @io_port_cdm: CDM IO Port Info
+ */
+struct ope_bus_rd_io_port_cdm_batch {
+	uint32_t num_batch;
+	struct ope_bus_rd_io_port_cdm_info io_port_cdm[OPE_MAX_BATCH_SIZE];
+};
+
+/**
+ * struct ope_bus_rd_rm
+ *
+ * @rm_port_id:  RM port ID
+ * @format_type: Format type bitmask (BUS_RD_COMBO_* masks)
+ */
+struct ope_bus_rd_rm {
+	uint32_t rm_port_id;
+	uint32_t format_type;
+};
+
+/**
+ * struct ope_bus_in_port_to_rm
+ *
+ * @input_port_id:  Input port ID
+ * @num_combos:     Number of combos
+ * @num_rm:         Number of RMs
+ * @rm_port_id:     RM port Id
+ */
+struct ope_bus_in_port_to_rm {
+	uint32_t input_port_id;
+	uint32_t num_combos;
+	uint32_t num_rm[BUS_RD_COMBO_MAX];
+	uint32_t rm_port_id[BUS_RD_COMBO_MAX][MAX_RD_CLIENTS];
+};
+
+/**
+ * struct ope_bus_rd_io_port_info
+ *
+ * @pixel_pattern:      Pixel pattern, indexed by input resource
+ * @input_port_id:      Port Id, indexed by input resource
+ * @input_format_type:  Format type, indexed by input resource
+ * @latency_buf_size:   Latency buffer size
+ */
+struct ope_bus_rd_io_port_info {
+	uint32_t pixel_pattern[OPE_IN_RES_MAX];
+	uint32_t input_port_id[OPE_IN_RES_MAX];
+	uint32_t input_format_type[OPE_IN_RES_MAX];
+	uint32_t latency_buf_size;
+};
+
+/**
+ * struct ope_bus_rd_ctx
+ *
+ * @ope_acquire:       OPE acquire structure (NULL when released)
+ * @security_flag:     security flag
+ * @num_in_ports:      Number of in ports
+ * @io_port_info:      IO port info
+ * @io_port_cdm_batch: IO port cdm info
+ */
+struct ope_bus_rd_ctx {
+	struct ope_acquire_dev_info *ope_acquire;
+	bool security_flag;
+	uint32_t num_in_ports;
+	struct ope_bus_rd_io_port_info io_port_info;
+	struct ope_bus_rd_io_port_cdm_batch io_port_cdm_batch;
+};
+
+/**
+ * struct ope_bus_rd
+ *
+ * @ope_hw_info:    OPE hardware info
+ * @in_port_to_rm:  IO port to RM mapping
+ * @bus_rd_ctx:     RM context
+ * @reset_complete: Completion signalled by the bus RD ISR on reset done
+ */
+struct ope_bus_rd {
+	struct ope_hw *ope_hw_info;
+	struct ope_bus_in_port_to_rm in_port_to_rm[OPE_IN_RES_MAX];
+	struct ope_bus_rd_ctx bus_rd_ctx[OPE_CTX_MAX];
+	struct completion reset_complete;
+};
+#endif /* OPE_BUS_RD_H */
+

+ 785 - 0
drivers/cam_ope/ope_hw_mgr/ope_hw/bus_wr/ope_bus_wr.c

@@ -0,0 +1,785 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/iopoll.h>
+#include <media/cam_ope.h>
+#include "cam_io_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "ope_core.h"
+#include "ope_soc.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+#include "ope_hw.h"
+#include "ope_dev_intf.h"
+#include "ope_bus_wr.h"
+#include "cam_cdm_util.h"
+
+static struct ope_bus_wr *wr_info;
+
+/*
+ * Find the IO-buffer index inside @batch_idx whose output resource
+ * matches @output_port_id; -EINVAL if none matches.
+ */
+static int cam_ope_bus_en_port_idx(
+	struct cam_ope_request *ope_request,
+	uint32_t batch_idx,
+	uint32_t output_port_id)
+{
+	int idx;
+	struct ope_io_buf *buf;
+
+	if (batch_idx >= OPE_MAX_BATCH_SIZE) {
+		CAM_ERR(CAM_OPE, "Invalid batch idx: %d", batch_idx);
+		return -EINVAL;
+	}
+
+	for (idx = 0; idx < ope_request->num_io_bufs[batch_idx]; idx++) {
+		buf = &ope_request->io_buf[batch_idx][idx];
+		if (buf->direction == CAM_BUF_OUTPUT &&
+			buf->resource_type == output_port_id)
+			return idx;
+	}
+
+	return -EINVAL;
+}
+/* Look up the out_port_to_wm[] slot registered for @output_port_id. */
+static int cam_ope_bus_wr_out_port_idx(uint32_t output_port_id)
+{
+	int idx;
+
+	for (idx = 0; idx < OPE_OUT_RES_MAX; idx++) {
+		if (wr_info->out_port_to_wm[idx].output_port_id ==
+			output_port_id)
+			return idx;
+	}
+
+	return -EINVAL;
+}
+
+
+/*
+ * cam_ope_bus_wr_subsample - append subsample period/pattern register
+ * writes for one plane/stripe into @temp_reg.
+ *
+ * For real-time devices the pattern is programmed once (stripe 0) to
+ * fire after the last stripe of the plane; for non-real-time devices
+ * it is re-programmed every nrt_stripes_for_arb stripes, with a
+ * shortened final window when fewer stripes remain.
+ *
+ * Returns the updated entry count in @temp_reg (unchanged if the
+ * plane index is invalid or no programming is due at this stripe).
+ */
+static int cam_ope_bus_wr_subsample(
+	struct cam_ope_ctx *ctx_data,
+	struct ope_hw *ope_hw_info,
+	struct cam_ope_bus_wr_client_reg *wr_reg_client,
+	struct ope_io_buf *io_buf,
+	uint32_t *temp_reg, uint32_t count,
+	int plane_idx, int stripe_idx)
+{
+	int k, l;
+	struct cam_ope_bus_wr_reg *wr_reg;
+	struct cam_ope_bus_wr_reg_val *wr_reg_val;
+
+	wr_reg = ope_hw_info->bus_wr_reg;
+	wr_reg_val = ope_hw_info->bus_wr_reg_val;
+
+	if (plane_idx >= OPE_MAX_PLANES) {
+		CAM_ERR(CAM_OPE, "Invalid plane idx: %d", plane_idx);
+		return count;
+	}
+	k = plane_idx;
+	l = stripe_idx;
+
+	/* subsample period and pattern */
+	if ((ctx_data->ope_acquire.dev_type ==
+		OPE_DEV_TYPE_OPE_RT) && l == 0) {
+		/* RT: one pattern covering all stripes of the plane */
+		temp_reg[count++] = wr_reg->offset +
+			wr_reg_client->subsample_period;
+		temp_reg[count++] = io_buf->num_stripes[k];
+
+		temp_reg[count++] = wr_reg->offset +
+			wr_reg_client->subsample_pattern;
+		temp_reg[count++] = 1 <<
+			(io_buf->num_stripes[k] - 1);
+	} else if ((ctx_data->ope_acquire.dev_type ==
+		OPE_DEV_TYPE_OPE_NRT) &&
+		((l %
+		ctx_data->ope_acquire.nrt_stripes_for_arb) ==
+		0)) {
+		/* NRT: re-program at every arbitration boundary */
+		if (io_buf->num_stripes[k] >=
+			(l +
+			ctx_data->ope_acquire.nrt_stripes_for_arb)){
+			/* full arbitration window remains */
+			temp_reg[count++] = wr_reg->offset +
+				wr_reg_client->subsample_period;
+			temp_reg[count++] =
+				ctx_data->ope_acquire.nrt_stripes_for_arb;
+
+			temp_reg[count++] = wr_reg->offset +
+				wr_reg_client->subsample_pattern;
+			temp_reg[count++] = 1 <<
+				(ctx_data->ope_acquire.nrt_stripes_for_arb -
+				1);
+		} else {
+			/* tail window: fewer stripes than the arb size */
+			temp_reg[count++] = wr_reg->offset +
+				wr_reg_client->subsample_period;
+			temp_reg[count++] = io_buf->num_stripes[k] - l;
+
+			/* subsample pattern */
+			temp_reg[count++] = wr_reg->offset +
+				wr_reg_client->subsample_pattern;
+			temp_reg[count++] = 1 << (io_buf->num_stripes[k] -
+				l - 1);
+		}
+	}
+	return count;
+}
+
+/*
+ * cam_ope_bus_wr_release - release per-context bus WR state.
+ *
+ * Clears the acquire handle and the per-port bookkeeping for @ctx_id.
+ *
+ * Fixes vs. original (same defect as the bus RD release path):
+ * num_out_ports was zeroed *before* the cleanup loop, so the loop
+ * never executed; and the loop indexed output_format_type and
+ * pixel_pattern with [i - 1], which underflows at i == 0. The loop
+ * now runs over the recorded port count using index i, then the count
+ * is reset. An upper-bound check on ctx_id is also added.
+ */
+static int cam_ope_bus_wr_release(struct ope_hw *ope_hw_info,
+	int32_t ctx_id, void *data)
+{
+	int i;
+	struct ope_bus_wr_ctx *bus_wr_ctx;
+
+	if (ctx_id < 0 || ctx_id >= OPE_CTX_MAX) {
+		CAM_ERR(CAM_OPE, "Invalid data: %d", ctx_id);
+		return -EINVAL;
+	}
+
+	bus_wr_ctx = &wr_info->bus_wr_ctx[ctx_id];
+	bus_wr_ctx->ope_acquire = NULL;
+
+	for (i = 0; i < bus_wr_ctx->num_out_ports; i++) {
+		bus_wr_ctx->io_port_info.output_port_id[i] = 0;
+		bus_wr_ctx->io_port_info.output_format_type[i] = 0;
+		bus_wr_ctx->io_port_info.pixel_pattern[i] = 0;
+	}
+	bus_wr_ctx->num_out_ports = 0;
+
+	return 0;
+}
+
+/*
+ * cam_ope_bus_wr_update - program BUS WR client registers for one
+ * output IO buffer (all planes and stripes) of a request, emitting a
+ * CDM REG_RANDOM command per stripe into the KMD buffer and recording
+ * each command's offset/length in the per-batch io_port_cdm
+ * bookkeeping. @num_stripes is set to the stripe count of the last
+ * plane processed.
+ *
+ * Fixes vs. original: a CAM_DBG call passed three arguments for two
+ * format specifiers ("count:%d temp_reg:%x" with a stray header_size),
+ * and the set-but-unused local io_port_cdm_batch is removed.
+ *
+ * Returns the advanced kmd_buf write pointer, or NULL on invalid
+ * arguments.
+ */
+static uint32_t *cam_ope_bus_wr_update(struct ope_hw *ope_hw_info,
+	int32_t ctx_id, struct cam_ope_dev_prepare_req *prepare,
+	int batch_idx, int io_idx,
+	uint32_t *kmd_buf, uint32_t *num_stripes)
+{
+	int k, l, out_port_idx;
+	uint32_t idx;
+	uint32_t num_wm_ports;
+	uint32_t comb_idx;
+	uint32_t req_idx;
+	uint32_t temp_reg[128];
+	uint32_t count = 0;
+	uint32_t temp = 0;
+	uint32_t wm_port_id;
+	uint32_t header_size;
+	struct cam_hw_prepare_update_args *prepare_args;
+	struct cam_ope_ctx *ctx_data;
+	struct cam_ope_request *ope_request;
+	struct ope_io_buf *io_buf;
+	struct ope_stripe_io *stripe_io;
+	struct ope_bus_wr_ctx *bus_wr_ctx;
+	struct cam_ope_bus_wr_reg *wr_reg;
+	struct cam_ope_bus_wr_client_reg *wr_reg_client;
+	struct cam_ope_bus_wr_reg_val *wr_reg_val;
+	struct cam_ope_bus_wr_client_reg_val *wr_res_val_client;
+	struct ope_bus_out_port_to_wm *out_port_to_wm;
+	struct ope_bus_wr_io_port_cdm_info *io_port_cdm;
+	struct cam_cdm_utils_ops *cdm_ops;
+
+
+	if (ctx_id < 0 || !prepare) {
+		CAM_ERR(CAM_OPE, "Invalid data: %d %x", ctx_id, prepare);
+		return NULL;
+	}
+
+	if (batch_idx >= OPE_MAX_BATCH_SIZE) {
+		CAM_ERR(CAM_OPE, "Invalid batch idx: %d", batch_idx);
+		return NULL;
+	}
+
+	if (io_idx >= OPE_MAX_IO_BUFS) {
+		CAM_ERR(CAM_OPE, "Invalid IO idx: %d", io_idx);
+		return NULL;
+	}
+
+	prepare_args = prepare->prepare_args;
+	ctx_data = prepare->ctx_data;
+	req_idx = prepare->req_idx;
+	cdm_ops = ctx_data->ope_cdm.cdm_ops;
+
+	ope_request = ctx_data->req_list[req_idx];
+	bus_wr_ctx = &wr_info->bus_wr_ctx[ctx_id];
+	wr_reg = ope_hw_info->bus_wr_reg;
+	wr_reg_val = ope_hw_info->bus_wr_reg_val;
+
+	CAM_DBG(CAM_OPE, "kmd_buf = %x req_idx = %d req_id = %lld offset = %d",
+		kmd_buf, req_idx, ope_request->request_id,
+		prepare->kmd_buf_offset);
+
+	io_buf = &ope_request->io_buf[batch_idx][io_idx];
+	CAM_DBG(CAM_OPE, "batch = %d io buf num = %d dir = %d",
+		batch_idx, io_idx, io_buf->direction);
+
+	io_port_cdm =
+		&bus_wr_ctx->io_port_cdm_batch.io_port_cdm[batch_idx];
+	out_port_idx =
+		cam_ope_bus_wr_out_port_idx(io_buf->resource_type);
+	if (out_port_idx < 0) {
+		CAM_ERR(CAM_OPE, "Invalid idx for rsc type: %d",
+			io_buf->resource_type);
+		return NULL;
+	}
+	out_port_to_wm = &wr_info->out_port_to_wm[out_port_idx];
+	comb_idx = BUS_WR_YUV;
+	num_wm_ports = out_port_to_wm->num_wm[comb_idx];
+
+	/* Walk every plane, then every stripe within the plane. */
+	for (k = 0; k < io_buf->num_planes; k++) {
+		*num_stripes = io_buf->num_stripes[k];
+		for (l = 0; l < io_buf->num_stripes[k]; l++) {
+			stripe_io = &io_buf->s_io[k][l];
+			CAM_DBG(CAM_OPE, "comb_idx = %d p_idx = %d s_idx = %d",
+				comb_idx, k, l);
+			/* frame level info */
+			/* stripe level info */
+			wm_port_id = out_port_to_wm->wm_port_id[comb_idx][k];
+			wr_reg_client = &wr_reg->wr_clients[wm_port_id];
+			wr_res_val_client = &wr_reg_val->wr_clients[wm_port_id];
+
+			/* Core cfg: enable, Mode */
+			temp_reg[count++] = wr_reg->offset +
+				wr_reg_client->core_cfg;
+			temp = 0;
+			if (!stripe_io->disable_bus)
+				temp = wr_res_val_client->core_cfg_en;
+			temp |= ((wr_res_val_client->mode &
+				wr_res_val_client->mode_mask) <<
+				wr_res_val_client->mode_shift);
+			temp_reg[count++] = temp;
+
+			/* Address of the Image */
+			temp_reg[count++] = wr_reg->offset +
+				wr_reg_client->img_addr;
+			temp_reg[count++] = stripe_io->iova_addr;
+
+			/* Buffer size */
+			temp_reg[count++] = wr_reg->offset +
+				wr_reg_client->img_cfg;
+			temp = 0;
+			temp = stripe_io->width;
+			temp |= (stripe_io->height &
+				wr_res_val_client->height_mask) <<
+				wr_res_val_client->height_shift;
+			temp_reg[count++] = temp;
+
+			/* x_init */
+			temp_reg[count++] = wr_reg->offset +
+				wr_reg_client->x_init;
+			temp_reg[count++] = stripe_io->x_init;
+
+			/* stride */
+			temp_reg[count++] = wr_reg->offset +
+				wr_reg_client->stride;
+			temp_reg[count++] = stripe_io->stride;
+
+			/* pack cfg : Format and alignment */
+			temp_reg[count++] = wr_reg->offset +
+				wr_reg_client->pack_cfg;
+			temp = 0;
+			temp |= ((stripe_io->pack_format &
+				wr_res_val_client->format_mask) <<
+				wr_res_val_client->format_shift);
+			temp |= ((stripe_io->alignment &
+				wr_res_val_client->alignment_mask) <<
+				wr_res_val_client->alignment_shift);
+			temp_reg[count++] = temp;
+
+			/* subsample period and pattern */
+			count = cam_ope_bus_wr_subsample(
+					ctx_data, ope_hw_info,
+					wr_reg_client, io_buf,
+					temp_reg, count, k, l);
+
+			/*
+			 * Emit the accumulated addr/value pairs as one
+			 * REG_RANDOM command and record its location.
+			 */
+			header_size = cdm_ops->cdm_get_cmd_header_size(
+				CAM_CDM_CMD_REG_RANDOM);
+			idx = io_port_cdm->num_s_cmd_bufs[l];
+			io_port_cdm->s_cdm_info[l][idx].len =
+				sizeof(temp) * (count + header_size);
+			io_port_cdm->s_cdm_info[l][idx].offset =
+				prepare->kmd_buf_offset;
+			io_port_cdm->s_cdm_info[l][idx].addr = kmd_buf;
+			io_port_cdm->num_s_cmd_bufs[l]++;
+
+			kmd_buf = cdm_ops->cdm_write_regrandom(
+				kmd_buf, count/2, temp_reg);
+			prepare->kmd_buf_offset += ((count + header_size) *
+				sizeof(temp));
+
+			CAM_DBG(CAM_OPE, "b:%d io:%d p:%d s:%d",
+				batch_idx, io_idx, k, l);
+			CAM_DBG(CAM_OPE, "kmdbuf:%x, offset:%d",
+				kmd_buf, prepare->kmd_buf_offset);
+			CAM_DBG(CAM_OPE, "count:%d temp_reg:%x",
+				count, temp_reg);
+			CAM_DBG(CAM_OPE, "header_size:%d", header_size);
+
+			CAM_DBG(CAM_OPE, "WR cmd bufs = %d",
+				io_port_cdm->num_s_cmd_bufs[l]);
+			CAM_DBG(CAM_OPE, "off:%d len:%d",
+				io_port_cdm->s_cdm_info[l][idx].offset,
+				io_port_cdm->s_cdm_info[l][idx].len);
+			CAM_DBG(CAM_OPE, "b:%d io:%d p:%d s:%d",
+				batch_idx, io_idx, k, l);
+			count = 0;
+		}
+	}
+
+	return kmd_buf;
+}
+
+/*
+ * cam_ope_bus_wm_disable - emit CDM register writes that disable the
+ * write-master (WM) clients of one unused output port, per stripe.
+ *
+ * For every WM of the port and every stripe, writes 0 to the client's
+ * core_cfg register via a CDM REG_RANDOM command and records the command
+ * buffer info in the per-batch io_port_cdm bookkeeping.
+ *
+ * Returns the advanced kmd_buf pointer, or NULL on invalid input.
+ * NOTE(review): relies on file-scope 'wr_info' state set up at probe
+ * time (not visible in this hunk) — confirm it is non-NULL here.
+ */
+static uint32_t *cam_ope_bus_wm_disable(struct ope_hw *ope_hw_info,
+	int32_t ctx_id, struct cam_ope_dev_prepare_req *prepare,
+	int batch_idx, int io_idx,
+	uint32_t *kmd_buf, uint32_t num_stripes)
+{
+	int k, l;
+	uint32_t idx;
+	uint32_t num_wm_ports;
+	uint32_t comb_idx;
+	uint32_t req_idx;
+	uint32_t temp_reg[128];
+	uint32_t count = 0;
+	uint32_t temp = 0;
+	uint32_t wm_port_id;
+	uint32_t header_size;
+	struct cam_ope_ctx *ctx_data;
+	struct ope_bus_wr_ctx *bus_wr_ctx;
+	struct cam_ope_bus_wr_reg *wr_reg;
+	struct cam_ope_bus_wr_client_reg *wr_reg_client;
+	struct ope_bus_out_port_to_wm *out_port_to_wm;
+	struct ope_bus_wr_io_port_cdm_batch *io_port_cdm_batch;
+	struct ope_bus_wr_io_port_cdm_info *io_port_cdm;
+	struct cam_cdm_utils_ops *cdm_ops;
+
+
+	if (ctx_id < 0 || !prepare) {
+		CAM_ERR(CAM_OPE, "Invalid data: %d %x", ctx_id, prepare);
+		return NULL;
+	}
+
+	if (batch_idx >= OPE_MAX_BATCH_SIZE) {
+		CAM_ERR(CAM_OPE, "Invalid batch idx: %d", batch_idx);
+		return NULL;
+	}
+
+	ctx_data = prepare->ctx_data;
+	req_idx = prepare->req_idx;
+	cdm_ops = ctx_data->ope_cdm.cdm_ops;
+
+	bus_wr_ctx = &wr_info->bus_wr_ctx[ctx_id];
+	io_port_cdm_batch = &bus_wr_ctx->io_port_cdm_batch;
+	wr_reg = ope_hw_info->bus_wr_reg;
+
+	CAM_DBG(CAM_OPE, "kmd_buf = %x req_idx = %d offset = %d",
+		kmd_buf, req_idx, prepare->kmd_buf_offset);
+
+	io_port_cdm =
+		&bus_wr_ctx->io_port_cdm_batch.io_port_cdm[batch_idx];
+	out_port_to_wm = &wr_info->out_port_to_wm[io_idx];
+	comb_idx = BUS_WR_YUV;
+	num_wm_ports = out_port_to_wm->num_wm[comb_idx];
+
+	for (k = 0; k < num_wm_ports; k++) {
+		for (l = 0; l < num_stripes; l++) {
+			CAM_DBG(CAM_OPE, "comb_idx = %d p_idx = %d s_idx = %d",
+				comb_idx, k, l);
+			/* frame level info */
+			/* stripe level info */
+			wm_port_id = out_port_to_wm->wm_port_id[comb_idx][k];
+			wr_reg_client = &wr_reg->wr_clients[wm_port_id];
+
+			/* Core cfg: enable, Mode */
+			/* Writing 0 disables this WM client entirely */
+			temp_reg[count++] = wr_reg->offset +
+				wr_reg_client->core_cfg;
+			temp_reg[count++] = 0;
+
+			header_size = cdm_ops->cdm_get_cmd_header_size(
+				CAM_CDM_CMD_REG_RANDOM);
+			idx = io_port_cdm->num_s_cmd_bufs[l];
+			io_port_cdm->s_cdm_info[l][idx].len =
+				sizeof(temp) * (count + header_size);
+			io_port_cdm->s_cdm_info[l][idx].offset =
+				prepare->kmd_buf_offset;
+			io_port_cdm->s_cdm_info[l][idx].addr = kmd_buf;
+			io_port_cdm->num_s_cmd_bufs[l]++;
+
+			/* count/2 register (addr,value) pairs */
+			kmd_buf = cdm_ops->cdm_write_regrandom(
+				kmd_buf, count/2, temp_reg);
+			prepare->kmd_buf_offset += ((count + header_size) *
+				sizeof(temp));
+
+			CAM_DBG(CAM_OPE, "b:%d io:%d p:%d s:%d",
+				batch_idx, io_idx, k, l);
+			CAM_DBG(CAM_OPE, "kmdbuf:%x, offset:%d",
+				kmd_buf, prepare->kmd_buf_offset);
+			/*
+			 * NOTE(review): the next CAM_DBG has 2 format
+			 * specifiers but 3 arguments, and passes a pointer
+			 * to %x — should likely be %pK plus a %d for
+			 * header_size. Confirm and fix in a follow-up.
+			 */
+			CAM_DBG(CAM_OPE, "count:%d temp_reg:%x",
+				count, temp_reg, header_size);
+			CAM_DBG(CAM_OPE, "header_size:%d", header_size);
+
+			CAM_DBG(CAM_OPE, "WR cmd bufs = %d",
+				io_port_cdm->num_s_cmd_bufs[l]);
+			CAM_DBG(CAM_OPE, "off:%d len:%d",
+				io_port_cdm->s_cdm_info[l][idx].offset,
+				io_port_cdm->s_cdm_info[l][idx].len);
+			CAM_DBG(CAM_OPE, "b:%d io:%d p:%d s:%d",
+				batch_idx, io_idx, k, l);
+			count = 0;
+		}
+	}
+
+	prepare->wr_cdm_batch = &bus_wr_ctx->io_port_cdm_batch;
+
+	return kmd_buf;
+}
+
+/*
+ * cam_ope_bus_wr_prepare - build the bus-WR portion of the CDM command
+ * stream for one request.
+ *
+ * First programs WM registers for every output buffer of every batch via
+ * cam_ope_bus_wr_update(), then emits disable writes for any output
+ * resources that the request does not use.
+ *
+ * @ope_hw_info: OPE hardware block handles/registers
+ * @ctx_id:      acquired context index into wr_info->bus_wr_ctx[]
+ * @data:        struct cam_ope_dev_prepare_req
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int cam_ope_bus_wr_prepare(struct ope_hw *ope_hw_info,
+	int32_t ctx_id, void *data)
+{
+	int rc = 0;
+	int i, j = 0;
+	uint32_t req_idx;
+	uint32_t *kmd_buf;
+	struct cam_ope_dev_prepare_req *prepare;
+	struct cam_ope_ctx *ctx_data;
+	struct cam_ope_request *ope_request;
+	struct ope_io_buf *io_buf;
+	uint32_t temp;
+	int io_buf_idx;
+	uint32_t num_stripes = 0;
+	struct ope_bus_wr_io_port_cdm_batch *io_port_cdm_batch;
+	struct ope_bus_wr_ctx *bus_wr_ctx;
+
+	/* NOTE(review): %x with a pointer — %pK would be correct here */
+	if (ctx_id < 0 || !data) {
+		CAM_ERR(CAM_OPE, "Invalid data: %d %x", ctx_id, data);
+		return -EINVAL;
+	}
+	prepare = data;
+	ctx_data = prepare->ctx_data;
+	req_idx = prepare->req_idx;
+	bus_wr_ctx = &wr_info->bus_wr_ctx[ctx_id];
+
+	/* kmd_buf_offset is in bytes; convert to a uint32_t word index */
+	ope_request = ctx_data->req_list[req_idx];
+	kmd_buf = (uint32_t *)ope_request->ope_kmd_buf.cpu_addr +
+		(prepare->kmd_buf_offset / sizeof(temp));
+
+
+	CAM_DBG(CAM_OPE, "kmd_buf = %x req_idx = %d req_id = %lld offset = %d",
+		kmd_buf, req_idx, ope_request->request_id,
+		prepare->kmd_buf_offset);
+
+	io_port_cdm_batch = &wr_info->bus_wr_ctx[ctx_id].io_port_cdm_batch;
+	memset(io_port_cdm_batch, 0,
+		sizeof(struct ope_bus_wr_io_port_cdm_batch));
+
+	/* Program WMs for every output buffer of every batch */
+	for (i = 0; i < ope_request->num_batch; i++) {
+		for (j = 0; j < ope_request->num_io_bufs[i]; j++) {
+			io_buf = &ope_request->io_buf[i][j];
+			CAM_DBG(CAM_OPE, "batch = %d io buf num = %d dir = %d",
+				i, j, io_buf->direction);
+			if (io_buf->direction != CAM_BUF_OUTPUT)
+				continue;
+
+			kmd_buf = cam_ope_bus_wr_update(ope_hw_info,
+				ctx_id, prepare, i, j,
+				kmd_buf, &num_stripes);
+			if (!kmd_buf) {
+				rc = -EINVAL;
+				goto end;
+			}
+		}
+	}
+
+	/* Disable WMs which are not enabled */
+	for (i = 0; i < ope_request->num_batch; i++) {
+		for (j = OPE_OUT_RES_VIDEO; j <= OPE_OUT_RES_MAX; j++) {
+			io_buf_idx = cam_ope_bus_en_port_idx(ope_request, i, j);
+			if (io_buf_idx >= 0)
+				continue;
+
+			/*
+			 * NOTE(review): this path returns directly and
+			 * bypasses the 'end' label; harmless today (no
+			 * cleanup at 'end') but inconsistent with the
+			 * goto-based error style above.
+			 */
+			io_buf_idx = cam_ope_bus_wr_out_port_idx(j);
+			if (io_buf_idx < 0) {
+				CAM_ERR(CAM_OPE, "Invalid idx for rsc type:%d",
+					j);
+				return io_buf_idx;
+			}
+			kmd_buf = cam_ope_bus_wm_disable(ope_hw_info,
+				ctx_id, prepare, i, io_buf_idx,
+				kmd_buf, num_stripes);
+		}
+	}
+	prepare->wr_cdm_batch = &bus_wr_ctx->io_port_cdm_batch;
+
+end:
+	return rc;
+}
+
+/*
+ * cam_ope_bus_wr_acquire - validate and latch per-context bus-WR output
+ * port configuration from userspace acquire info.
+ *
+ * For every requested output resource with a non-zero width, maps the
+ * resource id to a WM group, validates the pixel pattern, and records
+ * port id / format / pattern in the context's io_port_info.
+ *
+ * @data: struct ope_acquire_dev_info supplied at acquire time
+ *
+ * Returns 0 on success or -EINVAL on bad input.
+ */
+static int cam_ope_bus_wr_acquire(struct ope_hw *ope_hw_info,
+	int32_t ctx_id, void *data)
+{
+	int rc = 0, i;
+	struct ope_acquire_dev_info *in_acquire;
+	struct ope_bus_wr_ctx *bus_wr_ctx;
+	struct ope_bus_out_port_to_wm *out_port_to_wr;
+	int combo_idx;
+	int out_port_idx;
+
+	/* NOTE(review): ctx_id is not range-checked against OPE_CTX_MAX */
+	if (ctx_id < 0 || !data) {
+		CAM_ERR(CAM_OPE, "Invalid data: %d %x", ctx_id, data);
+		return -EINVAL;
+	}
+
+	wr_info->bus_wr_ctx[ctx_id].ope_acquire = data;
+	in_acquire = data;
+	bus_wr_ctx = &wr_info->bus_wr_ctx[ctx_id];
+	bus_wr_ctx->num_out_ports = in_acquire->num_out_res;
+	bus_wr_ctx->security_flag = in_acquire->secure_mode;
+
+	for (i = 0; i < in_acquire->num_out_res; i++) {
+		/* Ports with zero width are unused — skip them */
+		if (!in_acquire->out_res[i].width)
+			continue;
+
+		CAM_DBG(CAM_OPE, "i = %d format = %u width = %x height = %x",
+			i, in_acquire->out_res[i].format,
+			in_acquire->out_res[i].width,
+			in_acquire->out_res[i].height);
+		CAM_DBG(CAM_OPE, "pix_pattern:%u alignment:%u packer_format:%u",
+			in_acquire->out_res[i].pixel_pattern,
+			in_acquire->out_res[i].alignment,
+			in_acquire->out_res[i].packer_format);
+		CAM_DBG(CAM_OPE, "subsample_period = %u subsample_pattern = %u",
+			in_acquire->out_res[i].subsample_period,
+			in_acquire->out_res[i].subsample_pattern);
+
+		out_port_idx =
+		cam_ope_bus_wr_out_port_idx(in_acquire->out_res[i].res_id);
+		if (out_port_idx < 0) {
+			CAM_DBG(CAM_OPE, "Invalid in_port_idx: %d",
+				in_acquire->out_res[i].res_id);
+			rc = -EINVAL;
+			goto end;
+		}
+		out_port_to_wr = &wr_info->out_port_to_wm[out_port_idx];
+		combo_idx = BUS_WR_YUV;
+		if (!out_port_to_wr->num_wm[combo_idx]) {
+			CAM_DBG(CAM_OPE, "Invalid format for Input port");
+			rc = -EINVAL;
+			goto end;
+		}
+
+		bus_wr_ctx->io_port_info.output_port_id[i] =
+			in_acquire->out_res[i].res_id;
+		bus_wr_ctx->io_port_info.output_format_type[i] =
+			in_acquire->out_res[i].format;
+		if (in_acquire->out_res[i].pixel_pattern >
+			PIXEL_PATTERN_CRYCBY) {
+			CAM_DBG(CAM_OPE, "Invalid pix pattern = %u",
+				in_acquire->out_res[i].pixel_pattern);
+			rc = -EINVAL;
+			goto end;
+		}
+
+		bus_wr_ctx->io_port_info.pixel_pattern[i] =
+			in_acquire->out_res[i].pixel_pattern;
+		/* Fixed latency buffer size for all ports */
+		bus_wr_ctx->io_port_info.latency_buf_size = 4096;
+		CAM_DBG(CAM_OPE, "i:%d port_id = %u format %u pix_pattern = %u",
+			i, bus_wr_ctx->io_port_info.output_port_id[i],
+			bus_wr_ctx->io_port_info.output_format_type[i],
+			bus_wr_ctx->io_port_info.pixel_pattern[i]);
+		CAM_DBG(CAM_OPE, "latency_buf_size = %u",
+			bus_wr_ctx->io_port_info.latency_buf_size);
+	}
+
+end:
+	return rc;
+}
+
+/*
+ * cam_ope_bus_wr_init - hardware init for the bus-WR block: latch the
+ * register base and unmask the WR interrupt sources.
+ *
+ * @data: struct cam_ope_dev_init (provides the mapped ope_bus_wr_base)
+ *
+ * NOTE(review): 'data'/'dev_init' is dereferenced without a NULL check,
+ * unlike acquire/prepare; ctx_id is unused for this global init.
+ */
+static int cam_ope_bus_wr_init(struct ope_hw *ope_hw_info,
+	int32_t ctx_id, void *data)
+{
+	int rc = 0;
+	struct cam_ope_bus_wr_reg_val *bus_wr_reg_val;
+	struct cam_ope_bus_wr_reg *bus_wr_reg;
+	struct cam_ope_dev_init *dev_init = data;
+
+	if (!ope_hw_info) {
+		CAM_ERR(CAM_OPE, "Invalid ope_hw_info");
+		return -EINVAL;
+	}
+
+	wr_info->ope_hw_info = ope_hw_info;
+	bus_wr_reg_val = ope_hw_info->bus_wr_reg_val;
+	bus_wr_reg = ope_hw_info->bus_wr_reg;
+	bus_wr_reg->base = dev_init->core_info->ope_hw_info->ope_bus_wr_base;
+
+	/* Unmask both WR IRQ lines per the per-SoC reg-val table */
+	cam_io_w_mb(bus_wr_reg_val->irq_mask_0,
+		ope_hw_info->bus_wr_reg->base + bus_wr_reg->irq_mask_0);
+	cam_io_w_mb(bus_wr_reg_val->irq_mask_1,
+		ope_hw_info->bus_wr_reg->base + bus_wr_reg->irq_mask_1);
+
+	return rc;
+}
+
+/*
+ * cam_ope_bus_wr_probe - allocate the module-scope wr_info state and
+ * build the output-port -> write-master mapping table from the per-SoC
+ * reg-val client list.
+ *
+ * NOTE(review): assumes wr_clients[i].output_port_id >= 1 (it is used as
+ * a 1-based index); ctx_id and data are unused at probe time. wr_info is
+ * presumably freed on module teardown elsewhere — confirm.
+ */
+static int cam_ope_bus_wr_probe(struct ope_hw *ope_hw_info,
+	int32_t ctx_id, void *data)
+{
+	int rc = 0, i, j, combo_idx, k;
+	struct cam_ope_bus_wr_reg_val *bus_wr_reg_val;
+	struct ope_bus_out_port_to_wm *out_port_to_wm;
+	uint32_t output_port_idx;
+	uint32_t wm_idx;
+
+	if (!ope_hw_info) {
+		CAM_ERR(CAM_OPE, "Invalid ope_hw_info");
+		return -EINVAL;
+	}
+	wr_info = kzalloc(sizeof(struct ope_bus_wr), GFP_KERNEL);
+	if (!wr_info) {
+		CAM_ERR(CAM_OPE, "Out of memory");
+		return -ENOMEM;
+	}
+
+	wr_info->ope_hw_info = ope_hw_info;
+	bus_wr_reg_val = ope_hw_info->bus_wr_reg_val;
+
+	/* Group WM clients by their (1-based) output port id */
+	for (i = 0; i < bus_wr_reg_val->num_clients; i++) {
+		output_port_idx =
+			bus_wr_reg_val->wr_clients[i].output_port_id - 1;
+		out_port_to_wm = &wr_info->out_port_to_wm[output_port_idx];
+		combo_idx = BUS_WR_YUV;
+		wm_idx = out_port_to_wm->num_wm[combo_idx];
+		out_port_to_wm->output_port_id =
+			bus_wr_reg_val->wr_clients[i].output_port_id;
+		out_port_to_wm->wm_port_id[combo_idx][wm_idx] =
+			bus_wr_reg_val->wr_clients[i].wm_port_id;
+		/* First WM for this combo also counts a new combo */
+		if (!out_port_to_wm->num_wm[combo_idx])
+			out_port_to_wm->num_combos++;
+		out_port_to_wm->num_wm[combo_idx]++;
+	}
+
+	/* Dump the resulting mapping for bring-up debugging */
+	for (i = 0; i < OPE_OUT_RES_MAX; i++) {
+		out_port_to_wm = &wr_info->out_port_to_wm[i];
+		CAM_DBG(CAM_OPE, "output port id = %d num_combos = %d",
+			out_port_to_wm->output_port_id,
+			out_port_to_wm->num_combos);
+		for (j = 0; j < out_port_to_wm->num_combos; j++) {
+			CAM_DBG(CAM_OPE, "combo idx = %d num_wms = %d",
+				j, out_port_to_wm->num_wm[j]);
+			for (k = 0; k < out_port_to_wm->num_wm[j]; k++) {
+				CAM_DBG(CAM_OPE, "wm port id = %d",
+					out_port_to_wm->wm_port_id[j][k]);
+			}
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * cam_ope_bus_wr_isr - bus-WR interrupt service routine.
+ *
+ * Reads and clears both WR IRQ status registers, issues the clear
+ * command, and latches any violation condition into irq_data->error.
+ *
+ * Fixes vs. original: 'data' is now validated before use (the dispatch
+ * path could hand in NULL, which crashed on irq_data->error), and the
+ * "vioalation"/double-space typos in the error logs are corrected.
+ *
+ * Returns 0 on success, -EINVAL on bad arguments.
+ */
+static int cam_ope_bus_wr_isr(struct ope_hw *ope_hw_info,
+	int32_t ctx_id, void *data)
+{
+	uint32_t irq_status_0, irq_status_1;
+	struct cam_ope_bus_wr_reg *bus_wr_reg;
+	struct cam_ope_bus_wr_reg_val *bus_wr_reg_val;
+	struct cam_ope_irq_data *irq_data = data;
+
+	if (!ope_hw_info || !irq_data) {
+		CAM_ERR(CAM_OPE, "Invalid args: %pK %pK",
+			ope_hw_info, irq_data);
+		return -EINVAL;
+	}
+
+	bus_wr_reg = ope_hw_info->bus_wr_reg;
+	bus_wr_reg_val = ope_hw_info->bus_wr_reg_val;
+
+	/* Read and Clear Top Interrupt status */
+	irq_status_0 = cam_io_r_mb(bus_wr_reg->base + bus_wr_reg->irq_status_0);
+	irq_status_1 = cam_io_r_mb(bus_wr_reg->base + bus_wr_reg->irq_status_1);
+	cam_io_w_mb(irq_status_0,
+		bus_wr_reg->base + bus_wr_reg->irq_clear_0);
+	cam_io_w_mb(irq_status_1,
+		bus_wr_reg->base + bus_wr_reg->irq_clear_1);
+
+	/* Latch the clears into effect */
+	cam_io_w_mb(bus_wr_reg_val->irq_set_clear,
+		bus_wr_reg->base + bus_wr_reg->irq_cmd);
+
+	if (irq_status_0 & bus_wr_reg_val->cons_violation) {
+		irq_data->error = 1;
+		CAM_ERR(CAM_OPE, "ope bus wr cons_violation");
+	}
+
+	if (irq_status_0 & bus_wr_reg_val->violation) {
+		irq_data->error = 1;
+		CAM_ERR(CAM_OPE, "ope bus wr violation");
+	}
+
+	if (irq_status_0 & bus_wr_reg_val->img_size_violation) {
+		irq_data->error = 1;
+		CAM_ERR(CAM_OPE, "ope bus wr img_size_violation");
+	}
+
+	return 0;
+}
+
+/*
+ * cam_ope_bus_wr_process - command dispatcher for the bus-WR sub-block.
+ *
+ * Routes OPE_HW_* commands to the matching bus-WR handler; unhandled
+ * lifecycle commands are accepted as no-ops.
+ *
+ * Fix vs. original: the OPE_HW_ISR case used to call the ISR with
+ * (ctx_id=0, data=NULL), discarding the caller's irq_data and causing a
+ * NULL dereference inside cam_ope_bus_wr_isr; it now forwards the real
+ * ctx_id and data.
+ *
+ * Returns the handler's result, or 0 for accepted no-op commands.
+ */
+int cam_ope_bus_wr_process(struct ope_hw *ope_hw_info,
+	int32_t ctx_id, uint32_t cmd_id, void *data)
+{
+	int rc = 0;
+
+	switch (cmd_id) {
+	case OPE_HW_PROBE:
+		CAM_DBG(CAM_OPE, "OPE_HW_PROBE: E");
+		rc = cam_ope_bus_wr_probe(ope_hw_info, ctx_id, data);
+		CAM_DBG(CAM_OPE, "OPE_HW_PROBE: X");
+		break;
+	case OPE_HW_INIT:
+		CAM_DBG(CAM_OPE, "OPE_HW_INIT: E");
+		rc = cam_ope_bus_wr_init(ope_hw_info, ctx_id, data);
+		CAM_DBG(CAM_OPE, "OPE_HW_INIT: X");
+		break;
+	case OPE_HW_ACQUIRE:
+		CAM_DBG(CAM_OPE, "OPE_HW_ACQUIRE: E");
+		rc = cam_ope_bus_wr_acquire(ope_hw_info, ctx_id, data);
+		CAM_DBG(CAM_OPE, "OPE_HW_ACQUIRE: X");
+		break;
+	case OPE_HW_RELEASE:
+		CAM_DBG(CAM_OPE, "OPE_HW_RELEASE: E");
+		rc = cam_ope_bus_wr_release(ope_hw_info, ctx_id, data);
+		CAM_DBG(CAM_OPE, "OPE_HW_RELEASE: X");
+		break;
+	case OPE_HW_PREPARE:
+		CAM_DBG(CAM_OPE, "OPE_HW_PREPARE: E");
+		rc = cam_ope_bus_wr_prepare(ope_hw_info, ctx_id, data);
+		CAM_DBG(CAM_OPE, "OPE_HW_PREPARE: X");
+		break;
+	case OPE_HW_DEINIT:
+	case OPE_HW_START:
+	case OPE_HW_STOP:
+	case OPE_HW_FLUSH:
+	case OPE_HW_CLK_UPDATE:
+	case OPE_HW_BW_UPDATE:
+	case OPE_HW_RESET:
+	case OPE_HW_SET_IRQ_CB:
+		rc = 0;
+		CAM_DBG(CAM_OPE, "Unhandled cmds: %d", cmd_id);
+		break;
+	case OPE_HW_ISR:
+		/* Forward the caller's ctx and irq_data to the handler */
+		rc = cam_ope_bus_wr_isr(ope_hw_info, ctx_id, data);
+		break;
+	default:
+		CAM_ERR(CAM_OPE, "Unsupported cmd: %d", cmd_id);
+		break;
+	}
+
+	return rc;
+}
+

+ 137 - 0
drivers/cam_ope/ope_hw_mgr/ope_hw/bus_wr/ope_bus_wr.h

@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef OPE_BUS_WR_H
+#define OPE_BUS_WR_H
+
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <media/cam_ope.h>
+#include "ope_hw.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_hw_intf.h"
+#include "cam_soc_util.h"
+#include "cam_context.h"
+#include "cam_ope_context.h"
+#include "cam_ope_hw_mgr.h"
+
+/**
+ * struct ope_bus_wr_cdm_info
+ *
+ * @offset: Offset
+ * @addr:   Address
+ * @len:    Length
+ */
+struct ope_bus_wr_cdm_info {
+	uint32_t offset;
+	uint32_t *addr;
+	uint32_t len;
+};
+
+/**
+ * struct ope_bus_wr_io_port_cdm_info
+ *
+ * @num_frames_cmds: Number of frame commands
+ * @f_cdm_info:      Frame cdm info
+ * @num_stripes:     Number of stripes
+ * @num_s_cmd_bufs:  Number of stripe commands
+ * @s_cdm_info:      Stripe cdm info
+ * @go_cmd_addr:     GO command address
+ * @go_cmd_len:      GO command length
+ */
+struct ope_bus_wr_io_port_cdm_info {
+	uint32_t num_frames_cmds;
+	struct ope_bus_wr_cdm_info f_cdm_info[MAX_WR_CLIENTS];
+	uint32_t num_stripes;
+	uint32_t num_s_cmd_bufs[OPE_MAX_STRIPES];
+	struct ope_bus_wr_cdm_info s_cdm_info[OPE_MAX_STRIPES][MAX_WR_CLIENTS];
+	uint32_t *go_cmd_addr;
+	uint32_t go_cmd_len;
+};
+
+/**
+ * struct ope_bus_wr_io_port_cdm_batch
+ *
+ * @num_batch:   Number of batches
+ * @io_port_cdm: CDM IO Port Info
+ */
+struct ope_bus_wr_io_port_cdm_batch {
+	uint32_t num_batch;
+	struct ope_bus_wr_io_port_cdm_info io_port_cdm[OPE_MAX_BATCH_SIZE];
+};
+
+/**
+ * struct ope_bus_wr_wm
+ *
+ * @wm_port_id:  WM port ID
+ * @format_type: Format type
+ */
+struct ope_bus_wr_wm {
+	uint32_t wm_port_id;
+	uint32_t format_type;
+};
+
+/**
+ * struct ope_bus_out_port_to_wm
+ *
+ * @output_port_id: Output port ID
+ * @num_combos:     Number of combos
+ * @num_wm:         Number of WMs
+ * @wm_port_id:     WM port Id
+ */
+struct ope_bus_out_port_to_wm {
+	uint32_t output_port_id;
+	uint32_t num_combos;
+	uint32_t num_wm[BUS_WR_COMBO_MAX];
+	uint32_t wm_port_id[BUS_WR_COMBO_MAX][MAX_WR_CLIENTS];
+};
+
+/**
+ * struct ope_bus_wr_io_port_info
+ *
+ * @pixel_pattern:      Pixel pattern
+ * @output_port_id:     Port Id
+ * @output_format_type: Format type
+ * @latency_buf_size:   Latency buffer size
+ */
+struct ope_bus_wr_io_port_info {
+	uint32_t pixel_pattern[OPE_OUT_RES_MAX];
+	uint32_t output_port_id[OPE_OUT_RES_MAX];
+	uint32_t output_format_type[OPE_OUT_RES_MAX];
+	uint32_t latency_buf_size;
+};
+
+/**
+ * struct ope_bus_wr_ctx
+ *
+ * @ope_acquire:       OPE acquire structure
+ * @security_flag:     security flag
+ * @num_out_ports:     Number of out ports
+ * @io_port_info:      IO port info
+ * @io_port_cdm_batch: IO port cdm info
+ */
+struct ope_bus_wr_ctx {
+	struct ope_acquire_dev_info *ope_acquire;
+	bool security_flag;
+	uint32_t num_out_ports;
+	struct ope_bus_wr_io_port_info io_port_info;
+	struct ope_bus_wr_io_port_cdm_batch io_port_cdm_batch;
+};
+
+/**
+ * struct ope_bus_wr
+ *
+ * @ope_hw_info:    OPE hardware info
+ * @out_port_to_wm: IO port to WM mapping
+ * @bus_wr_ctx:     WM context
+ */
+struct ope_bus_wr {
+	struct ope_hw *ope_hw_info;
+	struct ope_bus_out_port_to_wm out_port_to_wm[OPE_OUT_RES_MAX];
+	struct ope_bus_wr_ctx bus_wr_ctx[OPE_CTX_MAX];
+};
+
+#endif /* OPE_BUS_WR_H */
+

+ 1781 - 0
drivers/cam_ope/ope_hw_mgr/ope_hw/ope_core.c

@@ -0,0 +1,1781 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/iopoll.h>
+#include <media/cam_ope.h>
+#include "cam_io_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "ope_core.h"
+#include "ope_soc.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+#include "ope_hw.h"
+#include "ope_dev_intf.h"
+#include "cam_cdm_util.h"
+#include "ope_bus_rd.h"
+#include "ope_bus_wr.h"
+
+/*
+ * cam_ope_caps_vote - apply pending AHB and AXI CPAS votes.
+ *
+ * Fix vs. original: a failed AHB vote used to be silently overwritten by
+ * the result of a subsequent successful AXI vote; each vote is now
+ * checked independently so the first failure is reported and returned.
+ *
+ * Returns 0 on success or the first failing vote's errno.
+ */
+static int cam_ope_caps_vote(struct cam_ope_device_core_info *core_info,
+	struct cam_ope_dev_bw_update *cpas_vote)
+{
+	int rc = 0;
+
+	if (cpas_vote->ahb_vote_valid) {
+		rc = cam_cpas_update_ahb_vote(core_info->cpas_handle,
+			&cpas_vote->ahb_vote);
+		if (rc) {
+			CAM_ERR(CAM_OPE, "cpas vote is failed: %d", rc);
+			return rc;
+		}
+	}
+
+	if (cpas_vote->axi_vote_valid) {
+		rc = cam_cpas_update_axi_vote(core_info->cpas_handle,
+			&cpas_vote->axi_vote);
+		if (rc)
+			CAM_ERR(CAM_OPE, "cpas vote is failed: %d", rc);
+	}
+
+	return rc;
+}
+
+/*
+ * cam_ope_get_hw_caps - report OPE hardware type and version.
+ *
+ * Decodes the cached hw_version via the top-block mask/shift table into
+ * the caller-supplied struct ope_hw_ver.
+ *
+ * @hw_priv:         struct cam_hw_info for this OPE device
+ * @get_hw_cap_args: struct ope_hw_ver output
+ *
+ * Returns 0 on success, -EINVAL on bad arguments.
+ */
+int cam_ope_get_hw_caps(void *hw_priv, void *get_hw_cap_args,
+	uint32_t arg_size)
+{
+	struct cam_hw_info *ope_dev = hw_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_ope_device_core_info *core_info = NULL;
+	struct ope_hw_ver *ope_hw_ver;
+	struct cam_ope_top_reg_val *top_reg_val;
+
+	if (!hw_priv) {
+		CAM_ERR(CAM_OPE, "Invalid cam_dev_info");
+		return -EINVAL;
+	}
+
+	soc_info = &ope_dev->soc_info;
+	core_info = (struct cam_ope_device_core_info *)ope_dev->core_info;
+
+	/*
+	 * NOTE(review): !soc_info is always false (address-of a member);
+	 * also %x is given pointers — should be %pK.
+	 */
+	if ((!soc_info) || (!core_info)) {
+		CAM_ERR(CAM_OPE, "soc_info = %x core_info = %x",
+			soc_info, core_info);
+		return -EINVAL;
+	}
+
+	if (!get_hw_cap_args) {
+		CAM_ERR(CAM_OPE, "Invalid caps");
+		return -EINVAL;
+	}
+
+	/* Unpack major/minor/incr fields from the version register value */
+	top_reg_val = core_info->ope_hw_info->ope_hw->top_reg_val;
+	ope_hw_ver = get_hw_cap_args;
+	ope_hw_ver->hw_type = core_info->hw_type;
+	ope_hw_ver->hw_ver.major =
+		(core_info->hw_version & top_reg_val->major_mask) >>
+		top_reg_val->major_shift;
+	ope_hw_ver->hw_ver.minor =
+		(core_info->hw_version & top_reg_val->minor_mask) >>
+		top_reg_val->minor_shift;
+	ope_hw_ver->hw_ver.incr =
+		(core_info->hw_version & top_reg_val->incr_mask) >>
+		top_reg_val->incr_shift;
+
+	return 0;
+}
+
+/* Stub: OPE start is a no-op; streaming is driven via CDM submissions. */
+int cam_ope_start(void *hw_priv, void *start_args, uint32_t arg_size)
+{
+	return 0;
+}
+
+/* Stub: OPE stop is currently a no-op. */
+int cam_ope_stop(void *hw_priv, void *start_args, uint32_t arg_size)
+{
+	return 0;
+}
+
+/* Stub: OPE flush is currently a no-op. */
+int cam_ope_flush(void *hw_priv, void *flush_args, uint32_t arg_size)
+{
+	return 0;
+}
+
+/*
+ * cam_ope_dev_process_init - init the top, bus-RD and bus-WR sub-blocks
+ * in order, unwinding already-initialised blocks on failure.
+ *
+ * Fixes vs. original: the 'if (rc)' checks after the bus calls were
+ * indented as if nested (misleading-indentation); the unwind path used
+ * to overwrite 'rc' with the deinit results, losing the original error
+ * code — deinit results are now deliberately ignored.
+ *
+ * Returns 0 on success or the first failing sub-block's errno.
+ */
+static int cam_ope_dev_process_init(struct ope_hw *ope_hw,
+	void *cmd_args)
+{
+	int rc;
+
+	rc = cam_ope_top_process(ope_hw, 0, OPE_HW_INIT, cmd_args);
+	if (rc)
+		goto top_init_fail;
+
+	rc = cam_ope_bus_rd_process(ope_hw, 0, OPE_HW_INIT, cmd_args);
+	if (rc)
+		goto bus_rd_init_fail;
+
+	rc = cam_ope_bus_wr_process(ope_hw, 0, OPE_HW_INIT, cmd_args);
+	if (rc)
+		goto bus_wr_init_fail;
+
+	return 0;
+
+bus_wr_init_fail:
+	cam_ope_bus_rd_process(ope_hw, 0, OPE_HW_DEINIT, NULL);
+bus_rd_init_fail:
+	cam_ope_top_process(ope_hw, 0, OPE_HW_DEINIT, NULL);
+top_init_fail:
+	return rc;
+}
+
+/*
+ * cam_ope_process_init - init entry point selecting between HFI-based
+ * and direct-register initialisation. Only direct init is implemented.
+ */
+static int cam_ope_process_init(struct ope_hw *ope_hw,
+	void *cmd_args, bool hfi_en)
+{
+	if (hfi_en) {
+		CAM_ERR(CAM_OPE, "hfi_en is not supported");
+		return -EINVAL;
+	}
+
+	return cam_ope_dev_process_init(ope_hw, cmd_args);
+}
+
+/*
+ * cam_ope_init_hw - power-on path: start CPAS with default votes, enable
+ * SoC resources (clocks/regulators), then run sub-block init.
+ *
+ * Fix vs. original: when cam_ope_enable_soc_resources() failed, the
+ * function still fell through to cam_ope_process_init() with clocks off;
+ * it now stops CPAS and returns the error immediately.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int cam_ope_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *ope_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_ope_device_core_info *core_info = NULL;
+	struct cam_ope_cpas_vote cpas_vote;
+	int rc = 0;
+	struct cam_ope_dev_init *init;
+	struct ope_hw *ope_hw;
+
+	if (!device_priv || !init_hw_args) {
+		CAM_ERR(CAM_OPE, "Invalid cam_dev_info");
+		return -EINVAL;
+	}
+
+	soc_info = &ope_dev->soc_info;
+	core_info = (struct cam_ope_device_core_info *)ope_dev->core_info;
+	if ((!soc_info) || (!core_info)) {
+		CAM_ERR(CAM_OPE, "soc_info = %pK core_info = %pK",
+			soc_info, core_info);
+		return -EINVAL;
+	}
+	ope_hw = core_info->ope_hw_info->ope_hw;
+
+	/* Default CPAS votes: SVS AHB level, nominal AXI write bandwidth */
+	cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	cpas_vote.ahb_vote.vote.level = CAM_SVS_VOTE;
+	cpas_vote.axi_vote.num_paths = 1;
+	cpas_vote.axi_vote.axi_path[0].path_data_type =
+		CAM_AXI_PATH_DATA_ALL;
+	cpas_vote.axi_vote.axi_path[0].transac_type =
+		CAM_AXI_TRANSACTION_WRITE;
+	cpas_vote.axi_vote.axi_path[0].camnoc_bw =
+		CAM_CPAS_DEFAULT_AXI_BW;
+	cpas_vote.axi_vote.axi_path[0].mnoc_ab_bw =
+		CAM_CPAS_DEFAULT_AXI_BW;
+	cpas_vote.axi_vote.axi_path[0].mnoc_ib_bw =
+		CAM_CPAS_DEFAULT_AXI_BW;
+	cpas_vote.axi_vote.axi_path[0].ddr_ab_bw =
+		CAM_CPAS_DEFAULT_AXI_BW;
+	cpas_vote.axi_vote.axi_path[0].ddr_ib_bw =
+		CAM_CPAS_DEFAULT_AXI_BW;
+
+	rc = cam_cpas_start(core_info->cpas_handle,
+		&cpas_vote.ahb_vote, &cpas_vote.axi_vote);
+	if (rc) {
+		CAM_ERR(CAM_OPE, "cpass start failed: %d", rc);
+		return rc;
+	}
+	core_info->cpas_start = true;
+
+	rc = cam_ope_enable_soc_resources(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_OPE, "soc enable is failed : %d", rc);
+		if (cam_cpas_stop(core_info->cpas_handle))
+			CAM_ERR(CAM_OPE, "cpas stop is failed");
+		else
+			core_info->cpas_start = false;
+		/* Do not init sub-blocks with clocks disabled */
+		return rc;
+	}
+	core_info->clk_enable = true;
+
+	init = init_hw_args;
+
+	core_info->ope_hw_info->hfi_en = init->hfi_en;
+	init->core_info = core_info;
+
+	rc = cam_ope_process_init(ope_hw, init_hw_args, init->hfi_en);
+
+	return rc;
+}
+
+/*
+ * cam_ope_deinit_hw - power-off path: disable SoC resources then stop
+ * CPAS if it was started.
+ *
+ * Returns the soc-disable result (CPAS stop failures are only logged).
+ */
+int cam_ope_deinit_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *ope_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_ope_device_core_info *core_info = NULL;
+	int rc = 0;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_OPE, "Invalid cam_dev_info");
+		return -EINVAL;
+	}
+
+	soc_info = &ope_dev->soc_info;
+	core_info = (struct cam_ope_device_core_info *)ope_dev->core_info;
+	if ((!soc_info) || (!core_info)) {
+		CAM_ERR(CAM_OPE, "soc_info = %pK core_info = %pK",
+			soc_info, core_info);
+		return -EINVAL;
+	}
+
+	/* Clocks are released even if this reports an error */
+	rc = cam_ope_disable_soc_resources(soc_info, core_info->clk_enable);
+	if (rc)
+		CAM_ERR(CAM_OPE, "soc disable is failed : %d", rc);
+	core_info->clk_enable = false;
+
+	if (core_info->cpas_start) {
+		if (cam_cpas_stop(core_info->cpas_handle))
+			CAM_ERR(CAM_OPE, "cpas stop is failed");
+		else
+			core_info->cpas_start = false;
+	}
+
+	return rc;
+}
+
+/*
+ * cam_ope_dev_process_reset - delegate hardware reset to the top block.
+ * Reset is context-independent, hence ctx_id -1 and no payload.
+ */
+static int cam_ope_dev_process_reset(struct ope_hw *ope_hw, void *cmd_args)
+{
+	return cam_ope_top_process(ope_hw, -1, OPE_HW_RESET, NULL);
+}
+
+/*
+ * cam_ope_dev_process_release - release a context on top, bus-RD and
+ * bus-WR blocks; all three are attempted regardless of failures.
+ *
+ * NOTE(review): 'cmd_args' is dereferenced without a NULL check (acquire
+ * does check), and 'rc |=' bitwise-ORs negative errnos, which mangles
+ * the returned error code — confirm callers only test for non-zero.
+ */
+static int cam_ope_dev_process_release(struct ope_hw *ope_hw, void *cmd_args)
+{
+	int rc = 0;
+	struct cam_ope_dev_release *ope_dev_release;
+
+	ope_dev_release = cmd_args;
+	rc = cam_ope_top_process(ope_hw, ope_dev_release->ctx_id,
+		OPE_HW_RELEASE, NULL);
+
+	rc |= cam_ope_bus_rd_process(ope_hw, ope_dev_release->ctx_id,
+		OPE_HW_RELEASE, NULL);
+
+	rc |= cam_ope_bus_wr_process(ope_hw, ope_dev_release->ctx_id,
+		OPE_HW_RELEASE, NULL);
+
+	return rc;
+}
+
+/*
+ * cam_ope_dev_process_acquire - acquire a context on top, bus-RD and
+ * bus-WR blocks in order, releasing already-acquired blocks on failure.
+ *
+ * Fixes vs. original: the unwind path used to overwrite 'rc' with the
+ * release results (losing the original acquire error), and contained a
+ * dead 'if (rc) goto top_acquire_fail;' immediately before that label.
+ * Release results during unwind are now deliberately ignored.
+ *
+ * Returns 0 on success or the first failing sub-block's errno.
+ */
+static int cam_ope_dev_process_acquire(struct ope_hw *ope_hw, void *cmd_args)
+{
+	int rc;
+	struct cam_ope_dev_acquire *ope_dev_acquire;
+
+	if (!cmd_args || !ope_hw) {
+		CAM_ERR(CAM_OPE, "Invalid arguments: %pK %pK",
+		cmd_args, ope_hw);
+		return -EINVAL;
+	}
+
+	ope_dev_acquire = cmd_args;
+	rc = cam_ope_top_process(ope_hw, ope_dev_acquire->ctx_id,
+		OPE_HW_ACQUIRE, ope_dev_acquire->ope_acquire);
+	if (rc)
+		goto top_acquire_fail;
+
+	rc = cam_ope_bus_rd_process(ope_hw, ope_dev_acquire->ctx_id,
+		OPE_HW_ACQUIRE, ope_dev_acquire->ope_acquire);
+	if (rc)
+		goto bus_rd_acquire_fail;
+
+	rc = cam_ope_bus_wr_process(ope_hw, ope_dev_acquire->ctx_id,
+		OPE_HW_ACQUIRE, ope_dev_acquire->ope_acquire);
+	if (rc)
+		goto bus_wr_acquire_fail;
+
+	return 0;
+
+bus_wr_acquire_fail:
+	cam_ope_bus_rd_process(ope_hw, ope_dev_acquire->ctx_id,
+		OPE_HW_RELEASE, ope_dev_acquire->ope_acquire);
+bus_rd_acquire_fail:
+	cam_ope_top_process(ope_hw, ope_dev_acquire->ctx_id,
+		OPE_HW_RELEASE, ope_dev_acquire->ope_acquire);
+top_acquire_fail:
+	return rc;
+}
+
+/*
+ * cam_ope_dev_prepare_cdm_request - append one BL entry (HW IOVA) for
+ * the request's KMD buffer region to the per-request CDM command array.
+ *
+ * @kmd_buf_offset: byte offset of this BL within the KMD buffer
+ * @len:            byte length of the BL
+ * @arbitrate:      whether the CDM may arbitrate after this BL
+ *
+ * Fix vs. original: removed the unused local 'kmd_buf', whose
+ * initialiser also mixed a byte offset into uint32_t pointer arithmetic
+ * (dead code, but wrong if ever used).
+ *
+ * Returns 0 (always succeeds).
+ */
+static int cam_ope_dev_prepare_cdm_request(
+	struct cam_ope_hw_mgr *hw_mgr,
+	struct cam_hw_prepare_update_args *prepare_args,
+	struct cam_ope_ctx *ctx_data, uint32_t req_idx,
+	uint32_t kmd_buf_offset,
+	struct cam_ope_dev_prepare_req *ope_dev_prepare_req,
+	uint32_t len, bool arbitrate)
+{
+	int i;
+	struct cam_ope_request *ope_request;
+	struct cam_cdm_bl_request *cdm_cmd;
+
+	ope_request = ctx_data->req_list[req_idx];
+	cdm_cmd = ope_request->cdm_cmd;
+
+	cdm_cmd->type = CAM_CDM_BL_CMD_TYPE_HW_IOVA;
+	cdm_cmd->flag = true;
+	cdm_cmd->userdata = ctx_data;
+	cdm_cmd->cookie = req_idx;
+	cdm_cmd->gen_irq_arb = true;
+
+	i = cdm_cmd->cmd_arrary_count;
+	cdm_cmd->cmd[i].bl_addr.hw_iova =
+		(uint32_t *)ope_request->ope_kmd_buf.iova_cdm_addr;
+	cdm_cmd->cmd[i].offset = kmd_buf_offset;
+	cdm_cmd->cmd[i].len = len;
+	cdm_cmd->cmd[i].arbitrate = arbitrate;
+
+	cdm_cmd->cmd_arrary_count++;
+
+	CAM_DBG(CAM_OPE, "CDM cmd:Req idx = %d req_id = %lld array cnt = %d",
+		cdm_cmd->cookie, ope_request->request_id,
+		cdm_cmd->cmd_arrary_count);
+	CAM_DBG(CAM_OPE, "CDM cmd:mem_hdl = %d offset = %d len = %d, iova 0x%x",
+		ope_request->ope_kmd_buf.mem_handle, kmd_buf_offset, len,
+		cdm_cmd->cmd[i].bl_addr.hw_iova);
+
+	return 0;
+}
+
+/*
+ * dump_dmi_cmd - debug-log one unpacked CDM DMI command.
+ *
+ * Fixes vs. original: the second CAM_DBG passed the 'print_ptr' pointer
+ * to a %d specifier (now logs the loop index like its siblings), and
+ * the first message now prints the pointer with %pK instead of %x.
+ */
+static int dump_dmi_cmd(uint32_t print_idx,
+	uint32_t *print_ptr, struct cdm_dmi_cmd *dmi_cmd,
+	uint32_t *temp)
+{
+	CAM_DBG(CAM_OPE, "%d:dma_ptr:%pK l:%d",
+		print_idx, print_ptr,
+		dmi_cmd->length);
+	CAM_DBG(CAM_OPE, "%d:cmd:%hhx addr:%x",
+		print_idx, dmi_cmd->cmd,
+		dmi_cmd->addr);
+	CAM_DBG(CAM_OPE, "%d: dmiadr:%x sel:%d",
+		print_idx, dmi_cmd->DMIAddr,
+		dmi_cmd->DMISel);
+	CAM_DBG(CAM_OPE, "%d: %x %x %x",
+		print_idx,
+		temp[0], temp[1], temp[2]);
+
+	return 0;
+}
+
+/*
+ * dump_frame_direct - debug-dump a direct frame command buffer word by
+ * word. The 'print_idx' parameter is immediately reused as the loop
+ * counter, so its incoming value is irrelevant.
+ */
+static int dump_frame_direct(uint32_t print_idx,
+	uint32_t *print_ptr,
+	struct ope_frame_process *frm_proc,
+	int batch_idx, int cmd_buf_idx)
+{
+	int len;
+
+	/* Silently skip out-of-range indices rather than over-read */
+	if (cmd_buf_idx >= OPE_MAX_CMD_BUFS ||
+		batch_idx >= OPE_MAX_BATCH_SIZE)
+		return 0;
+
+	len = frm_proc->cmd_buf[batch_idx][cmd_buf_idx].length / 4;
+	CAM_DBG(CAM_OPE, "Frame DB : direct: E");
+	for (print_idx = 0; print_idx < len; print_idx++)
+		CAM_DBG(CAM_OPE, "%d: %x", print_idx, print_ptr[print_idx]);
+	CAM_DBG(CAM_OPE, "Frame DB : direct: X");
+
+	return 0;
+}
+
+/*
+ * dump_frame_cmd - debug-dump the metadata of one frame command buffer.
+ *
+ * NOTE(review): the "iova:%x %pK" message passes the 64-bit iova_addr to
+ * both %x and %pK — 64-bit values need %llx, and %pK expects a pointer.
+ * Confirm and fix the specifiers in a follow-up.
+ */
+static int dump_frame_cmd(struct ope_frame_process *frm_proc,
+	int i, int j, uint64_t iova_addr, uint32_t *kmd_buf, uint32_t buf_len)
+{
+	if (j >= OPE_MAX_CMD_BUFS || i >= OPE_MAX_BATCH_SIZE)
+		return 0;
+
+	CAM_DBG(CAM_OPE, "Frame DB:scope:%d buffer:%d type:%d",
+		frm_proc->cmd_buf[i][j].cmd_buf_scope,
+		frm_proc->cmd_buf[i][j].cmd_buf_buffered,
+		frm_proc->cmd_buf[i][j].type);
+	CAM_DBG(CAM_OPE, "kmdbuf:%x memhdl:%x iova:%x %pK",
+		kmd_buf,
+		frm_proc->cmd_buf[i][j].mem_handle,
+		iova_addr, iova_addr);
+	CAM_DBG(CAM_OPE, "buflen:%d len:%d offset:%d",
+		buf_len,
+		frm_proc->cmd_buf[i][j].length,
+		frm_proc->cmd_buf[i][j].offset);
+
+	return 0;
+}
+
+/*
+ * dump_stripe_cmd - debug-dump the metadata of one stripe command buffer.
+ *
+ * NOTE(review): as in dump_frame_cmd, 64-bit iova_addr is passed to %x
+ * and %pK; 'i' (batch index) is not bounds-checked here, only 'k'.
+ */
+static int dump_stripe_cmd(struct ope_frame_process *frm_proc,
+	uint32_t stripe_idx, int i, int k, uint64_t iova_addr,
+	uint32_t *kmd_buf, uint32_t buf_len)
+{
+	if (k >= OPE_MAX_CMD_BUFS)
+		return 0;
+
+	CAM_DBG(CAM_OPE, "Stripe:%d scope:%d buffer:%d",
+		stripe_idx,
+		frm_proc->cmd_buf[i][k].cmd_buf_scope,
+		frm_proc->cmd_buf[i][k].cmd_buf_buffered);
+	CAM_DBG(CAM_OPE, "type:%d kmdbuf:%x memhdl:%x",
+		frm_proc->cmd_buf[i][k].type, kmd_buf,
+		frm_proc->cmd_buf[i][k].mem_handle);
+	CAM_DBG(CAM_OPE, "iova:%x %pK buflen:%d len:%d",
+		iova_addr, iova_addr, buf_len,
+		frm_proc->cmd_buf[i][k].length);
+	CAM_DBG(CAM_OPE, "offset:%d",
+		frm_proc->cmd_buf[i][k].offset);
+	return 0;
+}
+
+/*
+ * ope_create_frame_cmd_prefetch_dis - patch frame-scope command buffers
+ * for one batch into the KMD CDM stream, honouring the prefetch-disable
+ * and buffered/unbuffered selection.
+ *
+ * Direct command buffers are referenced with a CDM indirect command;
+ * indirect buffers are unpacked entry by entry and re-emitted as CDM
+ * DMI commands.
+ *
+ * Fixes vs. original: 'print_idx' was read uninitialised when passed to
+ * dump_frame_direct() (UB); unused locals (ope_request, wr/rd cdm info)
+ * removed; the batch_idx bounds check now runs before any dereference.
+ *
+ * Returns the advanced kmd_buf pointer, or NULL on failure.
+ */
+static uint32_t *ope_create_frame_cmd_prefetch_dis(
+	struct cam_ope_hw_mgr *hw_mgr,
+	struct cam_ope_ctx *ctx_data, uint32_t req_idx,
+	uint32_t *kmd_buf, uint32_t buffered, int batch_idx,
+	struct cam_ope_dev_prepare_req *ope_dev_prepare_req)
+{
+	int rc = 0, i, j;
+	uint32_t temp[3];
+	struct cdm_dmi_cmd *dmi_cmd;
+	struct ope_frame_process *frm_proc;
+	uint64_t iova_addr;
+	uintptr_t cpu_addr;
+	size_t buf_len;
+	uint32_t print_idx = 0;
+	uint32_t *print_ptr;
+	int num_dmi = 0;
+	struct cam_cdm_utils_ops *cdm_ops;
+
+	if (batch_idx >= OPE_MAX_BATCH_SIZE) {
+		CAM_ERR(CAM_OPE, "Invalid input: %d", batch_idx);
+		return NULL;
+	}
+
+	frm_proc = ope_dev_prepare_req->frame_process;
+	cdm_ops = ctx_data->ope_cdm.cdm_ops;
+	i = batch_idx;
+
+	for (j = 0; j < frm_proc->num_cmd_bufs[i]; j++) {
+		/* Only frame-scope, non-KMD, non-debug buffers apply */
+		if (frm_proc->cmd_buf[i][j].cmd_buf_scope !=
+			OPE_CMD_BUF_SCOPE_FRAME)
+			continue;
+
+		if (frm_proc->cmd_buf[i][j].cmd_buf_usage ==
+			OPE_CMD_BUF_KMD ||
+			frm_proc->cmd_buf[i][j].cmd_buf_usage ==
+			OPE_CMD_BUF_DEBUG)
+			continue;
+
+		/* Prefetch-disabled buffers must match the buffered pass */
+		if (frm_proc->cmd_buf[i][j].prefetch_disable &&
+			frm_proc->cmd_buf[i][j].cmd_buf_buffered !=
+			buffered)
+			continue;
+
+		if (!frm_proc->cmd_buf[i][j].mem_handle)
+			continue;
+
+		rc = cam_mem_get_io_buf(
+			frm_proc->cmd_buf[i][j].mem_handle,
+			hw_mgr->iommu_cdm_hdl, &iova_addr, &buf_len);
+		if (rc) {
+			CAM_ERR(CAM_OPE, "get cmd buf failed %x",
+				hw_mgr->iommu_hdl);
+			return NULL;
+		}
+		iova_addr = iova_addr + frm_proc->cmd_buf[i][j].offset;
+
+		rc = cam_mem_get_cpu_buf(
+			frm_proc->cmd_buf[i][j].mem_handle,
+			&cpu_addr, &buf_len);
+		if (rc || !cpu_addr) {
+			CAM_ERR(CAM_OPE, "get cmd buf failed %x",
+				hw_mgr->iommu_hdl);
+			return NULL;
+		}
+
+		cpu_addr = cpu_addr + frm_proc->cmd_buf[i][j].offset;
+		if (frm_proc->cmd_buf[i][j].type ==
+			OPE_CMD_BUF_TYPE_DIRECT) {
+			/* Reference the whole buffer via CDM indirect */
+			kmd_buf = cdm_ops->cdm_write_indirect(kmd_buf,
+				iova_addr,
+				frm_proc->cmd_buf[i][j].length);
+			print_ptr = (uint32_t *)cpu_addr;
+			dump_frame_direct(print_idx, print_ptr,
+				frm_proc, i, j);
+		} else {
+			/* Unpack each cdm_dmi_cmd and re-emit it */
+			num_dmi = frm_proc->cmd_buf[i][j].length /
+				sizeof(struct cdm_dmi_cmd);
+			CAM_DBG(CAM_OPE, "Frame DB : In direct: E");
+			print_ptr = (uint32_t *)cpu_addr;
+			for (print_idx = 0;
+				print_idx < num_dmi; print_idx++) {
+				memcpy(temp, (const void *)print_ptr,
+					sizeof(struct cdm_dmi_cmd));
+				dmi_cmd = (struct cdm_dmi_cmd *)temp;
+				kmd_buf = cdm_ops->cdm_write_dmi(
+					kmd_buf,
+					0, dmi_cmd->DMIAddr,
+					dmi_cmd->DMISel, dmi_cmd->addr,
+					dmi_cmd->length);
+				dump_dmi_cmd(print_idx,
+					print_ptr, dmi_cmd, temp);
+				print_ptr +=
+					sizeof(struct cdm_dmi_cmd) /
+					sizeof(uint32_t);
+			}
+			CAM_DBG(CAM_OPE, "Frame DB : In direct: X");
+		}
+		dump_frame_cmd(frm_proc, i, j,
+			iova_addr, kmd_buf, buf_len);
+	}
+	return kmd_buf;
+
+}
+
+/*
+ * ope_create_frame_cmd_batch - program frame-scope command buffers of one
+ * batch into the KMD buffer.
+ *
+ * Same as ope_create_frame_cmd() but restricted to a single batch index;
+ * only command buffers whose cmd_buf_buffered matches @buffered are
+ * programmed. Direct buffers become an indirect CDM reference; indirect
+ * buffers are expanded into DMI commands.
+ *
+ * Returns the advanced KMD buffer pointer, or NULL on failure.
+ */
+static uint32_t *ope_create_frame_cmd_batch(struct cam_ope_hw_mgr *hw_mgr,
+	struct cam_ope_ctx *ctx_data, uint32_t req_idx,
+	uint32_t *kmd_buf, uint32_t buffered, int batch_idx,
+	struct cam_ope_dev_prepare_req *ope_dev_prepare_req)
+{
+	int rc = 0, i, j;
+	uint32_t temp[3];
+	struct cam_ope_request *ope_request;
+	struct cdm_dmi_cmd *dmi_cmd;
+	struct ope_bus_wr_io_port_cdm_info *wr_cdm_info;
+	struct ope_bus_rd_io_port_cdm_info *rd_cdm_info;
+	struct ope_frame_process *frm_proc;
+	uint64_t iova_addr;
+	uintptr_t cpu_addr;
+	size_t buf_len;
+	/* initialized: passed to dump_frame_direct() before any loop sets it */
+	uint32_t print_idx = 0;
+	uint32_t *print_ptr;
+	int num_dmi = 0;
+	struct cam_cdm_utils_ops *cdm_ops;
+
+	frm_proc = ope_dev_prepare_req->frame_process;
+	ope_request = ctx_data->req_list[req_idx];
+	cdm_ops = ctx_data->ope_cdm.cdm_ops;
+	wr_cdm_info =
+		&ope_dev_prepare_req->wr_cdm_batch->io_port_cdm[0];
+	rd_cdm_info =
+		&ope_dev_prepare_req->rd_cdm_batch->io_port_cdm[0];
+
+	if (batch_idx >= OPE_MAX_BATCH_SIZE) {
+		CAM_ERR(CAM_OPE, "Invalid input: %d", batch_idx);
+		return NULL;
+	}
+	i = batch_idx;
+
+	for (j = 0; j < frm_proc->num_cmd_bufs[i]; j++) {
+		if (frm_proc->cmd_buf[i][j].cmd_buf_scope !=
+			OPE_CMD_BUF_SCOPE_FRAME)
+			continue;
+
+		/* KMD and DEBUG buffers are handled elsewhere */
+		if (frm_proc->cmd_buf[i][j].cmd_buf_usage ==
+			OPE_CMD_BUF_KMD ||
+			frm_proc->cmd_buf[i][j].cmd_buf_usage ==
+			OPE_CMD_BUF_DEBUG)
+			continue;
+
+		if (frm_proc->cmd_buf[i][j].cmd_buf_buffered !=
+			buffered)
+			continue;
+
+		if (!frm_proc->cmd_buf[i][j].mem_handle)
+			continue;
+
+		rc = cam_mem_get_io_buf(
+			frm_proc->cmd_buf[i][j].mem_handle,
+			hw_mgr->iommu_cdm_hdl, &iova_addr, &buf_len);
+		if (rc) {
+			CAM_ERR(CAM_OPE, "get cmd buf failed %x",
+				hw_mgr->iommu_hdl);
+			return NULL;
+		}
+		iova_addr = iova_addr + frm_proc->cmd_buf[i][j].offset;
+
+		rc = cam_mem_get_cpu_buf(
+			frm_proc->cmd_buf[i][j].mem_handle,
+			&cpu_addr, &buf_len);
+		if (rc || !cpu_addr) {
+			CAM_ERR(CAM_OPE, "get cmd buf failed %x",
+				hw_mgr->iommu_hdl);
+			return NULL;
+		}
+
+		cpu_addr = cpu_addr + frm_proc->cmd_buf[i][j].offset;
+		if (frm_proc->cmd_buf[i][j].type ==
+			OPE_CMD_BUF_TYPE_DIRECT) {
+			kmd_buf = cdm_ops->cdm_write_indirect(kmd_buf,
+				iova_addr,
+				frm_proc->cmd_buf[i][j].length);
+			print_ptr = (uint32_t *)cpu_addr;
+			dump_frame_direct(print_idx, print_ptr,
+				frm_proc, i, j);
+		} else {
+			num_dmi = frm_proc->cmd_buf[i][j].length /
+				sizeof(struct cdm_dmi_cmd);
+			CAM_DBG(CAM_OPE, "Frame DB : In direct: E");
+			print_ptr = (uint32_t *)cpu_addr;
+			for (print_idx = 0;
+				print_idx < num_dmi; print_idx++) {
+				/* copy to local to avoid unaligned access */
+				memcpy(temp, (const void *)print_ptr,
+					sizeof(struct cdm_dmi_cmd));
+				dmi_cmd = (struct cdm_dmi_cmd *)temp;
+				kmd_buf = cdm_ops->cdm_write_dmi(
+					kmd_buf,
+					0, dmi_cmd->DMIAddr,
+					dmi_cmd->DMISel, dmi_cmd->addr,
+					dmi_cmd->length);
+				dump_dmi_cmd(print_idx,
+					print_ptr, dmi_cmd, temp);
+				print_ptr +=
+					sizeof(struct cdm_dmi_cmd) /
+					sizeof(uint32_t);
+			}
+			CAM_DBG(CAM_OPE, "Frame DB : In direct: X");
+		}
+		dump_frame_cmd(frm_proc, i, j,
+			iova_addr, kmd_buf, buf_len);
+	}
+	return kmd_buf;
+
+}
+
+/*
+ * ope_create_frame_wr - chain the bus-WR frame-level CDM programs into
+ * the KMD buffer via indirect references.
+ *
+ * Returns the advanced KMD buffer pointer.
+ */
+static uint32_t *ope_create_frame_wr(struct cam_ope_ctx *ctx_data,
+	struct ope_bus_wr_io_port_cdm_info *wr_cdm_info,
+	uint32_t *kmd_buf, struct cam_ope_request *ope_request)
+{
+	struct cam_cdm_utils_ops *cdm_ops = ctx_data->ope_cdm.cdm_ops;
+	int cmd_i = 0;
+
+	while (cmd_i < wr_cdm_info->num_frames_cmds) {
+		kmd_buf = cdm_ops->cdm_write_indirect(kmd_buf,
+			(uint32_t)ope_request->ope_kmd_buf.iova_cdm_addr +
+			wr_cdm_info->f_cdm_info[cmd_i].offset,
+			wr_cdm_info->f_cdm_info[cmd_i].len);
+		CAM_DBG(CAM_OPE, "FrameWR:i:%d kmdbuf:%x len:%d iova:%x %pK",
+			cmd_i, kmd_buf, wr_cdm_info->f_cdm_info[cmd_i].len,
+			ope_request->ope_kmd_buf.iova_cdm_addr,
+			ope_request->ope_kmd_buf.iova_cdm_addr);
+		cmd_i++;
+	}
+
+	return kmd_buf;
+}
+
+/*
+ * ope_create_frame_rd - chain the bus-RD frame-level CDM programs into
+ * the KMD buffer via indirect references.
+ *
+ * Returns the advanced KMD buffer pointer.
+ */
+static uint32_t *ope_create_frame_rd(struct cam_ope_ctx *ctx_data,
+	struct ope_bus_rd_io_port_cdm_info *rd_cdm_info,
+	uint32_t *kmd_buf, struct cam_ope_request *ope_request)
+{
+	struct cam_cdm_utils_ops *cdm_ops = ctx_data->ope_cdm.cdm_ops;
+	int cmd_i = 0;
+
+	/* Frame 0 RD */
+	while (cmd_i < rd_cdm_info->num_frames_cmds) {
+		kmd_buf = cdm_ops->cdm_write_indirect(kmd_buf,
+			(uint32_t)ope_request->ope_kmd_buf.iova_cdm_addr +
+			rd_cdm_info->f_cdm_info[cmd_i].offset,
+			rd_cdm_info->f_cdm_info[cmd_i].len);
+		CAM_DBG(CAM_OPE, "FrameRD:i:%d kmdbuf:%x len:%d iova:%x %pK",
+			 cmd_i, kmd_buf, rd_cdm_info->f_cdm_info[cmd_i].len,
+			 ope_request->ope_kmd_buf.iova_cdm_addr,
+			 ope_request->ope_kmd_buf.iova_cdm_addr);
+		cmd_i++;
+	}
+
+	return kmd_buf;
+}
+
+/*
+ * ope_create_frame_cmd - program frame-scope command buffers of all
+ * batches into the KMD buffer.
+ *
+ * For every batch, walks the frame-scope command buffers matching
+ * @buffered (skipping KMD/DEBUG usage buffers). Direct buffers become an
+ * indirect CDM reference; indirect buffers are expanded into DMI
+ * commands.
+ *
+ * Returns the advanced KMD buffer pointer, or NULL on failure.
+ */
+static uint32_t *ope_create_frame_cmd(struct cam_ope_hw_mgr *hw_mgr,
+	struct cam_ope_ctx *ctx_data, uint32_t req_idx,
+	uint32_t *kmd_buf, uint32_t buffered,
+	struct cam_ope_dev_prepare_req *ope_dev_prepare_req)
+{
+	int rc = 0, i, j;
+	uint32_t temp[3];
+	struct cam_ope_request *ope_request;
+	struct cdm_dmi_cmd *dmi_cmd;
+	struct ope_bus_wr_io_port_cdm_info *wr_cdm_info;
+	struct ope_bus_rd_io_port_cdm_info *rd_cdm_info;
+	struct ope_frame_process *frm_proc;
+	uint64_t iova_addr;
+	uintptr_t cpu_addr;
+	size_t buf_len;
+	/* initialized: passed to dump_frame_direct() before any loop sets it */
+	uint32_t print_idx = 0;
+	uint32_t *print_ptr;
+	int num_dmi = 0;
+	struct cam_cdm_utils_ops *cdm_ops;
+
+	frm_proc = ope_dev_prepare_req->frame_process;
+	ope_request = ctx_data->req_list[req_idx];
+	cdm_ops = ctx_data->ope_cdm.cdm_ops;
+	wr_cdm_info =
+		&ope_dev_prepare_req->wr_cdm_batch->io_port_cdm[0];
+	rd_cdm_info =
+		&ope_dev_prepare_req->rd_cdm_batch->io_port_cdm[0];
+
+	for (i = 0; i < frm_proc->batch_size; i++) {
+		for (j = 0; j < frm_proc->num_cmd_bufs[i]; j++) {
+			if (frm_proc->cmd_buf[i][j].cmd_buf_scope !=
+				OPE_CMD_BUF_SCOPE_FRAME)
+				continue;
+
+			/* KMD and DEBUG buffers are handled elsewhere */
+			if (frm_proc->cmd_buf[i][j].cmd_buf_usage ==
+				OPE_CMD_BUF_KMD ||
+				frm_proc->cmd_buf[i][j].cmd_buf_usage ==
+				OPE_CMD_BUF_DEBUG)
+				continue;
+
+			if (frm_proc->cmd_buf[i][j].cmd_buf_buffered !=
+				buffered)
+				continue;
+
+			if (!frm_proc->cmd_buf[i][j].mem_handle)
+				continue;
+
+			rc = cam_mem_get_io_buf(
+				frm_proc->cmd_buf[i][j].mem_handle,
+				hw_mgr->iommu_cdm_hdl, &iova_addr, &buf_len);
+			if (rc) {
+				CAM_ERR(CAM_OPE, "get cmd buf failed %x",
+					hw_mgr->iommu_hdl);
+				return NULL;
+			}
+			/* fix: indices were swapped ([j][i]) vs every other access */
+			iova_addr = iova_addr + frm_proc->cmd_buf[i][j].offset;
+
+			rc = cam_mem_get_cpu_buf(
+				frm_proc->cmd_buf[i][j].mem_handle,
+				&cpu_addr, &buf_len);
+			if (rc || !cpu_addr) {
+				CAM_ERR(CAM_OPE, "get cmd buf failed %x",
+					hw_mgr->iommu_hdl);
+				return NULL;
+			}
+
+			cpu_addr = cpu_addr + frm_proc->cmd_buf[i][j].offset;
+			if (frm_proc->cmd_buf[i][j].type ==
+				OPE_CMD_BUF_TYPE_DIRECT) {
+				kmd_buf = cdm_ops->cdm_write_indirect(kmd_buf,
+					iova_addr,
+					frm_proc->cmd_buf[i][j].length);
+				print_ptr = (uint32_t *)cpu_addr;
+				dump_frame_direct(print_idx, print_ptr,
+					frm_proc, i, j);
+			} else {
+				num_dmi = frm_proc->cmd_buf[i][j].length /
+					sizeof(struct cdm_dmi_cmd);
+				CAM_DBG(CAM_OPE, "Frame DB : In direct: E");
+				print_ptr = (uint32_t *)cpu_addr;
+				for (print_idx = 0;
+					print_idx < num_dmi; print_idx++) {
+					memcpy(temp, (const void *)print_ptr,
+						sizeof(struct cdm_dmi_cmd));
+					dmi_cmd = (struct cdm_dmi_cmd *)temp;
+					kmd_buf = cdm_ops->cdm_write_dmi(
+						kmd_buf,
+						0, dmi_cmd->DMIAddr,
+						dmi_cmd->DMISel, dmi_cmd->addr,
+						dmi_cmd->length);
+					dump_dmi_cmd(print_idx,
+						print_ptr, dmi_cmd, temp);
+					print_ptr +=
+						sizeof(struct cdm_dmi_cmd) /
+						sizeof(uint32_t);
+				}
+				CAM_DBG(CAM_OPE, "Frame DB : In direct: X");
+			}
+			dump_frame_cmd(frm_proc, i, j,
+				iova_addr, kmd_buf, buf_len);
+		}
+	}
+	return kmd_buf;
+}
+
+/*
+ * ope_create_stripe_cmd - program stripe-scope command buffers of one
+ * stripe into the KMD buffer.
+ *
+ * Walks batch @batch_idx's command buffers looking for stripe-scope
+ * entries matching @stripe_idx. Direct buffers become an indirect CDM
+ * reference; indirect buffers are expanded into DMI commands.
+ *
+ * Returns the advanced KMD buffer pointer, or NULL on failure.
+ */
+static uint32_t *ope_create_stripe_cmd(struct cam_ope_hw_mgr *hw_mgr,
+	struct cam_ope_ctx *ctx_data,
+	uint32_t *kmd_buf,
+	int batch_idx,
+	int s_idx,
+	uint32_t stripe_idx,
+	struct ope_frame_process *frm_proc)
+{
+	int rc = 0, i, j, k;
+	uint32_t temp[3];
+	struct cdm_dmi_cmd *dmi_cmd;
+	uint64_t iova_addr;
+	uintptr_t cpu_addr;
+	size_t buf_len;
+	uint32_t print_idx;
+	uint32_t *print_ptr;
+	int num_dmi = 0;
+	struct cam_cdm_utils_ops *cdm_ops;
+
+	if (s_idx >= OPE_MAX_CMD_BUFS ||
+		batch_idx >= OPE_MAX_BATCH_SIZE) {
+		CAM_ERR(CAM_OPE, "Invalid inputs: %d %d",
+			batch_idx, s_idx);
+		return NULL;
+	}
+
+	i = batch_idx;
+	j = s_idx;
+	cdm_ops = ctx_data->ope_cdm.cdm_ops;
+	/* cmd buffer stripes */
+	for (k = 0; k < frm_proc->num_cmd_bufs[i]; k++) {
+		if (frm_proc->cmd_buf[i][k].cmd_buf_scope !=
+			OPE_CMD_BUF_SCOPE_STRIPE)
+			continue;
+
+		if (frm_proc->cmd_buf[i][k].stripe_idx !=
+			stripe_idx)
+			continue;
+
+		if (!frm_proc->cmd_buf[i][k].mem_handle)
+			continue;
+
+		CAM_DBG(CAM_OPE, "process stripe %d", stripe_idx);
+		rc = cam_mem_get_io_buf(frm_proc->cmd_buf[i][k].mem_handle,
+			hw_mgr->iommu_cdm_hdl,
+			&iova_addr, &buf_len);
+		if (rc) {
+			CAM_DBG(CAM_OPE, "get cmd buf fail %x",
+				hw_mgr->iommu_hdl);
+			return NULL;
+		}
+		iova_addr = iova_addr + frm_proc->cmd_buf[i][k].offset;
+		rc = cam_mem_get_cpu_buf(frm_proc->cmd_buf[i][k].mem_handle,
+			&cpu_addr, &buf_len);
+		if (rc || !cpu_addr) {
+			CAM_DBG(CAM_OPE, "get cmd buf fail %x",
+				hw_mgr->iommu_hdl);
+			return NULL;
+		}
+		cpu_addr = cpu_addr + frm_proc->cmd_buf[i][k].offset;
+
+		if (frm_proc->cmd_buf[i][k].type == OPE_CMD_BUF_TYPE_DIRECT) {
+			kmd_buf = cdm_ops->cdm_write_indirect(
+				kmd_buf,
+				iova_addr,
+				frm_proc->cmd_buf[i][k].length);
+			print_ptr = (uint32_t *)cpu_addr;
+			CAM_DBG(CAM_OPE, "Stripe:%d direct:E",
+				stripe_idx);
+			for (print_idx = 0; print_idx <
+				frm_proc->cmd_buf[i][k].length / 4;
+				print_idx++) {
+				CAM_DBG(CAM_OPE, "%d: %x", print_idx,
+					print_ptr[print_idx]);
+			}
+			CAM_DBG(CAM_OPE, "Stripe:%d direct:X", stripe_idx);
+		} else if (frm_proc->cmd_buf[i][k].type ==
+			OPE_CMD_BUF_TYPE_INDIRECT) {
+			/* fix: length was read from [i][j] (j = s_idx), not
+			 * the command buffer being processed ([i][k])
+			 */
+			num_dmi = frm_proc->cmd_buf[i][k].length /
+				sizeof(struct cdm_dmi_cmd);
+			CAM_DBG(CAM_OPE, "Stripe:%d Indirect:E", stripe_idx);
+			print_ptr = (uint32_t *)cpu_addr;
+			for (print_idx = 0; print_idx < num_dmi; print_idx++) {
+				memcpy(temp, (const void *)print_ptr,
+					sizeof(struct cdm_dmi_cmd));
+				dmi_cmd = (struct cdm_dmi_cmd *)temp;
+				kmd_buf = cdm_ops->cdm_write_dmi(kmd_buf,
+					0, dmi_cmd->DMIAddr, dmi_cmd->DMISel,
+					dmi_cmd->addr, dmi_cmd->length);
+				dump_dmi_cmd(print_idx,
+					print_ptr, dmi_cmd, temp);
+				print_ptr += sizeof(struct cdm_dmi_cmd) /
+					sizeof(uint32_t);
+			}
+			CAM_DBG(CAM_OPE, "Stripe:%d Indirect:X", stripe_idx);
+		}
+		dump_stripe_cmd(frm_proc, stripe_idx, i, k,
+			iova_addr, kmd_buf, buf_len);
+	}
+	return kmd_buf;
+}
+
+/*
+ * ope_create_stripe_wr - chain the bus-WR stripe-level CDM programs of
+ * one stripe into the KMD buffer via indirect references.
+ *
+ * Returns the advanced KMD buffer pointer, or NULL if @stripe_idx is out
+ * of range.
+ */
+static uint32_t *ope_create_stripe_wr(struct cam_ope_ctx *ctx_data,
+	uint32_t stripe_idx, struct ope_bus_wr_io_port_cdm_info *wr_cdm_info,
+	struct cam_ope_request *ope_request, uint32_t *kmd_buf)
+{
+	struct cam_cdm_utils_ops *cdm_ops = ctx_data->ope_cdm.cdm_ops;
+	int cmd_i = 0;
+
+	if (stripe_idx >= OPE_MAX_STRIPES) {
+		CAM_ERR(CAM_OPE, "invalid s_idx = %d", stripe_idx);
+		return NULL;
+	}
+
+	while (cmd_i < wr_cdm_info->num_s_cmd_bufs[stripe_idx]) {
+		kmd_buf = cdm_ops->cdm_write_indirect(kmd_buf,
+			(uint32_t)ope_request->ope_kmd_buf.iova_cdm_addr +
+			wr_cdm_info->s_cdm_info[stripe_idx][cmd_i].offset,
+			wr_cdm_info->s_cdm_info[stripe_idx][cmd_i].len);
+		CAM_DBG(CAM_OPE, "WR stripe:%d %d kmdbuf:%x",
+			stripe_idx, cmd_i, kmd_buf);
+		CAM_DBG(CAM_OPE, "offset:%d len:%d iova:%x %pK",
+			wr_cdm_info->s_cdm_info[stripe_idx][cmd_i].offset,
+			wr_cdm_info->s_cdm_info[stripe_idx][cmd_i].len,
+			ope_request->ope_kmd_buf.iova_cdm_addr,
+			ope_request->ope_kmd_buf.iova_cdm_addr);
+		cmd_i++;
+	}
+
+	return kmd_buf;
+}
+
+/*
+ * ope_create_stripe_rd - chain the bus-RD stripe-level CDM programs of
+ * one stripe into the KMD buffer via indirect references.
+ *
+ * Returns the advanced KMD buffer pointer, or NULL if @stripe_idx is out
+ * of range.
+ */
+static uint32_t *ope_create_stripe_rd(struct cam_ope_ctx *ctx_data,
+	uint32_t stripe_idx, struct ope_bus_rd_io_port_cdm_info *rd_cdm_info,
+	struct cam_ope_request *ope_request, uint32_t *kmd_buf)
+{
+	struct cam_cdm_utils_ops *cdm_ops;
+	int k;
+
+	if (stripe_idx >= OPE_MAX_STRIPES) {
+		CAM_ERR(CAM_OPE, "invalid s_idx = %d", stripe_idx);
+		return NULL;
+	}
+
+	cdm_ops = ctx_data->ope_cdm.cdm_ops;
+	for (k = 0; k < rd_cdm_info->num_s_cmd_bufs[stripe_idx]; k++) {
+		kmd_buf = cdm_ops->cdm_write_indirect(kmd_buf,
+			(uint32_t)ope_request->ope_kmd_buf.iova_cdm_addr +
+			rd_cdm_info->s_cdm_info[stripe_idx][k].offset,
+			rd_cdm_info->s_cdm_info[stripe_idx][k].len);
+		/* fix: log tag said "WR stripe" in the RD path (copy-paste) */
+		CAM_DBG(CAM_OPE, "RD stripe:%d %d kmdbuf:%x",
+			stripe_idx, k, kmd_buf);
+		CAM_DBG(CAM_OPE, "offset:%d len:%d iova:%x %pK",
+			rd_cdm_info->s_cdm_info[stripe_idx][k].offset,
+			rd_cdm_info->s_cdm_info[stripe_idx][k].len,
+			ope_request->ope_kmd_buf.iova_cdm_addr,
+			ope_request->ope_kmd_buf.iova_cdm_addr);
+	}
+	return kmd_buf;
+}
+
+/*
+ * ope_create_stripes_batch - program the stripe-level CDM sequence for a
+ * single batch: per stripe, stripe command buffers, then WR and RD stripe
+ * programs, a GO command, and a wait for RUP completion.
+ *
+ * NOTE(review): stripe_idx starts at 0 here for every batch, unlike
+ * ope_create_stripes() where it runs across batches - confirm callers
+ * expect per-batch stripe numbering.
+ *
+ * Returns the advanced KMD buffer pointer, or NULL on failure.
+ */
+static uint32_t *ope_create_stripes_batch(struct cam_ope_hw_mgr *hw_mgr,
+	struct cam_ope_ctx *ctx_data, uint32_t req_idx,
+	uint32_t *kmd_buf, int batch_idx,
+	struct cam_ope_dev_prepare_req *ope_dev_prepare_req)
+{
+	int i, j;
+	struct cam_ope_request *ope_request;
+	struct ope_bus_wr_io_port_cdm_info *wr_cdm_info;
+	struct ope_bus_rd_io_port_cdm_info *rd_cdm_info;
+	struct ope_frame_process *frm_proc;
+	uint32_t stripe_idx = 0;
+	struct cam_cdm_utils_ops *cdm_ops;
+
+	frm_proc = ope_dev_prepare_req->frame_process;
+	ope_request = ctx_data->req_list[req_idx];
+	cdm_ops = ctx_data->ope_cdm.cdm_ops;
+
+	if (batch_idx >= OPE_MAX_BATCH_SIZE) {
+		CAM_ERR(CAM_OPE, "Invalid input: %d", batch_idx);
+		return NULL;
+	}
+	i = batch_idx;
+	/* Stripes */
+
+	wr_cdm_info =
+		&ope_dev_prepare_req->wr_cdm_batch->io_port_cdm[i];
+	rd_cdm_info =
+		&ope_dev_prepare_req->rd_cdm_batch->io_port_cdm[i];
+	for (j = 0; j < ope_request->num_stripes[i]; j++) {
+		/* cmd buffer stripes */
+		kmd_buf = ope_create_stripe_cmd(hw_mgr, ctx_data,
+			kmd_buf, i, j, stripe_idx, frm_proc);
+		if (!kmd_buf)
+			goto end;
+
+		/* WR stripes */
+		kmd_buf = ope_create_stripe_wr(ctx_data, stripe_idx,
+			wr_cdm_info, ope_request, kmd_buf);
+		if (!kmd_buf)
+			goto end;
+
+		/* RD stripes */
+		kmd_buf = ope_create_stripe_rd(ctx_data, stripe_idx,
+			rd_cdm_info, ope_request, kmd_buf);
+		if (!kmd_buf)
+			goto end;
+
+		/* add go command */
+		kmd_buf = cdm_ops->cdm_write_indirect(kmd_buf,
+		(uint32_t)ope_request->ope_kmd_buf.iova_cdm_addr +
+		rd_cdm_info->go_cmd_offset,
+		rd_cdm_info->go_cmd_len);
+
+		CAM_DBG(CAM_OPE, "Go cmd for stripe:%d kmd_buf:%x",
+			stripe_idx, kmd_buf);
+		CAM_DBG(CAM_OPE, "iova:%x %pK",
+			ope_request->ope_kmd_buf.iova_cdm_addr,
+			ope_request->ope_kmd_buf.iova_cdm_addr);
+
+		/* wait for RUP done */
+		kmd_buf = cdm_ops->cdm_write_wait_comp_event(kmd_buf,
+			OPE_WAIT_COMP_RUP, 0x0);
+		CAM_DBG(CAM_OPE, "wait RUP cmd stripe:%d kmd_buf:%x",
+			stripe_idx, kmd_buf);
+		stripe_idx++;
+	}
+
+end:
+	return kmd_buf;
+}
+
+/*
+ * ope_create_stripes - program the stripe-level CDM sequence for all
+ * batches of a request: per stripe, stripe command buffers, then WR and
+ * RD stripe programs, a GO command, and a wait for RUP completion.
+ * stripe_idx runs contiguously across batches.
+ *
+ * Returns the advanced KMD buffer pointer, or NULL on failure.
+ */
+static uint32_t *ope_create_stripes(struct cam_ope_hw_mgr *hw_mgr,
+	struct cam_ope_ctx *ctx_data, uint32_t req_idx,
+	uint32_t *kmd_buf,
+	struct cam_ope_dev_prepare_req *ope_dev_prepare_req)
+{
+	int i, j;
+	struct cam_ope_request *ope_request;
+	struct ope_bus_wr_io_port_cdm_info *wr_cdm_info;
+	struct ope_bus_rd_io_port_cdm_info *rd_cdm_info;
+	struct ope_frame_process *frm_proc;
+	uint32_t stripe_idx = 0;
+	struct cam_cdm_utils_ops *cdm_ops;
+
+	frm_proc = ope_dev_prepare_req->frame_process;
+	ope_request = ctx_data->req_list[req_idx];
+	cdm_ops = ctx_data->ope_cdm.cdm_ops;
+
+	/* Stripes */
+	for (i = 0; i < frm_proc->batch_size; i++) {
+		wr_cdm_info =
+		&ope_dev_prepare_req->wr_cdm_batch->io_port_cdm[i];
+		rd_cdm_info =
+		&ope_dev_prepare_req->rd_cdm_batch->io_port_cdm[i];
+		for (j = 0; j < ope_request->num_stripes[i]; j++) {
+			/* cmd buffer stripes */
+			kmd_buf = ope_create_stripe_cmd(hw_mgr, ctx_data,
+				kmd_buf, i, j, stripe_idx, frm_proc);
+			if (!kmd_buf)
+				goto end;
+
+			/* WR stripes */
+			kmd_buf = ope_create_stripe_wr(ctx_data, stripe_idx,
+				wr_cdm_info, ope_request, kmd_buf);
+			if (!kmd_buf)
+				goto end;
+
+			/* RD stripes */
+			kmd_buf = ope_create_stripe_rd(ctx_data, stripe_idx,
+				rd_cdm_info, ope_request, kmd_buf);
+			if (!kmd_buf)
+				goto end;
+
+			/* add go command */
+			kmd_buf = cdm_ops->cdm_write_indirect(kmd_buf,
+			(uint32_t)ope_request->ope_kmd_buf.iova_cdm_addr +
+			rd_cdm_info->go_cmd_offset,
+			rd_cdm_info->go_cmd_len);
+
+			CAM_DBG(CAM_OPE, "Go cmd for stripe:%d kmd_buf:%x",
+				stripe_idx, kmd_buf);
+			CAM_DBG(CAM_OPE, "iova:%x %pK",
+				ope_request->ope_kmd_buf.iova_cdm_addr,
+				ope_request->ope_kmd_buf.iova_cdm_addr);
+
+			/* wait for RUP done */
+			kmd_buf = cdm_ops->cdm_write_wait_comp_event(kmd_buf,
+				OPE_WAIT_COMP_RUP, 0x0);
+			CAM_DBG(CAM_OPE, "wait RUP cmd stripe:%d kmd_buf:%x",
+				stripe_idx, kmd_buf);
+			stripe_idx++;
+		}
+	}
+end:
+	return kmd_buf;
+}
+
+/*
+ * ope_create_stripes_nrt - stripe programming for the non-real-time
+ * (NRT) path with arbitration: after every nrt_stripes_for_arb stripes
+ * the accumulated CDM program is closed with a wait-idle, submitted as a
+ * BL with arbitration enabled, and frame-level programming is re-emitted
+ * before the next group of stripes.
+ *
+ * NOTE(review): return values of cam_ope_dev_prepare_cdm_request() and
+ * ope_create_frame_cmd_* inside the arbitration branch are handled via
+ * NULL checks only; prepare_cdm_request's rc is ignored - confirm.
+ *
+ * Returns the advanced KMD buffer pointer, or NULL on failure.
+ */
+static uint32_t *ope_create_stripes_nrt(struct cam_ope_hw_mgr *hw_mgr,
+	struct cam_ope_ctx *ctx_data, uint32_t req_idx,
+	uint32_t *kmd_buf,
+	struct cam_ope_dev_prepare_req *ope_dev_prepare_req,
+	uint32_t kmd_buf_offset)
+{
+	int i, j;
+	struct cam_ope_request *ope_request;
+	struct ope_bus_wr_io_port_cdm_info *wr_cdm_info;
+	struct ope_bus_rd_io_port_cdm_info *rd_cdm_info;
+	struct ope_frame_process *frm_proc;
+	uint32_t stripe_idx = 0;
+	struct cam_cdm_utils_ops *cdm_ops;
+	uint32_t len;
+	uint32_t *cdm_kmd_start_addr;
+	int num_nrt_stripes, num_arb;
+
+	frm_proc = ope_dev_prepare_req->frame_process;
+	ope_request = ctx_data->req_list[req_idx];
+	/* kmd_buf_offset is in bytes; advance the dword pointer */
+	cdm_kmd_start_addr = (uint32_t *)ope_request->ope_kmd_buf.cpu_addr +
+		(kmd_buf_offset / sizeof(len));
+	num_nrt_stripes = ctx_data->ope_acquire.nrt_stripes_for_arb;
+	/* number of arbitration points, rounded up */
+	num_arb = ope_request->num_stripes[0] /
+		ctx_data->ope_acquire.nrt_stripes_for_arb;
+	if (ope_request->num_stripes[0] %
+		ctx_data->ope_acquire.nrt_stripes_for_arb)
+		num_arb++;
+	CAM_DBG(CAM_OPE, "Number of ARB for snap: %d", num_arb);
+	cdm_ops = ctx_data->ope_cdm.cdm_ops;
+
+	/* Stripes */
+	for (i = 0; i < frm_proc->batch_size; i++) {
+		wr_cdm_info =
+		&ope_dev_prepare_req->wr_cdm_batch->io_port_cdm[i];
+		rd_cdm_info =
+		&ope_dev_prepare_req->rd_cdm_batch->io_port_cdm[i];
+		for (j = 0; j < ope_request->num_stripes[i]; j++) {
+			CAM_DBG(CAM_OPE, "num_nrt_stripes = %d num_arb = %d",
+				num_nrt_stripes, num_arb);
+			if (!num_nrt_stripes) {
+				/* close current BL: wait idle, submit with
+				 * arbitration, restart at current kmd_buf
+				 */
+				kmd_buf = cdm_ops->cdm_write_wait_comp_event(
+					kmd_buf,
+					OPE_WAIT_COMP_IDLE, 0x0);
+				len = (kmd_buf - cdm_kmd_start_addr) *
+					sizeof(uint32_t);
+				cam_ope_dev_prepare_cdm_request(
+					ope_dev_prepare_req->hw_mgr,
+					ope_dev_prepare_req->prepare_args,
+					ope_dev_prepare_req->ctx_data,
+					ope_dev_prepare_req->req_idx,
+					kmd_buf_offset, ope_dev_prepare_req,
+					len, true);
+				cdm_kmd_start_addr = kmd_buf;
+				kmd_buf_offset += len;
+			}
+			/* cmd buffer stripes */
+			kmd_buf = ope_create_stripe_cmd(hw_mgr, ctx_data,
+				kmd_buf, i, j, stripe_idx, frm_proc);
+			if (!kmd_buf)
+				goto end;
+
+			/* WR stripes */
+			kmd_buf = ope_create_stripe_wr(ctx_data, stripe_idx,
+				wr_cdm_info, ope_request, kmd_buf);
+			if (!kmd_buf)
+				goto end;
+
+			/* RD stripes */
+			kmd_buf = ope_create_stripe_rd(ctx_data, stripe_idx,
+				rd_cdm_info, ope_request, kmd_buf);
+			if (!kmd_buf)
+				goto end;
+
+			if (!num_nrt_stripes) {
+				/* For num_nrt_stripes create CDM BL with ARB */
+				/* Add Frame level cmds in this condition */
+				/* Frame 0 DB */
+				kmd_buf = ope_create_frame_cmd(hw_mgr,
+					ctx_data, req_idx,
+					kmd_buf, OPE_CMD_BUF_DOUBLE_BUFFERED,
+					ope_dev_prepare_req);
+				if (!kmd_buf)
+					goto end;
+
+				/* Frame 0 SB */
+				kmd_buf = ope_create_frame_cmd(hw_mgr,
+					ctx_data, req_idx,
+					kmd_buf, OPE_CMD_BUF_SINGLE_BUFFERED,
+					ope_dev_prepare_req);
+				if (!kmd_buf)
+					goto end;
+
+				/* Frame 0 WR */
+				kmd_buf = ope_create_frame_wr(ctx_data,
+					wr_cdm_info, kmd_buf, ope_request);
+				if (!kmd_buf)
+					goto end;
+
+				/* Frame 0 RD */
+				kmd_buf = ope_create_frame_rd(ctx_data,
+					rd_cdm_info, kmd_buf, ope_request);
+				if (!kmd_buf)
+					goto end;
+				num_arb--;
+				num_nrt_stripes =
+				ctx_data->ope_acquire.nrt_stripes_for_arb;
+			}
+			// add go command
+			kmd_buf = cdm_ops->cdm_write_indirect(kmd_buf,
+			(uint32_t)ope_request->ope_kmd_buf.iova_cdm_addr +
+			rd_cdm_info->go_cmd_offset,
+			rd_cdm_info->go_cmd_len);
+
+			CAM_DBG(CAM_OPE, "Go cmd for stripe:%d kmd_buf:%x",
+				stripe_idx, kmd_buf);
+			CAM_DBG(CAM_OPE, "iova:%x %pK",
+				ope_request->ope_kmd_buf.iova_cdm_addr,
+				ope_request->ope_kmd_buf.iova_cdm_addr);
+
+			// wait for RUP done
+			kmd_buf = cdm_ops->cdm_write_wait_comp_event(kmd_buf,
+				OPE_WAIT_COMP_RUP, 0x0);
+			CAM_DBG(CAM_OPE, "wait RUP cmd stripe:%d kmd_buf:%x",
+				stripe_idx, kmd_buf);
+			stripe_idx++;
+			num_nrt_stripes--;
+		}
+	}
+end:
+	return kmd_buf;
+}
+
+/*
+ * cam_ope_dev_create_kmd_buf_nrt - build the CDM KMD buffer for a
+ * non-real-time request: frame-level DB/SB command buffers, WR and RD
+ * frame programs, then arbitrated stripe programming, a final wait-idle
+ * and the CDM submit packet.
+ *
+ * Returns 0 on success, -EINVAL if any programming step fails.
+ */
+static int cam_ope_dev_create_kmd_buf_nrt(struct cam_ope_hw_mgr *hw_mgr,
+	struct cam_hw_prepare_update_args *prepare_args,
+	struct cam_ope_ctx *ctx_data, uint32_t req_idx,
+	uint32_t kmd_buf_offset,
+	struct cam_ope_dev_prepare_req *ope_dev_prepare_req)
+{
+	int rc = 0;
+	uint32_t len;
+	struct cam_ope_request *ope_request;
+	uint32_t *kmd_buf;
+	uint32_t *cdm_kmd_start_addr;
+	struct ope_bus_wr_io_port_cdm_info *wr_cdm_info;
+	struct ope_bus_rd_io_port_cdm_info *rd_cdm_info;
+	struct ope_frame_process *frm_proc;
+	struct cam_cdm_utils_ops *cdm_ops;
+
+	frm_proc = ope_dev_prepare_req->frame_process;
+	ope_request = ctx_data->req_list[req_idx];
+	/* kmd_buf_offset is in bytes; advance the dword pointer */
+	kmd_buf = (uint32_t *)ope_request->ope_kmd_buf.cpu_addr +
+		(kmd_buf_offset / sizeof(len));
+	cdm_kmd_start_addr = kmd_buf;
+	wr_cdm_info =
+		&ope_dev_prepare_req->wr_cdm_batch->io_port_cdm[0];
+	rd_cdm_info =
+		&ope_dev_prepare_req->rd_cdm_batch->io_port_cdm[0];
+
+	cdm_ops = ctx_data->ope_cdm.cdm_ops;
+
+	/* Frame 0 DB */
+	kmd_buf = ope_create_frame_cmd(hw_mgr,
+		ctx_data, req_idx,
+		kmd_buf, OPE_CMD_BUF_DOUBLE_BUFFERED,
+		ope_dev_prepare_req);
+	if (!kmd_buf) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* Frame 0 SB */
+	kmd_buf = ope_create_frame_cmd(hw_mgr,
+		ctx_data, req_idx,
+		kmd_buf, OPE_CMD_BUF_SINGLE_BUFFERED,
+		ope_dev_prepare_req);
+	if (!kmd_buf) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* Frame 0 WR */
+	kmd_buf = ope_create_frame_wr(ctx_data,
+		wr_cdm_info, kmd_buf, ope_request);
+	if (!kmd_buf) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* Frame 0 RD */
+	kmd_buf = ope_create_frame_rd(ctx_data,
+		rd_cdm_info, kmd_buf, ope_request);
+	if (!kmd_buf) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* Stripes */
+	kmd_buf = ope_create_stripes_nrt(hw_mgr, ctx_data, req_idx, kmd_buf,
+		ope_dev_prepare_req, kmd_buf_offset);
+	if (!kmd_buf) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* Last arbitration if there are odd number of stripes */
+	/* wait_idle_irq */
+	kmd_buf = cdm_ops->cdm_write_wait_comp_event(kmd_buf,
+		OPE_WAIT_COMP_IDLE, 0x0);
+
+	/* prepare CDM submit packet */
+	/* NOTE(review): len spans from the last ARB restart inside
+	 * ope_create_stripes_nrt only if no mid-stream submit happened;
+	 * cdm_kmd_start_addr here is the initial start - confirm with the
+	 * NRT submit flow.
+	 */
+	len = (kmd_buf - cdm_kmd_start_addr) * sizeof(uint32_t);
+	cam_ope_dev_prepare_cdm_request(ope_dev_prepare_req->hw_mgr,
+		ope_dev_prepare_req->prepare_args,
+		ope_dev_prepare_req->ctx_data, ope_dev_prepare_req->req_idx,
+		kmd_buf_offset, ope_dev_prepare_req,
+		len, false);
+end:
+	return rc;
+}
+
+/*
+ * cam_ope_dev_create_kmd_buf_batch - build the CDM KMD buffer for a
+ * batched (batch_size > 1) request: per batch, optional prefetch-disable
+ * programming, frame-level DB/SB command buffers, WR/RD frame programs
+ * and stripe programming; then a final wait-idle and the CDM submit
+ * packet.
+ *
+ * Returns 0 on success, -EINVAL if any programming step fails.
+ */
+static int cam_ope_dev_create_kmd_buf_batch(struct cam_ope_hw_mgr *hw_mgr,
+	struct cam_hw_prepare_update_args *prepare_args,
+	struct cam_ope_ctx *ctx_data, uint32_t req_idx,
+	uint32_t kmd_buf_offset,
+	struct cam_ope_dev_prepare_req *ope_dev_prepare_req)
+{
+	int rc = 0, i;
+	uint32_t len;
+	struct cam_ope_request *ope_request;
+	uint32_t *kmd_buf;
+	uint32_t *cdm_kmd_start_addr;
+	struct ope_bus_wr_io_port_cdm_info *wr_cdm_info;
+	struct ope_bus_rd_io_port_cdm_info *rd_cdm_info;
+	struct ope_frame_process *frm_proc;
+	struct cam_cdm_utils_ops *cdm_ops;
+
+	frm_proc = ope_dev_prepare_req->frame_process;
+	ope_request = ctx_data->req_list[req_idx];
+	/* fix: kmd_buf_offset is in bytes - scale to dwords as the other
+	 * create_kmd_buf variants do, instead of adding it raw
+	 */
+	kmd_buf = (uint32_t *)ope_request->ope_kmd_buf.cpu_addr +
+		(kmd_buf_offset / sizeof(len));
+	cdm_kmd_start_addr = kmd_buf;
+	cdm_ops = ctx_data->ope_cdm.cdm_ops;
+
+	for (i = 0; i < frm_proc->batch_size; i++) {
+		wr_cdm_info =
+		&ope_dev_prepare_req->wr_cdm_batch->io_port_cdm[i];
+		rd_cdm_info =
+		&ope_dev_prepare_req->rd_cdm_batch->io_port_cdm[i];
+
+		/* After second batch DB programming add prefecth dis */
+		if (i) {
+			/* program db buffered prefecth disable cmds */
+			kmd_buf = ope_create_frame_cmd_prefetch_dis(hw_mgr,
+				ctx_data, req_idx,
+				kmd_buf, OPE_CMD_BUF_DOUBLE_BUFFERED, i,
+				ope_dev_prepare_req);
+			if (!kmd_buf) {
+				rc = -EINVAL;
+				goto end;
+			}
+			kmd_buf =
+				cdm_ops->cdm_write_wait_prefetch_disable(
+				kmd_buf, 0x0,
+				OPE_WAIT_COMP_IDLE, 0x0);
+		}
+
+		/* Frame i DB */
+		kmd_buf = ope_create_frame_cmd_batch(hw_mgr,
+			ctx_data, req_idx,
+			kmd_buf, OPE_CMD_BUF_DOUBLE_BUFFERED, i,
+			ope_dev_prepare_req);
+		if (!kmd_buf) {
+			rc = -EINVAL;
+			goto end;
+		}
+
+		/* Frame i SB */
+		kmd_buf = ope_create_frame_cmd_batch(hw_mgr,
+			ctx_data, req_idx,
+			kmd_buf, OPE_CMD_BUF_SINGLE_BUFFERED, i,
+			ope_dev_prepare_req);
+		if (!kmd_buf) {
+			rc = -EINVAL;
+			goto end;
+		}
+
+		/* Frame i WR */
+		kmd_buf = ope_create_frame_wr(ctx_data,
+			wr_cdm_info, kmd_buf, ope_request);
+		if (!kmd_buf) {
+			rc = -EINVAL;
+			goto end;
+		}
+
+		/* Frame i RD */
+		kmd_buf = ope_create_frame_rd(ctx_data,
+			rd_cdm_info, kmd_buf, ope_request);
+		if (!kmd_buf) {
+			rc = -EINVAL;
+			goto end;
+		}
+
+		/* Stripe level programming for batch i */
+		kmd_buf = ope_create_stripes_batch(hw_mgr, ctx_data, req_idx,
+			kmd_buf, i, ope_dev_prepare_req);
+		if (!kmd_buf) {
+			rc = -EINVAL;
+			goto end;
+		}
+	}
+
+	/* wait_idle_irq */
+	kmd_buf = cdm_ops->cdm_write_wait_comp_event(kmd_buf,
+			OPE_WAIT_COMP_IDLE, 0x0);
+
+	/* prepare CDM submit packet */
+	/* fix: operands were reversed (start - end), producing a negative
+	 * length; compute end - start like the other variants
+	 */
+	len = (kmd_buf - cdm_kmd_start_addr) * sizeof(uint32_t);
+	cam_ope_dev_prepare_cdm_request(ope_dev_prepare_req->hw_mgr,
+		ope_dev_prepare_req->prepare_args,
+		ope_dev_prepare_req->ctx_data, ope_dev_prepare_req->req_idx,
+		ope_dev_prepare_req->kmd_buf_offset, ope_dev_prepare_req,
+		len, false);
+
+end:
+	return rc;
+}
+
+/*
+ * cam_ope_dev_create_kmd_buf - build the CDM KMD buffer for a request.
+ *
+ * Dispatches to the NRT variant for OPE_DEV_TYPE_OPE_NRT devices and to
+ * the batch variant when batch_size > 1; otherwise programs the simple
+ * single-frame sequence: frame DB/SB command buffers, WR/RD frame
+ * programs, stripes, a final wait-idle and the CDM submit packet.
+ *
+ * Returns 0 on success, -EINVAL if any programming step fails.
+ */
+static int cam_ope_dev_create_kmd_buf(struct cam_ope_hw_mgr *hw_mgr,
+	struct cam_hw_prepare_update_args *prepare_args,
+	struct cam_ope_ctx *ctx_data, uint32_t req_idx,
+	uint32_t kmd_buf_offset,
+	struct cam_ope_dev_prepare_req *ope_dev_prepare_req)
+{
+	int rc = 0;
+	uint32_t len;
+	struct cam_ope_request *ope_request;
+	uint32_t *kmd_buf;
+	uint32_t *cdm_kmd_start_addr;
+	struct ope_bus_wr_io_port_cdm_info *wr_cdm_info;
+	struct ope_bus_rd_io_port_cdm_info *rd_cdm_info;
+	struct cam_cdm_utils_ops *cdm_ops;
+
+
+	if (ctx_data->ope_acquire.dev_type == OPE_DEV_TYPE_OPE_NRT) {
+		return cam_ope_dev_create_kmd_buf_nrt(
+			ope_dev_prepare_req->hw_mgr,
+			ope_dev_prepare_req->prepare_args,
+			ope_dev_prepare_req->ctx_data,
+			ope_dev_prepare_req->req_idx,
+			ope_dev_prepare_req->kmd_buf_offset,
+			ope_dev_prepare_req);
+	}
+
+	if (ctx_data->ope_acquire.batch_size > 1) {
+		return cam_ope_dev_create_kmd_buf_batch(
+		ope_dev_prepare_req->hw_mgr,
+		ope_dev_prepare_req->prepare_args,
+		ope_dev_prepare_req->ctx_data,
+		ope_dev_prepare_req->req_idx,
+		ope_dev_prepare_req->kmd_buf_offset,
+		ope_dev_prepare_req);
+	}
+
+	ope_request = ctx_data->req_list[req_idx];
+	/* kmd_buf_offset is in bytes; advance the dword pointer */
+	kmd_buf = (uint32_t *)ope_request->ope_kmd_buf.cpu_addr +
+		(kmd_buf_offset / sizeof(len));
+	cdm_kmd_start_addr = kmd_buf;
+	cdm_ops = ctx_data->ope_cdm.cdm_ops;
+	wr_cdm_info =
+		&ope_dev_prepare_req->wr_cdm_batch->io_port_cdm[0];
+	rd_cdm_info =
+		&ope_dev_prepare_req->rd_cdm_batch->io_port_cdm[0];
+
+
+	CAM_DBG(CAM_OPE, "kmd_buf:%x req_idx:%d req_id:%lld offset:%d",
+		kmd_buf, req_idx, ope_request->request_id, kmd_buf_offset);
+
+	/* Frame 0 DB */
+	kmd_buf = ope_create_frame_cmd(hw_mgr,
+		ctx_data, req_idx,
+		kmd_buf, OPE_CMD_BUF_DOUBLE_BUFFERED,
+		ope_dev_prepare_req);
+	if (!kmd_buf) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* Frame 0 SB */
+	kmd_buf = ope_create_frame_cmd(hw_mgr,
+		ctx_data, req_idx,
+		kmd_buf, OPE_CMD_BUF_SINGLE_BUFFERED,
+		ope_dev_prepare_req);
+	if (!kmd_buf) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* Frame 0 WR */
+	kmd_buf = ope_create_frame_wr(ctx_data,
+		wr_cdm_info, kmd_buf, ope_request);
+	if (!kmd_buf) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* Frame 0 RD */
+	kmd_buf = ope_create_frame_rd(ctx_data,
+		rd_cdm_info, kmd_buf, ope_request);
+	if (!kmd_buf) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* Stripes */
+	kmd_buf = ope_create_stripes(hw_mgr, ctx_data, req_idx, kmd_buf,
+		ope_dev_prepare_req);
+	if (!kmd_buf) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* wait_idle_irq */
+	kmd_buf = cdm_ops->cdm_write_wait_comp_event(kmd_buf,
+			OPE_WAIT_COMP_IDLE, 0x0);
+
+	CAM_DBG(CAM_OPE, "wait for idle IRQ: kmd_buf:%x", kmd_buf);
+
+	/* prepare CDM submit packet */
+	len = (kmd_buf - cdm_kmd_start_addr) * sizeof(uint32_t);
+	CAM_DBG(CAM_OPE, "kmd_start_addr:%x kmdbuf_addr:%x len:%d",
+		cdm_kmd_start_addr, kmd_buf, len);
+	cam_ope_dev_prepare_cdm_request(
+		ope_dev_prepare_req->hw_mgr,
+		ope_dev_prepare_req->prepare_args,
+		ope_dev_prepare_req->ctx_data,
+		ope_dev_prepare_req->req_idx,
+		ope_dev_prepare_req->kmd_buf_offset,
+		ope_dev_prepare_req,
+		len, false);
+end:
+	return rc;
+}
+
+/*
+ * cam_ope_dev_process_prepare - run OPE_HW_PREPARE on all sub-blocks
+ * (top, bus RD, bus WR) and then build the KMD CDM buffer.
+ *
+ * Returns 0 on success or the first sub-step's error code.
+ */
+static int cam_ope_dev_process_prepare(struct ope_hw *ope_hw, void *cmd_args)
+{
+	int rc = 0;
+	struct cam_ope_dev_prepare_req *ope_dev_prepare_req;
+
+	ope_dev_prepare_req = cmd_args;
+
+	rc = cam_ope_top_process(ope_hw, ope_dev_prepare_req->ctx_data->ctx_id,
+		OPE_HW_PREPARE, ope_dev_prepare_req);
+	if (rc)
+		goto end;
+
+	rc = cam_ope_bus_rd_process(ope_hw,
+		ope_dev_prepare_req->ctx_data->ctx_id,
+		OPE_HW_PREPARE, ope_dev_prepare_req);
+	if (rc)
+		goto end;
+
+	rc = cam_ope_bus_wr_process(ope_hw,
+		ope_dev_prepare_req->ctx_data->ctx_id,
+		OPE_HW_PREPARE, ope_dev_prepare_req);
+	if (rc)
+		goto end;
+
+	/* fix: the return code of KMD buffer creation was silently dropped */
+	rc = cam_ope_dev_create_kmd_buf(ope_dev_prepare_req->hw_mgr,
+		ope_dev_prepare_req->prepare_args,
+		ope_dev_prepare_req->ctx_data, ope_dev_prepare_req->req_idx,
+		ope_dev_prepare_req->kmd_buf_offset, ope_dev_prepare_req);
+
+end:
+	return rc;
+}
+
+/*
+ * cam_ope_dev_process_probe - run OPE_HW_PROBE on all sub-blocks
+ * (top, bus RD, bus WR) with an invalid (-1) ctx id.
+ *
+ * NOTE(review): sub-block return codes are ignored and 0 is always
+ * returned - confirm probe failures are benign here.
+ */
+static int cam_ope_dev_process_probe(struct ope_hw *ope_hw,
+	void *cmd_args)
+{
+	cam_ope_top_process(ope_hw, -1, OPE_HW_PROBE, NULL);
+	cam_ope_bus_rd_process(ope_hw, -1, OPE_HW_PROBE, NULL);
+	cam_ope_bus_wr_process(ope_hw, -1, OPE_HW_PROBE, NULL);
+
+	return 0;
+}
+
+/*
+ * cam_ope_process_probe - PROBE dispatcher; HFI path not supported.
+ *
+ * NOTE(review): unlike the other cam_ope_process_* dispatchers this reads
+ * hfi_en from the probe payload rather than from the hfi_en argument -
+ * confirm the asymmetry is intentional.
+ */
+static int cam_ope_process_probe(struct ope_hw *ope_hw,
+	void *cmd_args, bool hfi_en)
+{
+	struct cam_ope_dev_probe *ope_probe = cmd_args;
+
+	if (!ope_probe->hfi_en)
+		return cam_ope_dev_process_probe(ope_hw, cmd_args);
+
+	return -EINVAL;
+}
+
+/* Dispatch OPE_HW_RESET; only the direct (non-HFI) path is supported. */
+static int cam_ope_process_reset(struct ope_hw *ope_hw,
+	void *cmd_args, bool hfi_en)
+{
+	if (hfi_en)
+		return -EINVAL;
+
+	return cam_ope_dev_process_reset(ope_hw, cmd_args);
+}
+
+/* Dispatch OPE_HW_RELEASE; only the direct (non-HFI) path is supported. */
+static int cam_ope_process_release(struct ope_hw *ope_hw,
+	void *cmd_args, bool hfi_en)
+{
+	if (hfi_en)
+		return -EINVAL;
+
+	return cam_ope_dev_process_release(ope_hw, cmd_args);
+}
+
+/* Dispatch OPE_HW_ACQUIRE; only the direct (non-HFI) path is supported. */
+static int cam_ope_process_acquire(struct ope_hw *ope_hw,
+	void *cmd_args, bool hfi_en)
+{
+	if (hfi_en)
+		return -EINVAL;
+
+	return cam_ope_dev_process_acquire(ope_hw, cmd_args);
+}
+
+/* Dispatch OPE_HW_PREPARE; only the direct (non-HFI) path is supported. */
+static int cam_ope_process_prepare(struct ope_hw *ope_hw,
+	void *cmd_args, bool hfi_en)
+{
+	if (hfi_en)
+		return -EINVAL;
+
+	return cam_ope_dev_process_prepare(ope_hw, cmd_args);
+}
+
+/*
+ * cam_ope_process_cmd - Top-level dispatcher for OPE hardware commands.
+ *
+ * @device_priv: cam_hw_info of this OPE device
+ * @cmd_type:    OPE_HW_* opcode (ope_dev_intf.h)
+ * @cmd_args:    opcode-specific payload
+ * @arg_size:    payload size (unused by the handlers here)
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int cam_ope_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_hw_info *ope_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_ope_device_core_info *core_info = NULL;
+	struct ope_hw *ope_hw;
+	bool hfi_en;
+	unsigned long flags;
+
+	if (!device_priv) {
+		/* was "%x" on a pointer: wrong printk specifier */
+		CAM_ERR(CAM_OPE, "Invalid args %pK for cmd %u",
+			device_priv, cmd_type);
+		return -EINVAL;
+	}
+
+	soc_info = &ope_dev->soc_info;
+	core_info = (struct cam_ope_device_core_info *)ope_dev->core_info;
+	/*
+	 * soc_info is the address of an embedded member and can never be
+	 * NULL; the old (!soc_info) test was dead code. Only core_info
+	 * needs validation.
+	 */
+	if (!core_info) {
+		CAM_ERR(CAM_OPE, "core_info is NULL for cmd %u", cmd_type);
+		return -EINVAL;
+	}
+
+	hfi_en = core_info->ope_hw_info->hfi_en;
+	ope_hw = core_info->ope_hw_info->ope_hw;
+	if (!ope_hw) {
+		CAM_ERR(CAM_OPE, "Invalid ope hw info");
+		return -EINVAL;
+	}
+
+	switch (cmd_type) {
+	case OPE_HW_PROBE:
+		rc = cam_ope_process_probe(ope_hw, cmd_args, hfi_en);
+		break;
+	case OPE_HW_ACQUIRE:
+		rc = cam_ope_process_acquire(ope_hw, cmd_args, hfi_en);
+		break;
+	case OPE_HW_RELEASE:
+		rc = cam_ope_process_release(ope_hw, cmd_args, hfi_en);
+		break;
+	case OPE_HW_PREPARE:
+		rc = cam_ope_process_prepare(ope_hw, cmd_args, hfi_en);
+		break;
+	case OPE_HW_START:
+		break;
+	case OPE_HW_STOP:
+		break;
+	case OPE_HW_FLUSH:
+		break;
+	case OPE_HW_RESET:
+		rc = cam_ope_process_reset(ope_hw, cmd_args, hfi_en);
+		break;
+	case OPE_HW_CLK_UPDATE: {
+		struct cam_ope_dev_clk_update *clk_upd_cmd =
+			(struct cam_ope_dev_clk_update *)cmd_args;
+
+		rc = cam_ope_update_clk_rate(soc_info, clk_upd_cmd->clk_rate);
+		if (rc)
+			CAM_ERR(CAM_OPE, "Failed to update clk: %d", rc);
+		}
+		break;
+	case OPE_HW_BW_UPDATE: {
+		struct cam_ope_dev_bw_update *cpas_vote = cmd_args;
+
+		if (!cmd_args)
+			return -EINVAL;
+
+		rc = cam_ope_caps_vote(core_info, cpas_vote);
+		if (rc)
+			CAM_ERR(CAM_OPE, "failed to update bw: %d", rc);
+		}
+		break;
+	case OPE_HW_SET_IRQ_CB: {
+		struct cam_ope_set_irq_cb *irq_cb = cmd_args;
+
+		if (!cmd_args) {
+			CAM_ERR(CAM_OPE, "cmd args NULL");
+			return -EINVAL;
+		}
+
+		/* hw_lock serializes against the IRQ handler's callback use */
+		spin_lock_irqsave(&ope_dev->hw_lock, flags);
+		core_info->irq_cb.ope_hw_mgr_cb = irq_cb->ope_hw_mgr_cb;
+		core_info->irq_cb.data = irq_cb->data;
+		spin_unlock_irqrestore(&ope_dev->hw_lock, flags);
+		}
+		break;
+	default:
+		/* Unknown opcodes were silently ignored; at least log them */
+		CAM_DBG(CAM_OPE, "Unhandled cmd type: %u", cmd_type);
+		break;
+	}
+
+	return rc;
+}
+
+/*
+ * cam_ope_irq - Top-half IRQ handler for the OPE device.
+ *
+ * Fans the interrupt out to the TOP, BUS RD and BUS WR sub-module ISRs
+ * (each may set irq_data.error), then notifies the HW manager callback
+ * only when an error was flagged.
+ */
+irqreturn_t cam_ope_irq(int irq_num, void *data)
+{
+	struct cam_hw_info *ope_dev = data;
+	struct cam_ope_device_core_info *core_info = NULL;
+	struct ope_hw *ope_hw;
+	struct cam_ope_irq_data irq_data;
+
+	if (!data) {
+		CAM_ERR(CAM_OPE, "Invalid cam_dev_info or query_cap args");
+		return IRQ_HANDLED;
+	}
+
+	core_info = (struct cam_ope_device_core_info *)ope_dev->core_info;
+	ope_hw = core_info->ope_hw_info->ope_hw;
+	/* NOTE(review): unlike cam_ope_process_cmd(), ope_hw is not
+	 * NULL-checked here; relies on probe having fully succeeded
+	 * before the IRQ is enabled — confirm.
+	 */
+
+	irq_data.error = 0;
+	cam_ope_top_process(ope_hw, 0, OPE_HW_ISR, &irq_data);
+	cam_ope_bus_rd_process(ope_hw, 0, OPE_HW_ISR, &irq_data);
+	cam_ope_bus_wr_process(ope_hw, 0, OPE_HW_ISR, &irq_data);
+
+
+	/* hw_lock pairs with OPE_HW_SET_IRQ_CB callback registration */
+	spin_lock(&ope_dev->hw_lock);
+	if (core_info->irq_cb.ope_hw_mgr_cb && core_info->irq_cb.data)
+		if (irq_data.error)
+			core_info->irq_cb.ope_hw_mgr_cb(irq_data.error,
+				core_info->irq_cb.data);
+	spin_unlock(&ope_dev->hw_lock);
+
+
+	return IRQ_HANDLED;
+}

+ 99 - 0
drivers/cam_ope/ope_hw_mgr/ope_hw/ope_core.h

@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_OPE_CORE_H
+#define CAM_OPE_CORE_H
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+#include <media/cam_ope.h>
+#include "cam_cpas_api.h"
+#include "ope_hw.h"
+#include "ope_dev_intf.h"
+/**
+ * struct cam_ope_cpas_vote
+ * @ahb_vote: AHB vote info
+ * @axi_vote: AXI vote info
+ * @ahb_vote_valid: Flag for ahb vote data
+ * @axi_vote_valid: flag for axi vote data
+ */
+struct cam_ope_cpas_vote {
+	struct cam_ahb_vote ahb_vote;
+	struct cam_axi_vote axi_vote;
+	uint32_t ahb_vote_valid;
+	uint32_t axi_vote_valid;
+};
+
+/**
+ * struct cam_ope_device_hw_info
+ *
+ * @ope_hw:          OPE hardware
+ * @hw_idx:          Hardware index
+ * @ope_cdm_base:    Base address of CDM
+ * @ope_top_base:    Base address of top
+ * @ope_qos_base:    Base address of QOS
+ * @ope_pp_base:     Base address of PP
+ * @ope_bus_rd_base: Base address of RD
+ * @ope_bus_wr_base: Base address of WM
+ * @hfi_en:          HFI flag enable
+ * @reserved:        Reserved
+ */
+struct cam_ope_device_hw_info {
+	struct ope_hw *ope_hw;
+	uint32_t hw_idx;
+	void *ope_cdm_base;
+	void *ope_top_base;
+	void *ope_qos_base;
+	void *ope_pp_base;
+	void *ope_bus_rd_base;
+	void *ope_bus_wr_base;
+	bool hfi_en;
+	uint32_t reserved;
+};
+
+/**
+ * struct cam_ope_device_core_info
+ *
+ * @ope_hw_info: OPE hardware info
+ * @hw_version:  Hardware version
+ * @hw_idx:      Hardware Index
+ * @hw_type:     Hardware Type
+ * @cpas_handle: CPAS Handle
+ * @cpas_start:  CPAS start flag
+ * @clk_enable:  Clock enable flag
+ * @irq_cb:      IRQ Callback
+ */
+struct cam_ope_device_core_info {
+	struct cam_ope_device_hw_info *ope_hw_info;
+	uint32_t hw_version;
+	uint32_t hw_idx;
+	uint32_t hw_type;
+	uint32_t cpas_handle;
+	bool cpas_start;
+	bool clk_enable;
+	struct cam_ope_set_irq_cb irq_cb;
+};
+
+
+/* cam_hw_intf ops implemented by the OPE device layer (ope_core.c) */
+int cam_ope_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_ope_deinit_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_ope_start(void *device_priv,
+	void *start_args, uint32_t arg_size);
+int cam_ope_stop(void *device_priv,
+	void *stop_args, uint32_t arg_size);
+int cam_ope_flush(void *device_priv,
+	void *flush_args, uint32_t arg_size);
+int cam_ope_get_hw_caps(void *device_priv,
+	void *get_hw_cap_args, uint32_t arg_size);
+/* Dispatcher for OPE_HW_* opcodes defined in ope_dev_intf.h */
+int cam_ope_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size);
+/* IRQ handler registered via cam_ope_init_soc_resources() */
+irqreturn_t cam_ope_irq(int irq_num, void *data);
+
+#endif /* CAM_OPE_CORE_H */

+ 253 - 0
drivers/cam_ope/ope_hw_mgr/ope_hw/ope_dev.c

@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include <linux/timer.h>
+#include "ope_core.h"
+#include "ope_soc.h"
+#include "cam_hw.h"
+#include "ope_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_io_util.h"
+#include "cam_ope_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+#include "ope_hw_100.h"
+#include "ope_dev_intf.h"
+
+static struct cam_ope_device_hw_info ope_hw_info;
+/*
+ * Exported for other camera modules; EXPORT_SYMBOL() on a static
+ * variable is rejected by modpost, so it must have external linkage.
+ */
+struct ope_dev_soc ope_soc_info;
+EXPORT_SYMBOL(ope_soc_info);
+
+static struct hw_version_reg ope_hw_version_reg = {
+	.hw_ver = 0x0,
+};
+
+/* "opeN" device name; 8 bytes covers "ope" + one-digit index + NUL */
+static char ope_dev_name[8];
+
+/*
+ * cam_ope_init_hw_version - Latch register bases and detect HW version.
+ *
+ * Publishes the ioremapped bases into core_info, reads the version
+ * register from TOP and binds the matching register table. Returns
+ * -EINVAL for an unsupported version without touching the version-
+ * specific tables (the original fell through and programmed
+ * ope_hw_100 regardless).
+ */
+static int cam_ope_init_hw_version(struct cam_hw_soc_info *soc_info,
+	struct cam_ope_device_core_info *core_info)
+{
+	CAM_DBG(CAM_OPE, "CDM:%x TOP: %x QOS: %x PP: %x RD: %x WR: %x",
+		soc_info->reg_map[OPE_CDM_BASE].mem_base,
+		soc_info->reg_map[OPE_TOP_BASE].mem_base,
+		soc_info->reg_map[OPE_QOS_BASE].mem_base,
+		soc_info->reg_map[OPE_PP_BASE].mem_base,
+		soc_info->reg_map[OPE_BUS_RD].mem_base,
+		soc_info->reg_map[OPE_BUS_WR].mem_base);
+
+	core_info->ope_hw_info->ope_cdm_base =
+		soc_info->reg_map[OPE_CDM_BASE].mem_base;
+	core_info->ope_hw_info->ope_top_base =
+		soc_info->reg_map[OPE_TOP_BASE].mem_base;
+	core_info->ope_hw_info->ope_qos_base =
+		soc_info->reg_map[OPE_QOS_BASE].mem_base;
+	core_info->ope_hw_info->ope_pp_base =
+		soc_info->reg_map[OPE_PP_BASE].mem_base;
+	core_info->ope_hw_info->ope_bus_rd_base =
+		soc_info->reg_map[OPE_BUS_RD].mem_base;
+	core_info->ope_hw_info->ope_bus_wr_base =
+		soc_info->reg_map[OPE_BUS_WR].mem_base;
+
+	core_info->hw_version = cam_io_r_mb(
+			core_info->ope_hw_info->ope_top_base +
+			ope_hw_version_reg.hw_ver);
+
+	switch (core_info->hw_version) {
+	case OPE_HW_VER_1_0_0:
+		core_info->ope_hw_info->ope_hw = &ope_hw_100;
+		break;
+	default:
+		CAM_ERR(CAM_OPE, "Unsupported version : %u",
+			core_info->hw_version);
+		/* leave ope_hw NULL; do not program v1.0 register bases */
+		return -EINVAL;
+	}
+
+	ope_hw_100.top_reg->base = core_info->ope_hw_info->ope_top_base;
+	ope_hw_100.bus_rd_reg->base = core_info->ope_hw_info->ope_bus_rd_base;
+	ope_hw_100.bus_wr_reg->base = core_info->ope_hw_info->ope_bus_wr_base;
+
+	return 0;
+}
+
+/*
+ * cam_ope_register_cpas - Register this OPE instance as a CPAS client.
+ *
+ * On success, stores the CPAS client handle in core_info->cpas_handle.
+ */
+int cam_ope_register_cpas(struct cam_hw_soc_info *soc_info,
+	struct cam_ope_device_core_info *core_info,
+	uint32_t hw_idx)
+{
+	/* zero-init: the original passed uninitialized stack fields to CPAS */
+	struct cam_cpas_register_params cpas_register_params = {0};
+	int rc;
+
+	cpas_register_params.dev = &soc_info->pdev->dev;
+	memcpy(cpas_register_params.identifier, "ope", sizeof("ope"));
+	cpas_register_params.cam_cpas_client_cb = NULL;
+	cpas_register_params.cell_index = hw_idx;
+	cpas_register_params.userdata = NULL;
+
+	rc = cam_cpas_register_client(&cpas_register_params);
+	if (rc < 0) {
+		CAM_ERR(CAM_OPE, "failed: %d", rc);
+		return rc;
+	}
+	core_info->cpas_handle = cpas_register_params.client_handle;
+
+	return rc;
+}
+
+/*
+ * cam_ope_probe - Platform probe for one OPE instance.
+ *
+ * Allocates the hw_intf/hw_info pair, initializes SOC resources,
+ * briefly powers the block to read the HW version, registers with
+ * CPAS, powers back down and issues OPE_HW_PROBE to the sub-modules.
+ */
+int cam_ope_probe(struct platform_device *pdev)
+{
+	struct cam_hw_intf                *ope_dev_intf = NULL;
+	struct cam_hw_info                *ope_dev = NULL;
+	const struct of_device_id         *match_dev = NULL;
+	struct cam_ope_device_core_info   *core_info = NULL;
+	int                                rc = 0;
+	uint32_t hw_idx;
+	struct cam_ope_dev_probe ope_probe;
+
+	/* hw_idx would be uninitialized if the DT property is missing */
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"cell-index", &hw_idx);
+	if (rc) {
+		CAM_ERR(CAM_OPE, "failed to read cell-index: %d", rc);
+		return rc;
+	}
+
+	ope_dev_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!ope_dev_intf)
+		return -ENOMEM;
+
+	ope_dev_intf->hw_idx = hw_idx;
+	ope_dev_intf->hw_type = OPE_DEV_OPE;
+	ope_dev = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!ope_dev) {
+		rc = -ENOMEM;
+		goto ope_dev_alloc_failed;
+	}
+
+	memset(ope_dev_name, 0, sizeof(ope_dev_name));
+	snprintf(ope_dev_name, sizeof(ope_dev_name),
+		"ope%1u", ope_dev_intf->hw_idx);
+
+	ope_dev->soc_info.pdev = pdev;
+	ope_dev->soc_info.dev = &pdev->dev;
+	ope_dev->soc_info.dev_name = ope_dev_name;
+	ope_dev_intf->hw_priv = ope_dev;
+	ope_dev_intf->hw_ops.init = cam_ope_init_hw;
+	ope_dev_intf->hw_ops.deinit = cam_ope_deinit_hw;
+	ope_dev_intf->hw_ops.get_hw_caps = cam_ope_get_hw_caps;
+	ope_dev_intf->hw_ops.start = cam_ope_start;
+	ope_dev_intf->hw_ops.stop = cam_ope_stop;
+	ope_dev_intf->hw_ops.flush = cam_ope_flush;
+	ope_dev_intf->hw_ops.process_cmd = cam_ope_process_cmd;
+
+	CAM_DBG(CAM_OPE, "type %d index %d",
+		ope_dev_intf->hw_type,
+		ope_dev_intf->hw_idx);
+
+	platform_set_drvdata(pdev, ope_dev_intf);
+
+	ope_dev->core_info = kzalloc(sizeof(struct cam_ope_device_core_info),
+		GFP_KERNEL);
+	if (!ope_dev->core_info) {
+		rc = -ENOMEM;
+		goto ope_core_alloc_failed;
+	}
+	core_info = (struct cam_ope_device_core_info *)ope_dev->core_info;
+	core_info->ope_hw_info = &ope_hw_info;
+	ope_dev->soc_info.soc_private = &ope_soc_info;
+
+	match_dev = of_match_device(pdev->dev.driver->of_match_table,
+		&pdev->dev);
+	if (!match_dev) {
+		rc = -EINVAL;
+		CAM_DBG(CAM_OPE, "No ope hardware info");
+		goto ope_match_dev_failed;
+	}
+
+	rc = cam_ope_init_soc_resources(&ope_dev->soc_info, cam_ope_irq,
+		ope_dev);
+	if (rc < 0) {
+		CAM_ERR(CAM_OPE, "failed to init_soc");
+		goto init_soc_failed;
+	}
+
+	rc = cam_ope_enable_soc_resources(&ope_dev->soc_info);
+	if (rc < 0) {
+		CAM_ERR(CAM_OPE, "enable soc resorce failed: %d", rc);
+		goto enable_soc_failed;
+	}
+
+	rc = cam_ope_init_hw_version(&ope_dev->soc_info, ope_dev->core_info);
+	if (rc)
+		goto init_hw_failure;
+
+	core_info->hw_type = OPE_DEV_OPE;
+	core_info->hw_idx = hw_idx;
+	rc = cam_ope_register_cpas(&ope_dev->soc_info,
+		core_info, ope_dev_intf->hw_idx);
+	if (rc < 0)
+		goto register_cpas_failed;
+
+	cam_ope_disable_soc_resources(&ope_dev->soc_info, true);
+	ope_dev->hw_state = CAM_HW_STATE_POWER_DOWN;
+
+	ope_probe.hfi_en = ope_soc_info.hfi_en;
+	cam_ope_process_cmd(ope_dev, OPE_HW_PROBE,
+		&ope_probe, sizeof(ope_probe));
+	mutex_init(&ope_dev->hw_mutex);
+	spin_lock_init(&ope_dev->hw_lock);
+	init_completion(&ope_dev->hw_complete);
+
+	CAM_DBG(CAM_OPE, "OPE%d probe successful",
+		ope_dev_intf->hw_idx);
+	return rc;
+
+register_cpas_failed:
+init_hw_failure:
+	/* SOC resources were enabled above; power back down on failure */
+	cam_ope_disable_soc_resources(&ope_dev->soc_info, true);
+enable_soc_failed:
+init_soc_failed:
+ope_match_dev_failed:
+	kfree(ope_dev->core_info);
+ope_core_alloc_failed:
+	kfree(ope_dev);
+ope_dev_alloc_failed:
+	kfree(ope_dev_intf);
+	return rc;
+}
+
+static const struct of_device_id cam_ope_dt_match[] = {
+	{
+		.compatible = "qcom,ope",
+		.data = &ope_hw_version_reg,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, cam_ope_dt_match);
+
+/*
+ * NOTE(review): no .remove callback — allocations made in probe are
+ * never freed on driver unregister; manual unbind is blocked via
+ * suppress_bind_attrs.
+ */
+static struct platform_driver cam_ope_driver = {
+	.probe = cam_ope_probe,
+	.driver = {
+		.name = "ope",
+		.of_match_table = cam_ope_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+/* Module entry points, invoked by the camera umbrella module */
+int cam_ope_init_module(void)
+{
+	return platform_driver_register(&cam_ope_driver);
+}
+
+void cam_ope_exit_module(void)
+{
+	platform_driver_unregister(&cam_ope_driver);
+}
+
+MODULE_DESCRIPTION("CAM OPE driver");
+MODULE_LICENSE("GPL v2");

+ 174 - 0
drivers/cam_ope/ope_hw_mgr/ope_hw/ope_dev_intf.h

@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_OPE_DEV_INTF_H
+#define CAM_OPE_DEV_INTF_H
+
+#include <media/cam_ope.h>
+#include "cam_ope_hw_mgr.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_cpas_api.h"
+
+
+/* Command opcodes dispatched through cam_ope_process_cmd() and the
+ * per-sub-module *_process() handlers.
+ */
+#define OPE_HW_INIT       0x1
+#define OPE_HW_DEINIT     0x2
+#define OPE_HW_ACQUIRE    0x3
+#define OPE_HW_RELEASE    0x4
+#define OPE_HW_START      0x5
+#define OPE_HW_STOP       0x6
+#define OPE_HW_FLUSH      0x7
+#define OPE_HW_PREPARE    0x8
+#define OPE_HW_ISR        0x9
+#define OPE_HW_PROBE      0xA
+#define OPE_HW_CLK_UPDATE 0xB
+#define OPE_HW_BW_UPDATE  0xC
+#define OPE_HW_RESET      0xD
+#define OPE_HW_SET_IRQ_CB 0xE
+
+/**
+ * struct cam_ope_dev_probe
+ *
+ * @hfi_en: HFI enable flag
+ */
+struct cam_ope_dev_probe {
+	bool hfi_en;
+};
+
+/**
+ * struct cam_ope_dev_init
+ *
+ * @hfi_en:    HFI enable flag
+ * @core_info: OPE core info
+ */
+struct cam_ope_dev_init {
+	bool hfi_en;
+	struct cam_ope_device_core_info *core_info;
+};
+
+/**
+ * struct cam_ope_dev_clk_update
+ *
+ * @clk_rate: Clock rate
+ */
+struct cam_ope_dev_clk_update {
+	uint32_t clk_rate;
+};
+
+/**
+ * struct cam_ope_dev_bw_update
+ *
+ * @ahb_vote:       AHB vote info
+ * @axi_vote:       AXI vote info
+ * @ahb_vote_valid: Flag for ahb vote
+ * @axi_vote_valid: Flag for axi vote
+ */
+struct cam_ope_dev_bw_update {
+	struct cam_ahb_vote ahb_vote;
+	struct cam_axi_vote axi_vote;
+	uint32_t ahb_vote_valid;
+	uint32_t axi_vote_valid;
+};
+
+/**
+ * struct cam_ope_dev_caps
+ *
+ * @hw_idx: Hardware index
+ * @hw_ver: Hardware version info
+ */
+struct cam_ope_dev_caps {
+	uint32_t hw_idx;
+	struct ope_hw_ver hw_ver;
+};
+
+/**
+ * struct cam_ope_dev_acquire
+ *
+ * @ctx_id:      Context id
+ * @ope_acquire: OPE acquire info
+ * @bus_wr_ctx:  Bus Write context
+ * @bus_rd_ctx:  Bus Read context
+ */
+struct cam_ope_dev_acquire {
+	uint32_t ctx_id;
+	struct ope_acquire_dev_info *ope_acquire;
+	struct ope_bus_wr_ctx *bus_wr_ctx;
+	struct ope_bus_rd_ctx *bus_rd_ctx;
+};
+
+/**
+ * struct cam_ope_dev_release
+ *
+ * @ctx_id:      Context id
+ * @bus_wr_ctx:  Bus Write context
+ * @bus_rd_ctx:  Bus Read context
+ */
+struct cam_ope_dev_release {
+	uint32_t ctx_id;
+	struct ope_bus_wr_ctx *bus_wr_ctx;
+	struct ope_bus_rd_ctx *bus_rd_ctx;
+};
+
+/**
+ * struct cam_ope_set_irq_cb
+ *
+ * @ope_hw_mgr_cb: Callback to hardware manager
+ * @data:          Private data
+ */
+struct cam_ope_set_irq_cb {
+	int32_t (*ope_hw_mgr_cb)(uint32_t irq_status, void *data);
+	void *data;
+};
+
+/**
+ * struct cam_ope_irq_data
+ *
+ * @error: IRQ error
+ */
+struct cam_ope_irq_data {
+	uint32_t error;
+};
+
+/**
+ * struct cam_ope_dev_prepare_req
+ *
+ * @hw_mgr:         OPE hardware manager
+ * @packet:         Packet
+ * @prepare_args:   Prepare request args
+ * @ctx_data:       Context data
+ * @wr_cdm_batch:   WM request
+ * @rd_cdm_batch:   RD master request
+ * @frame_process:  Frame process command
+ * @req_idx:        Request Index
+ * @kmd_buf_offset: KMD buffer offset
+ */
+struct cam_ope_dev_prepare_req {
+	struct cam_ope_hw_mgr *hw_mgr;
+	struct cam_packet *packet;
+	struct cam_hw_prepare_update_args *prepare_args;
+	struct cam_ope_ctx *ctx_data;
+	struct ope_bus_wr_io_port_cdm_batch *wr_cdm_batch;
+	struct ope_bus_rd_io_port_cdm_batch *rd_cdm_batch;
+	struct ope_frame_process *frame_process;
+	uint32_t req_idx;
+	uint32_t kmd_buf_offset;
+};
+
+int cam_ope_top_process(struct ope_hw *ope_hw_info,
+	int32_t ctx_id, uint32_t cmd_id, void *data);
+
+int cam_ope_bus_rd_process(struct ope_hw *ope_hw_info,
+	int32_t ctx_id, uint32_t cmd_id, void *data);
+
+int cam_ope_bus_wr_process(struct ope_hw *ope_hw_info,
+	int32_t ctx_id, uint32_t cmd_id, void *data);
+
+int cam_ope_init_module(void);
+void cam_ope_exit_module(void);
+
+int cam_ope_subdev_init_module(void);
+void cam_ope_subdev_exit_module(void);
+
+#endif /* CAM_OPE_DEV_INTF_H */
+

+ 399 - 0
drivers/cam_ope/ope_hw_mgr/ope_hw/ope_hw.h

@@ -0,0 +1,399 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_OPE_HW_H
+#define CAM_OPE_HW_H
+
+#define OPE_HW_VER_1_0_0 0x10000000
+
+#define OPE_DEV_OPE  0
+#define OPE_DEV_MAX  1
+
+#define MAX_RD_CLIENTS   2
+#define MAX_WR_CLIENTS   8
+
+#define OPE_CDM_BASE     0x0
+#define OPE_TOP_BASE     0x1
+#define OPE_QOS_BASE     0x2
+#define OPE_PP_BASE      0x3
+#define OPE_BUS_RD       0x4
+#define OPE_BUS_WR       0x5
+#define OPE_BASE_MAX     0x6
+
+
+#define BUS_RD_COMBO_BAYER_MASK   0x1
+#define BUS_RD_COMBO_YUV_MASK     0x2
+#define BUS_RD_COMBO_MAX          0x2
+
+#define BUS_RD_BAYER         0x0
+#define BUS_RD_YUV           0x1
+
+#define BUS_WR_COMBO_YUV_MASK    0x1
+#define BUS_WR_COMBO_MAX         0x1
+
+#define BUS_WR_YUV           0x0
+
+#define BUS_WR_VIDEO_Y       0x0
+#define BUS_WR_VIDEO_C       0x1
+#define BUS_WR_DISP_Y        0x2
+#define BUS_WR_DISP_C        0x3
+#define BUS_WR_ARGB          0x4
+#define BUS_WR_STATS_RS      0x5
+#define BUS_WR_STATS_IHIST   0x6
+#define BUS_WR_STATS_LTM     0x7
+
+#define OPE_WAIT_COMP_RUP     0x1
+#define OPE_WAIT_COMP_WR_DONE 0x2
+#define OPE_WAIT_COMP_IDLE    0x4
+#define OPE_WAIT_COMP_GEN_IRQ 0x8
+
+struct cam_ope_common {
+	uint32_t mode[CAM_FORMAT_MAX];
+};
+
+struct cam_ope_top_reg {
+	void *base;
+	uint32_t offset;
+	uint32_t hw_version;
+	uint32_t reset_cmd;
+	uint32_t core_clk_cfg_ctrl_0;
+	uint32_t ahb_clk_cgc_ctrl;
+	uint32_t core_cfg;
+	uint32_t irq_status;
+	uint32_t irq_mask;
+	uint32_t irq_clear;
+	uint32_t irq_set;
+	uint32_t irq_cmd;
+	uint32_t violation_status;
+	uint32_t throttle_cnt_cfg;
+};
+
+struct cam_ope_top_reg_val {
+	uint32_t hw_version;
+	uint32_t major_mask;
+	uint32_t major_shift;
+	uint32_t minor_mask;
+	uint32_t minor_shift;
+	uint32_t incr_mask;
+	uint32_t incr_shift;
+	uint32_t irq_mask;
+	uint32_t irq_set_clear;
+	uint32_t sw_reset_cmd;
+	uint32_t hw_reset_cmd;
+	uint32_t core_clk_cfg_ctrl_0;
+	uint32_t ahb_clk_cgc_ctrl;
+	uint32_t input_format;
+	uint32_t input_format_mask;
+	uint32_t color_correct_src_sel;
+	uint32_t color_correct_src_sel_mask;
+	uint32_t stats_ihist_src_sel;
+	uint32_t stats_ihist_src_sel_mask;
+	uint32_t chroma_up_src_sel;
+	uint32_t chroma_up_src_sel_mask;
+	uint32_t argb_alpha;
+	uint32_t argb_alpha_mask;
+	uint32_t rs_throttle_cnt;
+	uint32_t rs_throttle_cnt_mask;
+	uint32_t ihist_throttle_cnt;
+	uint32_t ihist_throttle_cnt_mask;
+	uint32_t rst_done;
+	uint32_t we_done;
+	uint32_t fe_done;
+	uint32_t ope_violation;
+	uint32_t idle;
+};
+
+struct cam_ope_qos_reg {
+	void *base;
+	uint32_t offset;
+	uint32_t hw_version;
+	uint32_t hw_status;
+	uint32_t module_cfg;
+	uint32_t curve_cfg_0;
+	uint32_t curve_cfg_1;
+	uint32_t window_cfg;
+	uint32_t eos_status_0;
+	uint32_t eos_status_1;
+	uint32_t eos_status_2;
+};
+
+struct cam_ope_qos_reg_val {
+	uint32_t hw_version;
+	uint32_t proc_interval;
+	uint32_t proc_interval_mask;
+	uint32_t static_health;
+	uint32_t static_health_mask;
+	uint32_t module_cfg_en;
+	uint32_t module_cfg_en_mask;
+	uint32_t yexp_ymin_dec;
+	uint32_t yexp_ymin_dec_mask;
+	uint32_t ymin_inc;
+	uint32_t ymin_inc_mask;
+	uint32_t initial_delta;
+	uint32_t initial_delta_mask;
+	uint32_t window_cfg;
+};
+
+struct cam_ope_bus_rd_client_reg {
+	uint32_t core_cfg;
+	uint32_t ccif_meta_data;
+	uint32_t img_addr;
+	uint32_t img_cfg;
+	uint32_t stride;
+	uint32_t unpack_cfg;
+	uint32_t latency_buf_allocation;
+	uint32_t misr_cfg_0;
+	uint32_t misr_cfg_1;
+	uint32_t misr_rd_val;
+};
+
+struct cam_ope_bus_rd_reg {
+	void *base;
+	uint32_t offset;
+	uint32_t hw_version;
+	uint32_t sw_reset;
+	uint32_t cgc_override;
+	uint32_t irq_mask;
+	uint32_t irq_clear;
+	uint32_t irq_cmd;
+	uint32_t irq_status;
+	uint32_t input_if_cmd;
+	uint32_t irq_set;
+	uint32_t misr_reset;
+	uint32_t security_cfg;
+	uint32_t iso_cfg;
+	uint32_t iso_seed;
+
+	uint32_t num_clients;
+	struct cam_ope_bus_rd_client_reg rd_clients[MAX_RD_CLIENTS];
+};
+
+struct cam_ope_bus_rd_client_reg_val {
+	uint32_t core_cfg;
+	uint32_t stripe_location;
+	uint32_t stripe_location_mask;
+	uint32_t stripe_location_shift;
+	uint32_t pix_pattern;
+	uint32_t pix_pattern_mask;
+	uint32_t pix_pattern_shift;
+	uint32_t img_addr;
+	uint32_t img_width;
+	uint32_t img_width_mask;
+	uint32_t img_width_shift;
+	uint32_t img_height;
+	uint32_t img_height_mask;
+	uint32_t img_height_shift;
+	uint32_t stride;
+	uint32_t mode;
+	uint32_t mode_mask;
+	uint32_t mode_shift;
+	uint32_t alignment;
+	uint32_t alignment_mask;
+	uint32_t alignment_shift;
+	uint32_t latency_buf_allocation;
+	uint32_t misr_cfg_samp_mode;
+	uint32_t misr_cfg_samp_mode_mask;
+	uint32_t misr_cfg_en;
+	uint32_t misr_cfg_en_mask;
+	uint32_t misr_cfg_1;
+	uint32_t misr_rd_val;
+	uint32_t input_port_id;
+	uint32_t rm_port_id;
+	uint32_t format_type;
+	uint32_t num_combos_supported;
+};
+
+struct cam_ope_bus_rd_reg_val {
+	uint32_t hw_version;
+	uint32_t sw_reset;
+	uint32_t cgc_override;
+	uint32_t irq_mask;
+	uint32_t go_cmd;
+	uint32_t go_cmd_mask;
+	uint32_t ica_en;
+	uint32_t ica_en_mask;
+	uint32_t static_prg;
+	uint32_t static_prg_mask;
+	uint32_t go_cmd_sel;
+	uint32_t go_cmd_sel_mask;
+	uint32_t fs_sync_en;
+	uint32_t fs_sync_en_mask;
+	uint32_t misr_reset;
+	uint32_t security_cfg;
+	uint32_t iso_bpp_select;
+	uint32_t iso_bpp_select_mask;
+	uint32_t iso_pattern_select;
+	uint32_t iso_pattern_select_mask;
+	uint32_t iso_en;
+	uint32_t iso_en_mask;
+	uint32_t iso_seed;
+	uint32_t irq_set_clear;
+	uint32_t rst_done;
+	uint32_t rup_done;
+	uint32_t rd_buf_done;
+	uint32_t violation;
+	uint32_t latency_buf_size;
+
+	uint32_t num_clients;
+	struct cam_ope_bus_rd_client_reg_val rd_clients[MAX_RD_CLIENTS];
+};
+
+struct cam_ope_bus_wr_client_reg {
+	uint32_t core_cfg;
+	uint32_t img_addr;
+	uint32_t img_cfg;
+	uint32_t x_init;
+	uint32_t stride;
+	uint32_t pack_cfg;
+	uint32_t bw_limit;
+	uint32_t frame_header_addr;
+	uint32_t subsample_period;
+	uint32_t subsample_pattern;
+};
+
+struct cam_ope_bus_wr_reg {
+	void *base;
+	uint32_t offset;
+	uint32_t hw_version;
+	uint32_t cgc_override;
+	uint32_t irq_mask_0;
+	uint32_t irq_mask_1;
+	uint32_t irq_clear_0;
+	uint32_t irq_clear_1;
+	uint32_t irq_status_0;
+	uint32_t irq_status_1;
+	uint32_t irq_cmd;
+	uint32_t frame_header_cfg_0;
+	uint32_t local_frame_header_cfg_0;
+	uint32_t irq_set_0;
+	uint32_t irq_set_1;
+	uint32_t iso_cfg;
+	uint32_t violation_status;
+	uint32_t image_size_violation_status;
+	uint32_t misr_cfg_0;
+	uint32_t misr_cfg_1;
+	uint32_t misr_rd_sel;
+	uint32_t misr_reset;
+	uint32_t misr_val;
+	uint32_t num_clients;
+	struct cam_ope_bus_wr_client_reg wr_clients[MAX_WR_CLIENTS];
+};
+
+struct cam_ope_bus_wr_client_reg_val {
+	uint32_t core_cfg_en;
+	uint32_t core_cfg_en_mask;
+	uint32_t core_cfg_en_shift;
+	uint32_t virtual_frame_en;
+	uint32_t virtual_frame_en_mask;
+	uint32_t virtual_frame_en_shift;
+	uint32_t frame_header_en;
+	uint32_t frame_header_en_mask;
+	uint32_t frame_header_en_shift;
+	uint32_t auto_recovery_en;
+	uint32_t auto_recovery_en_mask;
+	uint32_t auto_recovery_en_shift;
+	uint32_t mode;
+	uint32_t mode_mask;
+	uint32_t mode_shift;
+	uint32_t img_addr;
+	uint32_t width;
+	uint32_t width_mask;
+	uint32_t width_shift;
+	uint32_t height;
+	uint32_t height_mask;
+	uint32_t height_shift;
+	uint32_t x_init;
+	uint32_t stride;
+	uint32_t format;
+	uint32_t format_mask;
+	uint32_t format_shift;
+	uint32_t alignment;
+	uint32_t alignment_mask;
+	uint32_t alignment_shift;
+	uint32_t bw_limit_en;
+	uint32_t bw_limit_en_mask;
+	uint32_t bw_limit_counter;
+	uint32_t bw_limit_counter_mask;
+	uint32_t frame_header_addr;
+	uint32_t subsample_period;
+	uint32_t subsample_pattern;
+	uint32_t output_port_id;
+	uint32_t wm_port_id;
+	uint32_t format_type;
+	uint32_t num_combos_supported;
+};
+
+struct cam_ope_bus_wr_reg_val {
+	uint32_t hw_version;
+	uint32_t cgc_override;
+	uint32_t irq_mask_0;
+	uint32_t irq_mask_1;
+	uint32_t irq_set_clear;
+	uint32_t comp_rup_done;
+	uint32_t comp_buf_done;
+	uint32_t cons_violation;
+	uint32_t violation;
+	uint32_t img_size_violation;
+	uint32_t frame_header_cfg_0;
+	uint32_t local_frame_header_cfg_0;
+	uint32_t iso_cfg;
+	uint32_t misr_0_en;
+	uint32_t misr_0_en_mask;
+	uint32_t misr_1_en;
+	uint32_t misr_1_en_mask;
+	uint32_t misr_2_en;
+	uint32_t misr_2_en_mask;
+	uint32_t misr_3_en;
+	uint32_t misr_3_en_mask;
+	uint32_t misr_0_samp_mode;
+	uint32_t misr_0_samp_mode_mask;
+	uint32_t misr_1_samp_mode;
+	uint32_t misr_1_samp_mode_mask;
+	uint32_t misr_2_samp_mode;
+	uint32_t misr_2_samp_mode_mask;
+	uint32_t misr_3_samp_mode;
+	uint32_t misr_3_samp_mode_mask;
+	uint32_t misr_0_id;
+	uint32_t misr_0_id_mask;
+	uint32_t misr_1_id;
+	uint32_t misr_1_id_mask;
+	uint32_t misr_2_id;
+	uint32_t misr_2_id_mask;
+	uint32_t misr_3_id;
+	uint32_t misr_3_id_mask;
+	uint32_t misr_rd_misr_sel;
+	uint32_t misr_rd_misr_sel_mask;
+	uint32_t misr_rd_word_sel;
+	uint32_t misr_rd_word_sel_mask;
+	uint32_t misr_reset;
+	uint32_t misr_val;
+
+
+	uint32_t num_clients;
+	struct cam_ope_bus_wr_client_reg_val wr_clients[MAX_WR_CLIENTS];
+};
+
+/*
+ * struct ope_hw - Aggregate register description for one OPE version.
+ *
+ * Pairs each sub-module's register offset table (*_reg) with its
+ * version-specific values/masks table (*_reg_val); bound to a version
+ * (e.g. ope_hw_100) at HW-version detection time.
+ */
+struct ope_hw {
+	struct cam_ope_top_reg        *top_reg;
+	struct cam_ope_top_reg_val    *top_reg_val;
+
+	struct cam_ope_bus_rd_reg     *bus_rd_reg;
+	struct cam_ope_bus_rd_reg_val *bus_rd_reg_val;
+
+	struct cam_ope_bus_wr_reg     *bus_wr_reg;
+	struct cam_ope_bus_wr_reg_val *bus_wr_reg_val;
+
+	struct cam_ope_qos_reg        *qos_reg;
+	struct cam_ope_qos_reg_val    *qos_reg_val;
+
+	struct cam_ope_common         *common;
+};
+
+/* Offset of the HW version register within the TOP register block */
+struct hw_version_reg {
+	uint32_t hw_ver;
+	uint32_t reserved;
+};
+
+#endif /* CAM_OPE_HW_H */
+ 532 - 0
drivers/cam_ope/ope_hw_mgr/ope_hw/ope_hw_100.h

@@ -0,0 +1,532 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_OPE_HW_100_H
+#define CAM_OPE_HW_100_H
+
+#define OPE_BUS_RD_TYPE_BAYER            0x0
+#define OPE_BUS_RD_TYPE_YUV_Y            0x0
+#define OPE_BUS_RD_TYPE_YUC_C            0x1
+
+#define OPE_BUS_WR_TYPE_VID_Y            0x0
+#define OPE_BUS_WR_TYPE_VID_C            0x1
+#define OPE_BUS_WR_TYPE_DISP_Y           0x2
+#define OPE_BUS_WR_TYPE_DISP_C           0x3
+#define OPE_BUS_WR_TYPE_ARGB             0x4
+#define OPE_BUS_WR_TYPE_RS               0x5
+#define OPE_BUS_WR_TYPE_IHIST            0x6
+#define OPE_BUS_WR_TYPE_LTM              0x7
+
+/*
+ * enum cam_ope_bus_rd_unpacker_format - unpacker format codes programmed
+ * into the bus RD client's unpack_cfg register. Values are the raw HW
+ * encodings and must not be reordered.
+ */
+enum cam_ope_bus_rd_unpacker_format {
+	BUS_RD_VER1_PACKER_FMT_PLAIN_128_BYPASS            = 0x0,
+	BUS_RD_VER1_PACKER_FMT_PLAIN_8                     = 0x1,
+	BUS_RD_VER1_PACKER_FMT_PLAIN_16_10BPP              = 0x2,
+	BUS_RD_VER1_PACKER_FMT_PLAIN_16_12BPP              = 0x3,
+	BUS_RD_VER1_PACKER_FMT_PLAIN_16_14BPP              = 0x4,
+	BUS_RD_VER1_PACKER_FMT_PLAIN_32_20BPP              = 0x5,
+	BUS_RD_VER1_PACKER_FMT_ARGB16_10                   = 0x6,
+	BUS_RD_VER1_PACKER_FMT_ARGB16_12                   = 0x7,
+	BUS_RD_VER1_PACKER_FMT_ARGB16_14                   = 0x8,
+	BUS_RD_VER1_PACKER_FMT_PLAIN_32                    = 0x9,
+	BUS_RD_VER1_PACKER_FMT_PLAIN_64                    = 0xA,
+	BUS_RD_VER1_PACKER_FMT_TP_10                       = 0xB,
+	BUS_RD_VER1_PACKER_FMT_MIPI_8                      = 0xC,
+	BUS_RD_VER1_PACKER_FMT_MIPI_10                     = 0xD,
+	BUS_RD_VER1_PACKER_FMT_MIPI_12                     = 0xE,
+	BUS_RD_VER1_PACKER_FMT_MIPI_14                     = 0xF,
+	BUS_RD_VER1_PACKER_FMT_PLAIN_16_16BPP              = 0x10,
+	BUS_RD_VER1_PACKER_FMT_BYPASS_SWAP                 = 0x11,
+	BUS_RD_VER1_PACKER_FMT_PLAIN_8_SWAP                = 0x12,
+	BUS_RD_VER1_PACKER_FMT_MAX                         = 0x13,
+};
+
+/* OPE TOP register offsets for v1.0. .offset is the block's base offset
+ * within the OPE register space; the remaining fields are offsets
+ * relative to the runtime-assigned .base (set in cam_ope_top_init).
+ */
+static struct cam_ope_top_reg ope_top_reg = {
+	.offset              = 0x400,
+	.hw_version          = 0x0,
+	.reset_cmd           = 0x4,
+	.core_clk_cfg_ctrl_0 = 0x8,
+	.ahb_clk_cgc_ctrl    = 0xC,
+	.core_cfg            = 0x10,
+	.irq_status          = 0x14,
+	.irq_mask            = 0x18,
+	.irq_clear           = 0x1C,
+	.irq_set             = 0x20,
+	.irq_cmd             = 0x24,
+	.violation_status    = 0x28,
+	.throttle_cnt_cfg    = 0x2C,
+};
+
+/* OPE TOP register values/masks for v1.0: hw-version decode fields,
+ * reset commands and IRQ status bit definitions.
+ */
+static struct cam_ope_top_reg_val ope_top_reg_val = {
+	.hw_version    = 0x10000000,
+	.major_mask    = 0xFFFF,
+	.major_shift   = 0x0,
+	/* NOTE(review): minor_mask low bit is 16 but minor_shift is 0xF
+	 * (15); incr_mask low bit is 28 but incr_shift is 0x1B (27).
+	 * Both look off by one — confirm against the OPE v1.0 HPG.
+	 */
+	.minor_mask    = 0x0FFF0000,
+	.minor_shift   = 0xF,
+	.incr_mask     = 0xF0000000,
+	.incr_shift    = 0x1B,
+	.irq_mask      = 0x0000000F,
+	.sw_reset_cmd     = 0x2,
+	.hw_reset_cmd     = 0x1,
+	.irq_set_clear = 0x1,
+	/* irq_status bit meanings */
+	.rst_done      = 0x1,
+	.we_done       = 0x2,
+	.fe_done       = 0x4,
+	.ope_violation = 0x8,
+	.idle          = 0x10,
+};
+
+
+/* Bus RD values/masks for v1.0: IRQ bits, go command and the two read
+ * clients (client 0 handles Bayer or YUV-Y, client 1 YUV-C only; both
+ * map to input port OPE_IN_RES_FULL).
+ */
+static struct cam_ope_bus_rd_reg_val ope_bus_rd_reg_val = {
+	.hw_version = 0x00050000,
+	.sw_reset = 0x1,
+	.cgc_override = 0x0,
+	.irq_mask   = 0x30001,
+	.irq_set_clear = 0x1,
+	.rst_done      = 0x1,
+	.rup_done      = 0x2,
+	.rd_buf_done   = 0xC,
+	.violation     = 0x3000,
+	.go_cmd = 0x1,
+	.security_cfg = 0x0,
+	.latency_buf_size = 4096,
+	.num_clients = 0x2,
+	.rd_clients = {
+		{
+			.core_cfg = 0x1,
+			.stripe_location_mask = 0x3,
+			.stripe_location_shift = 0x0,
+			.pix_pattern_mask = 0x3F,
+			.pix_pattern_shift = 0x2,
+			.img_width_mask = 0xFFFF,
+			.img_width_shift = 0x10,
+			.img_height_mask = 0xFFFF,
+			.img_height_shift = 0x0,
+			.mode_mask = 0x1F,
+			.mode_shift = 0x0,
+			.alignment_mask = 0x1,
+			.alignment_shift = 0x5,
+			.latency_buf_allocation = 4096,
+			.input_port_id = OPE_IN_RES_FULL,
+			.rm_port_id = 0,
+			.format_type = BUS_RD_COMBO_BAYER_MASK |
+				BUS_RD_COMBO_YUV_MASK,
+			.num_combos_supported = 2,
+		},
+		{
+			.core_cfg = 0x1,
+			.stripe_location_mask = 0x3,
+			.stripe_location_shift = 0x0,
+			.pix_pattern_mask = 0x3F,
+			.pix_pattern_shift = 0x2,
+			.img_width_mask = 0xFFFF,
+			.img_width_shift = 0x10,
+			.img_height_mask = 0xFFFF,
+			.img_height_shift = 0x0,
+			.mode_mask = 0x1F,
+			.mode_shift = 0x0,
+			.alignment_mask = 0x1,
+			.alignment_shift = 0x5,
+			.latency_buf_allocation = 4096,
+			.input_port_id = OPE_IN_RES_FULL,
+			.rm_port_id = 1,
+			.format_type = BUS_RD_COMBO_YUV_MASK,
+			.num_combos_supported = 1,
+
+		},
+	},
+};
+
+/* Bus RD register offsets for v1.0. Common block registers first, then
+ * per-client register sets (client 1 mirrors client 0 at +0xA0).
+ */
+static struct cam_ope_bus_rd_reg ope_bus_rd_reg = {
+	.offset = 0x4C00,
+	.hw_version = 0x0,
+	.sw_reset = 0x4,
+	.cgc_override = 0x8,
+	.irq_mask = 0xC,
+	.irq_clear = 0x10,
+	.irq_cmd = 0x14,
+	.irq_status = 0x18,
+	.input_if_cmd = 0x1C,
+	.irq_set = 0x20,
+	.misr_reset = 0x24,
+	.security_cfg = 0x28,
+	.iso_cfg = 0x2C,
+	.iso_seed = 0x30,
+	.num_clients = 0x2,
+	.rd_clients = {
+		{
+			.core_cfg = 0x50,
+			.ccif_meta_data = 0x54,
+			.img_addr = 0x58,
+			.img_cfg = 0x5C,
+			.stride = 0x60,
+			.unpack_cfg = 0x64,
+			.latency_buf_allocation = 0x78,
+			.misr_cfg_0 =  0x80,
+			.misr_cfg_1 =  0x84,
+			.misr_rd_val = 0x88,
+		},
+		{
+			.core_cfg = 0xF0,
+			.ccif_meta_data = 0xF4,
+			.img_addr = 0xF8,
+			.img_cfg = 0xFC,
+			.stride = 0x100,
+			.unpack_cfg = 0x104,
+			.latency_buf_allocation = 0x118,
+			.misr_cfg_0 =  0x120,
+			.misr_cfg_1 =  0x124,
+			.misr_rd_val = 0x128,
+		},
+	},
+};
+
+/* Bus WR register offsets for v1.0. Common block registers first, then
+ * eight write-client register sets spaced 0x100 apart (0x200..0x900).
+ */
+static struct cam_ope_bus_wr_reg ope_bus_wr_reg = {
+	.offset = 0x4D90,
+	.hw_version = 0x0,
+	.cgc_override = 0x8,
+	.irq_mask_0 = 0x18,
+	.irq_mask_1 = 0x1C,
+	.irq_clear_0 = 0x20,
+	.irq_clear_1 = 0x24,
+	.irq_status_0 = 0x28,
+	.irq_status_1 = 0x2C,
+	.irq_cmd = 0x30,
+	.frame_header_cfg_0 = 0x34,
+	.local_frame_header_cfg_0 = 0x4C,
+	.irq_set_0 = 0x50,
+	.irq_set_1 = 0x54,
+	.iso_cfg = 0x5C,
+	.violation_status = 0x64,
+	.image_size_violation_status  = 0x70,
+	.misr_cfg_0 = 0xB8,
+	.misr_cfg_1 = 0xBC,
+	.misr_rd_sel = 0xC8,
+	.misr_reset = 0xCC,
+	.misr_val = 0xD0,
+	.num_clients = 0x8,
+	.wr_clients = {
+		{
+			.core_cfg = 0x200,
+			.img_addr = 0x204,
+			.img_cfg = 0x20C,
+			.x_init = 0x210,
+			.stride = 0x214,
+			.pack_cfg = 0x218,
+			.bw_limit = 0x21C,
+			.frame_header_addr = 0x220,
+			.subsample_period = 0x230,
+			.subsample_pattern = 0x234,
+		},
+		{
+			.core_cfg = 0x300,
+			.img_addr = 0x304,
+			.img_cfg = 0x30C,
+			.x_init = 0x310,
+			.stride = 0x314,
+			.pack_cfg = 0x318,
+			.bw_limit = 0x31C,
+			.frame_header_addr = 0x320,
+			.subsample_period = 0x330,
+			.subsample_pattern = 0x334,
+		},
+		{
+			.core_cfg = 0x400,
+			.img_addr = 0x404,
+			.img_cfg = 0x40C,
+			.x_init = 0x410,
+			.stride = 0x414,
+			.pack_cfg = 0x418,
+			.bw_limit = 0x41C,
+			.frame_header_addr = 0x420,
+			.subsample_period = 0x430,
+			.subsample_pattern = 0x434,
+		},
+		{
+			.core_cfg = 0x500,
+			.img_addr = 0x504,
+			.img_cfg = 0x50C,
+			.x_init = 0x510,
+			.stride = 0x514,
+			.pack_cfg = 0x518,
+			.bw_limit = 0x51C,
+			.frame_header_addr = 0x520,
+			.subsample_period = 0x530,
+			.subsample_pattern = 0x534,
+		},
+		{
+			.core_cfg = 0x600,
+			.img_addr = 0x604,
+			.img_cfg = 0x60C,
+			.x_init = 0x610,
+			.stride = 0x614,
+			.pack_cfg = 0x618,
+			.bw_limit = 0x61C,
+			.frame_header_addr = 0x620,
+			.subsample_period = 0x630,
+			.subsample_pattern = 0x634,
+		},
+		{
+			.core_cfg = 0x700,
+			.img_addr = 0x704,
+			.img_cfg = 0x70C,
+			.x_init = 0x710,
+			.stride = 0x714,
+			.pack_cfg = 0x718,
+			.bw_limit = 0x71C,
+			.frame_header_addr = 0x720,
+			.subsample_period = 0x730,
+			.subsample_pattern = 0x734,
+		},
+		{
+			.core_cfg = 0x800,
+			.img_addr = 0x804,
+			.img_cfg = 0x80C,
+			.x_init = 0x810,
+			.stride = 0x814,
+			.pack_cfg = 0x818,
+			.bw_limit = 0x81C,
+			.frame_header_addr = 0x820,
+			.subsample_period = 0x830,
+			.subsample_pattern = 0x834,
+		},
+		{
+			.core_cfg = 0x900,
+			.img_addr = 0x904,
+			.img_cfg = 0x90C,
+			.x_init = 0x910,
+			.stride = 0x914,
+			.pack_cfg = 0x918,
+			.bw_limit = 0x91C,
+			.frame_header_addr = 0x920,
+			.subsample_period = 0x930,
+			.subsample_pattern = 0x934,
+		},
+	},
+};
+
+/* Bus WR values/masks for v1.0. Eight write clients in ope_hw port
+ * order: VIDEO Y/C, DISP Y/C, ARGB, STATS RS/IHIST/LTM.
+ * NOTE(review): every client, including ARGB and the stats ports,
+ * carries .format_type = BUS_WR_COMBO_YUV_MASK — looks copy-pasted;
+ * confirm the intended combo masks for the non-YUV ports.
+ */
+static struct cam_ope_bus_wr_reg_val ope_bus_wr_reg_val = {
+	.hw_version = 0x20010000,
+	.irq_mask_0 = 0xD0000000,
+	.irq_mask_1 = 0x0,
+	.irq_set_clear = 0x1,
+	.comp_rup_done = 0x1,
+	.comp_buf_done = 0x100,
+	.cons_violation = 0x10000000,
+	.violation = 0x40000000,
+	.img_size_violation = 0x80000000,
+	.num_clients = 0x8,
+	.wr_clients = {
+		{
+			.core_cfg_en = 0x1,
+			.core_cfg_en_mask = 0x1,
+			.core_cfg_en_shift = 0x0,
+			.virtual_frame_en_mask = 0x1,
+			.virtual_frame_en_shift = 0x1,
+			.frame_header_en_mask = 0x1,
+			.frame_header_en_shift = 0x2,
+			.auto_recovery_en_mask = 0x1,
+			.auto_recovery_en_shift = 0x4,
+			.mode_mask = 0x3,
+			.mode_shift = 0x10,
+			.width_mask = 0xFFFF,
+			.width_shift = 0x0,
+			.height_mask = 0xFFFF,
+			.height_shift = 0x10,
+			.format_mask = 0xF,
+			.format_shift = 0x0,
+			.alignment_mask = 0x1,
+			.alignment_shift = 0x4,
+			.output_port_id = OPE_OUT_RES_VIDEO,
+			.wm_port_id = BUS_WR_VIDEO_Y,
+			.format_type = BUS_WR_COMBO_YUV_MASK,
+			.num_combos_supported = 1,
+		},
+		{
+			.core_cfg_en = 0x1,
+			.core_cfg_en_mask = 0x1,
+			.core_cfg_en_shift = 0x0,
+			.virtual_frame_en_mask = 0x1,
+			.virtual_frame_en_shift = 0x1,
+			.frame_header_en_mask = 0x1,
+			.frame_header_en_shift = 0x2,
+			.auto_recovery_en_mask = 0x1,
+			.auto_recovery_en_shift = 0x4,
+			.mode_mask = 0x3,
+			.mode_shift = 0x10,
+			.width_mask = 0xFFFF,
+			.width_shift = 0x0,
+			.height_mask = 0xFFFF,
+			.height_shift = 0x10,
+			.format_mask = 0xF,
+			.format_shift = 0x0,
+			.alignment_mask = 0x1,
+			.alignment_shift = 0x4,
+			.output_port_id = OPE_OUT_RES_VIDEO,
+			.wm_port_id = BUS_WR_VIDEO_C,
+			.format_type = BUS_WR_COMBO_YUV_MASK,
+			.num_combos_supported = 1,
+		},
+		{
+			.core_cfg_en = 0x1,
+			.core_cfg_en_mask = 0x1,
+			.core_cfg_en_shift = 0x0,
+			.virtual_frame_en_mask = 0x1,
+			.virtual_frame_en_shift = 0x1,
+			.frame_header_en_mask = 0x1,
+			.frame_header_en_shift = 0x2,
+			.auto_recovery_en_mask = 0x1,
+			.auto_recovery_en_shift = 0x4,
+			.mode_mask = 0x3,
+			.mode_shift = 0x10,
+			.width_mask = 0xFFFF,
+			.width_shift = 0x0,
+			.height_mask = 0xFFFF,
+			.height_shift = 0x10,
+			.format_mask = 0xF,
+			.format_shift = 0x0,
+			.alignment_mask = 0x1,
+			.alignment_shift = 0x4,
+			.output_port_id = OPE_OUT_RES_DISP,
+			.wm_port_id = BUS_WR_DISP_Y,
+			.format_type = BUS_WR_COMBO_YUV_MASK,
+			.num_combos_supported = 1,
+		},
+		{
+			.core_cfg_en = 0x1,
+			.core_cfg_en_mask = 0x1,
+			.core_cfg_en_shift = 0x0,
+			.virtual_frame_en_mask = 0x1,
+			.virtual_frame_en_shift = 0x1,
+			.frame_header_en_mask = 0x1,
+			.frame_header_en_shift = 0x2,
+			.auto_recovery_en_mask = 0x1,
+			.auto_recovery_en_shift = 0x4,
+			.mode_mask = 0x3,
+			.mode_shift = 0x10,
+			.width_mask = 0xFFFF,
+			.width_shift = 0x0,
+			.height_mask = 0xFFFF,
+			.height_shift = 0x10,
+			.format_mask = 0xF,
+			.format_shift = 0x0,
+			.alignment_mask = 0x1,
+			.alignment_shift = 0x4,
+			.output_port_id = OPE_OUT_RES_DISP,
+			.wm_port_id = BUS_WR_DISP_C,
+			.format_type = BUS_WR_COMBO_YUV_MASK,
+			.num_combos_supported = 1,
+		},
+		{
+			.core_cfg_en = 0x1,
+			.core_cfg_en_mask = 0x1,
+			.core_cfg_en_shift = 0x0,
+			.virtual_frame_en_mask = 0x1,
+			.virtual_frame_en_shift = 0x1,
+			.frame_header_en_mask = 0x1,
+			.frame_header_en_shift = 0x2,
+			.auto_recovery_en_mask = 0x1,
+			.auto_recovery_en_shift = 0x4,
+			.mode_mask = 0x3,
+			.mode_shift = 0x10,
+			.width_mask = 0xFFFF,
+			.width_shift = 0x0,
+			.height_mask = 0xFFFF,
+			.height_shift = 0x10,
+			.format_mask = 0xF,
+			.format_shift = 0x0,
+			.alignment_mask = 0x1,
+			.alignment_shift = 0x4,
+			.output_port_id = OPE_OUT_RES_ARGB,
+			.wm_port_id = BUS_WR_ARGB,
+			.format_type = BUS_WR_COMBO_YUV_MASK,
+			.num_combos_supported = 1,
+		},
+		{
+			.core_cfg_en = 0x1,
+			.core_cfg_en_mask = 0x1,
+			.core_cfg_en_shift = 0x0,
+			.virtual_frame_en_mask = 0x1,
+			.virtual_frame_en_shift = 0x1,
+			.frame_header_en_mask = 0x1,
+			.frame_header_en_shift = 0x2,
+			.auto_recovery_en_mask = 0x1,
+			.auto_recovery_en_shift = 0x4,
+			.mode_mask = 0x3,
+			.mode_shift = 0x10,
+			.width_mask = 0xFFFF,
+			.width_shift = 0x0,
+			.height_mask = 0xFFFF,
+			.height_shift = 0x10,
+			.format_mask = 0xF,
+			.format_shift = 0x0,
+			.alignment_mask = 0x1,
+			.alignment_shift = 0x4,
+			.output_port_id = OPE_OUT_RES_STATS_RS,
+			.wm_port_id = BUS_WR_STATS_RS,
+			.format_type = BUS_WR_COMBO_YUV_MASK,
+			.num_combos_supported = 1,
+		},
+		{
+			.core_cfg_en = 0x1,
+			.core_cfg_en_mask = 0x1,
+			.core_cfg_en_shift = 0x0,
+			.virtual_frame_en_mask = 0x1,
+			.virtual_frame_en_shift = 0x1,
+			.frame_header_en_mask = 0x1,
+			.frame_header_en_shift = 0x2,
+			.auto_recovery_en_mask = 0x1,
+			.auto_recovery_en_shift = 0x4,
+			.mode_mask = 0x3,
+			.mode_shift = 0x10,
+			.width_mask = 0xFFFF,
+			.width_shift = 0x0,
+			.height_mask = 0xFFFF,
+			.height_shift = 0x10,
+			.format_mask = 0xF,
+			.format_shift = 0x0,
+			.alignment_mask = 0x1,
+			.alignment_shift = 0x4,
+			.output_port_id = OPE_OUT_RES_STATS_IHIST,
+			.wm_port_id = BUS_WR_STATS_IHIST,
+			.format_type = BUS_WR_COMBO_YUV_MASK,
+			.num_combos_supported = 1,
+		},
+		{
+			.core_cfg_en = 0x1,
+			.core_cfg_en_mask = 0x1,
+			.core_cfg_en_shift = 0x0,
+			.virtual_frame_en_mask = 0x1,
+			.virtual_frame_en_shift = 0x1,
+			.frame_header_en_mask = 0x1,
+			.frame_header_en_shift = 0x2,
+			.auto_recovery_en_mask = 0x1,
+			.auto_recovery_en_shift = 0x4,
+			.mode_mask = 0x3,
+			.mode_shift = 0x10,
+			.width_mask = 0xFFFF,
+			.width_shift = 0x0,
+			.height_mask = 0xFFFF,
+			.height_shift = 0x10,
+			.format_mask = 0xF,
+			.format_shift = 0x0,
+			.alignment_mask = 0x1,
+			.alignment_shift = 0x4,
+			.output_port_id = OPE_OUT_RES_STATS_LTM,
+			.wm_port_id = BUS_WR_STATS_LTM,
+			.format_type = BUS_WR_COMBO_YUV_MASK,
+			.num_combos_supported = 1,
+		},
+	},
+};
+/* Register description bundle for OPE v1.0.
+ * NOTE(review): qos_reg, qos_reg_val and common are left NULL here
+ * (static storage zero-init) — confirm all consumers NULL-check them.
+ */
+static struct ope_hw ope_hw_100 = {
+	.top_reg     = &ope_top_reg,
+	.top_reg_val = &ope_top_reg_val,
+	.bus_rd_reg  = &ope_bus_rd_reg,
+	.bus_rd_reg_val  = &ope_bus_rd_reg_val,
+	.bus_wr_reg  = &ope_bus_wr_reg,
+	.bus_wr_reg_val  = &ope_bus_wr_reg_val,
+};
+
+#endif /* CAM_OPE_HW_100_H */

+ 136 - 0
drivers/cam_ope/ope_hw_mgr/ope_hw/ope_soc.c

@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+#include <media/cam_defs.h>
+#include <media/cam_icp.h>
+#include "ope_soc.h"
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+
+
+/*
+ * cam_ope_get_dt_properties - read OPE device-tree properties
+ * @soc_info: SOC info whose soc_private must point to an ope_dev_soc
+ *
+ * Reads the common camera DT properties, then the OPE-specific
+ * "hfi_en" flag. Returns 0 on success or a negative error code.
+ */
+static int cam_ope_get_dt_properties(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+	struct platform_device *pdev = NULL;
+	struct device_node *of_node = NULL;
+	struct ope_dev_soc *ope_soc_info;
+
+	if (!soc_info) {
+		CAM_ERR(CAM_OPE, "soc_info is NULL");
+		return -EINVAL;
+	}
+
+	pdev = soc_info->pdev;
+	of_node = pdev->dev.of_node;
+
+	/* soc_private is owned by the caller; guard before dereference */
+	ope_soc_info = soc_info->soc_private;
+	if (!ope_soc_info) {
+		CAM_ERR(CAM_OPE, "soc_private is NULL");
+		return -EINVAL;
+	}
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc < 0)
+		CAM_ERR(CAM_OPE, "get ope dt prop is failed: %d", rc);
+
+	ope_soc_info->hfi_en = of_property_read_bool(of_node, "hfi_en");
+
+	return rc;
+}
+
+/*
+ * Thin wrapper: forwards the IRQ handler and cookie to the common SOC
+ * utility that requests clocks, regulators, memory regions and the IRQ.
+ */
+static int cam_ope_request_platform_resource(
+	struct cam_hw_soc_info *soc_info,
+	irq_handler_t ope_irq_handler, void *irq_data)
+{
+	return cam_soc_util_request_platform_resource(soc_info,
+		ope_irq_handler, irq_data);
+}
+
+/*
+ * cam_ope_init_soc_resources - one-time SOC setup for the OPE device.
+ * Parses device-tree properties, then claims platform resources and
+ * registers the IRQ handler. Returns 0 on success, negative on error.
+ */
+int cam_ope_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t ope_irq_handler, void *irq_data)
+{
+	int rc;
+
+	rc = cam_ope_get_dt_properties(soc_info);
+	if (rc < 0)
+		return rc;
+
+	return cam_ope_request_platform_resource(soc_info,
+		ope_irq_handler, irq_data);
+}
+
+/*
+ * cam_ope_enable_soc_resources - power up the OPE block.
+ * Enables regulators/clocks (at CAM_SVS_VOTE) and the IRQ line via the
+ * common SOC utility. Returns 0 on success, negative on error.
+ */
+int cam_ope_enable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc;
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		CAM_SVS_VOTE, true);
+	if (rc)
+		CAM_ERR(CAM_OPE, "enable platform failed");
+
+	return rc;
+}
+
+/*
+ * cam_ope_disable_soc_resources - power down the OPE block.
+ * @soc_info:    SOC resource info
+ * @disable_clk: also gate the clocks when true
+ *
+ * Returns 0 on success or the error from the common SOC utility.
+ */
+int cam_ope_disable_soc_resources(struct cam_hw_soc_info *soc_info,
+	bool disable_clk)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, disable_clk,
+		true);
+	if (rc)
+		/* fixed: message previously said "enable platform failed" */
+		CAM_ERR(CAM_OPE, "disable platform failed");
+
+	return rc;
+}
+
+/*
+ * cam_ope_update_clk_rate - program the OPE source clock rate
+ * @soc_info: SOC resource info
+ * @clk_rate: requested rate
+ *
+ * Caps the request at the TURBO-level rate of the source clock when
+ * that level is valid, then applies it. Returns 0 on success or a
+ * negative error code.
+ */
+int cam_ope_update_clk_rate(struct cam_hw_soc_info *soc_info,
+	uint32_t clk_rate)
+{
+	int32_t src_clk_idx;
+
+	if (!soc_info) {
+		CAM_ERR(CAM_OPE, "Invalid soc info");
+		return -EINVAL;
+	}
+
+	src_clk_idx = soc_info->src_clk_idx;
+
+	if ((soc_info->clk_level_valid[CAM_TURBO_VOTE] == true) &&
+		(soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx] != 0) &&
+		(clk_rate > soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx])) {
+		/* clamp to the highest supported (TURBO) rate;
+		 * %u fixes the old %d used for the unsigned clk_rate
+		 */
+		CAM_DBG(CAM_OPE, "clk_rate %u greater than max, reset to %d",
+			clk_rate,
+			soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx]);
+		clk_rate = soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx];
+	}
+
+	CAM_DBG(CAM_OPE, "clk_rate = %u src_clk_index = %d",
+		clk_rate, src_clk_idx);
+	return cam_soc_util_set_src_clk_rate(soc_info, clk_rate);
+}
+
+/*
+ * cam_ope_toggle_clk - gate or ungate the default OPE clocks.
+ * Enable failures are reported; disable is fire-and-forget (always 0).
+ */
+int cam_ope_toggle_clk(struct cam_hw_soc_info *soc_info, bool clk_enable)
+{
+	if (!clk_enable) {
+		cam_soc_util_clk_disable_default(soc_info);
+		return 0;
+	}
+
+	return cam_soc_util_clk_enable_default(soc_info, CAM_SVS_VOTE);
+}

+ 33 - 0
drivers/cam_ope/ope_hw_mgr/ope_hw/ope_soc.h

@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_OPE_SOC_H
+#define CAM_OPE_SOC_H
+
+#include "cam_soc_util.h"
+
+/**
+ * struct ope_dev_soc
+ *
+ * @hfi_en: HFI enable flag
+ */
+struct ope_dev_soc {
+	uint32_t hfi_en;
+};
+
+/* Parse DT properties and claim platform resources + IRQ. */
+int cam_ope_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t ope_irq_handler, void *irq_data);
+
+
+/* Power up regulators/clocks (SVS level) and enable the IRQ. */
+int cam_ope_enable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+/* Power down; @disable_clk also gates the clocks. */
+int cam_ope_disable_soc_resources(struct cam_hw_soc_info *soc_info,
+	bool disable_clk);
+
+/* Set the source clock rate, capped at the TURBO level. */
+int cam_ope_update_clk_rate(struct cam_hw_soc_info *soc_info,
+	uint32_t clk_rate);
+
+/* Enable (SVS) or disable the default clock set. */
+int cam_ope_toggle_clk(struct cam_hw_soc_info *soc_info, bool clk_enable);
+#endif /* CAM_OPE_SOC_H */

+ 246 - 0
drivers/cam_ope/ope_hw_mgr/ope_hw/top/ope_top.c

@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/iopoll.h>
+#include <linux/completion.h>
+#include <media/cam_ope.h>
+#include "cam_io_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "ope_core.h"
+#include "ope_soc.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+#include "ope_hw.h"
+#include "ope_dev_intf.h"
+#include "ope_top.h"
+
+/* File-scope OPE TOP state: hw description, per-context acquire info
+ * and the reset completion signalled from the ISR.
+ */
+static struct ope_top ope_top_info;
+
+/*
+ * cam_ope_top_reset - software-reset the OPE core.
+ * Unmasks the TOP IRQs, issues the SW reset command and waits (30 ms)
+ * for the ISR to signal reset-done. Returns 0 on success, -ETIMEDOUT
+ * if the reset-done interrupt never arrives. ctx_id/data are unused.
+ */
+static int cam_ope_top_reset(struct ope_hw *ope_hw_info,
+	int32_t ctx_id, void *data)
+{
+	int rc = 0;
+	unsigned long rem_jiffies;
+	struct cam_ope_top_reg *top_reg;
+	struct cam_ope_top_reg_val *top_reg_val;
+
+	if (!ope_hw_info) {
+		CAM_ERR(CAM_OPE, "Invalid ope_hw_info");
+		return -EINVAL;
+	}
+
+	top_reg = ope_hw_info->top_reg;
+	top_reg_val = ope_hw_info->top_reg_val;
+
+	init_completion(&ope_top_info.reset_complete);
+
+	/* enable interrupt mask */
+	cam_io_w_mb(top_reg_val->irq_mask,
+		top_reg->base + top_reg->irq_mask);
+
+	/* OPE SW RESET */
+	cam_io_w_mb(top_reg_val->sw_reset_cmd,
+		top_reg->base + top_reg->reset_cmd);
+
+	/*
+	 * wait_for_completion_timeout() returns an unsigned long: 0 on
+	 * timeout, remaining jiffies otherwise. It is never negative, so
+	 * the old "rc < 0" branch was dead code.
+	 */
+	rem_jiffies = wait_for_completion_timeout(
+			&ope_top_info.reset_complete,
+			msecs_to_jiffies(30));
+	if (!rem_jiffies) {
+		rc = -ETIMEDOUT;
+		CAM_ERR(CAM_OPE, "ope reset timed out");
+	}
+
+	return rc;
+}
+
+/*
+ * cam_ope_top_release - drop the acquire info for a context.
+ * Bounds-checks ctx_id against OPE_CTX_MAX before indexing top_ctx[]
+ * (the original only rejected negative ids, allowing an out-of-bounds
+ * write for ctx_id >= OPE_CTX_MAX).
+ */
+static int cam_ope_top_release(struct ope_hw *ope_hw_info,
+	int32_t ctx_id, void *data)
+{
+	int rc = 0;
+
+	if (ctx_id < 0 || ctx_id >= OPE_CTX_MAX) {
+		CAM_ERR(CAM_OPE, "Invalid data: %d", ctx_id);
+		return -EINVAL;
+	}
+
+	ope_top_info.top_ctx[ctx_id].ope_acquire = NULL;
+
+	return rc;
+}
+
+/*
+ * cam_ope_top_acquire - record the acquire info for a context.
+ * @data: points to the caller's ope_acquire_dev_info.
+ *
+ * Fixes two defects: ctx_id had no upper-bound check (out-of-bounds
+ * write into top_ctx[]), and the pointer was logged with %x, which is
+ * a format mismatch — use the kernel's %pK instead.
+ */
+static int cam_ope_top_acquire(struct ope_hw *ope_hw_info,
+	int32_t ctx_id, void *data)
+{
+	int rc = 0;
+
+	if (ctx_id < 0 || ctx_id >= OPE_CTX_MAX || !data) {
+		CAM_ERR(CAM_OPE, "Invalid data: %d %pK", ctx_id, data);
+		return -EINVAL;
+	}
+
+	ope_top_info.top_ctx[ctx_id].ope_acquire = data;
+
+	return rc;
+}
+
+/*
+ * cam_ope_top_init - map the TOP register base and reset the core.
+ * @data: struct cam_ope_dev_init holding the iomapped OPE base.
+ *
+ * Stores the runtime register base, unmasks the TOP IRQs, issues a SW
+ * reset and waits (30 ms) for the ISR to signal completion. Adds a
+ * NULL check on @data (it was dereferenced unchecked) and fixes the
+ * dead "rc < 0" branch on wait_for_completion_timeout().
+ */
+static int cam_ope_top_init(struct ope_hw *ope_hw_info,
+	int32_t ctx_id, void *data)
+{
+	int rc = 0;
+	unsigned long rem_jiffies;
+	struct cam_ope_top_reg *top_reg;
+	struct cam_ope_top_reg_val *top_reg_val;
+	struct cam_ope_dev_init *dev_init = data;
+
+	if (!ope_hw_info || !data) {
+		CAM_ERR(CAM_OPE, "Invalid args: %pK %pK",
+			ope_hw_info, data);
+		return -EINVAL;
+	}
+
+	top_reg = ope_hw_info->top_reg;
+	top_reg_val = ope_hw_info->top_reg_val;
+
+	top_reg->base = dev_init->core_info->ope_hw_info->ope_top_base;
+
+	/* OPE SW RESET */
+	init_completion(&ope_top_info.reset_complete);
+
+	/* enable interrupt mask */
+	cam_io_w_mb(top_reg_val->irq_mask,
+		top_reg->base + top_reg->irq_mask);
+
+	cam_io_w_mb(top_reg_val->sw_reset_cmd,
+		top_reg->base + top_reg->reset_cmd);
+
+	/* returns 0 on timeout, remaining jiffies otherwise */
+	rem_jiffies = wait_for_completion_timeout(
+			&ope_top_info.reset_complete,
+			msecs_to_jiffies(30));
+	if (!rem_jiffies) {
+		rc = -ETIMEDOUT;
+		CAM_ERR(CAM_OPE, "ope init reset timed out");
+	}
+
+	return rc;
+}
+
+/*
+ * cam_ope_top_probe - bind the module's state to the hw description.
+ * ctx_id and data are unused at probe time.
+ */
+static int cam_ope_top_probe(struct ope_hw *ope_hw_info,
+	int32_t ctx_id, void *data)
+{
+	if (!ope_hw_info) {
+		CAM_ERR(CAM_OPE, "Invalid ope_hw_info");
+		return -EINVAL;
+	}
+
+	ope_top_info.ope_hw_info = ope_hw_info;
+
+	return 0;
+}
+
+/*
+ * cam_ope_top_isr - TOP half of the OPE interrupt.
+ * @data: struct cam_ope_irq_data; ->error is set on a violation.
+ *
+ * Reads and clears the TOP IRQ status, completes the reset waiter on
+ * reset-done, and flags violations. Adds a NULL check on @data: the
+ * original dereferenced irq_data in the violation path unchecked.
+ */
+static int cam_ope_top_isr(struct ope_hw *ope_hw_info,
+	int32_t ctx_id, void *data)
+{
+	int rc = 0;
+	uint32_t irq_status;
+	uint32_t violation_status;
+	struct cam_ope_top_reg *top_reg;
+	struct cam_ope_top_reg_val *top_reg_val;
+	struct cam_ope_irq_data *irq_data = data;
+
+	if (!ope_hw_info || !irq_data) {
+		CAM_ERR(CAM_OPE, "Invalid args: %pK %pK",
+			ope_hw_info, irq_data);
+		return -EINVAL;
+	}
+
+	top_reg = ope_hw_info->top_reg;
+	top_reg_val = ope_hw_info->top_reg_val;
+
+	/* Read and Clear Top Interrupt status */
+	irq_status = cam_io_r_mb(top_reg->base + top_reg->irq_status);
+	cam_io_w_mb(irq_status,
+		top_reg->base + top_reg->irq_clear);
+
+	/* latch the clear into effect */
+	cam_io_w_mb(top_reg_val->irq_set_clear,
+		top_reg->base + top_reg->irq_cmd);
+
+	if (irq_status & top_reg_val->rst_done) {
+		CAM_DBG(CAM_OPE, "ope reset done");
+		complete(&ope_top_info.reset_complete);
+	}
+
+	if (irq_status & top_reg_val->ope_violation) {
+		violation_status = cam_io_r_mb(top_reg->base +
+			top_reg->violation_status);
+		irq_data->error = 1;
+		CAM_ERR(CAM_OPE, "ope violation: %x", violation_status);
+	}
+
+	return rc;
+}
+
+/*
+ * cam_ope_top_process - dispatch a TOP command.
+ * @ope_hw_info: hw register description
+ * @ctx_id:      context the command applies to (ignored by ISR/RESET)
+ * @cmd_id:      OPE_HW_* command code
+ * @data:        command-specific payload (see each handler)
+ *
+ * Unhandled commands (DEINIT/PREPARE/START/STOP/FLUSH and unknown ids)
+ * return 0 — no-ops for the TOP block.
+ */
+int cam_ope_top_process(struct ope_hw *ope_hw_info,
+	int32_t ctx_id, uint32_t cmd_id, void *data)
+{
+	int rc = 0;
+
+	switch (cmd_id) {
+	case OPE_HW_PROBE:
+		CAM_DBG(CAM_OPE, "OPE_HW_PROBE: E");
+		rc = cam_ope_top_probe(ope_hw_info, ctx_id, data);
+		CAM_DBG(CAM_OPE, "OPE_HW_PROBE: X");
+		break;
+	case OPE_HW_INIT:
+		CAM_DBG(CAM_OPE, "OPE_HW_INIT: E");
+		rc = cam_ope_top_init(ope_hw_info, ctx_id, data);
+		CAM_DBG(CAM_OPE, "OPE_HW_INIT: X");
+		break;
+	case OPE_HW_DEINIT:
+		break;
+	case OPE_HW_ACQUIRE:
+		CAM_DBG(CAM_OPE, "OPE_HW_ACQUIRE: E");
+		rc = cam_ope_top_acquire(ope_hw_info, ctx_id, data);
+		CAM_DBG(CAM_OPE, "OPE_HW_ACQUIRE: X");
+		break;
+	case OPE_HW_PREPARE:
+		break;
+	case OPE_HW_RELEASE:
+		rc = cam_ope_top_release(ope_hw_info, ctx_id, data);
+		break;
+	case OPE_HW_START:
+		break;
+	case OPE_HW_STOP:
+		break;
+	case OPE_HW_FLUSH:
+		break;
+	case OPE_HW_ISR:
+		/* ISR and RESET are context-agnostic: ctx_id forced to 0 */
+		rc = cam_ope_top_isr(ope_hw_info, 0, data);
+		break;
+	case OPE_HW_RESET:
+		rc = cam_ope_top_reset(ope_hw_info, 0, 0);
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}

+ 41 - 0
drivers/cam_ope/ope_hw_mgr/ope_hw/top/ope_top.h

@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef OPE_TOP_H
+#define OPE_TOP_H
+
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <media/cam_ope.h>
+#include "ope_hw.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_hw_intf.h"
+#include "cam_soc_util.h"
+#include "cam_context.h"
+#include "cam_ope_context.h"
+#include "cam_ope_hw_mgr.h"
+
+/**
+ * struct ope_top_ctx
+ *
+ * @ope_acquire: OPE acquire info
+ */
+struct ope_top_ctx {
+	struct ope_acquire_dev_info *ope_acquire;
+};
+
+/**
+ * struct ope_top
+ *
+ * @ope_hw_info:    OPE hardware info
+ * @top_ctx:        OPE top context, indexed by ctx_id (< OPE_CTX_MAX)
+ * @reset_complete: Reset complete flag, signalled from the TOP ISR
+ */
+struct ope_top {
+	struct ope_hw *ope_hw_info;
+	struct ope_top_ctx top_ctx[OPE_CTX_MAX];
+	struct completion reset_complete;
+};
+#endif /* OPE_TOP_H */

+ 6 - 1
drivers/cam_utils/cam_debug_util.c

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2019, The Linux Foundataion. All rights reserved.
+ * Copyright (c) 2017-2020, The Linux Foundataion. All rights reserved.
  */
 
 #include <linux/io.h>
@@ -93,6 +93,12 @@ const char *cam_get_module_name(unsigned int module_id)
 		break;
 	case CAM_CUSTOM:
 		name = "CAM-CUSTOM";
+		break;
+	case CAM_OPE:
+		name = "CAM-OPE";
+		break;
+	case CAM_PRESIL:
+		name = "CAM-PRESIL";
 		break;
 	default:
 		name = "CAM";

+ 3 - 1
drivers/cam_utils/cam_debug_util.h

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_DEBUG_UTIL_H_
@@ -39,6 +39,8 @@
 /* CAM_PERF: Used for performance (clock, BW etc) logs */
 #define CAM_PERF       (1 << 25)
 #define CAM_CUSTOM     (1 << 26)
+#define CAM_PRESIL     (1 << 27)
+#define CAM_OPE        (1 << 28)
 
 #define STR_BUFFER_MAX_LENGTH  1024
 

+ 15 - 1
drivers/camera_main.c

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
  */
 #include <linux/module.h>
 #include <linux/build_bug.h>
@@ -48,6 +48,8 @@
 
 #include "cam_debug_util.h"
 
+#include "ope_dev_intf.h"
+
 struct camera_submodule_component {
 	int (*init)(void);
 	void (*exit)(void);
@@ -101,6 +103,13 @@ static const struct camera_submodule_component camera_icp[] = {
 #endif
 };
 
+/* OPE submodule: hw-mgr entry registered before the subdev entry.
+ * NOTE(review): mirrors the ICP pattern above — confirm the subdev
+ * depends on the hw mgr being initialized first.
+ */
+static const struct camera_submodule_component camera_ope[] = {
+#ifdef CONFIG_SPECTRA_OPE
+	{&cam_ope_init_module, &cam_ope_exit_module},
+	{&cam_ope_subdev_init_module, &cam_ope_subdev_exit_module},
+#endif
+};
+
 static const struct camera_submodule_component camera_jpeg[] = {
 #ifdef CONFIG_SPECTRA_JPEG
 	{&cam_jpeg_enc_init_module, &cam_jpeg_enc_exit_module},
@@ -152,6 +161,11 @@ static const struct camera_submodule submodule_table[] = {
 		.num_component = ARRAY_SIZE(camera_icp),
 		.component = camera_icp,
 	},
+	{
+		.name = "Camera OPE",
+		.num_component = ARRAY_SIZE(camera_ope),
+		.component = camera_ope,
+	},
 	{
 		.name = "Camera JPEG",
 		.num_component = ARRAY_SIZE(camera_jpeg),