
msm: eva: Initial eva driver

For Waipio.

Change-Id: I2fa0eeadcbf9252190a6febbe0f890f1dc7b1524
Signed-off-by: George Shen <[email protected]>
George Shen 4 years ago
parent
current commit
387d008122

+ 11 - 0
Makefile

@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+# auto-detect subdirs
+ifneq ($(CONFIG_ARCH_QTI_VM), y)
+ifeq ($(CONFIG_ARCH_LAHAINA), y)
+include $(srctree)/techpack/eva/config/waipioeva.conf
+LINUXINCLUDE    += -include $(srctree)/techpack/eva/config/waipioevaconf.h
+endif
+endif
+
+obj-y += msm/

+ 5 - 0
config/waipioeva.conf

@@ -0,0 +1,5 @@
+ifeq ($(CONFIG_QGKI),y)
+export CONFIG_MSM_EVA=n
+else
+export CONFIG_MSM_EVA=n
+endif

+ 6 - 0
config/waipioevaconf.h

@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#define CONFIG_MSM_EVA 0
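
The .conf file drives the kbuild side of this switch, while the force-included header gives C sources a compile-time view of the same value. A minimal sketch of how driver code could key off it (illustration only, not part of this change; eva_core_enabled() is a hypothetical helper):

#include <linux/types.h>

/* waipioevaconf.h is force-included via LINUXINCLUDE, so the macro is
 * visible here without an explicit #include. */
#if CONFIG_MSM_EVA
static inline bool eva_core_enabled(void) { return true; }
#else
static inline bool eva_core_enabled(void) { return false; }
#endif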

+ 24 - 0
msm/Makefile

@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y += -I$(srctree)/techpack/eva/msm/eva/ \
+    -I$(srctree)/drivers/media/platform/msm/synx/
+
+msm-eva-objs := eva/cvp.o \
+                eva/msm_cvp_ioctl.o \
+                eva/msm_cvp_platform.o \
+                eva/msm_cvp_common.o \
+                eva/msm_cvp_core.o \
+                eva/msm_cvp.o \
+                eva/msm_smem.o \
+                eva/msm_cvp_debug.o \
+                eva/msm_cvp_res_parse.o \
+                eva/cvp_hfi.o \
+                eva/hfi_response_handler.o \
+                eva/hfi_packetization.o \
+                eva/cvp_core_hfi.o \
+                eva/msm_cvp_clocks.o\
+                eva/msm_cvp_dsp.o \
+                eva/msm_cvp_buf.o \
+                eva/msm_cvp_synx.o
+
+obj-$(CONFIG_MSM_EVA) := msm-eva.o
+

+ 604 - 0
msm/eva/cvp.c

@@ -0,0 +1,604 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/io.h>
+#include "msm_cvp_core.h"
+#include "msm_cvp_common.h"
+#include "msm_cvp_debug.h"
+#include "msm_cvp_internal.h"
+#include "msm_cvp_res_parse.h"
+#include "msm_cvp_resources.h"
+#include "cvp_hfi_api.h"
+#include "cvp_private.h"
+#include "msm_cvp_clocks.h"
+#include "msm_cvp_dsp.h"
+
+#define CLASS_NAME              "cvp"
+#define DRIVER_NAME             "cvp"
+
+struct msm_cvp_drv *cvp_driver;
+
+static int cvp_open(struct inode *inode, struct file *filp)
+{
+	struct msm_cvp_core *core = container_of(inode->i_cdev,
+		struct msm_cvp_core, cdev);
+	struct msm_cvp_inst *inst;
+
+	dprintk(CVP_SESS, "%s: Enter\n", __func__);
+
+	inst = msm_cvp_open(core->id, MSM_CVP_USER);
+	if (!inst) {
+		dprintk(CVP_ERR, "Failed to create cvp instance\n");
+		return -ENOMEM;
+	}
+	filp->private_data = inst;
+	return 0;
+}
+
+static int cvp_close(struct inode *inode, struct file *filp)
+{
+	int rc = 0;
+	struct msm_cvp_inst *inst = filp->private_data;
+
+	rc = msm_cvp_close(inst);
+	filp->private_data = NULL;
+	return rc;
+}
+
+static unsigned int cvp_poll(struct file *filp, struct poll_table_struct *p)
+{
+	int rc = 0;
+	struct msm_cvp_inst *inst = filp->private_data;
+	unsigned long flags = 0;
+
+	poll_wait(filp, &inst->event_handler.wq, p);
+
+	spin_lock_irqsave(&inst->event_handler.lock, flags);
+	if (inst->event_handler.event == CVP_SSR_EVENT)
+		rc |= POLLPRI;
+	spin_unlock_irqrestore(&inst->event_handler.lock, flags);
+
+	return rc;
+}
+
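
cvp_poll() only ever reports POLLPRI, and only for SSR events. A minimal userspace sketch of a waiter (illustration only; the /dev/cvp node name is an assumption based on DRIVER_NAME and the device_create() call later in this file):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct pollfd pfd;
	int fd = open("/dev/cvp", O_RDONLY);	/* assumed node name */

	if (fd < 0)
		return 1;
	pfd.fd = fd;
	pfd.events = POLLPRI;	/* SSR is signalled via POLLPRI only */
	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLPRI))
		printf("CVP SSR event received\n");
	close(fd);
	return 0;
}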
+static const struct file_operations cvp_fops = {
+	.owner = THIS_MODULE,
+	.open = cvp_open,
+	.release = cvp_close,
+	.unlocked_ioctl = cvp_unblocked_ioctl,
+	.compat_ioctl = cvp_compat_ioctl,
+	.poll = cvp_poll,
+};
+
+static int read_platform_resources(struct msm_cvp_core *core,
+		struct platform_device *pdev)
+{
+	int rc = 0;
+
+	if (!core || !pdev) {
+		dprintk(CVP_ERR, "%s: Invalid params %pK %pK\n",
+			__func__, core, pdev);
+		return -EINVAL;
+	}
+
+	core->hfi_type = CVP_HFI_IRIS;
+	core->resources.pdev = pdev;
+	if (pdev->dev.of_node) {
+		/* Target supports DT, parse from it */
+		rc = cvp_read_platform_resources_from_drv_data(core);
+		rc = cvp_read_platform_resources_from_dt(&core->resources);
+	} else {
+		dprintk(CVP_ERR, "pdev node is NULL\n");
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+static int msm_cvp_initialize_core(struct platform_device *pdev,
+				struct msm_cvp_core *core)
+{
+	int i = 0;
+	int rc = 0;
+
+	if (!core)
+		return -EINVAL;
+	rc = read_platform_resources(core, pdev);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to get platform resources\n");
+		return rc;
+	}
+
+	INIT_LIST_HEAD(&core->instances);
+	mutex_init(&core->lock);
+
+	core->state = CVP_CORE_UNINIT;
+	for (i = SYS_MSG_INDEX(SYS_MSG_START);
+		i <= SYS_MSG_INDEX(SYS_MSG_END); i++) {
+		init_completion(&core->completions[i]);
+	}
+
+	INIT_DELAYED_WORK(&core->fw_unload_work, msm_cvp_fw_unload_handler);
+	INIT_WORK(&core->ssr_work, msm_cvp_ssr_handler);
+
+	return rc;
+}
+
+static ssize_t link_name_show(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct msm_cvp_core *core = dev_get_drvdata(dev);
+
+	if (core && dev == core->dev)
+		return snprintf(buf, PAGE_SIZE, "msm_cvp\n");
+
+	return 0;
+}
+
+static DEVICE_ATTR_RO(link_name);
+
+static ssize_t pwr_collapse_delay_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long val = 0;
+	int rc = 0;
+	struct msm_cvp_core *core = NULL;
+
+	rc = kstrtoul(buf, 0, &val);
+	if (rc)
+		return rc;
+	else if (!val)
+		return -EINVAL;
+
+	core = get_cvp_core(MSM_CORE_CVP);
+	if (!core)
+		return -EINVAL;
+	core->resources.msm_cvp_pwr_collapse_delay = val;
+	return count;
+}
+
+static ssize_t pwr_collapse_delay_show(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct msm_cvp_core *core = NULL;
+
+	core = get_cvp_core(MSM_CORE_CVP);
+	if (!core)
+		return -EINVAL;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+		core->resources.msm_cvp_pwr_collapse_delay);
+}
+
+static DEVICE_ATTR_RW(pwr_collapse_delay);
+
+static ssize_t thermal_level_show(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", cvp_driver->thermal_level);
+}
+
+static ssize_t thermal_level_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int rc = 0, val = 0;
+
+	rc = kstrtoint(buf, 0, &val);
+	if (rc || val < 0) {
+		dprintk(CVP_WARN,
+			"Invalid thermal level value: %s\n", buf);
+		return -EINVAL;
+	}
+	dprintk(CVP_PWR, "Thermal level old %d new %d\n",
+			cvp_driver->thermal_level, val);
+
+	if (val == cvp_driver->thermal_level)
+		return count;
+	cvp_driver->thermal_level = val;
+
+	msm_cvp_comm_handle_thermal_event();
+	return count;
+}
+
+static DEVICE_ATTR_RW(thermal_level);
+
+static ssize_t sku_version_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%d",
+			cvp_driver->sku_version);
+}
+
+static DEVICE_ATTR_RO(sku_version);
+
+static ssize_t boot_store(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	int rc = 0, val = 0;
+	static int booted;
+
+	rc = kstrtoint(buf, 0, &val);
+	if (rc || val < 0) {
+		dprintk(CVP_WARN,
+			"Invalid boot value: %s\n", buf);
+		return -EINVAL;
+	}
+
+	if (val > 0 && booted == 0) {
+		struct msm_cvp_inst *inst;
+
+		inst = msm_cvp_open(MSM_CORE_CVP, MSM_CVP_BOOT);
+		if (!inst) {
+			dprintk(CVP_ERR,
+			"Failed to create cvp instance\n");
+			return -ENOMEM;
+		}
+		rc = msm_cvp_close(inst);
+		if (rc) {
+			dprintk(CVP_ERR,
+			"Failed to close cvp instance\n");
+			return rc;
+		}
+	}
+	booted = 1;
+	return count;
+}
+
+static DEVICE_ATTR_WO(boot);
+
+static struct attribute *msm_cvp_core_attrs[] = {
+		&dev_attr_pwr_collapse_delay.attr,
+		&dev_attr_thermal_level.attr,
+		&dev_attr_sku_version.attr,
+		&dev_attr_link_name.attr,
+		&dev_attr_boot.attr,
+		NULL
+};
+
+static struct attribute_group msm_cvp_core_attr_group = {
+		.attrs = msm_cvp_core_attrs,
+};
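
With CLASS_NAME and DRIVER_NAME both "cvp", these attributes would typically appear under /sys/class/cvp/cvp/. A sketch of driving thermal_level from userspace (the sysfs path is an assumption, not confirmed by this change):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int set_thermal_level(int level)
{
	char buf[16];
	int n, fd = open("/sys/class/cvp/cvp/thermal_level", O_WRONLY);

	if (fd < 0)
		return -1;
	n = snprintf(buf, sizeof(buf), "%d", level);
	n = (write(fd, buf, n) == n) ? 0 : -1;	/* store() parses via kstrtoint() */
	close(fd);
	return n;
}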
+
+static const struct of_device_id msm_cvp_plat_match[] = {
+	{.compatible = "qcom,msm-cvp"},
+	{.compatible = "qcom,msm-cvp,context-bank"},
+	{.compatible = "qcom,msm-cvp,bus"},
+	{.compatible = "qcom,msm-cvp,mem-cdsp"},
+	{}
+};
+
+static int msm_probe_cvp_device(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct msm_cvp_core *core;
+
+	if (!cvp_driver) {
+		dprintk(CVP_ERR, "Invalid cvp driver\n");
+		return -EINVAL;
+	}
+
+	core = kzalloc(sizeof(*core), GFP_KERNEL);
+	if (!core)
+		return -ENOMEM;
+
+	core->platform_data = cvp_get_drv_data(&pdev->dev);
+	dev_set_drvdata(&pdev->dev, core);
+	rc = msm_cvp_initialize_core(pdev, core);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to init core\n");
+		goto err_core_init;
+	}
+
+	core->id = MSM_CORE_CVP;
+
+	rc = alloc_chrdev_region(&core->dev_num, 0, 1, DRIVER_NAME);
+	if (rc < 0) {
+		dprintk(CVP_ERR, "alloc_chrdev_region failed: %d\n",
+				rc);
+		goto err_alloc_chrdev;
+	}
+
+	core->class = class_create(THIS_MODULE, CLASS_NAME);
+	if (IS_ERR(core->class)) {
+		rc = PTR_ERR(core->class);
+		dprintk(CVP_ERR, "class_create failed: %d\n",
+				rc);
+		goto err_class_create;
+	}
+
+	core->dev = device_create(core->class, NULL,
+		core->dev_num, NULL, DRIVER_NAME);
+	if (IS_ERR(core->dev)) {
+		rc = PTR_ERR(core->dev);
+		dprintk(CVP_ERR, "device_create failed: %d\n",
+				rc);
+		goto err_device_create;
+	}
+	dev_set_drvdata(core->dev, core);
+
+	cdev_init(&core->cdev, &cvp_fops);
+	rc = cdev_add(&core->cdev,
+			MKDEV(MAJOR(core->dev_num), 0), 1);
+	if (rc < 0) {
+		dprintk(CVP_ERR, "cdev_add failed: %d\n",
+				rc);
+		goto error_cdev_add;
+	}
+
+	/* finish setting up the 'core' */
+	mutex_lock(&cvp_driver->lock);
+	if (cvp_driver->num_cores + 1 > MSM_CVP_CORES_MAX) {
+		mutex_unlock(&cvp_driver->lock);
+		dprintk(CVP_ERR, "Maximum cores already exist, core_no = %d\n",
+				cvp_driver->num_cores);
+		goto err_cores_exceeded;
+	}
+	cvp_driver->num_cores++;
+	mutex_unlock(&cvp_driver->lock);
+
+	rc = sysfs_create_group(&core->dev->kobj, &msm_cvp_core_attr_group);
+	if (rc) {
+		dprintk(CVP_ERR,
+				"Failed to create attributes\n");
+		goto err_cores_exceeded;
+	}
+
+	core->device = cvp_hfi_initialize(core->hfi_type, core->id,
+				&core->resources, &cvp_handle_cmd_response);
+	if (IS_ERR_OR_NULL(core->device)) {
+		mutex_lock(&cvp_driver->lock);
+		cvp_driver->num_cores--;
+		mutex_unlock(&cvp_driver->lock);
+
+		rc = PTR_ERR(core->device) ?: -EBADHANDLE;
+		if (rc != -EPROBE_DEFER)
+			dprintk(CVP_ERR, "Failed to create HFI device\n");
+		else
+			dprintk(CVP_CORE, "msm_cvp: request probe defer\n");
+		goto err_hfi_initialize;
+	}
+
+	mutex_lock(&cvp_driver->lock);
+	list_add_tail(&core->list, &cvp_driver->cores);
+	mutex_unlock(&cvp_driver->lock);
+
+	core->debugfs_root = msm_cvp_debugfs_init_core(
+		core, cvp_driver->debugfs_root);
+
+	cvp_driver->sku_version = core->resources.sku_version;
+
+	dprintk(CVP_CORE, "populating sub devices\n");
+	/*
+	 * Trigger probe for each sub-device i.e. qcom,msm-cvp,context-bank.
+	 * When msm_cvp_probe is called for each sub-device, parse the
+	 * context-bank details and store it in core->resources.context_banks
+	 * list.
+	 */
+	rc = of_platform_populate(pdev->dev.of_node, msm_cvp_plat_match, NULL,
+			&pdev->dev);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to trigger probe for sub-devices\n");
+		goto err_fail_sub_device_probe;
+	}
+
+	atomic64_set(&core->kernel_trans_id, 0);
+
+	rc = cvp_dsp_device_init();
+	if (rc)
+		dprintk(CVP_WARN, "Failed to initialize DSP driver\n");
+
+	return rc;
+
+err_fail_sub_device_probe:
+	cvp_hfi_deinitialize(core->hfi_type, core->device);
+err_hfi_initialize:
+err_cores_exceeded:
+	cdev_del(&core->cdev);
+error_cdev_add:
+	device_destroy(core->class, core->dev_num);
+err_device_create:
+	class_destroy(core->class);
+err_class_create:
+	unregister_chrdev_region(core->dev_num, 1);
+err_alloc_chrdev:
+	sysfs_remove_group(&pdev->dev.kobj, &msm_cvp_core_attr_group);
+err_core_init:
+	dev_set_drvdata(&pdev->dev, NULL);
+	kfree(core);
+	return rc;
+}
+
+static int msm_cvp_probe_mem_cdsp(struct platform_device *pdev)
+{
+	return cvp_read_mem_cdsp_resources_from_dt(pdev);
+}
+
+static int msm_cvp_probe_context_bank(struct platform_device *pdev)
+{
+	return cvp_read_context_bank_resources_from_dt(pdev);
+}
+
+static int msm_cvp_probe_bus(struct platform_device *pdev)
+{
+	return cvp_read_bus_resources_from_dt(pdev);
+}
+
+static int msm_cvp_probe(struct platform_device *pdev)
+{
+	/*
+	 * Sub-device probes are triggered by of_platform_populate() towards
+	 * the end of msm_probe_cvp_device(), after the main msm-cvp device
+	 * probe has completed. Return immediately after completing a
+	 * sub-device probe.
+	 */
+	if (of_device_is_compatible(pdev->dev.of_node, "qcom,msm-cvp")) {
+		return msm_probe_cvp_device(pdev);
+	} else if (of_device_is_compatible(pdev->dev.of_node,
+		"qcom,msm-cvp,bus")) {
+		return msm_cvp_probe_bus(pdev);
+	} else if (of_device_is_compatible(pdev->dev.of_node,
+		"qcom,msm-cvp,context-bank")) {
+		return msm_cvp_probe_context_bank(pdev);
+	} else if (of_device_is_compatible(pdev->dev.of_node,
+		"qcom,msm-cvp,mem-cdsp")) {
+		return msm_cvp_probe_mem_cdsp(pdev);
+	}
+
+	/* How did we end up here? */
+	MSM_CVP_ERROR(1);
+	return -EINVAL;
+}
+
+static int msm_cvp_remove(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct msm_cvp_core *core;
+
+	if (!pdev) {
+		dprintk(CVP_ERR, "%s invalid input %pK", __func__, pdev);
+		return -EINVAL;
+	}
+
+	core = dev_get_drvdata(&pdev->dev);
+	if (!core) {
+		dprintk(CVP_ERR, "%s invalid core", __func__);
+		return -EINVAL;
+	}
+
+	cvp_hfi_deinitialize(core->hfi_type, core->device);
+	msm_cvp_free_platform_resources(&core->resources);
+	sysfs_remove_group(&pdev->dev.kobj, &msm_cvp_core_attr_group);
+	dev_set_drvdata(&pdev->dev, NULL);
+	mutex_destroy(&core->lock);
+	kfree(core);
+	return rc;
+}
+
+static int msm_cvp_pm_suspend(struct device *dev)
+{
+	int rc = 0;
+	struct msm_cvp_core *core;
+
+	/*
+	 * Bail out if
+	 * - driver possibly not probed yet
+	 * - not the main device. We don't support power management on
+	 *   subdevices (e.g. context banks)
+	 */
+	if (!dev || !dev->driver ||
+		!of_device_is_compatible(dev->of_node, "qcom,msm-cvp"))
+		return 0;
+
+	core = dev_get_drvdata(dev);
+	if (!core) {
+		dprintk(CVP_ERR, "%s invalid core\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = msm_cvp_suspend(core->id);
+	if (rc == -ENOTSUPP)
+		rc = 0;
+	else if (rc)
+		dprintk(CVP_WARN, "Failed to suspend: %d\n", rc);
+
+	return rc;
+}
+
+static int msm_cvp_pm_resume(struct device *dev)
+{
+	dprintk(CVP_INFO, "%s\n", __func__);
+	return 0;
+}
+
+static const struct dev_pm_ops msm_cvp_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(msm_cvp_pm_suspend, msm_cvp_pm_resume)
+};
+
+MODULE_DEVICE_TABLE(of, msm_cvp_plat_match);
+
+static struct platform_driver msm_cvp_driver = {
+	.probe = msm_cvp_probe,
+	.remove = msm_cvp_remove,
+	.driver = {
+		.name = "msm_cvp",
+		.of_match_table = msm_cvp_plat_match,
+		.pm = &msm_cvp_pm_ops,
+	},
+};
+
+static int __init msm_cvp_init(void)
+{
+	int rc = 0;
+
+	cvp_driver = kzalloc(sizeof(*cvp_driver), GFP_KERNEL);
+	if (!cvp_driver) {
+		dprintk(CVP_ERR,
+			"Failed to allocate memory for msm_cvp_drv\n");
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&cvp_driver->cores);
+	mutex_init(&cvp_driver->lock);
+	cvp_driver->debugfs_root = msm_cvp_debugfs_init_drv();
+	if (!cvp_driver->debugfs_root)
+		dprintk(CVP_ERR,
+			"Failed to create debugfs for msm_cvp\n");
+
+	rc = platform_driver_register(&msm_cvp_driver);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"Failed to register platform driver\n");
+		debugfs_remove_recursive(cvp_driver->debugfs_root);
+		kfree(cvp_driver);
+		cvp_driver = NULL;
+		return rc;
+	}
+
+	cvp_driver->msg_cache = KMEM_CACHE(cvp_session_msg, 0);
+	cvp_driver->frame_cache = KMEM_CACHE(msm_cvp_frame, 0);
+	cvp_driver->buf_cache = KMEM_CACHE(cvp_internal_buf, 0);
+	cvp_driver->smem_cache = KMEM_CACHE(msm_cvp_smem, 0);
+
+	return rc;
+}
+
+static void __exit msm_cvp_exit(void)
+{
+	cvp_dsp_device_exit();
+	kmem_cache_destroy(cvp_driver->msg_cache);
+	kmem_cache_destroy(cvp_driver->frame_cache);
+	kmem_cache_destroy(cvp_driver->buf_cache);
+	kmem_cache_destroy(cvp_driver->smem_cache);
+
+	platform_driver_unregister(&msm_cvp_driver);
+	debugfs_remove_recursive(cvp_driver->debugfs_root);
+	mutex_destroy(&cvp_driver->lock);
+	kfree(cvp_driver);
+	cvp_driver = NULL;
+}
+
+module_init(msm_cvp_init);
+module_exit(msm_cvp_exit);
+
+MODULE_LICENSE("GPL v2");
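
KMEM_CACHE(st, flags) is shorthand for kmem_cache_create() using the struct's own name and size. A sketch of the allocation pattern these caches are presumably meant to feed (illustrative only; the real call sites live in later files of this change):

struct cvp_session_msg *msg;

msg = kmem_cache_zalloc(cvp_driver->msg_cache, GFP_KERNEL);
if (!msg)
	return -ENOMEM;
/* ...queue the message... */
kmem_cache_free(cvp_driver->msg_cache, msg);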

+ 24 - 0
msm/eva/cvp_comm_def.h

@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _MSM_COMM_DEF_H_
+#define _MSM_COMM_DEF_H_
+
+#include <linux/types.h>
+
+enum op_mode {
+	OP_NORMAL,
+	OP_DRAINING,
+	OP_FLUSH,
+	OP_INVALID,
+};
+
+enum queue_state {
+	QUEUE_INIT,
+	QUEUE_ACTIVE = 1,
+	QUEUE_STOP = 2,
+	QUEUE_INVALID,
+};
+#endif

+ 52 - 0
msm/eva/cvp_core_hfi.c

@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include "msm_cvp_debug.h"
+#include "cvp_hfi_api.h"
+#include "cvp_core_hfi.h"
+
+struct cvp_hfi_device *cvp_hfi_initialize(enum msm_cvp_hfi_type hfi_type,
+		u32 device_id, struct msm_cvp_platform_resources *res,
+		hfi_cmd_response_callback callback)
+{
+	struct cvp_hfi_device *hdev = NULL;
+	int rc = 0;
+
+	hdev = kzalloc(sizeof(struct cvp_hfi_device), GFP_KERNEL);
+	if (!hdev) {
+		dprintk(CVP_ERR, "%s: failed to allocate hdev\n", __func__);
+		return NULL;
+	}
+
+	rc = cvp_iris_hfi_initialize(hdev, device_id, res, callback);
+
+	if (rc) {
+		if (rc != -EPROBE_DEFER)
+			dprintk(CVP_ERR, "%s device init failed rc = %d",
+				__func__, rc);
+		goto err_hfi_init;
+	}
+
+	return hdev;
+
+err_hfi_init:
+	kfree(hdev);
+	return ERR_PTR(rc);
+}
+
+void cvp_hfi_deinitialize(enum msm_cvp_hfi_type hfi_type,
+			struct cvp_hfi_device *hdev)
+{
+	if (!hdev) {
+		dprintk(CVP_ERR, "%s invalid device %pK", __func__, hdev);
+		return;
+	}
+
+	cvp_iris_hfi_delete_device(hdev->hfi_device_data);
+
+	kfree(hdev);
+}
+

+ 271 - 0
msm/eva/cvp_core_hfi.h

@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __H_CVP_CORE_HFI_H__
+#define __H_CVP_CORE_HFI_H__
+
+#include <linux/clk.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/pm_qos.h>
+#include <linux/spinlock.h>
+#include "cvp_hfi_api.h"
+#include "cvp_hfi_helper.h"
+#include "cvp_hfi_api.h"
+#include "cvp_hfi.h"
+#include "msm_cvp_resources.h"
+#include "hfi_packetization.h"
+
+#define HFI_MASK_QHDR_TX_TYPE			0xFF000000
+#define HFI_MASK_QHDR_RX_TYPE			0x00FF0000
+#define HFI_MASK_QHDR_PRI_TYPE			0x0000FF00
+#define HFI_MASK_QHDR_Q_ID_TYPE			0x000000FF
+#define HFI_Q_ID_HOST_TO_CTRL_CMD_Q		0x00
+#define HFI_Q_ID_CTRL_TO_HOST_MSG_Q		0x01
+#define HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q	0x02
+#define HFI_MASK_QHDR_STATUS			0x000000FF
+
+#define CVP_IFACEQ_NUMQ					3
+#define CVP_IFACEQ_CMDQ_IDX				0
+#define CVP_IFACEQ_MSGQ_IDX				1
+#define CVP_IFACEQ_DBGQ_IDX				2
+#define CVP_IFACEQ_MAX_BUF_COUNT			50
+#define CVP_IFACE_MAX_PARALLEL_CLNTS		16
+#define CVP_IFACEQ_DFLT_QHDR				0x01010000
+
+#define CVP_MAX_NAME_LENGTH 64
+#define CVP_MAX_PC_SKIP_COUNT 10
+#define CVP_MAX_SUBCACHES 4
+#define CVP_MAX_SUBCACHE_SIZE 52
+
+struct cvp_hfi_queue_table_header {
+	u32 qtbl_version;
+	u32 qtbl_size;
+	u32 qtbl_qhdr0_offset;
+	u32 qtbl_qhdr_size;
+	u32 qtbl_num_q;
+	u32 qtbl_num_active_q;
+	void *device_addr;
+	char name[256];
+};
+
+struct cvp_hfi_queue_header {
+	u32 qhdr_status;
+	u32 qhdr_start_addr;
+	u32 qhdr_type;
+	u32 qhdr_q_size;
+	u32 qhdr_pkt_size;
+	u32 qhdr_pkt_drop_cnt;
+	u32 qhdr_rx_wm;
+	u32 qhdr_tx_wm;
+	u32 qhdr_rx_req;
+	u32 qhdr_tx_req;
+	u32 qhdr_rx_irq_status;
+	u32 qhdr_tx_irq_status;
+	u32 qhdr_read_idx;
+	u32 qhdr_write_idx;
+};
+
+struct cvp_hfi_mem_map_table {
+	u32 mem_map_num_entries;
+	u32 mem_map_table_base_addr;
+};
+
+struct cvp_hfi_mem_map {
+	u32 virtual_addr;
+	u32 physical_addr;
+	u32 size;
+	u32 attr;
+};
+
+#define CVP_IFACEQ_TABLE_SIZE (sizeof(struct cvp_hfi_queue_table_header) \
+	+ sizeof(struct cvp_hfi_queue_header) * CVP_IFACEQ_NUMQ)
+
+#define CVP_IFACEQ_QUEUE_SIZE	(CVP_IFACEQ_MAX_PKT_SIZE *  \
+	CVP_IFACEQ_MAX_BUF_COUNT * CVP_IFACE_MAX_PARALLEL_CLNTS)
+
+#define CVP_IFACEQ_GET_QHDR_START_ADDR(ptr, i)     \
+	(void *)((ptr + sizeof(struct cvp_hfi_queue_table_header)) + \
+		(i * sizeof(struct cvp_hfi_queue_header)))
+
+#define QDSS_SIZE 4096
+#define SFR_SIZE 4096
+
+#define QUEUE_SIZE (CVP_IFACEQ_TABLE_SIZE + \
+	(CVP_IFACEQ_QUEUE_SIZE * CVP_IFACEQ_NUMQ))
+
+#define ALIGNED_QDSS_SIZE ALIGN(QDSS_SIZE, SZ_4K)
+#define ALIGNED_SFR_SIZE ALIGN(SFR_SIZE, SZ_4K)
+#define ALIGNED_QUEUE_SIZE ALIGN(QUEUE_SIZE, SZ_4K)
+#define SHARED_QSIZE ALIGN(ALIGNED_SFR_SIZE + ALIGNED_QUEUE_SIZE + \
+			ALIGNED_QDSS_SIZE, SZ_1M)
+
+struct cvp_mem_addr {
+	u32 align_device_addr;
+	u8 *align_virtual_addr;
+	u32 mem_size;
+	struct msm_cvp_smem mem_data;
+};
+
+struct cvp_iface_q_info {
+	spinlock_t hfi_lock;
+	void *q_hdr;
+	struct cvp_mem_addr q_array;
+};
+
+/*
+ * These are helper macros to iterate over various lists within
+ * iris_hfi_device->res.  The intention is to cut down on a lot of boiler-plate
+ * code
+ */
+
+/* Read as "for each 'thing' in a set of 'thingies'" */
+#define iris_hfi_for_each_thing(__device, __thing, __thingy) \
+	iris_hfi_for_each_thing_continue(__device, __thing, __thingy, 0)
+
+#define iris_hfi_for_each_thing_reverse(__device, __thing, __thingy) \
+	iris_hfi_for_each_thing_reverse_continue(__device, __thing, __thingy, \
+			(__device)->res->__thingy##_set.count - 1)
+
+/* TODO: the __from parameter is technically not required, since we can
+ * figure it out with some pointer magic (i.e. __thing - __thing##_tbl[0]).
+ * If this macro sees extensive use it is probably worth cleaning up, but
+ * for now it is left as-is, since the cleanup introduces unnecessary
+ * complexity.
+ */
+#define iris_hfi_for_each_thing_continue(__device, __thing, __thingy, __from) \
+	for (__thing = &(__device)->res->\
+			__thingy##_set.__thingy##_tbl[__from]; \
+		__thing < &(__device)->res->__thingy##_set.__thingy##_tbl[0] + \
+			((__device)->res->__thingy##_set.count - __from); \
+		++__thing)
+
+#define iris_hfi_for_each_thing_reverse_continue(__device, __thing, __thingy, \
+		__from) \
+	for (__thing = &(__device)->res->\
+			__thingy##_set.__thingy##_tbl[__from]; \
+		__thing >= &(__device)->res->__thingy##_set.__thingy##_tbl[0]; \
+		--__thing)
+
+/* Regular set helpers */
+#define iris_hfi_for_each_regulator(__device, __rinfo) \
+	iris_hfi_for_each_thing(__device, __rinfo, regulator)
+
+#define iris_hfi_for_each_regulator_reverse(__device, __rinfo) \
+	iris_hfi_for_each_thing_reverse(__device, __rinfo, regulator)
+
+#define iris_hfi_for_each_regulator_reverse_continue(__device, __rinfo, \
+		__from) \
+	iris_hfi_for_each_thing_reverse_continue(__device, __rinfo, \
+			regulator, __from)
+
+/* Clock set helpers */
+#define iris_hfi_for_each_clock(__device, __cinfo) \
+	iris_hfi_for_each_thing(__device, __cinfo, clock)
+
+#define iris_hfi_for_each_clock_reverse(__device, __cinfo) \
+	iris_hfi_for_each_thing_reverse(__device, __cinfo, clock)
+
+#define iris_hfi_for_each_clock_reverse_continue(__device, __rinfo, \
+		__from) \
+	iris_hfi_for_each_thing_reverse_continue(__device, __rinfo, \
+			clock, __from)
+
+/* Bus set helpers */
+#define iris_hfi_for_each_bus(__device, __binfo) \
+	iris_hfi_for_each_thing(__device, __binfo, bus)
+#define iris_hfi_for_each_bus_reverse(__device, __binfo) \
+	iris_hfi_for_each_thing_reverse(__device, __binfo, bus)
+
+/* Subcache set helpers */
+#define iris_hfi_for_each_subcache(__device, __sinfo) \
+	iris_hfi_for_each_thing(__device, __sinfo, subcache)
+#define iris_hfi_for_each_subcache_reverse(__device, __sinfo) \
+	iris_hfi_for_each_thing_reverse(__device, __sinfo, subcache)
+
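
The token pasting above assumes each resource set in res is named <thing>_set and carries a count plus a <thing>_tbl array. A sketch of what one iteration site expands to (assumption drawn from the ##_set/##_tbl usage, with __from == 0):

struct clock_info *cl;

iris_hfi_for_each_clock(device, cl)
	clk_prepare(cl->clk);	/* example body */

/* ...expands to roughly: */
for (cl = &device->res->clock_set.clock_tbl[0];
     cl < &device->res->clock_set.clock_tbl[0] +
		device->res->clock_set.count;
     ++cl)
	clk_prepare(cl->clk);	/* example body */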
+#define call_iris_op(d, op, args...)			\
+	(((d) && (d)->vpu_ops && (d)->vpu_ops->op) ? \
+	((d)->vpu_ops->op(args)):0)
+
+struct cvp_hal_data {
+	u32 irq;
+	phys_addr_t firmware_base;
+	u8 __iomem *register_base;
+	u8 __iomem *gcc_reg_base;
+	u32 register_size;
+	u32 gcc_reg_size;
+};
+
+struct iris_resources {
+	struct msm_cvp_fw fw;
+};
+
+enum iris_hfi_state {
+	IRIS_STATE_DEINIT = 1,
+	IRIS_STATE_INIT,
+};
+
+enum reset_state {
+	INIT = 1,
+	ASSERT,
+	DEASSERT,
+};
+
+struct iris_hfi_device;
+
+struct iris_hfi_vpu_ops {
+	void (*interrupt_init)(struct iris_hfi_device *ptr);
+	void (*setup_dsp_uc_memmap)(struct iris_hfi_device *device);
+	void (*clock_config_on_enable)(struct iris_hfi_device *device);
+	int (*reset_ahb2axi_bridge)(struct iris_hfi_device *device);
+	void (*power_off)(struct iris_hfi_device *device);
+	void (*noc_error_info)(struct iris_hfi_device *device);
+};
+
+struct iris_hfi_device {
+	struct list_head sess_head;
+	u32 version;
+	u32 intr_status;
+	u32 device_id;
+	u32 clk_freq;
+	u32 last_packet_type;
+	unsigned long clk_bitrate;
+	unsigned long scaled_rate;
+	struct msm_cvp_gov_data bus_vote;
+	bool power_enabled;
+	bool reg_dumped;
+	struct mutex lock;
+	msm_cvp_callback callback;
+	struct cvp_mem_addr iface_q_table;
+	struct cvp_mem_addr dsp_iface_q_table;
+	struct cvp_mem_addr qdss;
+	struct cvp_mem_addr sfr;
+	struct cvp_mem_addr mem_addr;
+	struct cvp_iface_q_info iface_queues[CVP_IFACEQ_NUMQ];
+	struct cvp_iface_q_info dsp_iface_queues[CVP_IFACEQ_NUMQ];
+	struct cvp_hal_data *cvp_hal_data;
+	struct workqueue_struct *cvp_workq;
+	struct workqueue_struct *iris_pm_workq;
+	int spur_count;
+	int reg_count;
+	struct iris_resources resources;
+	struct msm_cvp_platform_resources *res;
+	enum iris_hfi_state state;
+	struct cvp_hfi_packetization_ops *pkt_ops;
+	enum hfi_packetization_type packetization_type;
+	struct msm_cvp_cb_info *response_pkt;
+	u8 *raw_packet;
+	struct pm_qos_request qos;
+	unsigned int skip_pc_count;
+	struct msm_cvp_capability *sys_init_capabilities;
+	struct iris_hfi_vpu_ops *vpu_ops;
+};
+
+void cvp_iris_hfi_delete_device(void *device);
+
+int cvp_iris_hfi_initialize(struct cvp_hfi_device *hdev, u32 device_id,
+		struct msm_cvp_platform_resources *res,
+		hfi_cmd_response_callback callback);
+
+#endif

+ 4680 - 0
msm/eva/cvp_hfi.c

@@ -0,0 +1,4680 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <asm/memory.h>
+#include <linux/coresight-stm.h>
+#include <linux/delay.h>
+#include <linux/devfreq.h>
+#include <linux/hash.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/pm_qos.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/soc/qcom/llcc-qcom.h>
+#include <linux/qcom_scm.h>
+#include <linux/soc/qcom/smem.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <linux/dma-mapping.h>
+#include <linux/reset.h>
+#include "hfi_packetization.h"
+#include "msm_cvp_debug.h"
+#include "cvp_core_hfi.h"
+#include "cvp_hfi_helper.h"
+#include "cvp_hfi_io.h"
+#include "msm_cvp_dsp.h"
+
+#define FIRMWARE_SIZE			0x00A00000
+#define REG_ADDR_OFFSET_BITMASK	0x000FFFFF
+#define QDSS_IOVA_START 0x80001000
+#define MIN_PAYLOAD_SIZE 3
+
+const struct msm_cvp_hfi_defs cvp_hfi_defs[] = {
+	{
+		.size = HFI_DFS_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_DFS_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_DFS_CONFIG_CMD_DONE,
+	},
+	{
+		.size = HFI_DFS_FRAME_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_DFS_FRAME,
+		.buf_offset = HFI_DFS_FRAME_BUFFERS_OFFSET,
+		.buf_num = HFI_DFS_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+	{
+		.size = HFI_DME_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_DME_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_DME_CONFIG_CMD_DONE,
+	},
+	{
+		.size = HFI_DME_BASIC_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_DME_BASIC_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_DME_BASIC_CONFIG_CMD_DONE,
+	},
+	{
+		.size = HFI_DME_FRAME_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_DME_FRAME,
+		.buf_offset = HFI_DME_FRAME_BUFFERS_OFFSET,
+		.buf_num = HFI_DME_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+	{
+		.size = HFI_PERSIST_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_SET_PERSIST_BUFFERS,
+		.buf_offset = HFI_PERSIST_BUFFERS_OFFSET,
+		.buf_num = HFI_PERSIST_BUF_NUM,
+		.resp = HAL_SESSION_PERSIST_SET_DONE,
+	},
+	{
+		.size = 0xffffffff,
+		.type = HFI_CMD_SESSION_CVP_RELEASE_PERSIST_BUFFERS,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_PERSIST_REL_DONE,
+	},
+	{
+		.size = HFI_DS_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_DS,
+		.buf_offset = HFI_DS_BUFFERS_OFFSET,
+		.buf_num = HFI_DS_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+	{
+		.size = HFI_OF_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_CV_TME_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_TME_CONFIG_CMD_DONE,
+	},
+	{
+		.size = HFI_OF_FRAME_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_CV_TME_FRAME,
+		.buf_offset = HFI_OF_BUFFERS_OFFSET,
+		.buf_num = HFI_OF_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+	{
+		.size = HFI_ODT_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_CV_ODT_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_ODT_CONFIG_CMD_DONE,
+	},
+	{
+		.size = HFI_ODT_FRAME_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_CV_ODT_FRAME,
+		.buf_offset = HFI_ODT_BUFFERS_OFFSET,
+		.buf_num = HFI_ODT_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+	{
+		.size = HFI_OD_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_CV_OD_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_OD_CONFIG_CMD_DONE,
+	},
+	{
+		.size = HFI_OD_FRAME_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_CV_OD_FRAME,
+		.buf_offset = HFI_OD_BUFFERS_OFFSET,
+		.buf_num = HFI_OD_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+	{
+		.size = HFI_NCC_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_NCC_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_NCC_CONFIG_CMD_DONE,
+	},
+	{
+		.size = HFI_NCC_FRAME_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_NCC_FRAME,
+		.buf_offset = HFI_NCC_BUFFERS_OFFSET,
+		.buf_num = HFI_NCC_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+	{
+		.size = HFI_ICA_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_ICA_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_ICA_CONFIG_CMD_DONE,
+	},
+	{
+		.size = HFI_ICA_FRAME_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_ICA_FRAME,
+		.buf_offset = HFI_ICA_BUFFERS_OFFSET,
+		.buf_num = HFI_ICA_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+	{
+		.size = HFI_HCD_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_HCD_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_HCD_CONFIG_CMD_DONE,
+	},
+	{
+		.size = HFI_HCD_FRAME_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_HCD_FRAME,
+		.buf_offset = HFI_HCD_BUFFERS_OFFSET,
+		.buf_num = HFI_HCD_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+	{
+		.size = HFI_DCM_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_DC_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_DC_CONFIG_CMD_DONE,
+	},
+	{
+		.size = HFI_DCM_FRAME_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_DC_FRAME,
+		.buf_offset = HFI_DCM_BUFFERS_OFFSET,
+		.buf_num = HFI_DCM_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+	{
+		.size = HFI_DCM_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_DCM_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_DCM_CONFIG_CMD_DONE,
+	},
+	{
+		.size = HFI_DCM_FRAME_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_DCM_FRAME,
+		.buf_offset = HFI_DCM_BUFFERS_OFFSET,
+		.buf_num = HFI_DCM_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+	{
+		.size = HFI_PYS_HCD_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_PYS_HCD_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_PYS_HCD_CONFIG_CMD_DONE,
+	},
+	{
+		.size = HFI_PYS_HCD_FRAME_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_PYS_HCD_FRAME,
+		.buf_offset = HFI_PYS_HCD_BUFFERS_OFFSET,
+		.buf_num = HFI_PYS_HCD_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+	{
+		.size = 0xFFFFFFFF,
+		.type = HFI_CMD_SESSION_CVP_SET_MODEL_BUFFERS,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_MODEL_BUF_CMD_DONE,
+	},
+	{
+		.size = 0xFFFFFFFF,
+		.type = HFI_CMD_SESSION_CVP_FD_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_FD_CONFIG_CMD_DONE,
+	},
+	{
+		.size = 0xFFFFFFFF,
+		.type = HFI_CMD_SESSION_CVP_FD_FRAME,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_NO_RESP,
+	},
+};
+
+struct cvp_tzbsp_memprot {
+	u32 cp_start;
+	u32 cp_size;
+	u32 cp_nonpixel_start;
+	u32 cp_nonpixel_size;
+};
+
+#define TZBSP_PIL_SET_STATE 0xA
+#define TZBSP_CVP_PAS_ID    26
+
+/* Poll interval in uS */
+#define POLL_INTERVAL_US 50
+
+enum tzbsp_subsys_state {
+	TZ_SUBSYS_STATE_SUSPEND = 0,
+	TZ_SUBSYS_STATE_RESUME = 1,
+	TZ_SUBSYS_STATE_RESTORE_THRESHOLD = 2,
+};
+
+const struct msm_cvp_gov_data CVP_DEFAULT_BUS_VOTE = {
+	.data = NULL,
+	.data_count = 0,
+};
+
+const int cvp_max_packets = 32;
+
+static void iris_hfi_pm_handler(struct work_struct *work);
+static DECLARE_DELAYED_WORK(iris_hfi_pm_work, iris_hfi_pm_handler);
+static inline int __resume(struct iris_hfi_device *device);
+static inline int __suspend(struct iris_hfi_device *device);
+static int __disable_regulators(struct iris_hfi_device *device);
+static int __enable_regulators(struct iris_hfi_device *device);
+static inline int __prepare_enable_clks(struct iris_hfi_device *device);
+static inline void __disable_unprepare_clks(struct iris_hfi_device *device);
+static void __flush_debug_queue(struct iris_hfi_device *device, u8 *packet);
+static int __initialize_packetization(struct iris_hfi_device *device);
+static struct cvp_hal_session *__get_session(struct iris_hfi_device *device,
+		u32 session_id);
+static bool __is_session_valid(struct iris_hfi_device *device,
+		struct cvp_hal_session *session, const char *func);
+static int __set_clocks(struct iris_hfi_device *device, u32 freq);
+static int __iface_cmdq_write(struct iris_hfi_device *device,
+					void *pkt);
+static int __load_fw(struct iris_hfi_device *device);
+static void __unload_fw(struct iris_hfi_device *device);
+static int __tzbsp_set_cvp_state(enum tzbsp_subsys_state state);
+static int __enable_subcaches(struct iris_hfi_device *device);
+static int __set_subcaches(struct iris_hfi_device *device);
+static int __release_subcaches(struct iris_hfi_device *device);
+static int __disable_subcaches(struct iris_hfi_device *device);
+static int __power_collapse(struct iris_hfi_device *device, bool force);
+static int iris_hfi_noc_error_info(void *dev);
+
+static void interrupt_init_iris2(struct iris_hfi_device *device);
+static void setup_dsp_uc_memmap_vpu5(struct iris_hfi_device *device);
+static void clock_config_on_enable_vpu5(struct iris_hfi_device *device);
+static int reset_ahb2axi_bridge(struct iris_hfi_device *device);
+static void power_off_iris2(struct iris_hfi_device *device);
+
+static int __set_ubwc_config(struct iris_hfi_device *device);
+static void __noc_error_info_iris2(struct iris_hfi_device *device);
+
+static struct iris_hfi_vpu_ops iris2_ops = {
+	.interrupt_init = interrupt_init_iris2,
+	.setup_dsp_uc_memmap = setup_dsp_uc_memmap_vpu5,
+	.clock_config_on_enable = clock_config_on_enable_vpu5,
+	.reset_ahb2axi_bridge = reset_ahb2axi_bridge,
+	.power_off = power_off_iris2,
+	.noc_error_info = __noc_error_info_iris2,
+};
+
+/**
+ * Utility function to enforce some of our assumptions.  Spam calls to this
+ * in hotspots in code to double check some of the assumptions that we hold.
+ */
+static inline void __strict_check(struct iris_hfi_device *device)
+{
+	msm_cvp_res_handle_fatal_hw_error(device->res,
+		!mutex_is_locked(&device->lock));
+}
+
+static inline void __set_state(struct iris_hfi_device *device,
+		enum iris_hfi_state state)
+{
+	device->state = state;
+}
+
+static inline bool __core_in_valid_state(struct iris_hfi_device *device)
+{
+	return device->state != IRIS_STATE_DEINIT;
+}
+
+static inline bool is_sys_cache_present(struct iris_hfi_device *device)
+{
+	return device->res->sys_cache_present;
+}
+
+#define ROW_SIZE 32
+
+int get_pkt_index(struct cvp_hal_session_cmd_pkt *hdr)
+{
+	int i, pkt_num = ARRAY_SIZE(cvp_hfi_defs);
+
+	for (i = 0; i < pkt_num; i++)
+		if (cvp_hfi_defs[i].type == hdr->packet_type)
+			return i;
+
+	return -EINVAL;
+}
+
+int get_hfi_version(void)
+{
+	struct msm_cvp_core *core;
+	struct iris_hfi_device *hfi;
+
+	core = list_first_entry(&cvp_driver->cores, struct msm_cvp_core, list);
+	hfi = (struct iris_hfi_device *)core->device->hfi_device_data;
+
+	return hfi->version;
+}
+
+unsigned int get_msg_size(void)
+{
+	return sizeof(struct cvp_hfi_msg_session_hdr);
+}
+
+unsigned int get_msg_session_id(void *msg)
+{
+	struct cvp_hfi_msg_session_hdr *hdr =
+		(struct cvp_hfi_msg_session_hdr *)msg;
+
+	return hdr->session_id;
+}
+
+unsigned int get_msg_errorcode(void *msg)
+{
+	struct cvp_hfi_msg_session_hdr *hdr =
+		(struct cvp_hfi_msg_session_hdr *)msg;
+
+	return hdr->error_type;
+}
+
+int get_msg_opconfigs(void *msg, unsigned int *session_id,
+		unsigned int *error_type, unsigned int *config_id)
+{
+	struct cvp_hfi_msg_session_op_cfg_packet *cfg =
+		(struct cvp_hfi_msg_session_op_cfg_packet *)msg;
+
+	*session_id = cfg->session_id;
+	*error_type = cfg->error_type;
+	*config_id = cfg->op_conf_id;
+	return 0;
+}
+
+int get_signal_from_pkt_type(unsigned int type)
+{
+	int i, pkt_num = ARRAY_SIZE(cvp_hfi_defs);
+
+	for (i = 0; i < pkt_num; i++)
+		if (cvp_hfi_defs[i].type == type)
+			return cvp_hfi_defs[i].resp;
+
+	return -EINVAL;
+}
+
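
cvp_hfi_defs[] is the single table all of these helpers search linearly, keyed on the HFI packet type. A short usage sketch (illustration only):

struct cvp_hal_session_cmd_pkt hdr = {
	.packet_type = HFI_CMD_SESSION_CVP_DFS_CONFIG,
};
int idx = get_pkt_index(&hdr);

if (idx >= 0)	/* resp here is HAL_SESSION_DFS_CONFIG_CMD_DONE */
	pr_info("expected response signal: %d\n", cvp_hfi_defs[idx].resp);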
+static void __dump_packet(u8 *packet, enum cvp_msg_prio log_level)
+{
+	u32 c = 0, packet_size = *(u32 *)packet;
+	/*
+	 * row must contain enough for 0xdeadbaad * 8 to be converted into
+	 * "de ad ba ad " * 8 + '\0'
+	 */
+	char row[3 * ROW_SIZE];
+
+	for (c = 0; c * ROW_SIZE < packet_size; ++c) {
+		int bytes_to_read = ((c + 1) * ROW_SIZE > packet_size) ?
+			packet_size % ROW_SIZE : ROW_SIZE;
+		hex_dump_to_buffer(packet + c * ROW_SIZE, bytes_to_read,
+				ROW_SIZE, 4, row, sizeof(row), false);
+		dprintk(log_level, "%s\n", row);
+	}
+}
+
+static int __dsp_suspend(struct iris_hfi_device *device, bool force, u32 flags)
+{
+	int rc;
+	struct cvp_hal_session *temp;
+
+	if (msm_cvp_dsp_disable)
+		return 0;
+
+	list_for_each_entry(temp, &device->sess_head, list) {
+		/* if forceful suspend, don't check session pause info */
+		if (force)
+			continue;
+
+		/* don't suspend if cvp session is not paused */
+		if (!(temp->flags & SESSION_PAUSE)) {
+			dprintk(CVP_DSP,
+				"%s: cvp session %x not paused\n",
+				__func__, hash32_ptr(temp));
+			return -EBUSY;
+		}
+	}
+
+	dprintk(CVP_DSP, "%s: suspend dsp\n", __func__);
+	rc = cvp_dsp_suspend(flags);
+	if (rc) {
+		dprintk(CVP_ERR, "%s: dsp suspend failed with error %d\n",
+			__func__, rc);
+		return -EINVAL;
+	}
+
+	dprintk(CVP_DSP, "%s: dsp suspended\n", __func__);
+	return 0;
+}
+
+static int __dsp_resume(struct iris_hfi_device *device, u32 flags)
+{
+	int rc;
+
+	if (msm_cvp_dsp_disable)
+		return 0;
+
+	dprintk(CVP_DSP, "%s: resume dsp\n", __func__);
+	rc = cvp_dsp_resume(flags);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"%s: dsp resume failed with error %d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	dprintk(CVP_DSP, "%s: dsp resumed\n", __func__);
+	return rc;
+}
+
+static int __dsp_shutdown(struct iris_hfi_device *device, u32 flags)
+{
+	int rc;
+
+	if (msm_cvp_dsp_disable)
+		return 0;
+
+	dprintk(CVP_DSP, "%s: shutdown dsp\n", __func__);
+	rc = cvp_dsp_shutdown(flags);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"%s: dsp shutdown failed with error %d\n",
+			__func__, rc);
+		WARN_ON(1);
+	}
+
+	dprintk(CVP_DSP, "%s: dsp shutdown successful\n", __func__);
+	return rc;
+}
+
+static int __acquire_regulator(struct regulator_info *rinfo,
+				struct iris_hfi_device *device)
+{
+	int rc = 0;
+
+	if (rinfo->has_hw_power_collapse) {
+		rc = regulator_set_mode(rinfo->regulator,
+				REGULATOR_MODE_NORMAL);
+		if (rc) {
+			/*
+			 * This is somewhat fatal, but nothing we can do
+			 * about it. We can't disable the regulator w/o
+			 * getting it back under s/w control
+			 */
+			dprintk(CVP_WARN,
+				"Failed to acquire regulator control: %s\n",
+					rinfo->name);
+		} else {
+
+			dprintk(CVP_PWR,
+					"Acquire regulator control from HW: %s\n",
+					rinfo->name);
+
+		}
+	}
+
+	if (!regulator_is_enabled(rinfo->regulator)) {
+		dprintk(CVP_WARN, "Regulator is not enabled %s\n",
+			rinfo->name);
+		msm_cvp_res_handle_fatal_hw_error(device->res, true);
+	}
+
+	return rc;
+}
+
+static int __hand_off_regulator(struct regulator_info *rinfo)
+{
+	int rc = 0;
+
+	if (rinfo->has_hw_power_collapse) {
+		rc = regulator_set_mode(rinfo->regulator,
+				REGULATOR_MODE_FAST);
+		if (rc) {
+			dprintk(CVP_WARN,
+				"Failed to hand off regulator control: %s\n",
+					rinfo->name);
+		} else {
+			dprintk(CVP_PWR,
+					"Hand off regulator control to HW: %s\n",
+					rinfo->name);
+		}
+	}
+
+	return rc;
+}
+
+static int __hand_off_regulators(struct iris_hfi_device *device)
+{
+	struct regulator_info *rinfo;
+	int rc = 0, c = 0;
+
+	iris_hfi_for_each_regulator(device, rinfo) {
+		rc = __hand_off_regulator(rinfo);
+		/*
+		 * If one regulator hand off failed, driver should take
+		 * the control for other regulators back.
+		 */
+		if (rc)
+			goto err_reg_handoff_failed;
+		c++;
+	}
+
+	return rc;
+err_reg_handoff_failed:
+	iris_hfi_for_each_regulator_reverse_continue(device, rinfo, c)
+		__acquire_regulator(rinfo, device);
+
+	return rc;
+}
+
+static int __write_queue(struct cvp_iface_q_info *qinfo, u8 *packet,
+		bool *rx_req_is_set)
+{
+	struct cvp_hfi_queue_header *queue;
+	u32 packet_size_in_words, new_write_idx;
+	u32 empty_space, read_idx, write_idx;
+	u32 *write_ptr;
+
+	if (!qinfo || !packet) {
+		dprintk(CVP_ERR, "Invalid Params\n");
+		return -EINVAL;
+	} else if (!qinfo->q_array.align_virtual_addr) {
+		dprintk(CVP_WARN, "Queues have already been freed\n");
+		return -EINVAL;
+	}
+
+	queue = (struct cvp_hfi_queue_header *) qinfo->q_hdr;
+	if (!queue) {
+		dprintk(CVP_ERR, "queue not present\n");
+		return -ENOENT;
+	}
+
+	if (msm_cvp_debug & CVP_PKT) {
+		dprintk(CVP_PKT, "%s: %pK\n", __func__, qinfo);
+		__dump_packet(packet, CVP_PKT);
+	}
+
+	packet_size_in_words = (*(u32 *)packet) >> 2;
+	if (!packet_size_in_words || packet_size_in_words >
+		qinfo->q_array.mem_size>>2) {
+		dprintk(CVP_ERR, "Invalid packet size\n");
+		return -ENODATA;
+	}
+
+	spin_lock(&qinfo->hfi_lock);
+	read_idx = queue->qhdr_read_idx;
+	write_idx = queue->qhdr_write_idx;
+
+	empty_space = (write_idx >= read_idx) ?
+		((qinfo->q_array.mem_size>>2) - (write_idx - read_idx)) :
+		(read_idx - write_idx);
+	if (empty_space <= packet_size_in_words) {
+		queue->qhdr_tx_req =  1;
+		spin_unlock(&qinfo->hfi_lock);
+		dprintk(CVP_ERR, "Insufficient size (%d) to write (%d)\n",
+					  empty_space, packet_size_in_words);
+		return -ENOTEMPTY;
+	}
+
+	queue->qhdr_tx_req =  0;
+
+	new_write_idx = write_idx + packet_size_in_words;
+	write_ptr = (u32 *)((qinfo->q_array.align_virtual_addr) +
+		(write_idx << 2));
+	if (write_ptr < (u32 *)qinfo->q_array.align_virtual_addr ||
+		write_ptr > (u32 *)(qinfo->q_array.align_virtual_addr +
+		qinfo->q_array.mem_size)) {
+		spin_unlock(&qinfo->hfi_lock);
+		dprintk(CVP_ERR, "Invalid write index\n");
+		return -ENODATA;
+	}
+
+	if (new_write_idx < (qinfo->q_array.mem_size >> 2)) {
+		memcpy(write_ptr, packet, packet_size_in_words << 2);
+	} else {
+		new_write_idx -= qinfo->q_array.mem_size >> 2;
+		memcpy(write_ptr, packet, (packet_size_in_words -
+			new_write_idx) << 2);
+		memcpy((void *)qinfo->q_array.align_virtual_addr,
+			packet + ((packet_size_in_words - new_write_idx) << 2),
+			new_write_idx  << 2);
+	}
+
+	/*
+	 * Memory barrier to make sure packet is written before updating the
+	 * write index
+	 */
+	mb();
+	queue->qhdr_write_idx = new_write_idx;
+	if (rx_req_is_set)
+		*rx_req_is_set = queue->qhdr_rx_req == 1;
+	/*
+	 * Memory barrier to make sure write index is updated before an
+	 * interrupt is raised.
+	 */
+	mb();
+	spin_unlock(&qinfo->hfi_lock);
+	return 0;
+}
+
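
Worked example of the wrap path above (numbers are illustrative): with a queue of mem_size >> 2 = 1024 words, write_idx = 1020 and a 10-word packet (and the reader far enough ahead for the empty-space check to pass), new_write_idx becomes 1030 and wraps to 6; the first memcpy() writes (10 - 6) = 4 words at word offset 1020, and the second writes the remaining 6 words at the start of the ring.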
+static int __read_queue(struct cvp_iface_q_info *qinfo, u8 *packet,
+		u32 *pb_tx_req_is_set)
+{
+	struct cvp_hfi_queue_header *queue;
+	u32 packet_size_in_words, new_read_idx;
+	u32 *read_ptr;
+	u32 receive_request = 0;
+	u32 read_idx, write_idx;
+	int rc = 0;
+
+	if (!qinfo || !packet || !pb_tx_req_is_set) {
+		dprintk(CVP_ERR, "Invalid Params\n");
+		return -EINVAL;
+	} else if (!qinfo->q_array.align_virtual_addr) {
+		dprintk(CVP_WARN, "Queues have already been freed\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Memory barrier to make sure data is valid before
+	 * reading it
+	 */
+	mb();
+	queue = (struct cvp_hfi_queue_header *) qinfo->q_hdr;
+
+	if (!queue) {
+		dprintk(CVP_ERR, "Queue memory is not allocated\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * Do not set the receive request for the debug queue; if it is
+	 * set, Iris generates an interrupt for debug messages even when
+	 * there is no response message available. In general the debug
+	 * queue will not become full, as it is emptied on every
+	 * interrupt from Iris. Iris will generate an interrupt anyway
+	 * if it does fill up.
+	 */
+	spin_lock(&qinfo->hfi_lock);
+	if (queue->qhdr_type & HFI_Q_ID_CTRL_TO_HOST_MSG_Q)
+		receive_request = 1;
+
+	read_idx = queue->qhdr_read_idx;
+	write_idx = queue->qhdr_write_idx;
+
+	if (read_idx == write_idx) {
+		queue->qhdr_rx_req = receive_request;
+		/*
+		 * mb() to ensure qhdr is updated in main memory
+		 * so that iris reads the updated header values
+		 */
+		mb();
+		*pb_tx_req_is_set = 0;
+		if (write_idx != queue->qhdr_write_idx) {
+			queue->qhdr_rx_req = 0;
+		} else {
+			spin_unlock(&qinfo->hfi_lock);
+			dprintk(CVP_HFI,
+				"%s queue is empty, rx_req = %u, tx_req = %u, read_idx = %u\n",
+				receive_request ? "message" : "debug",
+				queue->qhdr_rx_req, queue->qhdr_tx_req,
+				queue->qhdr_read_idx);
+			return -ENODATA;
+		}
+	}
+
+	read_ptr = (u32 *)((qinfo->q_array.align_virtual_addr) +
+				(read_idx << 2));
+	if (read_ptr < (u32 *)qinfo->q_array.align_virtual_addr ||
+		read_ptr > (u32 *)(qinfo->q_array.align_virtual_addr +
+		qinfo->q_array.mem_size - sizeof(*read_ptr))) {
+		spin_unlock(&qinfo->hfi_lock);
+		dprintk(CVP_ERR, "Invalid read index\n");
+		return -ENODATA;
+	}
+
+	packet_size_in_words = (*read_ptr) >> 2;
+	if (!packet_size_in_words) {
+		spin_unlock(&qinfo->hfi_lock);
+		dprintk(CVP_ERR, "Zero packet size\n");
+		return -ENODATA;
+	}
+
+	new_read_idx = read_idx + packet_size_in_words;
+	if (((packet_size_in_words << 2) <= CVP_IFACEQ_VAR_HUGE_PKT_SIZE)
+			&& read_idx <= (qinfo->q_array.mem_size >> 2)) {
+		if (new_read_idx < (qinfo->q_array.mem_size >> 2)) {
+			memcpy(packet, read_ptr,
+					packet_size_in_words << 2);
+		} else {
+			new_read_idx -= (qinfo->q_array.mem_size >> 2);
+			memcpy(packet, read_ptr,
+			(packet_size_in_words - new_read_idx) << 2);
+			memcpy(packet + ((packet_size_in_words -
+					new_read_idx) << 2),
+					(u8 *)qinfo->q_array.align_virtual_addr,
+					new_read_idx << 2);
+		}
+	} else {
+		dprintk(CVP_WARN,
+			"BAD packet received, read_idx: %#x, pkt_size: %d\n",
+			read_idx, packet_size_in_words << 2);
+		dprintk(CVP_WARN, "Dropping this packet\n");
+		new_read_idx = write_idx;
+		rc = -ENODATA;
+	}
+
+	if (new_read_idx != queue->qhdr_write_idx)
+		queue->qhdr_rx_req = 0;
+	else
+		queue->qhdr_rx_req = receive_request;
+	queue->qhdr_read_idx = new_read_idx;
+	/*
+	 * mb() to ensure qhdr is updated in main memory
+	 * so that iris reads the updated header values
+	 */
+	mb();
+
+	*pb_tx_req_is_set = (queue->qhdr_tx_req == 1) ? 1 : 0;
+
+	spin_unlock(&qinfo->hfi_lock);
+
+	if ((msm_cvp_debug & CVP_PKT) &&
+		!(queue->qhdr_type & HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q)) {
+		dprintk(CVP_PKT, "%s: %pK\n", __func__, qinfo);
+		__dump_packet(packet, CVP_PKT);
+	}
+
+	return rc;
+}
+
+static int __smem_alloc(struct iris_hfi_device *dev, struct cvp_mem_addr *mem,
+			u32 size, u32 align, u32 flags)
+{
+	struct msm_cvp_smem *alloc = &mem->mem_data;
+	int rc = 0;
+
+	if (!dev || !mem || !size) {
+		dprintk(CVP_ERR, "Invalid Params\n");
+		return -EINVAL;
+	}
+
+	dprintk(CVP_INFO, "start to alloc size: %d, flags: %d\n", size, flags);
+	rc = msm_cvp_smem_alloc(size, align, flags, 1, (void *)dev->res, alloc);
+	if (rc) {
+		dprintk(CVP_ERR, "Alloc failed\n");
+		rc = -ENOMEM;
+		goto fail_smem_alloc;
+	}
+
+	dprintk(CVP_MEM, "%s: ptr = %pK, size = %d\n", __func__,
+			alloc->kvaddr, size);
+
+	mem->mem_size = alloc->size;
+	mem->align_virtual_addr = alloc->kvaddr;
+	mem->align_device_addr = alloc->device_addr;
+
+	return rc;
+fail_smem_alloc:
+	return rc;
+}
+
+static void __smem_free(struct iris_hfi_device *dev, struct msm_cvp_smem *mem)
+{
+	if (!dev || !mem) {
+		dprintk(CVP_ERR, "invalid param %pK %pK\n", dev, mem);
+		return;
+	}
+
+	msm_cvp_smem_free(mem);
+}
+
+static void __write_register(struct iris_hfi_device *device,
+		u32 reg, u32 value)
+{
+	u32 hwiosymaddr = reg;
+	u8 *base_addr;
+
+	if (!device) {
+		dprintk(CVP_ERR, "Invalid params: %pK\n", device);
+		return;
+	}
+
+	__strict_check(device);
+
+	if (!device->power_enabled) {
+		dprintk(CVP_WARN,
+			"HFI Write register failed : Power is OFF\n");
+		msm_cvp_res_handle_fatal_hw_error(device->res, true);
+		return;
+	}
+
+	base_addr = device->cvp_hal_data->register_base;
+	dprintk(CVP_REG, "Base addr: %pK, written to: %#x, Value: %#x...\n",
+		base_addr, hwiosymaddr, value);
+	base_addr += hwiosymaddr;
+	writel_relaxed(value, base_addr);
+
+	/*
+	 * Memory barrier to make sure value is written into the register.
+	 */
+	wmb();
+}
+
+static int __read_register(struct iris_hfi_device *device, u32 reg)
+{
+	int rc = 0;
+	u8 *base_addr;
+
+	if (!device) {
+		dprintk(CVP_ERR, "Invalid params: %pK\n", device);
+		return -EINVAL;
+	}
+
+	__strict_check(device);
+
+	if (!device->power_enabled) {
+		dprintk(CVP_WARN,
+			"HFI Read register failed : Power is OFF\n");
+		msm_cvp_res_handle_fatal_hw_error(device->res, true);
+		return -EINVAL;
+	}
+
+	base_addr = device->cvp_hal_data->register_base;
+
+	rc = readl_relaxed(base_addr + reg);
+	/*
+	 * Memory barrier to make sure value is read correctly from the
+	 * register.
+	 */
+	rmb();
+	dprintk(CVP_REG, "Base addr: %pK, read from: %#x, value: %#x...\n",
+		base_addr, reg, rc);
+
+	return rc;
+}
+
+static void __set_registers(struct iris_hfi_device *device)
+{
+	struct reg_set *reg_set;
+	int i;
+
+	if (!device->res) {
+		dprintk(CVP_ERR,
+			"device resources null, cannot set registers\n");
+		return;
+	}
+
+	reg_set = &device->res->reg_set;
+	for (i = 0; i < reg_set->count; i++) {
+		__write_register(device, reg_set->reg_tbl[i].reg,
+				reg_set->reg_tbl[i].value);
+		dprintk(CVP_REG, "write_reg offset=%x, val=%x\n",
+					reg_set->reg_tbl[i].reg,
+					reg_set->reg_tbl[i].value);
+	}
+}
+
+/*
+ * The existence of this function is a hack for 8996 (or certain Iris versions)
+ * to overcome a hardware bug.  Whenever the GDSCs momentarily power collapse
+ * (after calling __hand_off_regulators()), the values of the threshold
+ * registers (typically programmed by TZ) are incorrectly reset.  As a result
+ * reprogram these registers at certain agreed upon points.
+ */
+static void __set_threshold_registers(struct iris_hfi_device *device)
+{
+	u32 version = __read_register(device, CVP_WRAPPER_HW_VERSION);
+
+	version &= ~GENMASK(15, 0);
+	if (version != (0x3 << 28 | 0x43 << 16))
+		return;
+
+	if (__tzbsp_set_cvp_state(TZ_SUBSYS_STATE_RESTORE_THRESHOLD))
+		dprintk(CVP_ERR, "Failed to restore threshold values\n");
+}
+
+static int __unvote_buses(struct iris_hfi_device *device)
+{
+	int rc = 0;
+	struct bus_info *bus = NULL;
+
+	kfree(device->bus_vote.data);
+	device->bus_vote.data = NULL;
+	device->bus_vote.data_count = 0;
+
+	iris_hfi_for_each_bus(device, bus) {
+		rc = icc_set_bw(bus->client, 0, 0);
+		if (rc) {
+			dprintk(CVP_ERR,
+			"%s: Failed unvoting bus\n", __func__);
+			goto err_unknown_device;
+		}
+	}
+
+err_unknown_device:
+	return rc;
+}
+
+static int __vote_buses(struct iris_hfi_device *device,
+		struct cvp_bus_vote_data *data, int num_data)
+{
+	int rc = 0;
+	struct bus_info *bus = NULL;
+	struct cvp_bus_vote_data *new_data = NULL;
+
+	if (!num_data) {
+		dprintk(CVP_PWR, "No vote data available\n");
+		goto no_data_count;
+	} else if (!data) {
+		dprintk(CVP_ERR, "Invalid voting data\n");
+		return -EINVAL;
+	}
+
+	new_data = kmemdup(data, num_data * sizeof(*new_data), GFP_KERNEL);
+	if (!new_data) {
+		dprintk(CVP_ERR, "Can't alloc memory to cache bus votes\n");
+		rc = -ENOMEM;
+		goto err_no_mem;
+	}
+
+no_data_count:
+	kfree(device->bus_vote.data);
+	device->bus_vote.data = new_data;
+	device->bus_vote.data_count = num_data;
+
+	iris_hfi_for_each_bus(device, bus) {
+		if (bus) {
+			rc = icc_set_bw(bus->client, bus->range[1], 0);
+			if (rc)
+				dprintk(CVP_ERR,
+				"Failed voting bus %s to ab %u\n",
+				bus->name, bus->range[1]*1000);
+		}
+	}
+
+err_no_mem:
+	return rc;
+}
+
+static int iris_hfi_vote_buses(void *dev, struct cvp_bus_vote_data *d, int n)
+{
+	int rc = 0;
+	struct iris_hfi_device *device = dev;
+
+	if (!device)
+		return -EINVAL;
+
+	mutex_lock(&device->lock);
+	rc = __vote_buses(device, d, n);
+	mutex_unlock(&device->lock);
+
+	return rc;
+
+}
+
+static int __core_set_resource(struct iris_hfi_device *device,
+		struct cvp_resource_hdr *resource_hdr, void *resource_value)
+{
+	struct cvp_hfi_cmd_sys_set_resource_packet *pkt;
+	u8 packet[CVP_IFACEQ_VAR_SMALL_PKT_SIZE];
+	int rc = 0;
+
+	if (!device || !resource_hdr || !resource_value) {
+		dprintk(CVP_ERR, "set_res: Invalid Params\n");
+		return -EINVAL;
+	}
+
+	pkt = (struct cvp_hfi_cmd_sys_set_resource_packet *) packet;
+
+	rc = call_hfi_pkt_op(device, sys_set_resource,
+			pkt, resource_hdr, resource_value);
+	if (rc) {
+		dprintk(CVP_ERR, "set_res: failed to create packet\n");
+		goto err_create_pkt;
+	}
+
+	rc = __iface_cmdq_write(device, pkt);
+	if (rc)
+		rc = -ENOTEMPTY;
+
+err_create_pkt:
+	return rc;
+}
+
+static int __core_release_resource(struct iris_hfi_device *device,
+		struct cvp_resource_hdr *resource_hdr)
+{
+	struct cvp_hfi_cmd_sys_release_resource_packet *pkt;
+	u8 packet[CVP_IFACEQ_VAR_SMALL_PKT_SIZE];
+	int rc = 0;
+
+	if (!device || !resource_hdr) {
+		dprintk(CVP_ERR, "release_res: Invalid Params\n");
+		return -EINVAL;
+	}
+
+	pkt = (struct cvp_hfi_cmd_sys_release_resource_packet *) packet;
+
+	rc = call_hfi_pkt_op(device, sys_release_resource,
+			pkt, resource_hdr);
+
+	if (rc) {
+		dprintk(CVP_ERR, "release_res: failed to create packet\n");
+		goto err_create_pkt;
+	}
+
+	rc = __iface_cmdq_write(device, pkt);
+	if (rc)
+		rc = -ENOTEMPTY;
+
+err_create_pkt:
+	return rc;
+}
+
+static int __tzbsp_set_cvp_state(enum tzbsp_subsys_state state)
+{
+	int rc = 0;
+
+	rc = qcom_scm_set_remote_state(state, TZBSP_CVP_PAS_ID);
+	dprintk(CVP_CORE, "Set state %d, resp %d\n", state, rc);
+
+	if (rc) {
+		dprintk(CVP_ERR, "Failed qcom_scm_set_remote_state %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static inline int __boot_firmware(struct iris_hfi_device *device)
+{
+	int rc = 0;
+	u32 ctrl_init_val = 0, ctrl_status = 0, count = 0, max_tries = 1000;
+
+	ctrl_init_val = BIT(0);
+	__write_register(device, CVP_CTRL_INIT, ctrl_init_val);
+	while (!ctrl_status && count < max_tries) {
+		ctrl_status = __read_register(device, CVP_CTRL_STATUS);
+		if ((ctrl_status & CVP_CTRL_ERROR_STATUS__M) == 0x4) {
+			dprintk(CVP_ERR, "invalid setting for UC_REGION\n");
+			rc = -ENODATA;
+			break;
+		}
+
+		/* Reduce to 1/100th and x100 of max_tries */
+		usleep_range(500, 1000);
+		count++;
+	}
+
+	if (!(ctrl_status & CVP_CTRL_INIT_STATUS__M)) {
+		dprintk(CVP_ERR, "Failed to boot FW status: %x\n",
+			ctrl_status);
+		rc = -ENODEV;
+	}
+
+	/* Enable interrupt before sending commands to tensilica */
+	__write_register(device, CVP_CPU_CS_H2XSOFTINTEN, 0x1);
+	__write_register(device, CVP_CPU_CS_X2RPMh, 0x0);
+
+	return rc;
+}
+
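
As a side note, the open-coded boot poll above is roughly what the iopoll helpers (linux/iopoll.h, already included by this file) express directly. A sketch only, not the driver's method; base stands for the mapped register base, an assumption for illustration since the driver's own accessor takes the device rather than an address:

u32 ctrl_status;
int rc;

/* poll until init completes or the UC_REGION error code shows up */
rc = readl_relaxed_poll_timeout(base + CVP_CTRL_STATUS, ctrl_status,
		(ctrl_status & CVP_CTRL_INIT_STATUS__M) ||
		(ctrl_status & CVP_CTRL_ERROR_STATUS__M) == 0x4,
		POLL_INTERVAL_US, 1000 * 1000);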
+static int iris_hfi_resume(void *dev)
+{
+	int rc = 0;
+	struct iris_hfi_device *device = (struct iris_hfi_device *) dev;
+
+	if (!device) {
+		dprintk(CVP_ERR, "%s invalid device\n", __func__);
+		return -EINVAL;
+	}
+
+	dprintk(CVP_CORE, "Resuming Iris\n");
+
+	mutex_lock(&device->lock);
+	rc = __resume(device);
+	mutex_unlock(&device->lock);
+
+	return rc;
+}
+
+
+static int iris_hfi_suspend(void *dev)
+{
+	int rc = 0;
+	struct iris_hfi_device *device = (struct iris_hfi_device *) dev;
+
+	if (!device) {
+		dprintk(CVP_ERR, "%s invalid device\n", __func__);
+		return -EINVAL;
+	} else if (!device->res->sw_power_collapsible) {
+		return -ENOTSUPP;
+	}
+
+	dprintk(CVP_CORE, "Suspending Iris\n");
+	mutex_lock(&device->lock);
+	rc = __power_collapse(device, true);
+	if (rc) {
+		dprintk(CVP_WARN, "%s: Iris is busy\n", __func__);
+		rc = -EBUSY;
+	}
+	mutex_unlock(&device->lock);
+
+	/* Cancel pending delayed works if any */
+	if (!rc)
+		cancel_delayed_work(&iris_hfi_pm_work);
+
+	return rc;
+}
+
+static void cvp_dump_csr(struct iris_hfi_device *dev)
+{
+	u32 reg;
+
+	if (!dev)
+		return;
+	if (!dev->power_enabled || dev->reg_dumped)
+		return;
+	reg = __read_register(dev, CVP_WRAPPER_CPU_STATUS);
+	dprintk(CVP_ERR, "CVP_WRAPPER_CPU_STATUS: %x\n", reg);
+	reg = __read_register(dev, CVP_CPU_CS_SCIACMDARG0);
+	dprintk(CVP_ERR, "CVP_CPU_CS_SCIACMDARG0: %x\n", reg);
+	reg = __read_register(dev, CVP_WRAPPER_CPU_CLOCK_CONFIG);
+	dprintk(CVP_ERR, "CVP_WRAPPER_CPU_CLOCK_CONFIG: %x\n", reg);
+	reg = __read_register(dev, CVP_WRAPPER_CORE_CLOCK_CONFIG);
+	dprintk(CVP_ERR, "CVP_WRAPPER_CORE_CLOCK_CONFIG: %x\n", reg);
+	reg = __read_register(dev, CVP_WRAPPER_INTR_STATUS);
+	dprintk(CVP_ERR, "CVP_WRAPPER_INTR_STATUS: %x\n", reg);
+	reg = __read_register(dev, CVP_CPU_CS_H2ASOFTINT);
+	dprintk(CVP_ERR, "CVP_CPU_CS_H2ASOFTINT: %x\n", reg);
+	reg = __read_register(dev, CVP_CPU_CS_A2HSOFTINT);
+	dprintk(CVP_ERR, "CVP_CPU_CS_A2HSOFTINT: %x\n", reg);
+	reg = __read_register(dev, CVP_CC_MVS1C_GDSCR);
+	dprintk(CVP_ERR, "CVP_CC_MVS1C_GDSCR: %x\n", reg);
+	reg = __read_register(dev, CVP_CC_MVS1C_CBCR);
+	dprintk(CVP_ERR, "CVP_CC_MVS1C_CBCR: %x\n", reg);
+	dev->reg_dumped = true;
+}
+
+static int iris_hfi_flush_debug_queue(void *dev)
+{
+	int rc = 0;
+	struct iris_hfi_device *device = (struct iris_hfi_device *) dev;
+
+	if (!device) {
+		dprintk(CVP_ERR, "%s invalid device\n", __func__);
+		return -EINVAL;
+	}
+
+	cvp_dump_csr(device);
+	mutex_lock(&device->lock);
+
+	if (!device->power_enabled) {
+		dprintk(CVP_WARN, "%s: iris power off\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+	__flush_debug_queue(device, NULL);
+exit:
+	mutex_unlock(&device->lock);
+	return rc;
+}
+
+static int __set_clocks(struct iris_hfi_device *device, u32 freq)
+{
+	struct clock_info *cl;
+	int rc = 0;
+
+	iris_hfi_for_each_clock(device, cl) {
+		if (cl->has_scaling) {
+			device->clk_freq = freq;
+			if (msm_cvp_clock_voting)
+				freq = msm_cvp_clock_voting;
+
+			rc = clk_set_rate(cl->clk, freq);
+			if (rc) {
+				dprintk(CVP_ERR,
+					"Failed to set clock rate %u %s: %d %s\n",
+					freq, cl->name, rc, __func__);
+				return rc;
+			}
+
+			dprintk(CVP_PWR, "Scaling clock %s to %u\n",
+					cl->name, freq);
+		}
+	}
+
+	return 0;
+}
+
+static int iris_hfi_scale_clocks(void *dev, u32 freq)
+{
+	int rc = 0;
+	struct iris_hfi_device *device = dev;
+
+	if (!device) {
+		dprintk(CVP_ERR, "Invalid args: %pK\n", device);
+		return -EINVAL;
+	}
+
+	mutex_lock(&device->lock);
+
+	if (__resume(device)) {
+		dprintk(CVP_ERR, "Resume from power collapse failed\n");
+		rc = -ENODEV;
+		goto exit;
+	}
+
+	rc = __set_clocks(device, freq);
+exit:
+	mutex_unlock(&device->lock);
+
+	return rc;
+}
+
+static int __scale_clocks(struct iris_hfi_device *device)
+{
+	int rc = 0;
+	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
+	u32 rate = 0;
+
+	allowed_clks_tbl = device->res->allowed_clks_tbl;
+
+	rate = device->clk_freq ? device->clk_freq :
+		allowed_clks_tbl[0].clock_rate;
+
+	dprintk(CVP_PWR, "%s: scale clock rate %d\n", __func__, rate);
+	rc = __set_clocks(device, rate);
+	return rc;
+}
+
+/* Writes into cmdq without raising an interrupt */
+static int __iface_cmdq_write_relaxed(struct iris_hfi_device *device,
+		void *pkt, bool *requires_interrupt)
+{
+	struct cvp_iface_q_info *q_info;
+	struct cvp_hal_cmd_pkt_hdr *cmd_packet;
+	int result = -E2BIG;
+
+	if (!device || !pkt) {
+		dprintk(CVP_ERR, "Invalid Params\n");
+		return -EINVAL;
+	}
+
+	__strict_check(device);
+
+	if (!__core_in_valid_state(device)) {
+		dprintk(CVP_ERR, "%s - fw not in init state\n", __func__);
+		result = -EINVAL;
+		goto err_q_null;
+	}
+
+	cmd_packet = (struct cvp_hal_cmd_pkt_hdr *)pkt;
+	device->last_packet_type = cmd_packet->packet_type;
+
+	q_info = &device->iface_queues[CVP_IFACEQ_CMDQ_IDX];
+	if (!q_info) {
+		dprintk(CVP_ERR, "cannot write to shared Q's\n");
+		goto err_q_null;
+	}
+
+	if (!q_info->q_array.align_virtual_addr) {
+		dprintk(CVP_ERR, "cannot write to shared CMD Q's\n");
+		result = -ENODATA;
+		goto err_q_null;
+	}
+
+	if (__resume(device)) {
+		dprintk(CVP_ERR, "%s: Power on failed\n", __func__);
+		goto err_q_write;
+	}
+
+	if (!__write_queue(q_info, (u8 *)pkt, requires_interrupt)) {
+		if (device->res->sw_power_collapsible) {
+			cancel_delayed_work(&iris_hfi_pm_work);
+			if (!queue_delayed_work(device->iris_pm_workq,
+				&iris_hfi_pm_work,
+				msecs_to_jiffies(
+				device->res->msm_cvp_pwr_collapse_delay))) {
+				dprintk(CVP_PWR,
+				"PM work already scheduled\n");
+			}
+		}
+
+		result = 0;
+	} else {
+		dprintk(CVP_ERR, "__iface_cmdq_write: queue full\n");
+	}
+
+err_q_write:
+err_q_null:
+	return result;
+}
+
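+/*
+ * Non-relaxed variant of the cmdq write: after queueing the packet,
+ * ring the host-to-firmware soft interrupt if the consumer side of
+ * the queue asked for one (see the relaxed variant above).
+ */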
+static int __iface_cmdq_write(struct iris_hfi_device *device, void *pkt)
+{
+	bool needs_interrupt = false;
+	int rc = __iface_cmdq_write_relaxed(device, pkt, &needs_interrupt);
+
+	if (!rc && needs_interrupt) {
+		/* Consumer of cmdq prefers that we raise an interrupt */
+		rc = 0;
+		__write_register(device, CVP_CPU_CS_H2ASOFTINT, 1);
+	}
+
+	return rc;
+}
+
+static int __iface_msgq_read(struct iris_hfi_device *device, void *pkt)
+{
+	u32 tx_req_is_set = 0;
+	int rc = 0;
+	struct cvp_iface_q_info *q_info;
+
+	if (!pkt) {
+		dprintk(CVP_ERR, "Invalid Params\n");
+		return -EINVAL;
+	}
+
+	__strict_check(device);
+
+	if (!__core_in_valid_state(device)) {
+		dprintk(CVP_WARN, "%s - fw not in init state\n", __func__);
+		rc = -EINVAL;
+		goto read_error_null;
+	}
+
+	q_info = &device->iface_queues[CVP_IFACEQ_MSGQ_IDX];
+	if (q_info->q_array.align_virtual_addr == NULL) {
+		dprintk(CVP_ERR, "cannot read from shared MSG Q's\n");
+		rc = -ENODATA;
+		goto read_error_null;
+	}
+
+	if (!__read_queue(q_info, (u8 *)pkt, &tx_req_is_set)) {
+		if (tx_req_is_set)
+			__write_register(device, CVP_CPU_CS_H2ASOFTINT, 1);
+		rc = 0;
+	} else
+		rc = -ENODATA;
+
+read_error_null:
+	return rc;
+}
+
+static int __iface_dbgq_read(struct iris_hfi_device *device, void *pkt)
+{
+	u32 tx_req_is_set = 0;
+	int rc = 0;
+	struct cvp_iface_q_info *q_info;
+
+	if (!pkt) {
+		dprintk(CVP_ERR, "Invalid Params\n");
+		return -EINVAL;
+	}
+
+	__strict_check(device);
+
+	q_info = &device->iface_queues[CVP_IFACEQ_DBGQ_IDX];
+	if (q_info->q_array.align_virtual_addr == NULL) {
+		dprintk(CVP_ERR, "cannot read from shared DBG Q's\n");
+		rc = -ENODATA;
+		goto dbg_error_null;
+	}
+
+	if (!__read_queue(q_info, (u8 *)pkt, &tx_req_is_set)) {
+		if (tx_req_is_set)
+			__write_register(device, CVP_CPU_CS_H2ASOFTINT, 1);
+		rc = 0;
+	} else
+		rc = -ENODATA;
+
+dbg_error_null:
+	return rc;
+}
+
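+/*
+ * Initialize a shared-queue header to its idle defaults: queue active
+ * (status 1), empty (read index == write index == 0), watermarks of
+ * one, and receive-request set so the writer raises an interrupt when
+ * data arrives (the convention visible in the debug-queue setup
+ * below). The queue size is stored in 4-byte units, hence the
+ * divide by 4.
+ */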
+static void __set_queue_hdr_defaults(struct cvp_hfi_queue_header *q_hdr)
+{
+	q_hdr->qhdr_status = 0x1;
+	q_hdr->qhdr_type = CVP_IFACEQ_DFLT_QHDR;
+	q_hdr->qhdr_q_size = CVP_IFACEQ_QUEUE_SIZE / 4;
+	q_hdr->qhdr_pkt_size = 0;
+	q_hdr->qhdr_rx_wm = 0x1;
+	q_hdr->qhdr_tx_wm = 0x1;
+	q_hdr->qhdr_rx_req = 0x1;
+	q_hdr->qhdr_tx_req = 0x0;
+	q_hdr->qhdr_rx_irq_status = 0x0;
+	q_hdr->qhdr_tx_irq_status = 0x0;
+	q_hdr->qhdr_read_idx = 0x0;
+	q_hdr->qhdr_write_idx = 0x0;
+}
+
+static void __interface_dsp_queues_release(struct iris_hfi_device *device)
+{
+	int i;
+	struct msm_cvp_smem *mem_data = &device->dsp_iface_q_table.mem_data;
+	struct context_bank_info *cb = mem_data->mapping_info.cb_info;
+
+	if (!device->dsp_iface_q_table.align_virtual_addr) {
+		dprintk(CVP_ERR, "%s: already released\n", __func__);
+		return;
+	}
+
+	dma_unmap_single_attrs(cb->dev, mem_data->device_addr,
+		mem_data->size, DMA_BIDIRECTIONAL, 0);
+	dma_free_coherent(device->res->mem_cdsp.dev, mem_data->size,
+		mem_data->kvaddr, mem_data->dma_handle);
+
+	for (i = 0; i < CVP_IFACEQ_NUMQ; i++) {
+		device->dsp_iface_queues[i].q_hdr = NULL;
+		device->dsp_iface_queues[i].q_array.align_virtual_addr = NULL;
+		device->dsp_iface_queues[i].q_array.align_device_addr = 0;
+	}
+	device->dsp_iface_q_table.align_virtual_addr = NULL;
+	device->dsp_iface_q_table.align_device_addr = 0;
+}
+
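+/*
+ * Set up the DSP-facing interface queues: a single coherent
+ * allocation from the CDSP device holds the queue table header
+ * followed by the queues (cmd, msg, dbg). The buffer is additionally
+ * mapped through the CVP context bank so both sides can reach it;
+ * each header gets the defaults above, with the per-queue type
+ * OR'ed in afterwards.
+ */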
+static int __interface_dsp_queues_init(struct iris_hfi_device *dev)
+{
+	int rc = 0;
+	u32 i;
+	struct cvp_hfi_queue_table_header *q_tbl_hdr;
+	struct cvp_hfi_queue_header *q_hdr;
+	struct cvp_iface_q_info *iface_q;
+	int offset = 0;
+	phys_addr_t fw_bias = 0;
+	size_t q_size;
+	struct msm_cvp_smem *mem_data;
+	void *kvaddr;
+	dma_addr_t dma_handle;
+	dma_addr_t iova;
+	struct context_bank_info *cb;
+
+	q_size = ALIGN(QUEUE_SIZE, SZ_1M);
+	mem_data = &dev->dsp_iface_q_table.mem_data;
+
+	/* Allocate dsp queues from CDSP device memory */
+	kvaddr = dma_alloc_coherent(dev->res->mem_cdsp.dev, q_size,
+				&dma_handle, GFP_KERNEL);
+	if (IS_ERR_OR_NULL(kvaddr)) {
+		dprintk(CVP_ERR, "%s: failed dma allocation\n", __func__);
+		goto fail_dma_alloc;
+	}
+	cb = msm_cvp_smem_get_context_bank(false, dev->res, 0);
+	if (!cb) {
+		dprintk(CVP_ERR,
+			"%s: failed to get context bank\n", __func__);
+		goto fail_dma_map;
+	}
+	iova = dma_map_single_attrs(cb->dev, phys_to_virt(dma_handle),
+				q_size, DMA_BIDIRECTIONAL, 0);
+	if (dma_mapping_error(cb->dev, iova)) {
+		dprintk(CVP_ERR, "%s: failed dma mapping\n", __func__);
+		goto fail_dma_map;
+	}
+	dprintk(CVP_DSP,
+		"%s: kvaddr %pK dma_handle %#llx iova %#llx size %zd\n",
+		__func__, kvaddr, dma_handle, iova, q_size);
+
+	memset(mem_data, 0, sizeof(struct msm_cvp_smem));
+	mem_data->kvaddr = kvaddr;
+	mem_data->device_addr = iova;
+	mem_data->dma_handle = dma_handle;
+	mem_data->size = q_size;
+	mem_data->ion_flags = 0;
+	mem_data->mapping_info.cb_info = cb;
+
+	if (!is_iommu_present(dev->res))
+		fw_bias = dev->cvp_hal_data->firmware_base;
+
+	dev->dsp_iface_q_table.align_virtual_addr = kvaddr;
+	dev->dsp_iface_q_table.align_device_addr = iova - fw_bias;
+	dev->dsp_iface_q_table.mem_size = CVP_IFACEQ_TABLE_SIZE;
+	offset = dev->dsp_iface_q_table.mem_size;
+
+	for (i = 0; i < CVP_IFACEQ_NUMQ; i++) {
+		iface_q = &dev->dsp_iface_queues[i];
+		iface_q->q_array.align_device_addr = iova + offset - fw_bias;
+		iface_q->q_array.align_virtual_addr = kvaddr + offset;
+		iface_q->q_array.mem_size = CVP_IFACEQ_QUEUE_SIZE;
+		offset += iface_q->q_array.mem_size;
+		iface_q->q_hdr = CVP_IFACEQ_GET_QHDR_START_ADDR(
+			dev->dsp_iface_q_table.align_virtual_addr, i);
+		__set_queue_hdr_defaults(iface_q->q_hdr);
+		spin_lock_init(&iface_q->hfi_lock);
+	}
+
+	q_tbl_hdr = (struct cvp_hfi_queue_table_header *)
+			dev->dsp_iface_q_table.align_virtual_addr;
+	q_tbl_hdr->qtbl_version = 0;
+	q_tbl_hdr->device_addr = (void *)dev;
+	strlcpy(q_tbl_hdr->name, "msm_cvp", sizeof(q_tbl_hdr->name));
+	q_tbl_hdr->qtbl_size = CVP_IFACEQ_TABLE_SIZE;
+	q_tbl_hdr->qtbl_qhdr0_offset =
+				sizeof(struct cvp_hfi_queue_table_header);
+	q_tbl_hdr->qtbl_qhdr_size = sizeof(struct cvp_hfi_queue_header);
+	q_tbl_hdr->qtbl_num_q = CVP_IFACEQ_NUMQ;
+	q_tbl_hdr->qtbl_num_active_q = CVP_IFACEQ_NUMQ;
+
+	iface_q = &dev->dsp_iface_queues[CVP_IFACEQ_CMDQ_IDX];
+	q_hdr = iface_q->q_hdr;
+	q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
+	q_hdr->qhdr_type |= HFI_Q_ID_HOST_TO_CTRL_CMD_Q;
+
+	iface_q = &dev->dsp_iface_queues[CVP_IFACEQ_MSGQ_IDX];
+	q_hdr = iface_q->q_hdr;
+	q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
+	q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_MSG_Q;
+
+	iface_q = &dev->dsp_iface_queues[CVP_IFACEQ_DBGQ_IDX];
+	q_hdr = iface_q->q_hdr;
+	q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
+	q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q;
+	/*
+	 * Set receive request to zero on the debug queue as there is no
+	 * need for an interrupt from the cvp hardware for debug messages
+	 */
+	q_hdr->qhdr_rx_req = 0;
+	return rc;
+
+fail_dma_map:
+	dma_free_coherent(dev->res->mem_cdsp.dev, q_size, kvaddr, dma_handle);
+fail_dma_alloc:
+	return -ENOMEM;
+}
+
+static void __interface_queues_release(struct iris_hfi_device *device)
+{
+	int i;
+	struct cvp_hfi_mem_map_table *qdss;
+	struct cvp_hfi_mem_map *mem_map;
+	int num_entries = device->res->qdss_addr_set.count;
+	unsigned long mem_map_table_base_addr;
+	struct context_bank_info *cb;
+
+	if (device->qdss.align_virtual_addr) {
+		qdss = (struct cvp_hfi_mem_map_table *)
+			device->qdss.align_virtual_addr;
+		qdss->mem_map_num_entries = num_entries;
+		mem_map_table_base_addr =
+			device->qdss.align_device_addr +
+			sizeof(struct cvp_hfi_mem_map_table);
+		qdss->mem_map_table_base_addr =
+			(u32)mem_map_table_base_addr;
+		if ((unsigned long)qdss->mem_map_table_base_addr !=
+			mem_map_table_base_addr) {
+			dprintk(CVP_ERR,
+				"Invalid mem_map_table_base_addr %#lx",
+				mem_map_table_base_addr);
+		}
+
+		mem_map = (struct cvp_hfi_mem_map *)(qdss + 1);
+		cb = msm_cvp_smem_get_context_bank(false, device->res, 0);
+
+		for (i = 0; cb && i < num_entries; i++) {
+			iommu_unmap(cb->domain,
+						mem_map[i].virtual_addr,
+						mem_map[i].size);
+		}
+
+		__smem_free(device, &device->qdss.mem_data);
+	}
+
+	__smem_free(device, &device->iface_q_table.mem_data);
+	__smem_free(device, &device->sfr.mem_data);
+
+	for (i = 0; i < CVP_IFACEQ_NUMQ; i++) {
+		device->iface_queues[i].q_hdr = NULL;
+		device->iface_queues[i].q_array.align_virtual_addr = NULL;
+		device->iface_queues[i].q_array.align_device_addr = 0;
+	}
+
+	device->iface_q_table.align_virtual_addr = NULL;
+	device->iface_q_table.align_device_addr = 0;
+
+	device->qdss.align_virtual_addr = NULL;
+	device->qdss.align_device_addr = 0;
+
+	device->sfr.align_virtual_addr = NULL;
+	device->sfr.align_device_addr = 0;
+
+	device->mem_addr.align_virtual_addr = NULL;
+	device->mem_addr.align_device_addr = 0;
+
+	__interface_dsp_queues_release(device);
+}
+
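+/*
+ * Map each QDSS address range into the firmware's IOMMU domain at
+ * consecutive IOVAs starting from QDSS_IOVA_START, recording each
+ * mapping in mem_map[] for the firmware; without a domain the
+ * physical address is used as-is. On a partial failure, all
+ * previously created mappings are torn down again.
+ */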
+static int __get_qdss_iommu_virtual_addr(struct iris_hfi_device *dev,
+		struct cvp_hfi_mem_map *mem_map,
+		struct iommu_domain *domain)
+{
+	int i;
+	int rc = 0;
+	dma_addr_t iova = QDSS_IOVA_START;
+	int num_entries = dev->res->qdss_addr_set.count;
+	struct addr_range *qdss_addr_tbl = dev->res->qdss_addr_set.addr_tbl;
+
+	if (!num_entries)
+		return -ENODATA;
+
+	for (i = 0; i < num_entries; i++) {
+		if (domain) {
+			rc = iommu_map(domain, iova,
+					qdss_addr_tbl[i].start,
+					qdss_addr_tbl[i].size,
+					IOMMU_READ | IOMMU_WRITE);
+
+			if (rc) {
+				dprintk(CVP_ERR,
+						"IOMMU QDSS mapping failed for addr %#x\n",
+						qdss_addr_tbl[i].start);
+				rc = -ENOMEM;
+				break;
+			}
+		} else {
+			iova =  qdss_addr_tbl[i].start;
+		}
+
+		mem_map[i].virtual_addr = (u32)iova;
+		mem_map[i].physical_addr = qdss_addr_tbl[i].start;
+		mem_map[i].size = qdss_addr_tbl[i].size;
+		mem_map[i].attr = 0x0;
+
+		iova += mem_map[i].size;
+	}
+
+	if (i < num_entries) {
+		dprintk(CVP_ERR,
+			"QDSS mapping failed, Freeing other entries %d\n", i);
+
+		for (--i; domain && i >= 0; i--) {
+			iommu_unmap(domain,
+				mem_map[i].virtual_addr,
+				mem_map[i].size);
+		}
+	}
+
+	return rc;
+}
+
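+/*
+ * Program the uncached-region registers so the firmware can locate
+ * the shared structures: queue table address and size, plus (when
+ * allocated) the SFR and QDSS memory-map addresses. The DSP
+ * counterpart is programmed via the iris op at the end.
+ */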
+static void __setup_ucregion_memory_map(struct iris_hfi_device *device)
+{
+	__write_register(device, CVP_UC_REGION_ADDR,
+			(u32)device->iface_q_table.align_device_addr);
+	__write_register(device, CVP_UC_REGION_SIZE, SHARED_QSIZE);
+	__write_register(device, CVP_QTBL_ADDR,
+			(u32)device->iface_q_table.align_device_addr);
+	__write_register(device, CVP_QTBL_INFO, 0x01);
+	if (device->sfr.align_device_addr)
+		__write_register(device, CVP_SFR_ADDR,
+				(u32)device->sfr.align_device_addr);
+	if (device->qdss.align_device_addr)
+		__write_register(device, CVP_MMAP_ADDR,
+				(u32)device->qdss.align_device_addr);
+	call_iris_op(device, setup_dsp_uc_memmap, device);
+}
+
+static int __interface_queues_init(struct iris_hfi_device *dev)
+{
+	struct cvp_hfi_queue_table_header *q_tbl_hdr;
+	struct cvp_hfi_queue_header *q_hdr;
+	u32 i;
+	int rc = 0;
+	struct cvp_hfi_mem_map_table *qdss;
+	struct cvp_hfi_mem_map *mem_map;
+	struct cvp_iface_q_info *iface_q;
+	struct cvp_hfi_sfr_struct *vsfr;
+	struct cvp_mem_addr *mem_addr;
+	int offset = 0;
+	int num_entries = dev->res->qdss_addr_set.count;
+	phys_addr_t fw_bias = 0;
+	size_t q_size;
+	unsigned long mem_map_table_base_addr;
+	struct context_bank_info *cb;
+
+	q_size = SHARED_QSIZE - ALIGNED_SFR_SIZE - ALIGNED_QDSS_SIZE;
+	mem_addr = &dev->mem_addr;
+	if (!is_iommu_present(dev->res))
+		fw_bias = dev->cvp_hal_data->firmware_base;
+	rc = __smem_alloc(dev, mem_addr, q_size, 1, SMEM_UNCACHED);
+	if (rc) {
+		dprintk(CVP_ERR, "iface_q_table_alloc_fail\n");
+		goto fail_alloc_queue;
+	}
+
+	dev->iface_q_table.align_virtual_addr = mem_addr->align_virtual_addr;
+	dev->iface_q_table.align_device_addr = mem_addr->align_device_addr -
+					fw_bias;
+	dev->iface_q_table.mem_size = CVP_IFACEQ_TABLE_SIZE;
+	dev->iface_q_table.mem_data = mem_addr->mem_data;
+	offset += dev->iface_q_table.mem_size;
+
+	for (i = 0; i < CVP_IFACEQ_NUMQ; i++) {
+		iface_q = &dev->iface_queues[i];
+		iface_q->q_array.align_device_addr = mem_addr->align_device_addr
+			+ offset - fw_bias;
+		iface_q->q_array.align_virtual_addr =
+			mem_addr->align_virtual_addr + offset;
+		iface_q->q_array.mem_size = CVP_IFACEQ_QUEUE_SIZE;
+		offset += iface_q->q_array.mem_size;
+		iface_q->q_hdr = CVP_IFACEQ_GET_QHDR_START_ADDR(
+				dev->iface_q_table.align_virtual_addr, i);
+		__set_queue_hdr_defaults(iface_q->q_hdr);
+		spin_lock_init(&iface_q->hfi_lock);
+	}
+
+	if ((msm_cvp_fw_debug_mode & HFI_DEBUG_MODE_QDSS) && num_entries) {
+		rc = __smem_alloc(dev, mem_addr, ALIGNED_QDSS_SIZE, 1,
+				SMEM_UNCACHED);
+		if (rc) {
+			dprintk(CVP_WARN,
+				"qdss_alloc_fail: QDSS messages logging will not work\n");
+			dev->qdss.align_device_addr = 0;
+		} else {
+			dev->qdss.align_device_addr =
+				mem_addr->align_device_addr - fw_bias;
+			dev->qdss.align_virtual_addr =
+				mem_addr->align_virtual_addr;
+			dev->qdss.mem_size = ALIGNED_QDSS_SIZE;
+			dev->qdss.mem_data = mem_addr->mem_data;
+		}
+	}
+
+	rc = __smem_alloc(dev, mem_addr, ALIGNED_SFR_SIZE, 1, SMEM_UNCACHED);
+	if (rc) {
+		dprintk(CVP_WARN, "sfr_alloc_fail: SFR not will work\n");
+		dev->sfr.align_device_addr = 0;
+	} else {
+		dev->sfr.align_device_addr = mem_addr->align_device_addr -
+					fw_bias;
+		dev->sfr.align_virtual_addr = mem_addr->align_virtual_addr;
+		dev->sfr.mem_size = ALIGNED_SFR_SIZE;
+		dev->sfr.mem_data = mem_addr->mem_data;
+	}
+
+	q_tbl_hdr = (struct cvp_hfi_queue_table_header *)
+			dev->iface_q_table.align_virtual_addr;
+	q_tbl_hdr->qtbl_version = 0;
+	q_tbl_hdr->device_addr = (void *)dev;
+	strlcpy(q_tbl_hdr->name, "msm_cvp", sizeof(q_tbl_hdr->name));
+	q_tbl_hdr->qtbl_size = CVP_IFACEQ_TABLE_SIZE;
+	q_tbl_hdr->qtbl_qhdr0_offset =
+				sizeof(struct cvp_hfi_queue_table_header);
+	q_tbl_hdr->qtbl_qhdr_size = sizeof(struct cvp_hfi_queue_header);
+	q_tbl_hdr->qtbl_num_q = CVP_IFACEQ_NUMQ;
+	q_tbl_hdr->qtbl_num_active_q = CVP_IFACEQ_NUMQ;
+
+	iface_q = &dev->iface_queues[CVP_IFACEQ_CMDQ_IDX];
+	q_hdr = iface_q->q_hdr;
+	q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
+	q_hdr->qhdr_type |= HFI_Q_ID_HOST_TO_CTRL_CMD_Q;
+
+	iface_q = &dev->iface_queues[CVP_IFACEQ_MSGQ_IDX];
+	q_hdr = iface_q->q_hdr;
+	q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
+	q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_MSG_Q;
+
+	iface_q = &dev->iface_queues[CVP_IFACEQ_DBGQ_IDX];
+	q_hdr = iface_q->q_hdr;
+	q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
+	q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q;
+	/*
+	 * Set receive request to zero on the debug queue as there is no
+	 * need for an interrupt from the cvp hardware for debug messages
+	 */
+	q_hdr->qhdr_rx_req = 0;
+
+	if (dev->qdss.align_virtual_addr) {
+		qdss =
+		(struct cvp_hfi_mem_map_table *)dev->qdss.align_virtual_addr;
+		qdss->mem_map_num_entries = num_entries;
+		mem_map_table_base_addr = dev->qdss.align_device_addr +
+			sizeof(struct cvp_hfi_mem_map_table);
+		qdss->mem_map_table_base_addr = mem_map_table_base_addr;
+
+		mem_map = (struct cvp_hfi_mem_map *)(qdss + 1);
+		cb = msm_cvp_smem_get_context_bank(false, dev->res, 0);
+		if (!cb) {
+			dprintk(CVP_ERR,
+				"%s: failed to get context bank\n", __func__);
+			return -EINVAL;
+		}
+
+		rc = __get_qdss_iommu_virtual_addr(dev, mem_map, cb->domain);
+		if (rc) {
+			dprintk(CVP_ERR,
+				"IOMMU mapping failed, Freeing qdss memdata\n");
+			__smem_free(dev, &dev->qdss.mem_data);
+			dev->qdss.align_virtual_addr = NULL;
+			dev->qdss.align_device_addr = 0;
+		}
+	}
+
+	vsfr = (struct cvp_hfi_sfr_struct *) dev->sfr.align_virtual_addr;
+	if (vsfr)
+		vsfr->bufSize = ALIGNED_SFR_SIZE;
+
+	rc = __interface_dsp_queues_init(dev);
+	if (rc) {
+		dprintk(CVP_ERR, "dsp_queues_init failed\n");
+		goto fail_alloc_queue;
+	}
+
+	__setup_ucregion_memory_map(dev);
+	return 0;
+fail_alloc_queue:
+	return -ENOMEM;
+}
+
+static int __sys_set_debug(struct iris_hfi_device *device, u32 debug)
+{
+	u8 packet[CVP_IFACEQ_VAR_SMALL_PKT_SIZE];
+	int rc = 0;
+	struct cvp_hfi_cmd_sys_set_property_packet *pkt =
+		(struct cvp_hfi_cmd_sys_set_property_packet *) &packet;
+
+	rc = call_hfi_pkt_op(device, sys_debug_config, pkt, debug);
+	if (rc) {
+		dprintk(CVP_WARN,
+			"Debug mode setting to FW failed\n");
+		return -ENOTEMPTY;
+	}
+
+	if (__iface_cmdq_write(device, pkt))
+		return -ENOTEMPTY;
+	return 0;
+}
+
+static int __sys_set_idle_indicator(struct iris_hfi_device *device,
+	bool enable)
+{
+	u8 packet[CVP_IFACEQ_VAR_SMALL_PKT_SIZE];
+	int rc = 0;
+	struct cvp_hfi_cmd_sys_set_property_packet *pkt =
+		(struct cvp_hfi_cmd_sys_set_property_packet *) &packet;
+
+	rc = call_hfi_pkt_op(device, sys_set_idle_indicator, pkt, enable);
+	if (rc) {
+		dprintk(CVP_WARN,
+			"Idle indicator setting to FW failed\n");
+		return -ENOTEMPTY;
+	}
+
+	if (__iface_cmdq_write(device, pkt))
+		return -ENOTEMPTY;
+	return 0;
+}
+
+static int __sys_set_coverage(struct iris_hfi_device *device, u32 mode)
+{
+	u8 packet[CVP_IFACEQ_VAR_SMALL_PKT_SIZE];
+	int rc = 0;
+	struct cvp_hfi_cmd_sys_set_property_packet *pkt =
+		(struct cvp_hfi_cmd_sys_set_property_packet *) &packet;
+
+	rc = call_hfi_pkt_op(device, sys_coverage_config,
+			pkt, mode);
+	if (rc) {
+		dprintk(CVP_WARN,
+			"Coverage mode setting to FW failed\n");
+		return -ENOTEMPTY;
+	}
+
+	if (__iface_cmdq_write(device, pkt)) {
+		dprintk(CVP_WARN, "Failed to send coverage pkt to f/w\n");
+		return -ENOTEMPTY;
+	}
+
+	return 0;
+}
+
+static int __sys_set_power_control(struct iris_hfi_device *device,
+	bool enable)
+{
+	struct regulator_info *rinfo;
+	bool supported = false;
+	u8 packet[CVP_IFACEQ_VAR_SMALL_PKT_SIZE];
+	struct cvp_hfi_cmd_sys_set_property_packet *pkt =
+		(struct cvp_hfi_cmd_sys_set_property_packet *) &packet;
+
+	iris_hfi_for_each_regulator(device, rinfo) {
+		if (rinfo->has_hw_power_collapse) {
+			supported = true;
+			break;
+		}
+	}
+
+	if (!supported)
+		return 0;
+
+	call_hfi_pkt_op(device, sys_power_control, pkt, enable);
+	if (__iface_cmdq_write(device, pkt))
+		return -ENOTEMPTY;
+	return 0;
+}
+
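+/*
+ * Bring-up sequence: allocate a default TURBO bus vote, load and boot
+ * the firmware, set up the interface queues, then send SYS_INIT and
+ * the image-version query before applying the default debug, cache
+ * and UBWC settings. The DSP is handed the HFI queue only after the
+ * device lock is dropped.
+ */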
+static int iris_hfi_core_init(void *device)
+{
+	int rc = 0;
+	struct cvp_hfi_cmd_sys_init_packet pkt;
+	struct cvp_hfi_cmd_sys_get_property_packet version_pkt;
+	struct iris_hfi_device *dev;
+
+	if (!device) {
+		dprintk(CVP_ERR, "Invalid device\n");
+		return -ENODEV;
+	}
+
+	dev = device;
+
+	dprintk(CVP_CORE, "Core initializing\n");
+
+	mutex_lock(&dev->lock);
+
+	dev->bus_vote.data =
+		kzalloc(sizeof(struct cvp_bus_vote_data), GFP_KERNEL);
+	if (!dev->bus_vote.data) {
+		dprintk(CVP_ERR, "Bus vote data memory is not allocated\n");
+		rc = -ENOMEM;
+		goto err_no_mem;
+	}
+
+	dev->bus_vote.data_count = 1;
+	dev->bus_vote.data->power_mode = CVP_POWER_TURBO;
+
+	rc = __load_fw(dev);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to load Iris FW\n");
+		goto err_load_fw;
+	}
+
+	__set_state(dev, IRIS_STATE_INIT);
+	dev->reg_dumped = false;
+
+	dprintk(CVP_CORE, "Dev_Virt: %pa, Reg_Virt: %pK\n",
+		&dev->cvp_hal_data->firmware_base,
+		dev->cvp_hal_data->register_base);
+
+	rc = __interface_queues_init(dev);
+	if (rc) {
+		dprintk(CVP_ERR, "failed to init queues\n");
+		rc = -ENOMEM;
+		goto err_core_init;
+	}
+
+	rc = __boot_firmware(dev);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to start core\n");
+		rc = -ENODEV;
+		goto err_core_init;
+	}
+
+	dev->version = __read_register(dev, CVP_VERSION_INFO);
+
+	rc =  call_hfi_pkt_op(dev, sys_init, &pkt, 0);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to create sys init pkt\n");
+		goto err_core_init;
+	}
+
+	if (__iface_cmdq_write(dev, &pkt)) {
+		rc = -ENOTEMPTY;
+		goto err_core_init;
+	}
+
+	rc = call_hfi_pkt_op(dev, sys_image_version, &version_pkt);
+	if (rc || __iface_cmdq_write(dev, &version_pkt))
+		dprintk(CVP_WARN, "Failed to send image version pkt to f/w\n");
+
+	__sys_set_debug(device, msm_cvp_fw_debug);
+
+	__enable_subcaches(device);
+	__set_subcaches(device);
+
+	__set_ubwc_config(device);
+	__sys_set_idle_indicator(device, true);
+
+	if (dev->res->pm_qos_latency_us)
+		pm_qos_add_request(&dev->qos, PM_QOS_CPU_DMA_LATENCY,
+				dev->res->pm_qos_latency_us);
+
+	mutex_unlock(&dev->lock);
+
+	cvp_dsp_send_hfi_queue();
+
+	dprintk(CVP_CORE, "Core inited successfully\n");
+
+	return 0;
+err_core_init:
+	__set_state(dev, IRIS_STATE_DEINIT);
+	__unload_fw(dev);
+err_load_fw:
+err_no_mem:
+	dprintk(CVP_ERR, "Core init failed\n");
+	mutex_unlock(&dev->lock);
+	return rc;
+}
+
+static int iris_hfi_core_release(void *dev)
+{
+	int rc = 0;
+	struct iris_hfi_device *device = dev;
+	struct cvp_hal_session *session, *next;
+
+	if (!device) {
+		dprintk(CVP_ERR, "invalid device\n");
+		return -ENODEV;
+	}
+
+	mutex_lock(&device->lock);
+	dprintk(CVP_WARN, "Core releasing\n");
+	if (device->res->pm_qos_latency_us &&
+		pm_qos_request_active(&device->qos))
+		pm_qos_remove_request(&device->qos);
+
+	__resume(device);
+	__set_state(device, IRIS_STATE_DEINIT);
+
+	__dsp_shutdown(device, 0);
+
+	__unload_fw(device);
+
+	/* unlink all sessions from device */
+	list_for_each_entry_safe(session, next, &device->sess_head, list) {
+		list_del(&session->list);
+		session->device = NULL;
+	}
+
+	dprintk(CVP_CORE, "Core released successfully\n");
+	mutex_unlock(&device->lock);
+
+	return rc;
+}
+
+static void __core_clear_interrupt(struct iris_hfi_device *device)
+{
+	u32 intr_status = 0, mask = 0;
+
+	if (!device) {
+		dprintk(CVP_ERR, "%s: NULL device\n", __func__);
+		return;
+	}
+
+	intr_status = __read_register(device, CVP_WRAPPER_INTR_STATUS);
+	mask = (CVP_WRAPPER_INTR_MASK_A2HCPU_BMSK | CVP_FATAL_INTR_BMSK);
+
+	if (intr_status & mask) {
+		device->intr_status |= intr_status;
+		device->reg_count++;
+		dprintk(CVP_CORE,
+			"INTERRUPT for device: %pK: times: %d status: %d\n",
+			device, device->reg_count, intr_status);
+	} else {
+		device->spur_count++;
+	}
+
+	__write_register(device, CVP_CPU_CS_A2HSOFTINTCLR, 1);
+}
+
+static int iris_hfi_core_trigger_ssr(void *device,
+		enum hal_ssr_trigger_type type)
+{
+	struct cvp_hfi_cmd_sys_test_ssr_packet pkt;
+	int rc = 0;
+	struct iris_hfi_device *dev;
+
+	if (!device) {
+		dprintk(CVP_ERR, "invalid device\n");
+		return -ENODEV;
+	}
+
+	dev = device;
+	if (mutex_trylock(&dev->lock)) {
+		rc = call_hfi_pkt_op(dev, ssr_cmd, type, &pkt);
+		if (rc) {
+			dprintk(CVP_ERR, "%s: failed to create packet\n",
+					__func__);
+			goto err_create_pkt;
+		}
+
+		if (__iface_cmdq_write(dev, &pkt))
+			rc = -ENOTEMPTY;
+	} else {
+		return -EAGAIN;
+	}
+
+err_create_pkt:
+	mutex_unlock(&dev->lock);
+	return rc;
+}
+
+static void __set_default_sys_properties(struct iris_hfi_device *device)
+{
+	if (__sys_set_debug(device, msm_cvp_fw_debug))
+		dprintk(CVP_WARN, "Setting fw_debug msg ON failed\n");
+	if (__sys_set_power_control(device, msm_cvp_fw_low_power_mode))
+		dprintk(CVP_WARN, "Setting h/w power collapse ON failed\n");
+}
+
+static void __session_clean(struct cvp_hal_session *session)
+{
+	struct cvp_hal_session *temp, *next;
+	struct iris_hfi_device *device;
+
+	if (!session || !session->device) {
+		dprintk(CVP_WARN, "%s: invalid params\n", __func__);
+		return;
+	}
+	device = session->device;
+	dprintk(CVP_SESS, "deleted the session: %pK\n", session);
+	/*
+	 * session might have been removed from the device list in
+	 * core_release, so check and remove if it is in the list
+	 */
+	list_for_each_entry_safe(temp, next, &device->sess_head, list) {
+		if (session == temp) {
+			list_del(&session->list);
+			break;
+		}
+	}
+	/* Poison the session handle with zeros */
+	*session = (struct cvp_hal_session){ {0} };
+	kfree(session);
+}
+
+static int iris_hfi_session_clean(void *session)
+{
+	struct cvp_hal_session *sess_close;
+	struct iris_hfi_device *device;
+
+	if (!session) {
+		dprintk(CVP_ERR, "Invalid Params %s\n", __func__);
+		return -EINVAL;
+	}
+
+	sess_close = session;
+	device = sess_close->device;
+
+	if (!device) {
+		dprintk(CVP_ERR, "Invalid device handle %s\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&device->lock);
+
+	__session_clean(sess_close);
+
+	mutex_unlock(&device->lock);
+	return 0;
+}
+
+static int iris_hfi_session_init(void *device, void *session_id,
+		void **new_session)
+{
+	struct cvp_hfi_cmd_sys_session_init_packet pkt;
+	struct iris_hfi_device *dev;
+	struct cvp_hal_session *s;
+
+	if (!device || !new_session) {
+		dprintk(CVP_ERR, "%s - invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	dev = device;
+	mutex_lock(&dev->lock);
+
+	s = kzalloc(sizeof(*s), GFP_KERNEL);
+	if (!s) {
+		dprintk(CVP_ERR, "new session fail: Out of memory\n");
+		goto err_session_init_fail;
+	}
+
+	s->session_id = session_id;
+	s->device = dev;
+	dprintk(CVP_SESS,
+		"%s: inst %pK, session %pK\n", __func__, session_id, s);
+
+	list_add_tail(&s->list, &dev->sess_head);
+
+	__set_default_sys_properties(device);
+
+	if (call_hfi_pkt_op(dev, session_init, &pkt, s)) {
+		dprintk(CVP_ERR, "session_init: failed to create packet\n");
+		goto err_session_init_fail;
+	}
+
+	*new_session = s;
+	if (__iface_cmdq_write(dev, &pkt))
+		goto err_session_init_fail;
+
+	mutex_unlock(&dev->lock);
+	return 0;
+
+err_session_init_fail:
+	if (s)
+		__session_clean(s);
+	*new_session = NULL;
+	mutex_unlock(&dev->lock);
+	return -EINVAL;
+}
+
+static int __send_session_cmd(struct cvp_hal_session *session, int pkt_type)
+{
+	struct cvp_hal_session_cmd_pkt pkt;
+	int rc = 0;
+	struct iris_hfi_device *device = session->device;
+
+	if (!__is_session_valid(device, session, __func__))
+		return -ECONNRESET;
+
+	rc = call_hfi_pkt_op(device, session_cmd,
+			&pkt, pkt_type, session);
+	if (rc == -EPERM)
+		return 0;
+
+	if (rc) {
+		dprintk(CVP_ERR, "send session cmd: create pkt failed\n");
+		goto err_create_pkt;
+	}
+
+	if (__iface_cmdq_write(session->device, &pkt))
+		rc = -ENOTEMPTY;
+
+err_create_pkt:
+	return rc;
+}
+
+static int iris_hfi_session_end(void *session)
+{
+	struct cvp_hal_session *sess;
+	struct iris_hfi_device *device;
+	int rc = 0;
+
+	if (!session) {
+		dprintk(CVP_ERR, "Invalid Params %s\n", __func__);
+		return -EINVAL;
+	}
+
+	sess = session;
+	device = sess->device;
+	if (!device) {
+		dprintk(CVP_ERR, "Invalid session %s\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&device->lock);
+
+	if (msm_cvp_fw_coverage) {
+		if (__sys_set_coverage(sess->device, msm_cvp_fw_coverage))
+			dprintk(CVP_WARN, "Fw_coverage msg ON failed\n");
+	}
+
+	rc = __send_session_cmd(session, HFI_CMD_SYS_SESSION_END);
+
+	mutex_unlock(&device->lock);
+
+	return rc;
+}
+
+static int iris_hfi_session_abort(void *sess)
+{
+	struct cvp_hal_session *session = sess;
+	struct iris_hfi_device *device;
+	int rc = 0;
+
+	if (!session || !session->device) {
+		dprintk(CVP_ERR, "Invalid Params %s\n", __func__);
+		return -EINVAL;
+	}
+
+	device = session->device;
+
+	mutex_lock(&device->lock);
+
+	rc = __send_session_cmd(session, HFI_CMD_SYS_SESSION_ABORT);
+
+	mutex_unlock(&device->lock);
+
+	return rc;
+}
+
+static int iris_hfi_session_set_buffers(void *sess, u32 iova, u32 size)
+{
+	struct cvp_hfi_cmd_session_set_buffers_packet pkt;
+	int rc = 0;
+	struct cvp_hal_session *session = sess;
+	struct iris_hfi_device *device;
+
+	if (!session || !session->device || !iova || !size) {
+		dprintk(CVP_ERR, "Invalid Params\n");
+		return -EINVAL;
+	}
+
+	device = session->device;
+	mutex_lock(&device->lock);
+
+	if (!__is_session_valid(device, session, __func__)) {
+		rc = -ECONNRESET;
+		goto err_create_pkt;
+	}
+
+	rc = call_hfi_pkt_op(device, session_set_buffers,
+			&pkt, session, iova, size);
+	if (rc) {
+		dprintk(CVP_ERR, "set buffers: failed to create packet\n");
+		goto err_create_pkt;
+	}
+
+	if (__iface_cmdq_write(session->device, &pkt))
+		rc = -ENOTEMPTY;
+
+err_create_pkt:
+	mutex_unlock(&device->lock);
+	return rc;
+}
+
+static int iris_hfi_session_release_buffers(void *sess)
+{
+	struct cvp_session_release_buffers_packet pkt;
+	int rc = 0;
+	struct cvp_hal_session *session = sess;
+	struct iris_hfi_device *device;
+
+	if (!session || !session->device) {
+		dprintk(CVP_ERR, "Invalid Params\n");
+		return -EINVAL;
+	}
+
+	device = session->device;
+	mutex_lock(&device->lock);
+
+	if (!__is_session_valid(device, session, __func__)) {
+		rc = -ECONNRESET;
+		goto err_create_pkt;
+	}
+
+	rc = call_hfi_pkt_op(device, session_release_buffers, &pkt, session);
+	if (rc) {
+		dprintk(CVP_ERR, "release buffers: failed to create packet\n");
+		goto err_create_pkt;
+	}
+
+	if (__iface_cmdq_write(session->device, &pkt))
+		rc = -ENOTEMPTY;
+
+err_create_pkt:
+	mutex_unlock(&device->lock);
+	return rc;
+}
+
+static int iris_hfi_session_send(void *sess,
+		struct cvp_kmd_hfi_packet *in_pkt)
+{
+	int rc = 0;
+	struct cvp_kmd_hfi_packet pkt;
+	struct cvp_hal_session *session = sess;
+	struct iris_hfi_device *device;
+
+	if (!session || !session->device) {
+		dprintk(CVP_ERR, "invalid session");
+		return -ENODEV;
+	}
+
+	device = session->device;
+	mutex_lock(&device->lock);
+
+	if (!__is_session_valid(device, session, __func__)) {
+		rc = -ECONNRESET;
+		goto err_send_pkt;
+	}
+	rc = call_hfi_pkt_op(device, session_send,
+			&pkt, session, in_pkt);
+	if (rc) {
+		dprintk(CVP_ERR,
+				"failed to create pkt\n");
+		goto err_send_pkt;
+	}
+
+	if (__iface_cmdq_write(session->device, &pkt))
+		rc = -ENOTEMPTY;
+
+err_send_pkt:
+	mutex_unlock(&device->lock);
+	return rc;
+}
+
+static int iris_hfi_session_flush(void *sess)
+{
+	struct cvp_hal_session *session = sess;
+	struct iris_hfi_device *device;
+	int rc = 0;
+
+	if (!session || !session->device) {
+		dprintk(CVP_ERR, "Invalid Params %s\n", __func__);
+		return -EINVAL;
+	}
+
+	device = session->device;
+
+	mutex_lock(&device->lock);
+
+	rc = __send_session_cmd(session, HFI_CMD_SESSION_CVP_FLUSH);
+
+	mutex_unlock(&device->lock);
+
+	return rc;
+}
+
+static int __check_core_registered(struct iris_hfi_device *device,
+		phys_addr_t fw_addr, u8 *reg_addr, u32 reg_size,
+		phys_addr_t irq)
+{
+	struct cvp_hal_data *cvp_hal_data;
+
+	if (!device) {
+		dprintk(CVP_INFO, "no device Registered\n");
+		return -EINVAL;
+	}
+
+	cvp_hal_data = device->cvp_hal_data;
+	if (!cvp_hal_data)
+		return -EINVAL;
+
+	if (cvp_hal_data->irq == irq &&
+		(CONTAINS(cvp_hal_data->firmware_base,
+				FIRMWARE_SIZE, fw_addr) ||
+		CONTAINS(fw_addr, FIRMWARE_SIZE,
+				cvp_hal_data->firmware_base) ||
+		CONTAINS(cvp_hal_data->register_base,
+				reg_size, reg_addr) ||
+		CONTAINS(reg_addr, reg_size,
+				cvp_hal_data->register_base) ||
+		OVERLAPS(cvp_hal_data->register_base,
+				reg_size, reg_addr, reg_size) ||
+		OVERLAPS(reg_addr, reg_size,
+				cvp_hal_data->register_base,
+				reg_size) ||
+		OVERLAPS(cvp_hal_data->firmware_base,
+				FIRMWARE_SIZE, fw_addr,
+				FIRMWARE_SIZE) ||
+		OVERLAPS(fw_addr, FIRMWARE_SIZE,
+				cvp_hal_data->firmware_base,
+				FIRMWARE_SIZE))) {
+		return 0;
+	}
+
+	dprintk(CVP_INFO, "Device not registered\n");
+	return -EINVAL;
+}
+
+static void __process_fatal_error(
+		struct iris_hfi_device *device)
+{
+	struct msm_cvp_cb_cmd_done cmd_done = {0};
+
+	cmd_done.device_id = device->device_id;
+	device->callback(HAL_SYS_ERROR, &cmd_done);
+}
+
+static int __prepare_pc(struct iris_hfi_device *device)
+{
+	int rc = 0;
+	struct cvp_hfi_cmd_sys_pc_prep_packet pkt;
+
+	rc = call_hfi_pkt_op(device, sys_pc_prep, &pkt);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to create sys pc prep pkt\n");
+		goto err_pc_prep;
+	}
+
+	if (__iface_cmdq_write(device, &pkt))
+		rc = -ENOTEMPTY;
+	if (rc)
+		dprintk(CVP_ERR, "Failed to prepare iris for power off");
+err_pc_prep:
+	return rc;
+}
+
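+/*
+ * Delayed-work handler for power collapse. __power_collapse() returns
+ * 0 on success, -EBUSY when the core is busy (retry with the skip
+ * count reset), or -EAGAIN when the collapse had to be skipped (retry
+ * and bump skip_pc_count; after CVP_MAX_PC_SKIP_COUNT skips a fatal
+ * error is raised instead).
+ */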
+static void iris_hfi_pm_handler(struct work_struct *work)
+{
+	int rc = 0;
+	struct msm_cvp_core *core;
+	struct iris_hfi_device *device;
+
+	core = list_first_entry(&cvp_driver->cores, struct msm_cvp_core, list);
+	if (core)
+		device = core->device->hfi_device_data;
+	else
+		return;
+
+	if (!device) {
+		dprintk(CVP_ERR, "%s: NULL device\n", __func__);
+		return;
+	}
+
+	dprintk(CVP_PWR,
+		"Entering %s\n", __func__);
+	/*
+	 * It is ok to check this variable outside the lock since
+	 * it is being updated in this context only
+	 */
+	if (device->skip_pc_count >= CVP_MAX_PC_SKIP_COUNT) {
+		dprintk(CVP_WARN, "Failed to PC for %d times\n",
+				device->skip_pc_count);
+		device->skip_pc_count = 0;
+		__process_fatal_error(device);
+		return;
+	}
+
+	mutex_lock(&device->lock);
+	if (gfa_cv.state == DSP_SUSPEND)
+		rc = __power_collapse(device, true);
+	else
+		rc = __power_collapse(device, false);
+	mutex_unlock(&device->lock);
+	switch (rc) {
+	case 0:
+		device->skip_pc_count = 0;
+		/* Cancel pending delayed works if any */
+		cancel_delayed_work(&iris_hfi_pm_work);
+		dprintk(CVP_PWR, "%s: power collapse successful!\n",
+			__func__);
+		break;
+	case -EBUSY:
+		device->skip_pc_count = 0;
+		dprintk(CVP_PWR, "%s: retry PC as cvp is busy\n", __func__);
+		queue_delayed_work(device->iris_pm_workq,
+			&iris_hfi_pm_work, msecs_to_jiffies(
+			device->res->msm_cvp_pwr_collapse_delay));
+		break;
+	case -EAGAIN:
+		device->skip_pc_count++;
+		dprintk(CVP_WARN, "%s: retry power collapse (count %d)\n",
+			__func__, device->skip_pc_count);
+		queue_delayed_work(device->iris_pm_workq,
+			&iris_hfi_pm_work, msecs_to_jiffies(
+			device->res->msm_cvp_pwr_collapse_delay));
+		break;
+	default:
+		dprintk(CVP_ERR, "%s: power collapse failed\n", __func__);
+		break;
+	}
+}
+
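+/*
+ * Power-collapse handshake: unless the firmware already reports
+ * PC_READY, verify it is idle and in WFI, send the PC_PREP command,
+ * then poll until both WFI and PC_READY are observed before actually
+ * suspending. Any precondition failure returns -EAGAIN so the PM
+ * handler can retry later.
+ */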
+static int __power_collapse(struct iris_hfi_device *device, bool force)
+{
+	int rc = 0;
+	u32 wfi_status = 0, idle_status = 0, pc_ready = 0;
+	u32 flags = 0;
+	int count = 0;
+	const int max_tries = 150;
+
+	if (!device) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+	if (!device->power_enabled) {
+		dprintk(CVP_PWR, "%s: Power already disabled\n",
+				__func__);
+		goto exit;
+	}
+
+	rc = __core_in_valid_state(device);
+	if (!rc) {
+		dprintk(CVP_WARN,
+				"Core is in bad state, Skipping power collapse\n");
+		return -EINVAL;
+	}
+
+	rc = __dsp_suspend(device, force, flags);
+	if (rc == -EBUSY)
+		goto exit;
+	else if (rc)
+		goto skip_power_off;
+
+	pc_ready = __read_register(device, CVP_CTRL_STATUS) &
+		CVP_CTRL_STATUS_PC_READY;
+	if (!pc_ready) {
+		wfi_status = __read_register(device,
+				CVP_WRAPPER_CPU_STATUS);
+		idle_status = __read_register(device,
+				CVP_CTRL_STATUS);
+		if (!(wfi_status & BIT(0))) {
+			dprintk(CVP_WARN,
+				"Skipping PC as wfi_status (%#x) bit not set\n",
+				wfi_status);
+			goto skip_power_off;
+		}
+		if (!(idle_status & BIT(30))) {
+			dprintk(CVP_WARN,
+				"Skipping PC as idle_status (%#x) bit not set\n",
+				idle_status);
+			goto skip_power_off;
+		}
+
+		rc = __prepare_pc(device);
+		if (rc) {
+			dprintk(CVP_WARN, "Failed PC %d\n", rc);
+			goto skip_power_off;
+		}
+
+		while (count < max_tries) {
+			wfi_status = __read_register(device,
+					CVP_WRAPPER_CPU_STATUS);
+			pc_ready = __read_register(device,
+					CVP_CTRL_STATUS);
+			if ((wfi_status & BIT(0)) && (pc_ready &
+				CVP_CTRL_STATUS_PC_READY))
+				break;
+			usleep_range(150, 250);
+			count++;
+		}
+
+		if (count == max_tries) {
+			dprintk(CVP_ERR,
+					"Skip PC. Core is not in right state (%#x, %#x)\n",
+					wfi_status, pc_ready);
+			goto skip_power_off;
+		}
+	}
+
+	__flush_debug_queue(device, device->raw_packet);
+
+	rc = __suspend(device);
+	if (rc)
+		dprintk(CVP_ERR, "Failed __suspend\n");
+
+exit:
+	return rc;
+
+skip_power_off:
+	dprintk(CVP_PWR, "Skip PC(%#x, %#x, %#x)\n",
+		wfi_status, idle_status, pc_ready);
+	__flush_debug_queue(device, device->raw_packet);
+	return -EAGAIN;
+}
+
+static void __process_sys_error(struct iris_hfi_device *device)
+{
+	struct cvp_hfi_sfr_struct *vsfr = NULL;
+
+	vsfr = (struct cvp_hfi_sfr_struct *)device->sfr.align_virtual_addr;
+	if (vsfr) {
+		void *p = memchr(vsfr->rg_data, '\0', vsfr->bufSize);
+		/*
+		 * SFR isn't guaranteed to be NULL terminated
+		 * since SYS_ERROR indicates that Iris is in the
+		 * process of crashing.
+		 */
+		if (p == NULL)
+			vsfr->rg_data[vsfr->bufSize - 1] = '\0';
+
+		dprintk(CVP_ERR, "SFR Message from FW: %s\n",
+				vsfr->rg_data);
+	}
+}
+
+static void __flush_debug_queue(struct iris_hfi_device *device, u8 *packet)
+{
+	bool local_packet = false;
+	enum cvp_msg_prio log_level = CVP_FW;
+
+	if (!device) {
+		dprintk(CVP_ERR, "%s: Invalid params\n", __func__);
+		return;
+	}
+
+	if (!packet) {
+		packet = kzalloc(CVP_IFACEQ_VAR_HUGE_PKT_SIZE, GFP_KERNEL);
+		if (!packet) {
+			dprintk(CVP_ERR, "In %s() Fail to allocate mem\n",
+				__func__);
+			return;
+		}
+
+		local_packet = true;
+
+		/*
+		 * A local packet is used when something fatal has occurred.
+		 * It is good to print these logs by default.
+		 */
+
+		log_level = CVP_ERR;
+	}
+
+#define SKIP_INVALID_PKT(pkt_size, payload_size, pkt_hdr_size) ({ \
+		if (pkt_size < pkt_hdr_size || \
+			payload_size < MIN_PAYLOAD_SIZE || \
+			payload_size > \
+			(pkt_size - pkt_hdr_size + sizeof(u8))) { \
+			dprintk(CVP_ERR, \
+				"%s: invalid msg size - %d\n", \
+				__func__, pkt->msg_size); \
+			continue; \
+		} \
+	})
+
+	while (!__iface_dbgq_read(device, packet)) {
+		struct cvp_hfi_packet_header *pkt =
+			(struct cvp_hfi_packet_header *) packet;
+
+		if (pkt->size < sizeof(struct cvp_hfi_packet_header)) {
+			dprintk(CVP_ERR, "Invalid pkt size - %s\n",
+				__func__);
+			continue;
+		}
+
+		if (pkt->packet_type == HFI_MSG_SYS_DEBUG) {
+			struct cvp_hfi_msg_sys_debug_packet *pkt =
+				(struct cvp_hfi_msg_sys_debug_packet *) packet;
+
+			SKIP_INVALID_PKT(pkt->size,
+				pkt->msg_size, sizeof(*pkt));
+
+			/*
+			 * All fw messages start with a newline character.
+			 * This causes dprintk to print the message on two
+			 * lines in the kernel log. Skipping the first
+			 * character of the message makes it print on a
+			 * single line.
+			 */
+			pkt->rg_msg_data[pkt->msg_size-1] = '\0';
+			dprintk(log_level, "%s", &pkt->rg_msg_data[1]);
+		}
+	}
+#undef SKIP_INVALID_PKT
+
+	if (local_packet)
+		kfree(packet);
+}
+
+static bool __is_session_valid(struct iris_hfi_device *device,
+		struct cvp_hal_session *session, const char *func)
+{
+	struct cvp_hal_session *temp = NULL;
+
+	if (!device || !session)
+		goto invalid;
+
+	list_for_each_entry(temp, &device->sess_head, list)
+		if (session == temp)
+			return true;
+
+invalid:
+	dprintk(CVP_WARN, "%s: device %pK, invalid session %pK\n",
+			func, device, session);
+	return false;
+}
+
+static struct cvp_hal_session *__get_session(struct iris_hfi_device *device,
+		u32 session_id)
+{
+	struct cvp_hal_session *temp = NULL;
+
+	list_for_each_entry(temp, &device->sess_head, list) {
+		if (session_id == hash32_ptr(temp))
+			return temp;
+	}
+
+	return NULL;
+}
+
+#define _INVALID_MSG_ "Unrecognized MSG (%#x) session (%pK), discarding\n"
+#define _INVALID_STATE_ "Ignore responses from %d to %d invalid state\n"
+#define _DEVFREQ_FAIL_ "Failed to add devfreq device bus %s governor %s: %d\n"
+
+static void process_system_msg(struct msm_cvp_cb_info *info,
+		struct iris_hfi_device *device,
+		void *raw_packet)
+{
+	struct cvp_hal_sys_init_done sys_init_done = {0};
+
+	switch (info->response_type) {
+	case HAL_SYS_ERROR:
+		__process_sys_error(device);
+		break;
+	case HAL_SYS_RELEASE_RESOURCE_DONE:
+		dprintk(CVP_CORE, "Received SYS_RELEASE_RESOURCE\n");
+		break;
+	case HAL_SYS_INIT_DONE:
+		dprintk(CVP_CORE, "Received SYS_INIT_DONE\n");
+		sys_init_done.capabilities =
+			device->sys_init_capabilities;
+		cvp_hfi_process_sys_init_done_prop_read(
+			(struct cvp_hfi_msg_sys_init_done_packet *)
+				raw_packet, &sys_init_done);
+		info->response.cmd.data.sys_init_done = sys_init_done;
+		break;
+	default:
+		break;
+	}
+}
+
+static void **get_session_id(struct msm_cvp_cb_info *info)
+{
+	void **session_id = NULL;
+
+	/* For session-related packets, validate session */
+	switch (info->response_type) {
+	case HAL_SESSION_INIT_DONE:
+	case HAL_SESSION_END_DONE:
+	case HAL_SESSION_ABORT_DONE:
+	case HAL_SESSION_STOP_DONE:
+	case HAL_SESSION_FLUSH_DONE:
+	case HAL_SESSION_SET_BUFFER_DONE:
+	case HAL_SESSION_SUSPEND_DONE:
+	case HAL_SESSION_RESUME_DONE:
+	case HAL_SESSION_SET_PROP_DONE:
+	case HAL_SESSION_GET_PROP_DONE:
+	case HAL_SESSION_RELEASE_BUFFER_DONE:
+	case HAL_SESSION_REGISTER_BUFFER_DONE:
+	case HAL_SESSION_UNREGISTER_BUFFER_DONE:
+	case HAL_SESSION_DFS_CONFIG_CMD_DONE:
+	case HAL_SESSION_DME_CONFIG_CMD_DONE:
+	case HAL_SESSION_TME_CONFIG_CMD_DONE:
+	case HAL_SESSION_ODT_CONFIG_CMD_DONE:
+	case HAL_SESSION_OD_CONFIG_CMD_DONE:
+	case HAL_SESSION_NCC_CONFIG_CMD_DONE:
+	case HAL_SESSION_ICA_CONFIG_CMD_DONE:
+	case HAL_SESSION_HCD_CONFIG_CMD_DONE:
+	case HAL_SESSION_DCM_CONFIG_CMD_DONE:
+	case HAL_SESSION_DC_CONFIG_CMD_DONE:
+	case HAL_SESSION_PYS_HCD_CONFIG_CMD_DONE:
+	case HAL_SESSION_DME_BASIC_CONFIG_CMD_DONE:
+	case HAL_SESSION_DFS_FRAME_CMD_DONE:
+	case HAL_SESSION_DME_FRAME_CMD_DONE:
+	case HAL_SESSION_ICA_FRAME_CMD_DONE:
+	case HAL_SESSION_FD_FRAME_CMD_DONE:
+	case HAL_SESSION_PERSIST_SET_DONE:
+	case HAL_SESSION_PERSIST_REL_DONE:
+	case HAL_SESSION_FD_CONFIG_CMD_DONE:
+	case HAL_SESSION_MODEL_BUF_CMD_DONE:
+	case HAL_SESSION_PROPERTY_INFO:
+	case HAL_SESSION_EVENT_CHANGE:
+		session_id = &info->response.cmd.session_id;
+		break;
+	case HAL_SESSION_ERROR:
+		session_id = &info->response.data.session_id;
+		break;
+	case HAL_RESPONSE_UNUSED:
+	default:
+		session_id = NULL;
+		break;
+	}
+	return session_id;
+}
+
+static void print_msg_hdr(void *hdr)
+{
+	struct cvp_hfi_msg_session_hdr *new_hdr =
+			(struct cvp_hfi_msg_session_hdr *)hdr;
+	dprintk(CVP_HFI, "HFI MSG received: %x %x %x %x %x %x %x\n",
+			new_hdr->size, new_hdr->packet_type,
+			new_hdr->session_id,
+			new_hdr->client_data.transaction_id,
+			new_hdr->client_data.data1,
+			new_hdr->client_data.data2,
+			new_hdr->error_type);
+}
+
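+/*
+ * Drain the message queue into device->response_pkt and return the
+ * number of decoded packets. A fatal interrupt status short-circuits
+ * into a single watchdog-timeout callback; session ids coming back
+ * from the firmware are hash values and are translated back to
+ * host-side pointers here.
+ */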
+static int __response_handler(struct iris_hfi_device *device)
+{
+	struct msm_cvp_cb_info *packets;
+	int packet_count = 0;
+	u8 *raw_packet = NULL;
+	bool requeue_pm_work = true;
+
+	if (!device || device->state != IRIS_STATE_INIT)
+		return 0;
+
+	packets = device->response_pkt;
+
+	raw_packet = device->raw_packet;
+
+	if (!raw_packet || !packets) {
+		dprintk(CVP_ERR,
+			"%s: Invalid args : Res packet = %p, Raw packet = %p\n",
+			__func__, packets, raw_packet);
+		return 0;
+	}
+
+	if (device->intr_status & CVP_FATAL_INTR_BMSK) {
+		struct cvp_hfi_sfr_struct *vsfr = (struct cvp_hfi_sfr_struct *)
+			device->sfr.align_virtual_addr;
+		struct msm_cvp_cb_info info = {
+			.response_type = HAL_SYS_WATCHDOG_TIMEOUT,
+			.response.cmd = {
+				.device_id = device->device_id,
+			}
+		};
+
+		if (vsfr)
+			dprintk(CVP_ERR, "SFR Message from FW: %s\n",
+					vsfr->rg_data);
+		if (device->intr_status & CVP_WRAPPER_INTR_MASK_CPU_NOC_BMSK)
+			dprintk(CVP_ERR, "Received Xtensa NOC error\n");
+
+		if (device->intr_status & CVP_WRAPPER_INTR_MASK_CORE_NOC_BMSK)
+			dprintk(CVP_ERR, "Received CVP core NOC error\n");
+
+		if (device->intr_status & CVP_WRAPPER_INTR_MASK_A2HWD_BMSK)
+			dprintk(CVP_ERR, "Received CVP watchdog timeout\n");
+
+		packets[packet_count++] = info;
+		goto exit;
+	}
+
+	/* Bleed the msg queue dry of packets */
+	while (!__iface_msgq_read(device, raw_packet)) {
+		void **session_id = NULL;
+		struct msm_cvp_cb_info *info = &packets[packet_count++];
+		struct cvp_hfi_msg_session_hdr *hdr =
+			(struct cvp_hfi_msg_session_hdr *)raw_packet;
+		int rc = 0;
+
+		print_msg_hdr(hdr);
+		rc = cvp_hfi_process_msg_packet(device->device_id,
+					raw_packet, info);
+		if (rc) {
+			dprintk(CVP_WARN,
+				"Corrupt/unknown packet found, discarding\n");
+			--packet_count;
+			continue;
+		} else if (info->response_type == HAL_NO_RESP) {
+			--packet_count;
+			continue;
+		}
+
+		/* Process the packet types that we're interested in */
+		process_system_msg(info, device, raw_packet);
+
+		session_id = get_session_id(info);
+		/*
+		 * hfi_process_msg_packet provides a session_id that is a
+		 * hashed value of struct cvp_hal_session; we need to coerce
+		 * the hashed value back to a pointer that we can use.
+		 * Ideally, hfi_process_msg_packet should take care of this,
+		 * but it doesn't have the required information for it.
+		 */
+		if (session_id) {
+			struct cvp_hal_session *session = NULL;
+
+			if (upper_32_bits((uintptr_t)*session_id) != 0) {
+				dprintk(CVP_ERR,
+					"Upper 32-bits != 0 for sess_id=%pK\n",
+					*session_id);
+			}
+			session = __get_session(device,
+					(u32)(uintptr_t)*session_id);
+			if (!session) {
+				dprintk(CVP_ERR, _INVALID_MSG_,
+						info->response_type,
+						*session_id);
+				--packet_count;
+				continue;
+			}
+
+			*session_id = session->session_id;
+		}
+
+		if (packet_count >= cvp_max_packets) {
+			dprintk(CVP_WARN,
+				"Too many packets in message queue!\n");
+			break;
+		}
+
+		/* do not read packets after sys error packet */
+		if (info->response_type == HAL_SYS_ERROR)
+			break;
+	}
+
+	if (requeue_pm_work && device->res->sw_power_collapsible) {
+		cancel_delayed_work(&iris_hfi_pm_work);
+		if (!queue_delayed_work(device->iris_pm_workq,
+			&iris_hfi_pm_work,
+			msecs_to_jiffies(
+				device->res->msm_cvp_pwr_collapse_delay))) {
+			dprintk(CVP_ERR, "PM work already scheduled\n");
+		}
+	}
+
+exit:
+	__flush_debug_queue(device, raw_packet);
+
+	return packet_count;
+}
+
+static void iris_hfi_core_work_handler(struct work_struct *work)
+{
+	struct msm_cvp_core *core;
+	struct iris_hfi_device *device;
+	int num_responses = 0, i = 0;
+	u32 intr_status;
+
+	core = list_first_entry(&cvp_driver->cores, struct msm_cvp_core, list);
+	if (core)
+		device = core->device->hfi_device_data;
+	else
+		return;
+
+	mutex_lock(&device->lock);
+
+	if (!__core_in_valid_state(device)) {
+		dprintk(CVP_WARN, "%s - Core not in init state\n", __func__);
+		goto err_no_work;
+	}
+
+	if (!device->callback) {
+		dprintk(CVP_ERR, "No interrupt callback function: %pK\n",
+				device);
+		goto err_no_work;
+	}
+
+	if (__resume(device)) {
+		dprintk(CVP_ERR, "%s: Power enable failed\n", __func__);
+		goto err_no_work;
+	}
+
+	__core_clear_interrupt(device);
+	num_responses = __response_handler(device);
+	dprintk(CVP_HFI, "%s:: cvp_driver_debug num_responses = %d ",
+		__func__, num_responses);
+
+err_no_work:
+
+	/* Keep the interrupt status before releasing device lock */
+	intr_status = device->intr_status;
+	mutex_unlock(&device->lock);
+
+	/*
+	 * Issue the callbacks outside of the locked context to preserve
+	 * re-entrancy.
+	 */
+
+	for (i = 0; !IS_ERR_OR_NULL(device->response_pkt) &&
+		i < num_responses; ++i) {
+		struct msm_cvp_cb_info *r = &device->response_pkt[i];
+		void *rsp = (void *)&r->response;
+
+		if (!__core_in_valid_state(device)) {
+			dprintk(CVP_ERR,
+				_INVALID_STATE_, (i + 1), num_responses);
+			break;
+		}
+		dprintk(CVP_HFI, "Processing response %d of %d, type %d\n",
+			(i + 1), num_responses, r->response_type);
+		device->callback(r->response_type, rsp);
+	}
+
+	/* We need re-enable the irq which was disabled in ISR handler */
+	if (!(intr_status & CVP_WRAPPER_INTR_STATUS_A2HWD_BMSK))
+		enable_irq(device->cvp_hal_data->irq);
+
+	/*
+	 * XXX: Don't add any code beyond here.  Reacquiring locks after release
+	 * it above doesn't guarantee the atomicity that we're aiming for.
+	 */
+}
+
+static DECLARE_WORK(iris_hfi_work, iris_hfi_core_work_handler);
+
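+/*
+ * The top half only masks the interrupt and defers to the core work
+ * handler above, which reads and clears the status; the handler
+ * re-enables the IRQ once the responses have been dispatched.
+ */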
+static irqreturn_t iris_hfi_isr(int irq, void *dev)
+{
+	struct iris_hfi_device *device = dev;
+
+	disable_irq_nosync(irq);
+	queue_work(device->cvp_workq, &iris_hfi_work);
+	return IRQ_HANDLED;
+}
+
+static int __init_regs_and_interrupts(struct iris_hfi_device *device,
+		struct msm_cvp_platform_resources *res)
+{
+	struct cvp_hal_data *hal = NULL;
+	int rc = 0;
+
+	rc = __check_core_registered(device, res->firmware_base,
+			(u8 *)(uintptr_t)res->register_base,
+			res->register_size, res->irq);
+	if (!rc) {
+		dprintk(CVP_ERR, "Core present/Already added\n");
+		rc = -EEXIST;
+		goto err_core_init;
+	}
+
+	hal = kzalloc(sizeof(*hal), GFP_KERNEL);
+	if (!hal) {
+		dprintk(CVP_ERR, "Failed to alloc\n");
+		rc = -ENOMEM;
+		goto err_core_init;
+	}
+
+	hal->irq = res->irq;
+	hal->firmware_base = res->firmware_base;
+	hal->register_base = devm_ioremap_nocache(&res->pdev->dev,
+			res->register_base, res->register_size);
+	hal->register_size = res->register_size;
+	if (!hal->register_base) {
+		dprintk(CVP_ERR,
+			"could not map reg addr %pa of size %d\n",
+			&res->register_base, res->register_size);
+		goto error_irq_fail;
+	}
+
+	device->cvp_hal_data = hal;
+	rc = request_irq(res->irq, iris_hfi_isr, IRQF_TRIGGER_HIGH,
+			"msm_cvp", device);
+	if (unlikely(rc)) {
+		dprintk(CVP_ERR, "() :request_irq failed\n");
+		goto error_irq_fail;
+	}
+
+	disable_irq_nosync(res->irq);
+	dprintk(CVP_INFO,
+		"firmware_base = %pa, register_base = %pa, register_size = %d\n",
+		&res->firmware_base, &res->register_base,
+		res->register_size);
+	return rc;
+
+error_irq_fail:
+	kfree(hal);
+err_core_init:
+	return rc;
+}
+
+static inline void __deinit_clocks(struct iris_hfi_device *device)
+{
+	struct clock_info *cl;
+
+	device->clk_freq = 0;
+	iris_hfi_for_each_clock_reverse(device, cl) {
+		if (cl->clk) {
+			clk_put(cl->clk);
+			cl->clk = NULL;
+		}
+	}
+}
+
+static inline int __init_clocks(struct iris_hfi_device *device)
+{
+	int rc = 0;
+	struct clock_info *cl = NULL;
+
+	if (!device) {
+		dprintk(CVP_ERR, "Invalid params: %pK\n", device);
+		return -EINVAL;
+	}
+
+	iris_hfi_for_each_clock(device, cl) {
+
+		dprintk(CVP_PWR, "%s: scalable? %d, count %d\n",
+				cl->name, cl->has_scaling, cl->count);
+	}
+
+	iris_hfi_for_each_clock(device, cl) {
+		if (!cl->clk) {
+			cl->clk = clk_get(&device->res->pdev->dev, cl->name);
+			if (IS_ERR_OR_NULL(cl->clk)) {
+				dprintk(CVP_ERR,
+					"Failed to get clock: %s\n", cl->name);
+				rc = PTR_ERR(cl->clk) ?: -EINVAL;
+				cl->clk = NULL;
+				goto err_clk_get;
+			}
+		}
+	}
+	device->clk_freq = 0;
+	return 0;
+
+err_clk_get:
+	__deinit_clocks(device);
+	return rc;
+}
+
+static int __handle_reset_clk(struct msm_cvp_platform_resources *res,
+			int reset_index, enum reset_state state,
+			enum power_state pwr_state)
+{
+	int rc = 0;
+	struct reset_control *rst;
+	struct reset_info rst_info;
+	struct reset_set *rst_set = &res->reset_set;
+
+	if (!rst_set->reset_tbl)
+		return 0;
+
+	rst_info = rst_set->reset_tbl[reset_index];
+	rst = rst_info.rst;
+	dprintk(CVP_PWR, "reset_clk: name %s reset_state %d rst %pK ps=%d\n",
+		rst_set->reset_tbl[reset_index].name, state, rst, pwr_state);
+
+	switch (state) {
+	case INIT:
+		if (rst)
+			goto skip_reset_init;
+
+		rst = devm_reset_control_get(&res->pdev->dev,
+				rst_set->reset_tbl[reset_index].name);
+		if (IS_ERR(rst))
+			rc = PTR_ERR(rst);
+
+		rst_set->reset_tbl[reset_index].rst = rst;
+		break;
+	case ASSERT:
+		if (!rst) {
+			rc = PTR_ERR(rst);
+			goto failed_to_reset;
+		}
+
+		if (pwr_state != rst_info.required_state)
+			break;
+
+		rc = reset_control_assert(rst);
+		break;
+	case DEASSERT:
+		if (!rst) {
+			rc = PTR_ERR(rst);
+			goto failed_to_reset;
+		}
+
+		if (pwr_state != rst_info.required_state)
+			break;
+
+		rc = reset_control_deassert(rst);
+		break;
+	default:
+		dprintk(CVP_ERR, "Invalid reset request\n");
+		if (rc)
+			goto failed_to_reset;
+	}
+
+	return 0;
+
+skip_reset_init:
+failed_to_reset:
+	return rc;
+}
+
+static inline void __disable_unprepare_clks(struct iris_hfi_device *device)
+{
+	struct clock_info *cl;
+
+	if (!device) {
+		dprintk(CVP_ERR, "Invalid params: %pK\n", device);
+		return;
+	}
+
+	iris_hfi_for_each_clock_reverse(device, cl) {
+		dprintk(CVP_PWR, "Clock: %s disable and unprepare\n",
+				cl->name);
+		clk_disable_unprepare(cl->clk);
+	}
+}
+
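+/*
+ * Pulse every reset line in the reset set: assert, wait 400-450 us,
+ * then deassert. Resets whose required power state differs from the
+ * current one are skipped inside __handle_reset_clk().
+ */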
+static int reset_ahb2axi_bridge(struct iris_hfi_device *device)
+{
+	int rc, i;
+	enum power_state s;
+
+	if (!device) {
+		dprintk(CVP_ERR, "NULL device\n");
+		rc = -EINVAL;
+		goto failed_to_reset;
+	}
+
+	if (device->power_enabled)
+		s = CVP_POWER_ON;
+	else
+		s = CVP_POWER_OFF;
+
+	for (i = 0; i < device->res->reset_set.count; i++) {
+		rc = __handle_reset_clk(device->res, i, ASSERT, s);
+		if (rc) {
+			dprintk(CVP_ERR,
+				"failed to assert reset clocks\n");
+			goto failed_to_reset;
+		}
+
+		/* hold the assert briefly before deasserting */
+		usleep_range(400, 450);
+
+		rc = __handle_reset_clk(device->res, i, DEASSERT, s);
+		if (rc) {
+			dprintk(CVP_ERR,
+				"failed to deassert reset clocks\n");
+			goto failed_to_reset;
+		}
+	}
+
+	return 0;
+
+failed_to_reset:
+	return rc;
+}
+
+static inline int __prepare_enable_clks(struct iris_hfi_device *device)
+{
+	struct clock_info *cl = NULL;
+	int rc = 0, c = 0;
+
+	if (!device) {
+		dprintk(CVP_ERR, "Invalid params: %pK\n", device);
+		return -EINVAL;
+	}
+
+	iris_hfi_for_each_clock(device, cl) {
+		/*
+		 * For the clocks we control, set the rate prior to preparing
+		 * them.  Since we don't really have a load at this point, scale
+		 * it to the lowest frequency possible
+		 */
+		if (cl->has_scaling)
+			clk_set_rate(cl->clk, clk_round_rate(cl->clk, 0));
+
+		rc = clk_prepare_enable(cl->clk);
+		if (rc) {
+			dprintk(CVP_ERR, "Failed to enable clocks\n");
+			goto fail_clk_enable;
+		}
+
+		c++;
+		dprintk(CVP_PWR, "Clock: %s prepared and enabled\n", cl->name);
+	}
+
+	return rc;
+
+fail_clk_enable:
+	iris_hfi_for_each_clock_reverse_continue(device, cl, c) {
+		dprintk(CVP_ERR, "Clock: %s disable and unprepare\n",
+			cl->name);
+		clk_disable_unprepare(cl->clk);
+	}
+
+	return rc;
+}
+
+static void __deinit_bus(struct iris_hfi_device *device)
+{
+	struct bus_info *bus = NULL;
+
+	if (!device)
+		return;
+
+	kfree(device->bus_vote.data);
+	device->bus_vote = CVP_DEFAULT_BUS_VOTE;
+
+	iris_hfi_for_each_bus_reverse(device, bus) {
+		dev_set_drvdata(bus->dev, NULL);
+		icc_put(bus->client);
+		bus->client = NULL;
+	}
+}
+
+static int __init_bus(struct iris_hfi_device *device)
+{
+	struct bus_info *bus = NULL;
+	int rc = 0;
+
+	if (!device)
+		return -EINVAL;
+
+	iris_hfi_for_each_bus(device, bus) {
+		/*
+		 * This is stupid, but there's no other easy way to get hold
+		 * of struct bus_info in iris_hfi_devfreq_*()
+		 */
+		WARN(dev_get_drvdata(bus->dev), "%s's drvdata already set\n",
+				dev_name(bus->dev));
+		dev_set_drvdata(bus->dev, device);
+		bus->client = icc_get(&device->res->pdev->dev,
+				bus->master, bus->slave);
+		if (IS_ERR_OR_NULL(bus->client)) {
+			rc = PTR_ERR(bus->client) ?: -EBADHANDLE;
+			dprintk(CVP_ERR, "Failed to register bus %s: %d\n",
+					bus->name, rc);
+			bus->client = NULL;
+			goto err_add_dev;
+		}
+	}
+
+	return 0;
+
+err_add_dev:
+	__deinit_bus(device);
+	return rc;
+}
+
+static void __deinit_regulators(struct iris_hfi_device *device)
+{
+	struct regulator_info *rinfo = NULL;
+
+	iris_hfi_for_each_regulator_reverse(device, rinfo) {
+		if (rinfo->regulator) {
+			regulator_put(rinfo->regulator);
+			rinfo->regulator = NULL;
+		}
+	}
+}
+
+static int __init_regulators(struct iris_hfi_device *device)
+{
+	int rc = 0;
+	struct regulator_info *rinfo = NULL;
+
+	iris_hfi_for_each_regulator(device, rinfo) {
+		rinfo->regulator = regulator_get(&device->res->pdev->dev,
+				rinfo->name);
+		if (IS_ERR_OR_NULL(rinfo->regulator)) {
+			rc = PTR_ERR(rinfo->regulator) ?: -EBADHANDLE;
+			dprintk(CVP_ERR, "Failed to get regulator: %s\n",
+					rinfo->name);
+			rinfo->regulator = NULL;
+			goto err_reg_get;
+		}
+	}
+
+	return 0;
+
+err_reg_get:
+	__deinit_regulators(device);
+	return rc;
+}
+
+static void __deinit_subcaches(struct iris_hfi_device *device)
+{
+	struct subcache_info *sinfo = NULL;
+
+	if (!device) {
+		dprintk(CVP_ERR, "deinit_subcaches: invalid device %pK\n",
+			device);
+		goto exit;
+	}
+
+	if (!is_sys_cache_present(device))
+		goto exit;
+
+	iris_hfi_for_each_subcache_reverse(device, sinfo) {
+		if (sinfo->subcache) {
+			dprintk(CVP_CORE, "deinit_subcaches: %s\n",
+				sinfo->name);
+			llcc_slice_putd(sinfo->subcache);
+			sinfo->subcache = NULL;
+		}
+	}
+
+exit:
+	return;
+}
+
+static int __init_subcaches(struct iris_hfi_device *device)
+{
+	int rc = 0;
+	struct subcache_info *sinfo = NULL;
+
+	if (!device) {
+		dprintk(CVP_ERR, "init_subcaches: invalid device %pK\n",
+			device);
+		return -EINVAL;
+	}
+
+	if (!is_sys_cache_present(device))
+		return 0;
+
+	iris_hfi_for_each_subcache(device, sinfo) {
+		if (!strcmp("cvp", sinfo->name)) {
+			sinfo->subcache = llcc_slice_getd(LLCC_CVP);
+		} else if (!strcmp("cvpfw", sinfo->name)) {
+			sinfo->subcache = llcc_slice_getd(LLCC_CVPFW);
+		} else {
+			dprintk(CVP_ERR, "Invalid subcache name %s\n",
+					sinfo->name);
+		}
+		if (IS_ERR_OR_NULL(sinfo->subcache)) {
+			rc = PTR_ERR(sinfo->subcache) ?: -EBADHANDLE;
+			dprintk(CVP_ERR,
+				 "init_subcaches: invalid subcache: %s rc %d\n",
+				sinfo->name, rc);
+			sinfo->subcache = NULL;
+			goto err_subcache_get;
+		}
+		dprintk(CVP_CORE, "init_subcaches: %s\n",
+			sinfo->name);
+	}
+
+	return 0;
+
+err_subcache_get:
+	__deinit_subcaches(device);
+	return rc;
+}
+
+static int __init_resources(struct iris_hfi_device *device,
+				struct msm_cvp_platform_resources *res)
+{
+	int i, rc = 0;
+
+	rc = __init_regulators(device);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to get all regulators\n");
+		return -ENODEV;
+	}
+
+	rc = __init_clocks(device);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to init clocks\n");
+		rc = -ENODEV;
+		goto err_init_clocks;
+	}
+
+	for (i = 0; i < device->res->reset_set.count; i++) {
+		rc = __handle_reset_clk(res, i, INIT, 0);
+		if (rc) {
+			dprintk(CVP_ERR, "Failed to init reset clocks\n");
+			rc = -ENODEV;
+			goto err_init_reset_clk;
+		}
+	}
+
+	rc = __init_bus(device);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to init bus: %d\n", rc);
+		goto err_init_bus;
+	}
+
+	rc = __init_subcaches(device);
+	if (rc)
+		dprintk(CVP_WARN, "Failed to init subcaches: %d\n", rc);
+
+	device->sys_init_capabilities =
+		kzalloc(sizeof(struct msm_cvp_capability)
+		* CVP_MAX_SESSIONS, GFP_KERNEL);
+
+	return rc;
+
+err_init_reset_clk:
+err_init_bus:
+	__deinit_clocks(device);
+err_init_clocks:
+	__deinit_regulators(device);
+	return rc;
+}
+
+static void __deinit_resources(struct iris_hfi_device *device)
+{
+	__deinit_subcaches(device);
+	__deinit_bus(device);
+	__deinit_clocks(device);
+	__deinit_regulators(device);
+	kfree(device->sys_init_capabilities);
+	device->sys_init_capabilities = NULL;
+}
+
+static int __protect_cp_mem(struct iris_hfi_device *device)
+{
+	return device ? 0 : -EINVAL;
+}
+
+static int __disable_regulator(struct regulator_info *rinfo,
+				struct iris_hfi_device *device)
+{
+	int rc = 0;
+
+	dprintk(CVP_PWR, "Disabling regulator %s\n", rinfo->name);
+
+	/*
+	 * This call is needed: the driver must take control back from the
+	 * hardware before it can disable the regulator. Otherwise the
+	 * behavior is undefined.
+	 */
+
+	rc = __acquire_regulator(rinfo, device);
+	if (rc) {
+		/*
+		 * This is somewhat fatal, but nothing we can do
+		 * about it. We can't disable the regulator w/o
+		 * getting it back under s/w control
+		 */
+		dprintk(CVP_WARN,
+			"Failed to acquire control on %s\n",
+			rinfo->name);
+
+		goto disable_regulator_failed;
+	}
+
+	rc = regulator_disable(rinfo->regulator);
+	if (rc) {
+		dprintk(CVP_WARN,
+			"Failed to disable %s: %d\n",
+			rinfo->name, rc);
+		goto disable_regulator_failed;
+	}
+
+	return 0;
+disable_regulator_failed:
+
+	/* Bring attention to this issue */
+	msm_cvp_res_handle_fatal_hw_error(device->res, true);
+	return rc;
+}
+
+static int __enable_hw_power_collapse(struct iris_hfi_device *device)
+{
+	int rc = 0;
+
+	if (!msm_cvp_fw_low_power_mode) {
+		dprintk(CVP_PWR, "Not enabling hardware power collapse\n");
+		return 0;
+	}
+
+	rc = __hand_off_regulators(device);
+	if (rc)
+		dprintk(CVP_WARN,
+			"%s : Failed to enable HW power collapse %d\n",
+				__func__, rc);
+	return rc;
+}
+
+static int __enable_regulators(struct iris_hfi_device *device)
+{
+	int rc = 0, c = 0;
+	struct regulator_info *rinfo;
+
+	dprintk(CVP_PWR, "Enabling regulators\n");
+
+	iris_hfi_for_each_regulator(device, rinfo) {
+		rc = regulator_enable(rinfo->regulator);
+		if (rc) {
+			dprintk(CVP_ERR, "Failed to enable %s: %d\n",
+					rinfo->name, rc);
+			goto err_reg_enable_failed;
+		}
+
+		dprintk(CVP_PWR, "Enabled regulator %s\n", rinfo->name);
+		c++;
+	}
+
+	return 0;
+
+err_reg_enable_failed:
+	iris_hfi_for_each_regulator_reverse_continue(device, rinfo, c)
+		__disable_regulator(rinfo, device);
+
+	return rc;
+}
+
+static int __disable_regulators(struct iris_hfi_device *device)
+{
+	struct regulator_info *rinfo;
+
+	dprintk(CVP_PWR, "Disabling regulators\n");
+
+	iris_hfi_for_each_regulator_reverse(device, rinfo) {
+		__disable_regulator(rinfo, device);
+		if (rinfo->has_hw_power_collapse)
+			regulator_set_mode(rinfo->regulator,
+				REGULATOR_MODE_NORMAL);
+	}
+
+	return 0;
+}
+
+static int __enable_subcaches(struct iris_hfi_device *device)
+{
+	int rc = 0;
+	u32 c = 0;
+	struct subcache_info *sinfo;
+
+	if (msm_cvp_syscache_disable || !is_sys_cache_present(device))
+		return 0;
+
+	/* Activate subcaches */
+	iris_hfi_for_each_subcache(device, sinfo) {
+		rc = llcc_slice_activate(sinfo->subcache);
+		if (rc) {
+			dprintk(CVP_WARN, "Failed to activate %s: %d\n",
+				sinfo->name, rc);
+			msm_cvp_res_handle_fatal_hw_error(device->res, true);
+			goto err_activate_fail;
+		}
+		sinfo->isactive = true;
+		dprintk(CVP_CORE, "Activated subcache %s\n", sinfo->name);
+		c++;
+	}
+
+	dprintk(CVP_CORE, "Activated %d Subcaches to CVP\n", c);
+
+	return 0;
+
+err_activate_fail:
+	__release_subcaches(device);
+	__disable_subcaches(device);
+	return 0;
+}
+
+static int __set_subcaches(struct iris_hfi_device *device)
+{
+	int rc = 0;
+	u32 c = 0;
+	struct subcache_info *sinfo;
+	u32 resource[CVP_MAX_SUBCACHE_SIZE];
+	struct cvp_hfi_resource_syscache_info_type *sc_res_info;
+	struct cvp_hfi_resource_subcache_type *sc_res;
+	struct cvp_resource_hdr rhdr;
+
+	if (device->res->sys_cache_res_set || msm_cvp_syscache_disable) {
+		dprintk(CVP_CORE, "Subcaches already set or disabled\n");
+		return 0;
+	}
+
+	memset((void *)resource, 0x0, (sizeof(u32) * CVP_MAX_SUBCACHE_SIZE));
+
+	sc_res_info = (struct cvp_hfi_resource_syscache_info_type *)resource;
+	sc_res = &(sc_res_info->rg_subcache_entries[0]);
+
+	iris_hfi_for_each_subcache(device, sinfo) {
+		if (sinfo->isactive) {
+			sc_res[c].size = sinfo->subcache->slice_size;
+			sc_res[c].sc_id = sinfo->subcache->slice_id;
+			c++;
+		}
+	}
+
+	/* Set resource to CVP for activated subcaches */
+	if (c) {
+		dprintk(CVP_CORE, "Setting %d Subcaches\n", c);
+
+		rhdr.resource_handle = sc_res_info; /* cookie */
+		rhdr.resource_id = CVP_RESOURCE_SYSCACHE;
+
+		sc_res_info->num_entries = c;
+
+		rc = __core_set_resource(device, &rhdr, (void *)sc_res_info);
+		if (rc) {
+			dprintk(CVP_WARN, "Failed to set subcaches %d\n", rc);
+			goto err_fail_set_subcaches;
+		}
+
+		iris_hfi_for_each_subcache(device, sinfo) {
+			if (sinfo->isactive)
+				sinfo->isset = true;
+		}
+
+		dprintk(CVP_CORE, "Set Subcaches done to CVP\n");
+		device->res->sys_cache_res_set = true;
+	}
+
+	return 0;
+
+err_fail_set_subcaches:
+	__disable_subcaches(device);
+
+	return 0;
+}
+
+static int __release_subcaches(struct iris_hfi_device *device)
+{
+	struct subcache_info *sinfo;
+	int rc = 0;
+	u32 c = 0;
+	u32 resource[CVP_MAX_SUBCACHE_SIZE];
+	struct cvp_hfi_resource_syscache_info_type *sc_res_info;
+	struct cvp_hfi_resource_subcache_type *sc_res;
+	struct cvp_resource_hdr rhdr;
+
+	if (msm_cvp_syscache_disable || !is_sys_cache_present(device))
+		return 0;
+
+	memset((void *)resource, 0x0, (sizeof(u32) * CVP_MAX_SUBCACHE_SIZE));
+
+	sc_res_info = (struct cvp_hfi_resource_syscache_info_type *)resource;
+	sc_res = &(sc_res_info->rg_subcache_entries[0]);
+
+	/* Release resource command to Iris */
+	iris_hfi_for_each_subcache_reverse(device, sinfo) {
+		if (sinfo->isset) {
+			/* Update the entry */
+			sc_res[c].size = sinfo->subcache->slice_size;
+			sc_res[c].sc_id = sinfo->subcache->slice_id;
+			c++;
+			sinfo->isset = false;
+		}
+	}
+
+	if (c > 0) {
+		dprintk(CVP_CORE, "Releasing %d subcaches\n", c);
+		rhdr.resource_handle = sc_res_info; /* cookie */
+		rhdr.resource_id = CVP_RESOURCE_SYSCACHE;
+
+		rc = __core_release_resource(device, &rhdr);
+		if (rc)
+			dprintk(CVP_WARN,
+				"Failed to release %d subcaches\n", c);
+	}
+
+	device->res->sys_cache_res_set = false;
+
+	return 0;
+}
+
+static int __disable_subcaches(struct iris_hfi_device *device)
+{
+	struct subcache_info *sinfo;
+	int rc = 0;
+
+	if (msm_cvp_syscache_disable || !is_sys_cache_present(device))
+		return 0;
+
+	/* De-activate subcaches */
+	iris_hfi_for_each_subcache_reverse(device, sinfo) {
+		if (sinfo->isactive) {
+			dprintk(CVP_CORE, "De-activate subcache %s\n",
+				sinfo->name);
+			rc = llcc_slice_deactivate(sinfo->subcache);
+			if (rc) {
+				dprintk(CVP_WARN,
+					"Failed to de-activate %s: %d\n",
+					sinfo->name, rc);
+			}
+			sinfo->isactive = false;
+		}
+	}
+
+	return 0;
+}
+
+static void interrupt_init_iris2(struct iris_hfi_device *device)
+{
+	u32 mask_val = 0;
+
+	/* All interrupts should be disabled initially 0x1F6 : Reset value */
+	mask_val = __read_register(device, CVP_WRAPPER_INTR_MASK);
+
+	/* Write 0 to unmask CPU and WD interrupts */
+	mask_val &= ~(CVP_FATAL_INTR_BMSK | CVP_WRAPPER_INTR_MASK_A2HCPU_BMSK);
+	__write_register(device, CVP_WRAPPER_INTR_MASK, mask_val);
+	dprintk(CVP_REG, "Init irq: reg: %x, mask value %x\n",
+		CVP_WRAPPER_INTR_MASK, mask_val);
+}
+
+static void setup_dsp_uc_memmap_vpu5(struct iris_hfi_device *device)
+{
+	/* initialize DSP QTBL & UCREGION with CPU queues */
+	__write_register(device, HFI_DSP_QTBL_ADDR,
+		(u32)device->dsp_iface_q_table.align_device_addr);
+	__write_register(device, HFI_DSP_UC_REGION_ADDR,
+		(u32)device->dsp_iface_q_table.align_device_addr);
+	__write_register(device, HFI_DSP_UC_REGION_SIZE,
+		device->dsp_iface_q_table.mem_data.size);
+}
+
+static void clock_config_on_enable_vpu5(struct iris_hfi_device *device)
+{
+		__write_register(device, CVP_WRAPPER_CPU_CLOCK_CONFIG, 0);
+}
+
+static int __set_ubwc_config(struct iris_hfi_device *device)
+{
+	u8 packet[CVP_IFACEQ_VAR_SMALL_PKT_SIZE];
+	int rc = 0;
+
+	struct cvp_hfi_cmd_sys_set_property_packet *pkt =
+		(struct cvp_hfi_cmd_sys_set_property_packet *) &packet;
+
+	if (!device->res->ubwc_config)
+		return 0;
+
+	rc = call_hfi_pkt_op(device, sys_ubwc_config, pkt,
+		device->res->ubwc_config);
+	if (rc) {
+		dprintk(CVP_WARN,
+			"ubwc config setting to FW failed\n");
+		rc = -ENOTEMPTY;
+		goto fail_to_set_ubwc_config;
+	}
+
+	if (__iface_cmdq_write(device, pkt)) {
+		rc = -ENOTEMPTY;
+		goto fail_to_set_ubwc_config;
+	}
+
+fail_to_set_ubwc_config:
+	return rc;
+}
+
+static int __iris_power_on(struct iris_hfi_device *device)
+{
+	int rc = 0;
+
+	if (device->power_enabled)
+		return 0;
+
+	/* Vote for all hardware resources */
+	rc = __vote_buses(device, device->bus_vote.data,
+			device->bus_vote.data_count);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to vote buses, err: %d\n", rc);
+		goto fail_vote_buses;
+	}
+
+	rc = __enable_regulators(device);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to enable GDSC, err = %d\n", rc);
+		goto fail_enable_gdsc;
+	}
+
+	rc = call_iris_op(device, reset_ahb2axi_bridge, device);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to reset ahb2axi: %d\n", rc);
+		goto fail_enable_clks;
+	}
+
+	rc = __prepare_enable_clks(device);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to enable clocks: %d\n", rc);
+		goto fail_enable_clks;
+	}
+
+	rc = __scale_clocks(device);
+	if (rc) {
+		dprintk(CVP_WARN,
+			"Failed to scale clocks, perf may regress\n");
+		rc = 0;
+	}
+
+	/* Do not access registers before this point! */
+	device->power_enabled = true;
+
+	dprintk(CVP_PWR, "Done with scaling\n");
+	/*
+	 * Re-program all of the registers that get reset as a result of
+	 * regulator_disable() and _enable()
+	 */
+	__set_registers(device);
+
+	dprintk(CVP_CORE, "Done with register set\n");
+	call_iris_op(device, interrupt_init, device);
+	dprintk(CVP_CORE, "Done with interrupt enabling\n");
+	device->intr_status = 0;
+	enable_irq(device->cvp_hal_data->irq);
+
+	/*
+	 * Hand off control of regulators to h/w _after_ enabling clocks.
+	 * Note that the GDSC will turn off when switching from normal
+	 * (s/w triggered) to fast (HW triggered) unless the h/w vote is
+	 * present. Since Iris isn't up yet, the GDSC will be off briefly.
+	 */
+	if (__enable_hw_power_collapse(device))
+		dprintk(CVP_ERR, "Failed to enabled inter-frame PC\n");
+
+	return rc;
+
+fail_enable_clks:
+	__disable_regulators(device);
+fail_enable_gdsc:
+	__unvote_buses(device);
+fail_vote_buses:
+	device->power_enabled = false;
+	return rc;
+}
+
+void power_off_common(struct iris_hfi_device *device)
+{
+	if (!device->power_enabled)
+		return;
+
+	if (!(device->intr_status & CVP_WRAPPER_INTR_STATUS_A2HWD_BMSK))
+		disable_irq_nosync(device->cvp_hal_data->irq);
+	device->intr_status = 0;
+
+	__disable_unprepare_clks(device);
+	if (__disable_regulators(device))
+		dprintk(CVP_WARN, "Failed to disable regulators\n");
+
+	if (__unvote_buses(device))
+		dprintk(CVP_WARN, "Failed to unvote for buses\n");
+	device->power_enabled = false;
+}
+
+static inline int __suspend(struct iris_hfi_device *device)
+{
+	int rc = 0;
+
+	if (!device) {
+		dprintk(CVP_ERR, "Invalid params: %pK\n", device);
+		return -EINVAL;
+	} else if (!device->power_enabled) {
+		dprintk(CVP_PWR, "Power already disabled\n");
+		return 0;
+	}
+
+	dprintk(CVP_PWR, "Entering suspend\n");
+
+	if (device->res->pm_qos_latency_us &&
+		pm_qos_request_active(&device->qos))
+		pm_qos_remove_request(&device->qos);
+
+	rc = __tzbsp_set_cvp_state(TZ_SUBSYS_STATE_SUSPEND);
+	if (rc) {
+		dprintk(CVP_WARN, "Failed to suspend cvp core %d\n", rc);
+		goto err_tzbsp_suspend;
+	}
+
+	__disable_subcaches(device);
+
+	call_iris_op(device, power_off, device);
+	dprintk(CVP_PWR, "Iris power off\n");
+	return rc;
+
+err_tzbsp_suspend:
+	return rc;
+}
+
+static void power_off_iris2(struct iris_hfi_device *device)
+{
+	u32 lpi_status, reg_status = 0, count = 0, max_count = 1000;
+	u32 pc_ready, wfi_status, sbm_ln0_low;
+	u32 main_sbm_ln0_low, main_sbm_ln1_high;
+
+	if (!device->power_enabled || !device->res->sw_power_collapsible)
+		return;
+
+	if (!(device->intr_status & CVP_WRAPPER_INTR_STATUS_A2HWD_BMSK))
+		disable_irq_nosync(device->cvp_hal_data->irq);
+	device->intr_status = 0;
+
+	/* HPG 6.1.2 Step 1  */
+	__write_register(device, CVP_CPU_CS_X2RPMh, 0x3);
+
+	/* HPG 6.1.2 Step 2, noc to low power */
+	__write_register(device, CVP_AON_WRAPPER_MVP_NOC_LPI_CONTROL, 0x1);
+	while (!reg_status && count < max_count) {
+		lpi_status =
+			 __read_register(device,
+				CVP_AON_WRAPPER_MVP_NOC_LPI_STATUS);
+		reg_status = lpi_status & BIT(0);
+		/* Wait for noc lpi status to be set */
+		usleep_range(50, 100);
+		count++;
+	}
+	dprintk(CVP_PWR,
+		"Noc: lpi_status %x noc_status %x (count %d)\n",
+		lpi_status, reg_status, count);
+	if (count == max_count) {
+		wfi_status = __read_register(device, CVP_WRAPPER_CPU_STATUS);
+		pc_ready = __read_register(device, CVP_CTRL_STATUS);
+		sbm_ln0_low =
+			__read_register(device, CVP_NOC_SBM_SENSELN0_LOW);
+		main_sbm_ln0_low = __read_register(device,
+				CVP_NOC_MAIN_SIDEBANDMANAGER_SENSELN0_LOW);
+		main_sbm_ln1_high = __read_register(device,
+				CVP_NOC_MAIN_SIDEBANDMANAGER_SENSELN1_HIGH);
+		dprintk(CVP_WARN,
+			"NOC not in qaccept status %x %x %x %x %x %x %x\n",
+			reg_status, lpi_status, wfi_status, pc_ready,
+			sbm_ln0_low, main_sbm_ln0_low, main_sbm_ln1_high);
+	}
+
+	/* HPG 6.1.2 Step 3, debug bridge to low power */
+	__write_register(device,
+		CVP_WRAPPER_DEBUG_BRIDGE_LPI_CONTROL, 0x7);
+
+	reg_status = 0;
+	count = 0;
+	while ((reg_status != 0x7) && count < max_count) {
+		lpi_status = __read_register(device,
+			CVP_WRAPPER_DEBUG_BRIDGE_LPI_STATUS);
+		reg_status = lpi_status & 0x7;
+		/* Wait for debug bridge lpi status to be set */
+		usleep_range(50, 100);
+		count++;
+	}
+	dprintk(CVP_PWR,
+		"DBLP Set : lpi_status %d reg_status %d (count %d)\n",
+		lpi_status, reg_status, count);
+	if (count == max_count) {
+		dprintk(CVP_WARN,
+			"DBLP Set: status %x %x\n", reg_status, lpi_status);
+	}
+
+	/* HPG 6.1.2 Step 4, debug bridge to lpi release */
+	__write_register(device,
+		CVP_WRAPPER_DEBUG_BRIDGE_LPI_CONTROL, 0x0);
+	lpi_status = 0x1;
+	count = 0;
+	while (lpi_status && count < max_count) {
+		lpi_status = __read_register(device,
+				 CVP_WRAPPER_DEBUG_BRIDGE_LPI_STATUS);
+		usleep_range(50, 100);
+		count++;
+	}
+	dprintk(CVP_PWR,
+		"DBLP Release: lpi_status %d(count %d)\n",
+		lpi_status, count);
+	if (count == max_count) {
+		dprintk(CVP_WARN,
+			"DBLP Release: lpi_status %x\n", lpi_status);
+	}
+
+	/* HPG 6.1.2 Step 6 */
+	__disable_unprepare_clks(device);
+
+	/* HPG 6.1.2 Step 7 & 8 */
+	if (call_iris_op(device, reset_ahb2axi_bridge, device))
+		dprintk(CVP_ERR, "Failed to reset ahb2axi\n");
+
+	if (__unvote_buses(device))
+		dprintk(CVP_WARN, "Failed to unvote for buses\n");
+
+	/* HPG 6.1.2 Step 5 */
+	if (__disable_regulators(device))
+		dprintk(CVP_WARN, "Failed to disable regulators\n");
+
+	/* Do not access registers after this point! */
+	device->power_enabled = false;
+}
+
+static inline int __resume(struct iris_hfi_device *device)
+{
+	int rc = 0;
+	u32 flags = 0, reg_gdsc, reg_cbcr;
+
+	if (!device) {
+		dprintk(CVP_ERR, "Invalid params: %pK\n", device);
+		return -EINVAL;
+	} else if (device->power_enabled) {
+		goto exit;
+	} else if (!__core_in_valid_state(device)) {
+		dprintk(CVP_PWR, "iris_hfi_device in deinit state.");
+		return -EINVAL;
+	}
+
+	dprintk(CVP_PWR, "Resuming from power collapse\n");
+	rc = __iris_power_on(device);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to power on cvp\n");
+		goto err_iris_power_on;
+	}
+
+	reg_gdsc = __read_register(device, CVP_CC_MVS1C_GDSCR);
+	reg_cbcr = __read_register(device, CVP_CC_MVS1C_CBCR);
+	if (!(reg_gdsc & 0x80000000) || (reg_cbcr & 0x80000000))
+		dprintk(CVP_ERR, "CVP power on failed gdsc %x cbcr %x\n",
+			reg_gdsc, reg_cbcr);
+
+	/* Reboot the firmware */
+	rc = __tzbsp_set_cvp_state(TZ_SUBSYS_STATE_RESUME);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to resume cvp core %d\n", rc);
+		goto err_set_cvp_state;
+	}
+
+	__setup_ucregion_memory_map(device);
+	/* Wait for boot completion */
+	rc = __boot_firmware(device);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to reset cvp core\n");
+		goto err_reset_core;
+	}
+
+	/*
+	 * Workaround for a hardware bug: these registers must be
+	 * reprogrammed once the firmware is out of reset
+	 */
+	__set_threshold_registers(device);
+
+	if (device->res->pm_qos_latency_us)
+		pm_qos_add_request(&device->qos, PM_QOS_CPU_DMA_LATENCY,
+				device->res->pm_qos_latency_us);
+
+	__sys_set_debug(device, msm_cvp_fw_debug);
+
+	__enable_subcaches(device);
+	__set_subcaches(device);
+
+	__dsp_resume(device, flags);
+
+	dprintk(CVP_PWR, "Resumed from power collapse\n");
+exit:
+	/* Don't reset skip_pc_count for SYS_PC_PREP cmd */
+	if (device->last_packet_type != HFI_CMD_SYS_PC_PREP)
+		device->skip_pc_count = 0;
+	return rc;
+err_reset_core:
+	__tzbsp_set_cvp_state(TZ_SUBSYS_STATE_SUSPEND);
+err_set_cvp_state:
+	call_iris_op(device, power_off, device);
+err_iris_power_on:
+	dprintk(CVP_ERR, "Failed to resume from power collapse\n");
+	return rc;
+}
+
+static int __load_fw(struct iris_hfi_device *device)
+{
+	int rc = 0;
+
+	/* Initialize resources */
+	rc = __init_resources(device, device->res);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to init resources: %d\n", rc);
+		goto fail_init_res;
+	}
+
+	rc = __initialize_packetization(device);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to initialize packetization\n");
+		goto fail_init_pkt;
+	}
+
+	rc = __iris_power_on(device);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to power on iris in in load_fw\n");
+		goto fail_iris_power_on;
+	}
+
+	if (device->res->use_non_secure_pil || !device->res->firmware_base) {
+		if (!device->resources.fw.cookie)
+			device->resources.fw.cookie =
+				subsystem_get_with_fwname("evass",
+				device->res->fw_name);
+
+		if (IS_ERR_OR_NULL(device->resources.fw.cookie)) {
+			dprintk(CVP_ERR, "Failed to download firmware\n");
+			device->resources.fw.cookie = NULL;
+			rc = -ENOMEM;
+			goto fail_load_fw;
+		}
+	}
+
+	if (!device->res->firmware_base) {
+		rc = __protect_cp_mem(device);
+		if (rc) {
+			dprintk(CVP_ERR, "Failed to protect memory\n");
+			goto fail_protect_mem;
+		}
+	}
+	return rc;
+fail_protect_mem:
+	if (device->resources.fw.cookie)
+		subsystem_put(device->resources.fw.cookie);
+	device->resources.fw.cookie = NULL;
+fail_load_fw:
+	call_iris_op(device, power_off, device);
+fail_iris_power_on:
+fail_init_pkt:
+	__deinit_resources(device);
+fail_init_res:
+	return rc;
+}
+
+static void __unload_fw(struct iris_hfi_device *device)
+{
+	if (!device->resources.fw.cookie)
+		return;
+
+	cancel_delayed_work(&iris_hfi_pm_work);
+	if (device->state != IRIS_STATE_DEINIT)
+		flush_workqueue(device->iris_pm_workq);
+
+	subsystem_put(device->resources.fw.cookie);
+	__interface_queues_release(device);
+	call_iris_op(device, power_off, device);
+	device->resources.fw.cookie = NULL;
+	__deinit_resources(device);
+
+	dprintk(CVP_WARN, "Firmware unloaded\n");
+}
+
+static int iris_hfi_get_fw_info(void *dev, struct cvp_hal_fw_info *fw_info)
+{
+	int i = 0;
+	struct iris_hfi_device *device = dev;
+
+	if (!device || !fw_info) {
+		dprintk(CVP_ERR,
+			"%s Invalid parameter: device = %pK fw_info = %pK\n",
+			__func__, device, fw_info);
+		return -EINVAL;
+	}
+
+	mutex_lock(&device->lock);
+
+	while (cvp_driver->fw_version[i++] != 'V' && i < CVP_VERSION_LENGTH)
+		;
+
+	if (i == CVP_VERSION_LENGTH) {
+		dprintk(CVP_WARN, "Iris version string is invalid\n");
+		fw_info->version[0] = '\0';
+		goto fail_version_string;
+	}
+
+	memcpy(&fw_info->version[0], &cvp_driver->fw_version[0],
+			CVP_VERSION_LENGTH);
+	fw_info->version[CVP_VERSION_LENGTH - 1] = '\0';
+
+fail_version_string:
+	dprintk(CVP_CORE, "F/W version retrieved : %s\n", fw_info->version);
+	fw_info->base_addr = device->cvp_hal_data->firmware_base;
+	fw_info->register_base = device->res->register_base;
+	fw_info->register_size = device->cvp_hal_data->register_size;
+	fw_info->irq = device->cvp_hal_data->irq;
+
+	mutex_unlock(&device->lock);
+	return 0;
+}
+
+static int iris_hfi_get_core_capabilities(void *dev)
+{
+	dprintk(CVP_CORE, "%s not supported yet!\n", __func__);
+	return 0;
+}
+
+static void __noc_error_info_iris2(struct iris_hfi_device *device)
+{
+	u32 val = 0;
+
+	val = __read_register(device, CVP_NOC_ERR_SWID_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_SWID_LOW:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_ERR_SWID_HIGH_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_SWID_HIGH:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_ERR_MAINCTL_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_MAINCTL_LOW:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_ERR_ERRVLD_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRVLD_LOW:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_ERR_ERRCLR_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRCLR_LOW:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_ERR_ERRLOG0_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRLOG0_LOW:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_ERR_ERRLOG0_HIGH_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRLOG0_HIGH:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_ERR_ERRLOG1_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRLOG1_LOW:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_ERR_ERRLOG1_HIGH_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRLOG1_HIGH:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_ERR_ERRLOG2_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRLOG2_LOW:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_ERR_ERRLOG2_HIGH_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRLOG2_HIGH:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_ERR_ERRLOG3_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRLOG3_LOW:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_ERR_ERRLOG3_HIGH_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRLOG3_HIGH:     %#x\n", val);
+
+	val = __read_register(device, CVP_NOC_CORE_ERR_SWID_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC__CORE_ERL_MAIN_SWID_LOW:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_SWID_HIGH_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_SWID_HIGH:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_MAINCTL_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_MAINCTL_LOW:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_ERRVLD_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRVLD_LOW:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_ERRCLR_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRCLR_LOW:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_ERRLOG0_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRLOG0_LOW:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_ERRLOG0_HIGH_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRLOG0_HIGH:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_ERRLOG1_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRLOG1_LOW:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_ERRLOG1_HIGH_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRLOG1_HIGH:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_ERRLOG2_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRLOG2_LOW:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_ERRLOG2_HIGH_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRLOG2_HIGH:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_ERRLOG3_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRLOG3_LOW:     %#x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_ERRLOG3_HIGH_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRLOG3_HIGH:     %#x\n", val);
+}
+
+static int iris_hfi_noc_error_info(void *dev)
+{
+	struct iris_hfi_device *device;
+
+	if (!dev) {
+		dprintk(CVP_ERR, "%s: null device\n", __func__);
+		return -EINVAL;
+	}
+	device = dev;
+
+	mutex_lock(&device->lock);
+	dprintk(CVP_ERR, "%s: non error information\n", __func__);
+
+	call_iris_op(device, noc_error_info, device);
+
+	mutex_unlock(&device->lock);
+
+	return 0;
+}
+
+static int __initialize_packetization(struct iris_hfi_device *device)
+{
+	int rc = 0;
+
+	if (!device || !device->res) {
+		dprintk(CVP_ERR, "%s - invalid param\n", __func__);
+		return -EINVAL;
+	}
+
+	device->packetization_type = HFI_PACKETIZATION_4XX;
+
+	device->pkt_ops = cvp_hfi_get_pkt_ops_handle(
+		device->packetization_type);
+	if (!device->pkt_ops) {
+		rc = -EINVAL;
+		dprintk(CVP_ERR, "Failed to get pkt_ops handle\n");
+	}
+
+	return rc;
+}
+
+void __init_cvp_ops(struct iris_hfi_device *device)
+{
+	device->vpu_ops = &iris2_ops;
+}
+
+static struct iris_hfi_device *__add_device(u32 device_id,
+			struct msm_cvp_platform_resources *res,
+			hfi_cmd_response_callback callback)
+{
+	struct iris_hfi_device *hdevice = NULL;
+	int rc = 0;
+
+	if (!res || !callback) {
+		dprintk(CVP_ERR, "Invalid Parameters\n");
+		return NULL;
+	}
+
+	dprintk(CVP_INFO, "%s: device_id: %d\n", __func__, device_id);
+
+	hdevice = kzalloc(sizeof(*hdevice), GFP_KERNEL);
+	if (!hdevice) {
+		dprintk(CVP_ERR, "failed to allocate new device\n");
+		goto exit;
+	}
+
+	hdevice->response_pkt = kmalloc_array(cvp_max_packets,
+				sizeof(*hdevice->response_pkt), GFP_KERNEL);
+	if (!hdevice->response_pkt) {
+		dprintk(CVP_ERR, "failed to allocate response_pkt\n");
+		goto err_cleanup;
+	}
+
+	hdevice->raw_packet =
+		kzalloc(CVP_IFACEQ_VAR_HUGE_PKT_SIZE, GFP_KERNEL);
+	if (!hdevice->raw_packet) {
+		dprintk(CVP_ERR, "failed to allocate raw packet\n");
+		goto err_cleanup;
+	}
+
+	rc = __init_regs_and_interrupts(hdevice, res);
+	if (rc)
+		goto err_cleanup;
+
+	hdevice->res = res;
+	hdevice->device_id = device_id;
+	hdevice->callback = callback;
+
+	__init_cvp_ops(hdevice);
+
+	hdevice->cvp_workq = create_singlethread_workqueue(
+		"msm_cvp_workerq_iris");
+	if (!hdevice->cvp_workq) {
+		dprintk(CVP_ERR, ": create cvp workq failed\n");
+		goto err_cleanup;
+	}
+
+	hdevice->iris_pm_workq = create_singlethread_workqueue(
+			"pm_workerq_iris");
+	if (!hdevice->iris_pm_workq) {
+		dprintk(CVP_ERR, ": create pm workq failed\n");
+		goto err_cleanup;
+	}
+
+	mutex_init(&hdevice->lock);
+	INIT_LIST_HEAD(&hdevice->sess_head);
+
+	return hdevice;
+
+err_cleanup:
+	if (hdevice->iris_pm_workq)
+		destroy_workqueue(hdevice->iris_pm_workq);
+	if (hdevice->cvp_workq)
+		destroy_workqueue(hdevice->cvp_workq);
+	kfree(hdevice->response_pkt);
+	kfree(hdevice->raw_packet);
+	kfree(hdevice);
+exit:
+	return NULL;
+}
+
+static struct iris_hfi_device *__get_device(u32 device_id,
+				struct msm_cvp_platform_resources *res,
+				hfi_cmd_response_callback callback)
+{
+	if (!res || !callback) {
+		dprintk(CVP_ERR, "Invalid params: %pK %pK\n", res, callback);
+		return NULL;
+	}
+
+	return __add_device(device_id, res, callback);
+}
+
+void cvp_iris_hfi_delete_device(void *device)
+{
+	struct msm_cvp_core *core;
+	struct iris_hfi_device *dev = NULL;
+
+	if (!device)
+		return;
+
+	if (list_empty(&cvp_driver->cores))
+		return;
+
+	core = list_first_entry(&cvp_driver->cores, struct msm_cvp_core, list);
+	dev = core->device->hfi_device_data;
+
+	if (!dev)
+		return;
+
+	mutex_destroy(&dev->lock);
+	destroy_workqueue(dev->cvp_workq);
+	destroy_workqueue(dev->iris_pm_workq);
+	free_irq(dev->cvp_hal_data->irq, dev);
+	iounmap(dev->cvp_hal_data->register_base);
+	iounmap(dev->cvp_hal_data->gcc_reg_base);
+	kfree(dev->cvp_hal_data);
+	kfree(dev->response_pkt);
+	kfree(dev->raw_packet);
+	kfree(dev);
+}
+
+static int iris_hfi_validate_session(void *sess, const char *func)
+{
+	struct cvp_hal_session *session = sess;
+	int rc = 0;
+	struct iris_hfi_device *device;
+
+	if (!session || !session->device) {
+		dprintk(CVP_ERR, " %s Invalid Params %pK\n", __func__, session);
+		return -EINVAL;
+	}
+
+	device = session->device;
+	mutex_lock(&device->lock);
+	if (!__is_session_valid(device, session, func))
+		rc = -ECONNRESET;
+
+	mutex_unlock(&device->lock);
+	return rc;
+}
+
+static void iris_init_hfi_callbacks(struct cvp_hfi_device *hdev)
+{
+	hdev->core_init = iris_hfi_core_init;
+	hdev->core_release = iris_hfi_core_release;
+	hdev->core_trigger_ssr = iris_hfi_core_trigger_ssr;
+	hdev->session_init = iris_hfi_session_init;
+	hdev->session_end = iris_hfi_session_end;
+	hdev->session_abort = iris_hfi_session_abort;
+	hdev->session_clean = iris_hfi_session_clean;
+	hdev->session_set_buffers = iris_hfi_session_set_buffers;
+	hdev->session_release_buffers = iris_hfi_session_release_buffers;
+	hdev->session_send = iris_hfi_session_send;
+	hdev->session_flush = iris_hfi_session_flush;
+	hdev->scale_clocks = iris_hfi_scale_clocks;
+	hdev->vote_bus = iris_hfi_vote_buses;
+	hdev->get_fw_info = iris_hfi_get_fw_info;
+	hdev->get_core_capabilities = iris_hfi_get_core_capabilities;
+	hdev->suspend = iris_hfi_suspend;
+	hdev->resume = iris_hfi_resume;
+	hdev->flush_debug_queue = iris_hfi_flush_debug_queue;
+	hdev->noc_error_info = iris_hfi_noc_error_info;
+	hdev->validate_session = iris_hfi_validate_session;
+}
+
+int cvp_iris_hfi_initialize(struct cvp_hfi_device *hdev, u32 device_id,
+		struct msm_cvp_platform_resources *res,
+		hfi_cmd_response_callback callback)
+{
+	int rc = 0;
+
+	if (!hdev || !res || !callback) {
+		dprintk(CVP_ERR, "Invalid params: %pK %pK %pK\n",
+			hdev, res, callback);
+		rc = -EINVAL;
+		goto err_iris_hfi_init;
+	}
+
+	hdev->hfi_device_data = __get_device(device_id, res, callback);
+
+	if (IS_ERR_OR_NULL(hdev->hfi_device_data)) {
+		rc = PTR_ERR(hdev->hfi_device_data) ?: -EINVAL;
+		goto err_iris_hfi_init;
+	}
+
+	iris_init_hfi_callbacks(hdev);
+
+err_iris_hfi_init:
+	return rc;
+}
+

+ 216 - 0
msm/eva/cvp_hfi.h

@@ -0,0 +1,216 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __H_CVP_HFI_H__
+#define __H_CVP_HFI_H__
+
+#include <media/msm_media_info.h>
+#include "cvp_hfi_helper.h"
+#include "cvp_hfi_api.h"
+
+#define HFI_CMD_SESSION_CVP_START	\
+	(HFI_DOMAIN_BASE_CVP + HFI_ARCH_COMMON_OFFSET +	\
+	HFI_CMD_START_OFFSET + 0x1000)
+
+#define  HFI_CMD_SESSION_CVP_SET_BUFFERS\
+	(HFI_CMD_SESSION_CVP_START + 0x001)
+#define  HFI_CMD_SESSION_CVP_RELEASE_BUFFERS\
+	(HFI_CMD_SESSION_CVP_START + 0x002)
+
+#define  HFI_CMD_SESSION_CVP_DS\
+	(HFI_CMD_SESSION_CVP_START + 0x003)
+#define  HFI_CMD_SESSION_CVP_HCD_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x004)
+#define  HFI_CMD_SESSION_CVP_HCD_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x005)
+#define  HFI_CMD_SESSION_CVP_CV_HOG_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x006)
+#define  HFI_CMD_SESSION_CVP_CV_HOG_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x007)
+#define  HFI_CMD_SESSION_CVP_SVM\
+	(HFI_CMD_SESSION_CVP_START + 0x008)
+#define  HFI_CMD_SESSION_CVP_NCC_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x009)
+#define  HFI_CMD_SESSION_CVP_NCC_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x00A)
+#define  HFI_CMD_SESSION_CVP_DFS_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x00B)
+#define  HFI_CMD_SESSION_CVP_DFS_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x00C)
+#define  HFI_CMD_SESSION_CVP_FTEXT\
+	(HFI_CMD_SESSION_CVP_START + 0x00F)
+
+/* ==========CHAINED OPERATIONS===================*/
+#define  HFI_CMD_SESSION_CVP_CV_HOG_SVM_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x010)
+#define  HFI_CMD_SESSION_CVP_CV_HOG_SVM_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x011)
+#define  HFI_CMD_SESSION_CVP_CV_HOG_SVM_HCD_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x012)
+#define  HFI_CMD_SESSION_CVP_CV_HOG_SVM_HCD_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x013)
+#define  HFI_CMD_SESSION_CVP_OPTICAL_FLOW\
+	(HFI_CMD_SESSION_CVP_START + 0x014)
+
+/* ===========USECASE OPERATIONS===============*/
+#define  HFI_CMD_SESSION_CVP_DC_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x030)
+#define  HFI_CMD_SESSION_CVP_DC_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x031)
+#define  HFI_CMD_SESSION_CVP_DCM_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x034)
+#define  HFI_CMD_SESSION_CVP_DCM_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x035)
+
+#define  HFI_CMD_SESSION_CVP_DME_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x039)
+#define  HFI_CMD_SESSION_CVP_DME_BASIC_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x03B)
+#define  HFI_CMD_SESSION_CVP_DME_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x03A)
+
+#define  HFI_CMD_SESSION_CVP_CV_TME_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x047)
+#define  HFI_CMD_SESSION_CVP_CV_TME_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x048)
+#define  HFI_CMD_SESSION_CVP_CV_OD_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x049)
+#define  HFI_CMD_SESSION_CVP_CV_OD_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x04A)
+#define  HFI_CMD_SESSION_CVP_CV_ODT_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x04B)
+#define  HFI_CMD_SESSION_CVP_CV_ODT_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x04C)
+
+#define  HFI_CMD_SESSION_CVP_SET_PERSIST_BUFFERS\
+	(HFI_CMD_SESSION_CVP_START + 0x04D)
+#define HFI_CMD_SESSION_CVP_PYS_HCD_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x050)
+#define HFI_CMD_SESSION_CVP_PYS_HCD_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x051)
+#define HFI_CMD_SESSION_CVP_SET_MODEL_BUFFERS\
+	(HFI_CMD_SESSION_CVP_START + 0x052)
+#define HFI_CMD_SESSION_CVP_FD_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x053)
+#define HFI_CMD_SESSION_CVP_FD_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x054)
+#define HFI_CMD_SESSION_CVP_RELEASE_PERSIST_BUFFERS\
+	(HFI_CMD_SESSION_CVP_START + 0x055)
+#define HFI_CMD_SESSION_CVP_FLUSH\
+	(HFI_CMD_SESSION_CVP_START + 0x057)
+#define  HFI_CMD_SESSION_CVP_ICA_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x100)
+#define  HFI_CMD_SESSION_CVP_ICA_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x101)
+
+
+#define HFI_MSG_SESSION_CVP_START	\
+	(HFI_DOMAIN_BASE_CVP + HFI_ARCH_COMMON_OFFSET +	\
+	HFI_MSG_START_OFFSET + 0x1000)
+
+#define HFI_MSG_SESSION_CVP_SET_BUFFERS\
+	(HFI_MSG_SESSION_CVP_START + 0x001)
+#define HFI_MSG_SESSION_CVP_RELEASE_BUFFERS \
+	(HFI_MSG_SESSION_CVP_START + 0x002)
+#define HFI_MSG_SESSION_CVP_DS\
+	(HFI_MSG_SESSION_CVP_START + 0x003)
+#define HFI_MSG_SESSION_CVP_HCD\
+	(HFI_MSG_SESSION_CVP_START + 0x004)
+#define HFI_MSG_SESSION_CVP_CV_HOG\
+	(HFI_MSG_SESSION_CVP_START + 0x005)
+#define HFI_MSG_SESSION_CVP_SVM\
+	(HFI_MSG_SESSION_CVP_START + 0x006)
+#define HFI_MSG_SESSION_CVP_NCC\
+	(HFI_MSG_SESSION_CVP_START + 0x007)
+#define HFI_MSG_SESSION_CVP_DFS\
+	(HFI_MSG_SESSION_CVP_START + 0x008)
+#define HFI_MSG_SESSION_CVP_TME\
+	(HFI_MSG_SESSION_CVP_START + 0x009)
+#define HFI_MSG_SESSION_CVP_FTEXT\
+	(HFI_MSG_SESSION_CVP_START + 0x00A)
+
+#define HFI_MSG_SESSION_CVP_ICA\
+	(HFI_MSG_SESSION_CVP_START + 0x014)
+
+#define HFI_MSG_SESSION_CVP_DME\
+	(HFI_MSG_SESSION_CVP_START + 0x023)
+#define HFI_MSG_SESSION_CVP_OPERATION_CONFIG\
+	(HFI_MSG_SESSION_CVP_START + 0x030)
+
+#define HFI_MSG_SESSION_CVP_SET_PERSIST_BUFFERS\
+	(HFI_MSG_SESSION_CVP_START + 0x034)
+#define HFI_MSG_SESSION_CVP_SET_MODEL_BUFFERS\
+	(HFI_MSG_SESSION_CVP_START + 0x036)
+#define HFI_MSG_SESSION_CVP_FD\
+	(HFI_MSG_SESSION_CVP_START + 0x037)
+#define HFI_MSG_SESSION_CVP_RELEASE_PERSIST_BUFFERS\
+	(HFI_MSG_SESSION_CVP_START + 0x038)
+#define HFI_MSG_SESSION_CVP_FLUSH_DEPRECATE\
+	(HFI_CMD_SESSION_CVP_START + 0x03A)
+#define HFI_MSG_SESSION_CVP_FLUSH\
+	(HFI_MSG_SESSION_CVP_START + 0x03A)
+
+#define CVP_IFACEQ_MAX_PKT_SIZE       1024
+#define CVP_IFACEQ_MED_PKT_SIZE       768
+#define CVP_IFACEQ_MIN_PKT_SIZE       8
+#define CVP_IFACEQ_VAR_SMALL_PKT_SIZE 100
+#define CVP_IFACEQ_VAR_LARGE_PKT_SIZE 512
+#define CVP_IFACEQ_VAR_HUGE_PKT_SIZE  (1024*12)
+
+struct cvp_hfi_cmd_session_flush_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 flush_type;
+};
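+
+/*
+ * Example population (a sketch; HFI_FLUSH_ALL is a hypothetical
+ * placeholder for the real flush_type values, which are defined
+ * elsewhere):
+ *
+ *	struct cvp_hfi_cmd_session_flush_packet pkt = {
+ *		.size = sizeof(pkt),
+ *		.packet_type = HFI_CMD_SESSION_CVP_FLUSH,
+ *		.session_id = session_id,
+ *		.flush_type = HFI_FLUSH_ALL,
+ *	};
+ */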
+
+struct cvp_hfi_cmd_session_get_property_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 num_properties;
+	u32 rg_property_data[1];
+};
+
+struct cvp_hfi_msg_sys_session_abort_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+};
+
+struct cvp_hfi_msg_sys_property_info_packet {
+	u32 size;
+	u32 packet_type;
+	u32 num_properties;
+	u32 rg_property_data[128];
+};
+
+enum session_flags {
+	SESSION_PAUSE = BIT(1),
+};
+
+struct cvp_hal_session {
+	struct list_head list;
+	void *session_id;
+	u32 flags;
+	void *device;
+};
+
+struct msm_cvp_fw {
+	void *cookie;
+};
+
+int cvp_hfi_process_msg_packet(u32 device_id,
+	void *msg_hdr, struct msm_cvp_cb_info *info);
+
+enum cvp_status cvp_hfi_process_sys_init_done_prop_read(
+	struct cvp_hfi_msg_sys_init_done_packet *pkt,
+	struct cvp_hal_sys_init_done *sys_init_done);
+
+enum cvp_status hfi_process_session_init_done_prop_read(
+	struct cvp_hfi_msg_sys_session_init_done_packet *pkt,
+	struct cvp_hal_session_init_done *session_init_done);
+
+#endif

+ 379 - 0
msm/eva/cvp_hfi_api.h

@@ -0,0 +1,379 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __CVP_HFI_API_H__
+#define __CVP_HFI_API_H__
+
+#include <linux/log2.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/hash.h>
+#include "msm_cvp_core.h"
+#include "msm_cvp_resources.h"
+#include "cvp_hfi_helper.h"
+
+#define CONTAINS(__a, __sz, __t) (\
+	(__t >= __a) && \
+	(__t < __a + __sz) \
+)
+
+#define OVERLAPS(__t, __tsz, __a, __asz) (\
+	(__t <= __a) && \
+	(__t + __tsz >= __a + __asz) \
+)
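+
+/*
+ * Example: CONTAINS(base, size, ptr) checks that ptr lies inside
+ * [base, base + size), so CONTAINS(0x1000, 0x100, 0x10FF) holds while
+ * 0x1100 falls outside. Note that OVERLAPS(t, tsz, a, asz), despite
+ * its name, checks that [t, t + tsz) fully covers [a, a + asz).
+ */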
+
+#define CVP_VERSION_LENGTH 128
+
+/* 16 encoder and 16 decoder sessions */
+#define CVP_MAX_SESSIONS	32
+
+#define HFI_DFS_CONFIG_CMD_SIZE	38
+#define HFI_DFS_FRAME_CMD_SIZE	16
+#define HFI_DFS_FRAME_BUFFERS_OFFSET 8
+#define HFI_DFS_BUF_NUM 4
+
+#define HFI_DME_CONFIG_CMD_SIZE	194
+#define HFI_DME_BASIC_CONFIG_CMD_SIZE	51
+#define HFI_DME_FRAME_CMD_SIZE	28
+#define HFI_DME_FRAME_BUFFERS_OFFSET 12
+#define HFI_DME_BUF_NUM 8
+
+#define HFI_PERSIST_CMD_SIZE	11
+#define HFI_PERSIST_BUFFERS_OFFSET 7
+#define HFI_PERSIST_BUF_NUM     2
+
+#define HFI_DS_CMD_SIZE	50
+#define HFI_DS_BUFFERS_OFFSET	44
+#define HFI_DS_BUF_NUM	3
+
+#define HFI_OF_CONFIG_CMD_SIZE 34
+#define HFI_OF_FRAME_CMD_SIZE 24
+#define HFI_OF_BUFFERS_OFFSET 8
+#define HFI_OF_BUF_NUM 8
+
+#define HFI_ODT_CONFIG_CMD_SIZE 23
+#define HFI_ODT_FRAME_CMD_SIZE 33
+#define HFI_ODT_BUFFERS_OFFSET 11
+#define HFI_ODT_BUF_NUM 11
+
+#define HFI_OD_CONFIG_CMD_SIZE 24
+#define HFI_OD_FRAME_CMD_SIZE 12
+#define HFI_OD_BUFFERS_OFFSET 6
+#define HFI_OD_BUF_NUM 3
+
+#define HFI_NCC_CONFIG_CMD_SIZE 47
+#define HFI_NCC_FRAME_CMD_SIZE 22
+#define HFI_NCC_BUFFERS_OFFSET 8
+#define HFI_NCC_BUF_NUM 7
+
+#define HFI_ICA_CONFIG_CMD_SIZE 127
+#define HFI_ICA_FRAME_CMD_SIZE 14
+#define HFI_ICA_BUFFERS_OFFSET 6
+#define HFI_ICA_BUF_NUM 4
+
+#define HFI_HCD_CONFIG_CMD_SIZE 46
+#define HFI_HCD_FRAME_CMD_SIZE 18
+#define HFI_HCD_BUFFERS_OFFSET 12
+#define HFI_HCD_BUF_NUM 3
+
+#define HFI_DCM_CONFIG_CMD_SIZE 20
+#define HFI_DCM_FRAME_CMD_SIZE 19
+#define HFI_DCM_BUFFERS_OFFSET 9
+#define HFI_DCM_BUF_NUM 5
+
+#define HFI_PYS_HCD_CONFIG_CMD_SIZE 461
+#define HFI_PYS_HCD_FRAME_CMD_SIZE 66
+#define HFI_PYS_HCD_BUFFERS_OFFSET 14
+#define HFI_PYS_HCD_BUF_NUM 26
+
+#define HFI_FD_CONFIG_CMD_SIZE 28
+#define HFI_FD_FRAME_CMD_SIZE  10
+#define HFI_FD_BUFFERS_OFFSET  6
+#define HFI_FD_BUF_NUM 2
+
+#define HFI_MODEL_CMD_SIZE 9
+#define HFI_MODEL_BUFFERS_OFFSET 7
+#define HFI_MODEL_BUF_NUM 1
+
+#define HFI_VERSION_MAJOR_MASK 0xFF000000
+#define HFI_VERSION_MAJOR_SHFIT 24
+#define HFI_VERSION_MINOR_MASK 0x00FFFFE0
+#define HFI_VERSION_MINOR_SHIFT 5
+#define HFI_VERSION_BRANCH_MASK 0x0000001F
+#define HFI_VERSION_BRANCH_SHIFT 0
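+
+/*
+ * Example helpers (a sketch using the masks above) showing how a
+ * packed version word is unpacked:
+ */
+static inline u32 hfi_version_major(u32 version)
+{
+	return (version & HFI_VERSION_MAJOR_MASK) >> HFI_VERSION_MAJOR_SHFIT;
+}
+
+static inline u32 hfi_version_minor(u32 version)
+{
+	return (version & HFI_VERSION_MINOR_MASK) >> HFI_VERSION_MINOR_SHIFT;
+}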
+
+enum cvp_status {
+	CVP_ERR_NONE = 0x0,
+	CVP_ERR_FAIL = 0x80000000,
+	CVP_ERR_ALLOC_FAIL,
+	CVP_ERR_ILLEGAL_OP,
+	CVP_ERR_BAD_PARAM,
+	CVP_ERR_BAD_HANDLE,
+	CVP_ERR_NOT_SUPPORTED,
+	CVP_ERR_BAD_STATE,
+	CVP_ERR_MAX_CLIENTS,
+	CVP_ERR_IFRAME_EXPECTED,
+	CVP_ERR_HW_FATAL,
+	CVP_ERR_BITSTREAM_ERR,
+	CVP_ERR_INDEX_NOMORE,
+	CVP_ERR_SEQHDR_PARSE_FAIL,
+	CVP_ERR_INSUFFICIENT_BUFFER,
+	CVP_ERR_BAD_POWER_STATE,
+	CVP_ERR_NO_VALID_SESSION,
+	CVP_ERR_TIMEOUT,
+	CVP_ERR_CMDQFULL,
+	CVP_ERR_START_CODE_NOT_FOUND,
+	CVP_ERR_NOC_ERROR,
+	CVP_ERR_CLIENT_PRESENT = 0x90000001,
+	CVP_ERR_CLIENT_FATAL,
+	CVP_ERR_CMD_QUEUE_FULL,
+	CVP_ERR_UNUSED = 0x10000000
+};
+
+enum hal_property {
+	HAL_UNUSED_PROPERTY = 0xFFFFFFFF,
+};
+
+enum hal_ssr_trigger_type {
+	SSR_ERR_FATAL = 1,
+	SSR_SW_DIV_BY_ZERO,
+	SSR_HW_WDOG_IRQ,
+	SSR_SESSION_ABORT,
+};
+
+enum hal_intra_refresh_mode {
+	HAL_INTRA_REFRESH_NONE,
+	HAL_INTRA_REFRESH_CYCLIC,
+	HAL_INTRA_REFRESH_RANDOM,
+	HAL_UNUSED_INTRA = 0x10000000,
+};
+
+enum cvp_resource_id {
+	CVP_RESOURCE_NONE,
+	CVP_RESOURCE_SYSCACHE,
+	CVP_UNUSED_RESOURCE = 0x10000000,
+};
+
+struct cvp_resource_hdr {
+	enum cvp_resource_id resource_id;
+	void *resource_handle;
+};
+
+struct cvp_hal_fw_info {
+	char version[CVP_VERSION_LENGTH];
+	phys_addr_t base_addr;
+	int register_base;
+	int register_size;
+	int irq;
+};
+
+enum hal_event_type {
+	HAL_EVENT_SEQ_CHANGED_SUFFICIENT_RESOURCES,
+	HAL_EVENT_SEQ_CHANGED_INSUFFICIENT_RESOURCES,
+	HAL_EVENT_RELEASE_BUFFER_REFERENCE,
+	HAL_UNUSED_SEQCHG = 0x10000000,
+};
+
+/* HAL Response */
+#define IS_HAL_SYS_CMD(cmd) ((cmd) >= HAL_SYS_INIT_DONE && \
+		(cmd) <= HAL_SYS_ERROR)
+#define IS_HAL_SESSION_CMD(cmd) ((cmd) >= HAL_SESSION_EVENT_CHANGE && \
+		(cmd) <= HAL_SESSION_ERROR)
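+/*
+ * For example, IS_HAL_SYS_CMD(HAL_SYS_IDLE) and
+ * IS_HAL_SESSION_CMD(HAL_SESSION_INIT_DONE) both evaluate to true,
+ * relying on the ordering of the enum below.
+ */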
+enum hal_command_response {
+	HAL_NO_RESP,
+	HAL_SYS_INIT_DONE,
+	HAL_SYS_SET_RESOURCE_DONE,
+	HAL_SYS_RELEASE_RESOURCE_DONE,
+	HAL_SYS_PING_ACK_DONE,
+	HAL_SYS_PC_PREP_DONE,
+	HAL_SYS_IDLE,
+	HAL_SYS_DEBUG,
+	HAL_SYS_WATCHDOG_TIMEOUT,
+	HAL_SYS_ERROR,
+	/* SESSION COMMANDS_DONE */
+	HAL_SESSION_EVENT_CHANGE,
+	HAL_SESSION_INIT_DONE,
+	HAL_SESSION_END_DONE,
+	HAL_SESSION_SET_BUFFER_DONE,
+	HAL_SESSION_ABORT_DONE,
+	HAL_SESSION_STOP_DONE,
+	HAL_SESSION_CVP_OPERATION_CONFIG,
+	HAL_SESSION_FLUSH_DONE,
+	HAL_SESSION_SUSPEND_DONE,
+	HAL_SESSION_RESUME_DONE,
+	HAL_SESSION_SET_PROP_DONE,
+	HAL_SESSION_GET_PROP_DONE,
+	HAL_SESSION_RELEASE_BUFFER_DONE,
+	HAL_SESSION_REGISTER_BUFFER_DONE,
+	HAL_SESSION_UNREGISTER_BUFFER_DONE,
+	HAL_SESSION_RELEASE_RESOURCE_DONE,
+	HAL_SESSION_DFS_CONFIG_CMD_DONE,
+	HAL_SESSION_DFS_FRAME_CMD_DONE,
+	HAL_SESSION_DME_CONFIG_CMD_DONE,
+	HAL_SESSION_DME_BASIC_CONFIG_CMD_DONE,
+	HAL_SESSION_DME_FRAME_CMD_DONE,
+	HAL_SESSION_TME_CONFIG_CMD_DONE,
+	HAL_SESSION_ODT_CONFIG_CMD_DONE,
+	HAL_SESSION_OD_CONFIG_CMD_DONE,
+	HAL_SESSION_NCC_CONFIG_CMD_DONE,
+	HAL_SESSION_ICA_CONFIG_CMD_DONE,
+	HAL_SESSION_HCD_CONFIG_CMD_DONE,
+	HAL_SESSION_DC_CONFIG_CMD_DONE,
+	HAL_SESSION_DCM_CONFIG_CMD_DONE,
+	HAL_SESSION_PYS_HCD_CONFIG_CMD_DONE,
+	HAL_SESSION_FD_CONFIG_CMD_DONE,
+	HAL_SESSION_PERSIST_SET_DONE,
+	HAL_SESSION_PERSIST_REL_DONE,
+	HAL_SESSION_MODEL_BUF_CMD_DONE,
+	HAL_SESSION_ICA_FRAME_CMD_DONE,
+	HAL_SESSION_FD_FRAME_CMD_DONE,
+	HAL_SESSION_PROPERTY_INFO,
+	HAL_SESSION_ERROR,
+	HAL_RESPONSE_UNUSED = 0x10000000,
+};
+
+struct msm_cvp_capability {
+	u32 reserved[183];
+};
+
+struct cvp_hal_sys_init_done {
+	u32 dec_codec_supported;
+	u32 enc_codec_supported;
+	u32 codec_count;
+	struct msm_cvp_capability *capabilities;
+	u32 max_sessions_supported;
+};
+
+struct cvp_hal_session_init_done {
+	struct msm_cvp_capability capability;
+};
+
+struct msm_cvp_cb_cmd_done {
+	u32 device_id;
+	void *session_id;
+	enum cvp_status status;
+	u32 size;
+	union {
+		struct cvp_hfi_msg_session_hdr msg_hdr;
+		struct cvp_resource_hdr resource_hdr;
+		struct cvp_hal_sys_init_done sys_init_done;
+		struct cvp_hal_session_init_done session_init_done;
+		u32 buffer_addr;
+	} data;
+};
+
+struct msm_cvp_cb_data_done {
+	u32 device_id;
+	void *session_id;
+	enum cvp_status status;
+	u32 size;
+	u32 client_data;
+};
+
+struct msm_cvp_cb_info {
+	enum hal_command_response response_type;
+	union {
+		struct msm_cvp_cb_cmd_done cmd;
+		struct msm_cvp_cb_data_done data;
+	} response;
+};
+
+enum msm_cvp_hfi_type {
+	CVP_HFI_IRIS,
+};
+
+enum msm_cvp_thermal_level {
+	CVP_THERMAL_NORMAL = 0,
+	CVP_THERMAL_LOW,
+	CVP_THERMAL_HIGH,
+	CVP_THERMAL_CRITICAL
+};
+
+struct msm_cvp_gov_data {
+	struct cvp_bus_vote_data *data;
+	u32 data_count;
+};
+
+enum msm_cvp_power_mode {
+	CVP_POWER_NORMAL = 0,
+	CVP_POWER_LOW,
+	CVP_POWER_TURBO
+};
+
+struct cvp_bus_vote_data {
+	u32 domain;
+	u32 ddr_bw;
+	u32 sys_cache_bw;
+	enum msm_cvp_power_mode power_mode;
+	bool use_sys_cache;
+};
+
+struct cvp_hal_cmd_sys_get_property_packet {
+	u32 size;
+	u32 packet_type;
+	u32 num_properties;
+	u32 rg_property_data[1];
+};
+
+#define call_hfi_op(q, op, args...)			\
+	(((q) && (q)->op) ? ((q)->op(args)) : 0)
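+
+/*
+ * Example: a guarded indirect call through the ops table; the macro
+ * evaluates to 0 when either the device or the op is missing, e.g.
+ *
+ *	rc = call_hfi_op(hdev, suspend, hdev->hfi_device_data);
+ */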
+
+struct msm_cvp_hfi_defs {
+	unsigned int size;
+	unsigned int type;
+	unsigned int buf_offset;
+	unsigned int buf_num;
+	enum hal_command_response resp;
+};
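+
+/*
+ * Example entry (a sketch using the HFI_DFS_* values defined above;
+ * the table itself is declared as cvp_hfi_defs[] at the end of this
+ * header and defined in the driver source):
+ *
+ *	{
+ *		.size = HFI_DFS_FRAME_CMD_SIZE,
+ *		.type = HFI_CMD_SESSION_CVP_DFS_FRAME,
+ *		.buf_offset = HFI_DFS_FRAME_BUFFERS_OFFSET,
+ *		.buf_num = HFI_DFS_BUF_NUM,
+ *		.resp = HAL_SESSION_DFS_FRAME_CMD_DONE,
+ *	},
+ */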
+
+struct cvp_hfi_device {
+	void *hfi_device_data;
+	/*Add function pointers for all the hfi functions below*/
+	int (*core_init)(void *device);
+	int (*core_release)(void *device);
+	int (*core_trigger_ssr)(void *device, enum hal_ssr_trigger_type);
+	int (*session_init)(void *device, void *session_id, void **new_session);
+	int (*session_end)(void *session);
+	int (*session_abort)(void *session);
+	int (*session_set_buffers)(void *sess, u32 iova, u32 size);
+	int (*session_release_buffers)(void *sess);
+	int (*session_send)(void *sess, struct cvp_kmd_hfi_packet *in_pkt);
+	int (*session_flush)(void *sess);
+	int (*scale_clocks)(void *dev, u32 freq);
+	int (*vote_bus)(void *dev, struct cvp_bus_vote_data *data,
+			int num_data);
+	int (*get_fw_info)(void *dev, struct cvp_hal_fw_info *fw_info);
+	int (*session_clean)(void *sess);
+	int (*get_core_capabilities)(void *dev);
+	int (*suspend)(void *dev);
+	int (*resume)(void *dev);
+	int (*flush_debug_queue)(void *dev);
+	int (*noc_error_info)(void *dev);
+	int (*validate_session)(void *sess, const char *func);
+};
+
+typedef void (*hfi_cmd_response_callback) (enum hal_command_response cmd,
+			void *data);
+typedef void (*msm_cvp_callback) (enum hal_command_response response,
+			void *callback);
+
+struct cvp_hfi_device *cvp_hfi_initialize(enum msm_cvp_hfi_type hfi_type,
+		u32 device_id, struct msm_cvp_platform_resources *res,
+		hfi_cmd_response_callback callback);
+void cvp_hfi_deinitialize(enum msm_cvp_hfi_type hfi_type,
+			struct cvp_hfi_device *hdev);
+
+int get_pkt_index(struct cvp_hal_session_cmd_pkt *hdr);
+int get_signal_from_pkt_type(unsigned int type);
+int get_hfi_version(void);
+unsigned int get_msg_size(void);
+unsigned int get_msg_session_id(void *msg);
+unsigned int get_msg_errorcode(void *msg);
+int get_msg_opconfigs(void *msg, unsigned int *session_id,
+		unsigned int *error_type, unsigned int *config_id);
+extern const struct msm_cvp_hfi_defs cvp_hfi_defs[];
+
+#endif /*__CVP_HFI_API_H__ */

+ 453 - 0
msm/eva/cvp_hfi_helper.h

@@ -0,0 +1,453 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __H_CVP_HFI_HELPER_H__
+#define __H_CVP_HFI_HELPER_H__
+
+#define HFI_COMMON_BASE				(0)
+#define HFI_DOMAIN_BASE_COMMON		(HFI_COMMON_BASE + 0)
+#define HFI_DOMAIN_BASE_CVP			(HFI_COMMON_BASE + 0x04000000)
+
+#define HFI_ARCH_COMMON_OFFSET		(0)
+
+#define  HFI_CMD_START_OFFSET		(0x00010000)
+#define  HFI_MSG_START_OFFSET		(0x00020000)
+
+#define HFI_ERR_NONE						HFI_COMMON_BASE
+#define HFI_ERR_SYS_FATAL				(HFI_COMMON_BASE + 0x1)
+#define HFI_ERR_SYS_INVALID_PARAMETER		(HFI_COMMON_BASE + 0x2)
+#define HFI_ERR_SYS_VERSION_MISMATCH		(HFI_COMMON_BASE + 0x3)
+#define HFI_ERR_SYS_INSUFFICIENT_RESOURCES	(HFI_COMMON_BASE + 0x4)
+#define HFI_ERR_SYS_MAX_SESSIONS_REACHED	(HFI_COMMON_BASE + 0x5)
+#define HFI_ERR_SYS_UNSUPPORTED_CODEC		(HFI_COMMON_BASE + 0x6)
+#define HFI_ERR_SYS_SESSION_IN_USE			(HFI_COMMON_BASE + 0x7)
+#define HFI_ERR_SYS_SESSION_ID_OUT_OF_RANGE	(HFI_COMMON_BASE + 0x8)
+#define HFI_ERR_SYS_UNSUPPORTED_DOMAIN		(HFI_COMMON_BASE + 0x9)
+#define HFI_ERR_SYS_NOC_ERROR			(HFI_COMMON_BASE + 0x11)
+#define HFI_ERR_SESSION_FATAL			(HFI_COMMON_BASE + 0x1001)
+#define HFI_ERR_SESSION_INVALID_PARAMETER	(HFI_COMMON_BASE + 0x1002)
+#define HFI_ERR_SESSION_BAD_POINTER		(HFI_COMMON_BASE + 0x1003)
+#define HFI_ERR_SESSION_INVALID_SESSION_ID	(HFI_COMMON_BASE + 0x1004)
+#define HFI_ERR_SESSION_INVALID_STREAM_ID	(HFI_COMMON_BASE + 0x1005)
+#define HFI_ERR_SESSION_INCORRECT_STATE_OPERATION		\
+	(HFI_COMMON_BASE + 0x1006)
+#define HFI_ERR_SESSION_UNSUPPORTED_PROPERTY	(HFI_COMMON_BASE + 0x1007)
+
+#define HFI_ERR_SESSION_UNSUPPORTED_SETTING	(HFI_COMMON_BASE + 0x1008)
+
+#define HFI_ERR_SESSION_INSUFFICIENT_RESOURCES	(HFI_COMMON_BASE + 0x1009)
+
+#define HFI_ERR_SESSION_STREAM_CORRUPT		(HFI_COMMON_BASE + 0x100B)
+#define HFI_ERR_SESSION_ENC_OVERFLOW		(HFI_COMMON_BASE + 0x100C)
+#define HFI_ERR_SESSION_UNSUPPORTED_STREAM	(HFI_COMMON_BASE + 0x100D)
+#define HFI_ERR_SESSION_CMDSIZE			(HFI_COMMON_BASE + 0x100E)
+#define HFI_ERR_SESSION_UNSUPPORT_CMD		(HFI_COMMON_BASE + 0x100F)
+#define HFI_ERR_SESSION_UNSUPPORT_BUFFERTYPE	(HFI_COMMON_BASE + 0x1010)
+#define HFI_ERR_SESSION_BUFFERCOUNT_TOOSMALL	(HFI_COMMON_BASE + 0x1011)
+#define HFI_ERR_SESSION_INVALID_SCALE_FACTOR	(HFI_COMMON_BASE + 0x1012)
+#define HFI_ERR_SESSION_UPSCALE_NOT_SUPPORTED	(HFI_COMMON_BASE + 0x1013)
+#define HFI_ERR_SESSION_FLUSHED			(HFI_COMMON_BASE + 0x101C)
+
+#define HFI_EVENT_SYS_ERROR				(HFI_COMMON_BASE + 0x1)
+#define HFI_EVENT_SESSION_ERROR			(HFI_COMMON_BASE + 0x2)
+
+#define  HFI_TME_PROFILE_DEFAULT	0x00000001
+#define  HFI_TME_PROFILE_FRC		0x00000002
+#define  HFI_TME_PROFILE_ASW		0x00000004
+#define  HFI_TME_PROFILE_DFS_BOKEH	0x00000008
+
+#define HFI_TME_LEVEL_INTEGER		0x00000001
+
+#define HFI_BUFFER_INPUT				(HFI_COMMON_BASE + 0x1)
+#define HFI_BUFFER_OUTPUT				(HFI_COMMON_BASE + 0x2)
+#define HFI_BUFFER_OUTPUT2				(HFI_COMMON_BASE + 0x3)
+#define HFI_BUFFER_INTERNAL_PERSIST		(HFI_COMMON_BASE + 0x4)
+#define HFI_BUFFER_INTERNAL_PERSIST_1		(HFI_COMMON_BASE + 0x5)
+#define HFI_BUFFER_COMMON_INTERNAL_SCRATCH	(HFI_COMMON_BASE + 0x6)
+#define HFI_BUFFER_COMMON_INTERNAL_SCRATCH_1	(HFI_COMMON_BASE + 0x7)
+#define HFI_BUFFER_COMMON_INTERNAL_SCRATCH_2	(HFI_COMMON_BASE + 0x8)
+#define HFI_BUFFER_COMMON_INTERNAL_RECON	(HFI_COMMON_BASE + 0x9)
+#define HFI_BUFFER_EXTRADATA_OUTPUT		(HFI_COMMON_BASE + 0xA)
+#define HFI_BUFFER_EXTRADATA_OUTPUT2		(HFI_COMMON_BASE + 0xB)
+#define HFI_BUFFER_EXTRADATA_INPUT		(HFI_COMMON_BASE + 0xC)
+
+
+#define HFI_PROPERTY_SYS_COMMON_START		\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x0000)
+#define HFI_PROPERTY_SYS_DEBUG_CONFIG		\
+	(HFI_PROPERTY_SYS_COMMON_START + 0x001)
+#define HFI_PROPERTY_SYS_RESOURCE_OCMEM_REQUIREMENT_INFO	\
+	(HFI_PROPERTY_SYS_COMMON_START + 0x002)
+#define HFI_PROPERTY_SYS_CONFIG_VCODEC_CLKFREQ				\
+	(HFI_PROPERTY_SYS_COMMON_START + 0x003)
+#define HFI_PROPERTY_SYS_IDLE_INDICATOR         \
+	(HFI_PROPERTY_SYS_COMMON_START + 0x004)
+#define  HFI_PROPERTY_SYS_CODEC_POWER_PLANE_CTRL     \
+	(HFI_PROPERTY_SYS_COMMON_START + 0x005)
+#define  HFI_PROPERTY_SYS_IMAGE_VERSION    \
+	(HFI_PROPERTY_SYS_COMMON_START + 0x006)
+#define  HFI_PROPERTY_SYS_CONFIG_COVERAGE    \
+	(HFI_PROPERTY_SYS_COMMON_START + 0x007)
+#define  HFI_PROPERTY_SYS_UBWC_CONFIG    \
+	(HFI_PROPERTY_SYS_COMMON_START + 0x008)
+
+#define HFI_DEBUG_MSG_LOW					0x00000001
+#define HFI_DEBUG_MSG_MEDIUM					0x00000002
+#define HFI_DEBUG_MSG_HIGH					0x00000004
+#define HFI_DEBUG_MSG_ERROR					0x00000008
+#define HFI_DEBUG_MSG_FATAL					0x00000010
+#define HFI_DEBUG_MSG_PERF					0x00000020
+
+#define HFI_DEBUG_MODE_QUEUE					0x00000001
+#define HFI_DEBUG_MODE_QDSS					0x00000002
+
+struct cvp_hfi_debug_config {
+	u32 debug_config;
+	u32 debug_mode;
+};
+
+struct cvp_hfi_enable {
+	u32 enable;
+};
+
+#define HFI_RESOURCE_SYSCACHE 0x00000002
+
+struct cvp_hfi_resource_subcache_type {
+	u32 size;
+	u32 sc_id;
+};
+
+struct cvp_hfi_resource_syscache_info_type {
+	u32 num_entries;
+	struct cvp_hfi_resource_subcache_type rg_subcache_entries[1];
+};
+
+#define HFI_CMD_SYS_COMMON_START			\
+(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + HFI_CMD_START_OFFSET \
+	+ 0x0000)
+#define HFI_CMD_SYS_INIT		(HFI_CMD_SYS_COMMON_START + 0x001)
+#define HFI_CMD_SYS_PC_PREP		(HFI_CMD_SYS_COMMON_START + 0x002)
+#define HFI_CMD_SYS_SET_RESOURCE	(HFI_CMD_SYS_COMMON_START + 0x003)
+#define HFI_CMD_SYS_RELEASE_RESOURCE (HFI_CMD_SYS_COMMON_START + 0x004)
+#define HFI_CMD_SYS_SET_PROPERTY	(HFI_CMD_SYS_COMMON_START + 0x005)
+#define HFI_CMD_SYS_GET_PROPERTY	(HFI_CMD_SYS_COMMON_START + 0x006)
+#define HFI_CMD_SYS_SESSION_INIT	(HFI_CMD_SYS_COMMON_START + 0x007)
+#define HFI_CMD_SYS_SESSION_END		(HFI_CMD_SYS_COMMON_START + 0x008)
+#define HFI_CMD_SYS_SET_BUFFERS		(HFI_CMD_SYS_COMMON_START + 0x009)
+#define HFI_CMD_SYS_SESSION_ABORT	(HFI_CMD_SYS_COMMON_START + 0x00A)
+#define HFI_CMD_SYS_TEST_START		(HFI_CMD_SYS_COMMON_START + 0x100)
+
+#define HFI_MSG_SYS_COMMON_START			\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET +	\
+	HFI_MSG_START_OFFSET + 0x0000)
+#define HFI_MSG_SYS_INIT_DONE			(HFI_MSG_SYS_COMMON_START + 0x1)
+#define HFI_MSG_SYS_PC_PREP_DONE		(HFI_MSG_SYS_COMMON_START + 0x2)
+#define HFI_MSG_SYS_RELEASE_RESOURCE	(HFI_MSG_SYS_COMMON_START + 0x3)
+#define HFI_MSG_SYS_DEBUG			(HFI_MSG_SYS_COMMON_START + 0x4)
+#define HFI_MSG_SYS_SESSION_INIT_DONE	(HFI_MSG_SYS_COMMON_START + 0x6)
+#define HFI_MSG_SYS_SESSION_END_DONE	(HFI_MSG_SYS_COMMON_START + 0x7)
+#define HFI_MSG_SYS_IDLE		(HFI_MSG_SYS_COMMON_START + 0x8)
+#define HFI_MSG_SYS_COV                 (HFI_MSG_SYS_COMMON_START + 0x9)
+#define HFI_MSG_SYS_PROPERTY_INFO	(HFI_MSG_SYS_COMMON_START + 0xA)
+#define HFI_MSG_SYS_SESSION_ABORT_DONE	(HFI_MSG_SYS_COMMON_START + 0xC)
+#define HFI_MSG_SESSION_SYNC_DONE      (HFI_MSG_SESSION_OX_START + 0xD)
+
+#define HFI_MSG_SESSION_COMMON_START		\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET +	\
+	HFI_MSG_START_OFFSET + 0x1000)
+#define HFI_MSG_EVENT_NOTIFY	(HFI_MSG_SESSION_COMMON_START + 0x1)
+#define HFI_MSG_SESSION_GET_SEQUENCE_HEADER_DONE	\
+	(HFI_MSG_SESSION_COMMON_START + 0x2)
+
+#define HFI_CMD_SYS_TEST_SSR	(HFI_CMD_SYS_TEST_START + 0x1)
+#define HFI_TEST_SSR_SW_ERR_FATAL	0x1
+#define HFI_TEST_SSR_SW_DIV_BY_ZERO	0x2
+#define HFI_TEST_SSR_HW_WDOG_IRQ	0x3
+
+struct cvp_hal_cmd_pkt_hdr {
+	u32 size;
+	u32 packet_type;
+};
+
+struct cvp_hal_msg_pkt_hdr {
+	u32 size;
+	u32 packet;
+};
+
+struct cvp_hal_session_cmd_pkt {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+};
+
+struct cvp_hfi_cmd_sys_init_packet {
+	u32 size;
+	u32 packet_type;
+	u32 arch_type;
+};
+
+struct cvp_hfi_cmd_sys_pc_prep_packet {
+	u32 size;
+	u32 packet_type;
+};
+
+struct cvp_hfi_cmd_sys_set_resource_packet {
+	u32 size;
+	u32 packet_type;
+	u32 resource_handle;
+	u32 resource_type;
+	u32 rg_resource_data[1];
+};
+
+struct cvp_hfi_cmd_sys_release_resource_packet {
+	u32 size;
+	u32 packet_type;
+	u32 resource_type;
+	u32 resource_handle;
+};
+
+struct cvp_hfi_cmd_sys_set_property_packet {
+	u32 size;
+	u32 packet_type;
+	u32 num_properties;
+	u32 rg_property_data[1];
+};
+
+struct cvp_hfi_cmd_sys_get_property_packet {
+	u32 size;
+	u32 packet_type;
+	u32 num_properties;
+	u32 rg_property_data[1];
+};
+
+enum HFI_SESSION_TYPE {
+	HFI_SESSION_CV = 1,
+	HFI_SESSION_DME,
+	HFI_SESSION_ODT,
+	HFI_SESSION_FD
+};
+
+struct cvp_hfi_cmd_sys_session_init_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 session_type;
+	u32 session_kmask;
+	u32 session_prio;
+	u32 is_secure;
+	u32 dsp_ac_mask;
+};
+
+struct cvp_hfi_cmd_sys_session_end_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+};
+
+struct cvp_hfi_cmd_sys_set_buffers_packet {
+	u32 size;
+	u32 packet_type;
+	u32 buffer_type;
+	u32 buffer_size;
+	u32 num_buffers;
+	u32 rg_buffer_addr[1];
+};
+
+struct cvp_hfi_cmd_sys_set_ubwc_config_packet_type {
+	u32 size;
+	u32 packet_type;
+	struct {
+		u32 max_channel_override : 1;
+		u32 mal_length_override : 1;
+		u32 hb_override : 1;
+		u32 bank_swzl_level_override : 1;
+		u32 bank_spreading_override : 1;
+		u32 reserved : 27;
+	} override_bit_info;
+	u32 max_channels;
+	u32 mal_length;
+	u32 highest_bank_bit;
+	u32 bank_swzl_level;
+	u32 bank_spreading;
+	u32 reserved[2];
+};
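+
+/*
+ * Illustrative usage (reviewer sketch, not part of the interface
+ * definition): firmware consumes a value field only when the matching
+ * override bit is set, e.g. highest_bank_bit = 16 paired with
+ * override_bit_info.hb_override = 1; an override bit left at 0 keeps
+ * the firmware default for that field.
+ */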
+
+struct cvp_hfi_cmd_session_set_property_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 num_properties;
+	u32 rg_property_data[1];
+};
+
+struct cvp_hfi_client {
+	u32 transaction_id;
+	u32 data1;
+	u32 data2;
+	u64 kdata;
+	u32 reserved1;
+	u32 reserved2;
+} __packed;
+
+struct cvp_hfi_buf_type {
+	u32 iova;
+	u32 size;
+	u32 offset;
+	u32 flags;
+	u32 reserved1;
+	u32 reserved2;
+};
+
+struct cvp_hfi_cmd_session_set_buffers_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	struct cvp_hfi_client client_data;
+	struct cvp_hfi_buf_type buf_type;
+} __packed;
+
+struct cvp_session_release_buffers_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	struct cvp_hfi_client client_data;
+	u32 kernel_type;
+	u32 buffer_type;
+	u32 num_buffers;
+	u32 buffer_idx;
+} __packed;
+
+struct cvp_hfi_cmd_session_hdr {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	struct cvp_hfi_client client_data;
+	u32 stream_idx;
+} __packed;
+
+struct cvp_hfi_msg_session_hdr {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+	struct cvp_hfi_client client_data;
+	u32 stream_idx;
+} __packed;
+
+struct cvp_hfi_buffer_mapping_type {
+	u32 index;
+	u32 device_addr;
+	u32 size;
+};
+
+struct cvp_hfi_cmd_session_sync_process_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 sync_id;
+	u32 rg_data[1];
+};
+
+struct cvp_hfi_msg_event_notify_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 event_id;
+	u32 event_data1;
+	u32 event_data2;
+	u32 rg_ext_event_data[1];
+};
+
+struct cvp_hfi_msg_session_op_cfg_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+	struct cvp_hfi_client client_data;
+	u32 stream_idx;
+	u32 op_conf_id;
+} __packed;
+
+struct cvp_hfi_msg_sys_init_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 error_type;
+	u32 num_properties;
+	u32 rg_property_data[1];
+};
+
+struct cvp_hfi_msg_sys_pc_prep_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 error_type;
+};
+
+struct cvp_hfi_msg_sys_release_resource_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 resource_handle;
+	u32 error_type;
+};
+
+struct cvp_hfi_msg_sys_session_init_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+	u32 num_properties;
+	u32 rg_property_data[1];
+};
+
+struct cvp_hfi_msg_sys_session_end_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+};
+
+struct cvp_hfi_msg_session_get_sequence_header_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+	u32 header_len;
+	u32 sequence_header;
+};
+
+struct cvp_hfi_msg_sys_debug_packet {
+	u32 size;
+	u32 packet_type;
+	u32 msg_type;
+	u32 msg_size;
+	u32 time_stamp_hi;
+	u32 time_stamp_lo;
+	u8 rg_msg_data[1];
+};
+
+struct cvp_hfi_packet_header {
+	u32 size;
+	u32 packet_type;
+};
+
+struct cvp_hfi_sfr_struct {
+	u32 bufSize;
+	u8 rg_data[1];
+};
+
+struct cvp_hfi_cmd_sys_test_ssr_packet {
+	u32 size;
+	u32 packet_type;
+	u32 trigger_type;
+};
+
+struct cvp_hfi_msg_sys_session_flush_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+	struct cvp_hfi_client client_data;
+};
+
+#endif

+ 223 - 0
msm/eva/cvp_hfi_io.h

@@ -0,0 +1,223 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __CVP_HFI_IO_H__
+#define __CVP_HFI_IO_H__
+
+#include <linux/io.h>
+
+#define CVP_CPU_BASE_OFFS			0x000A0000
+#define CVP_AON_BASE_OFFS			0x000E0000
+
+#define CVP_CPU_CS_A2HSOFTINTEN		(CVP_CPU_BASE_OFFS + 0x10)
+#define CVP_CPU_CS_A2HSOFTINTENCLR	(CVP_CPU_BASE_OFFS + 0x14)
+#define CVP_CPU_CS_A2HSOFTINT		(CVP_CPU_BASE_OFFS + 0x18)
+#define CVP_CPU_CS_A2HSOFTINTCLR	(CVP_CPU_BASE_OFFS + 0x1C)
+#define CVP_CPU_CS_VMIMSG		(CVP_CPU_BASE_OFFS + 0x34)
+#define CVP_CPU_CS_VMIMSGAG0		(CVP_CPU_BASE_OFFS + 0x38)
+#define CVP_CPU_CS_VMIMSGAG1		(CVP_CPU_BASE_OFFS + 0x3C)
+#define CVP_CPU_CS_VMIMSGAG2		(CVP_CPU_BASE_OFFS + 0x40)
+#define CVP_CPU_CS_VMIMSGAG3		(CVP_CPU_BASE_OFFS + 0x44)
+#define CVP_CPU_CS_SCIACMD			(CVP_CPU_BASE_OFFS + 0x48)
+#define CVP_CPU_CS_H2XSOFTINTEN		(CVP_CPU_BASE_OFFS + 0x148)
+
+/* CVP_CTRL_STATUS */
+#define CVP_CPU_CS_SCIACMDARG0		(CVP_CPU_BASE_OFFS + 0x4C)
+#define CVP_CPU_CS_SCIACMDARG0_BMSK	0xff
+#define CVP_CPU_CS_SCIACMDARG0_SHFT	0x0
+#define CVP_CPU_CS_SCIACMDARG0_HFI_CTRL_ERROR_STATUS_BMSK	0xfe
+#define CVP_CPU_CS_SCIACMDARG0_HFI_CTRL_ERROR_STATUS_SHFT	0x1
+#define CVP_CPU_CS_SCIACMDARG0_HFI_CTRL_INIT_STATUS_BMSK	0x1
+#define CVP_CPU_CS_SCIACMDARG0_HFI_CTRL_INIT_STATUS_SHFT	0x0
+#define CVP_CPU_CS_SCIACMDARG0_HFI_CTRL_PC_READY           0x100
+
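+/*
+ * Illustrative decode (reviewer sketch) of a CVP_CTRL_STATUS value v:
+ *   init_done = v & CVP_CPU_CS_SCIACMDARG0_HFI_CTRL_INIT_STATUS_BMSK;
+ *   error     = (v & CVP_CPU_CS_SCIACMDARG0_HFI_CTRL_ERROR_STATUS_BMSK)
+ *               >> CVP_CPU_CS_SCIACMDARG0_HFI_CTRL_ERROR_STATUS_SHFT;
+ *   pc_ready  = v & CVP_CPU_CS_SCIACMDARG0_HFI_CTRL_PC_READY;
+ */
+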
+/* HFI_QTBL_INFO */
+#define CVP_CPU_CS_SCIACMDARG1		(CVP_CPU_BASE_OFFS + 0x50)
+
+/* HFI_QTBL_ADDR */
+#define CVP_CPU_CS_SCIACMDARG2		(CVP_CPU_BASE_OFFS + 0x54)
+
+/* HFI_VERSION_INFO */
+#define CVP_CPU_CS_SCIACMDARG3		(CVP_CPU_BASE_OFFS + 0x58)
+
+/* CVP_SFR_ADDR */
+#define CVP_CPU_CS_SCIBCMD		(CVP_CPU_BASE_OFFS + 0x5C)
+
+/* CVP_MMAP_ADDR */
+#define CVP_CPU_CS_SCIBCMDARG0		(CVP_CPU_BASE_OFFS + 0x60)
+
+/* CVP_UC_REGION_ADDR */
+#define CVP_CPU_CS_SCIBARG1		(CVP_CPU_BASE_OFFS + 0x64)
+
+/* CVP_UC_REGION_ADDR */
+#define CVP_CPU_CS_SCIBARG2		(CVP_CPU_BASE_OFFS + 0x68)
+
+#define CVP_CPU_CS_SCIBARG3		(CVP_CPU_BASE_OFFS + 0x6C)
+
+#define CVP_CPU_CS_H2ASOFTINTEN		(CVP_CPU_BASE_OFFS + 0x148)
+#define CVP_CPU_CS_H2ASOFTINTENCLR	(CVP_CPU_BASE_OFFS + 0x14c)
+#define CVP_CPU_CS_H2ASOFTINT		(CVP_CPU_BASE_OFFS + 0x150)
+#define CVP_CPU_CS_H2ASOFTINTCLR	(CVP_CPU_BASE_OFFS + 0x154)
+
+/* FAL10 Feature Control */
+#define CVP_CPU_CS_X2RPMh		(CVP_CPU_BASE_OFFS + 0x168)
+#define CVP_CPU_CS_X2RPMh_MASK0_BMSK	0x1
+#define CVP_CPU_CS_X2RPMh_MASK0_SHFT	0x0
+#define CVP_CPU_CS_X2RPMh_MASK1_BMSK	0x2
+#define CVP_CPU_CS_X2RPMh_MASK1_SHFT	0x1
+#define CVP_CPU_CS_X2RPMh_SWOVERRIDE_BMSK	0x4
+#define CVP_CPU_CS_X2RPMh_SWOVERRIDE_SHFT	0x3
+
+/*
+ * --------------------------------------------------------------------------
+ * MODULE: cvp_wrapper
+ * --------------------------------------------------------------------------
+ */
+#define CVP_WRAPPER_BASE_OFFS		0x000B0000
+
+#define CVP_WRAPPER_HW_VERSION		(CVP_WRAPPER_BASE_OFFS + 0x00)
+#define CVP_WRAPPER_HW_VERSION_MAJOR_VERSION_MASK  0x78000000
+#define CVP_WRAPPER_HW_VERSION_MAJOR_VERSION_SHIFT 28
+#define CVP_WRAPPER_HW_VERSION_MINOR_VERSION_MASK  0xFFF0000
+#define CVP_WRAPPER_HW_VERSION_MINOR_VERSION_SHIFT 16
+#define CVP_WRAPPER_HW_VERSION_STEP_VERSION_MASK   0xFFFF
+
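+/*
+ * Illustrative decode (reviewer sketch) of CVP_WRAPPER_HW_VERSION:
+ *   major = (v & CVP_WRAPPER_HW_VERSION_MAJOR_VERSION_MASK)
+ *           >> CVP_WRAPPER_HW_VERSION_MAJOR_VERSION_SHIFT;
+ *   minor = (v & CVP_WRAPPER_HW_VERSION_MINOR_VERSION_MASK)
+ *           >> CVP_WRAPPER_HW_VERSION_MINOR_VERSION_SHIFT;
+ *   step  = v & CVP_WRAPPER_HW_VERSION_STEP_VERSION_MASK;
+ */
+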
+#define CVP_WRAPPER_INTR_STATUS	(CVP_WRAPPER_BASE_OFFS + 0x0C)
+#define CVP_WRAPPER_INTR_STATUS_A2HWD_BMSK	0x8
+#define CVP_WRAPPER_INTR_STATUS_A2H_BMSK	0x4
+
+#define CVP_WRAPPER_INTR_MASK		(CVP_WRAPPER_BASE_OFFS + 0x10)
+#define CVP_FATAL_INTR_BMSK	(CVP_WRAPPER_INTR_MASK_CPU_NOC_BMSK | \
+				CVP_WRAPPER_INTR_MASK_CORE_NOC_BMSK | \
+				CVP_WRAPPER_INTR_MASK_A2HWD_BMSK)
+#define CVP_WRAPPER_INTR_MASK_CPU_NOC_BMSK	0x40
+#define CVP_WRAPPER_INTR_MASK_CORE_NOC_BMSK	0x20
+#define CVP_WRAPPER_INTR_MASK_A2HWD_BMSK	0x8
+#define CVP_WRAPPER_INTR_MASK_A2HCPU_BMSK	0x4
+#define CVP_WRAPPER_INTR_MASK_A2HCPU_SHFT	0x2
+
+#define CVP_WRAPPER_INTR_CLEAR		(CVP_WRAPPER_BASE_OFFS + 0x14)
+#define CVP_WRAPPER_TZ_BASE_OFFS		0x000C0000
+#define CVP_WRAPPER_INTR_CLEAR_A2HWD_BMSK	0x10
+#define CVP_WRAPPER_INTR_CLEAR_A2HWD_SHFT	0x4
+#define CVP_WRAPPER_INTR_CLEAR_A2H_BMSK	0x4
+#define CVP_WRAPPER_INTR_CLEAR_A2H_SHFT	0x2
+#define CVP_WRAPPER_CPU_STATUS		(CVP_WRAPPER_TZ_BASE_OFFS + 0x10)
+#define CVP_WRAPPER_CPU_CGC_DIS	(CVP_WRAPPER_BASE_OFFS + 0x2010)
+
+#define CVP_WRAPPER_CPU_CLOCK_CONFIG	(CVP_WRAPPER_BASE_OFFS + 0x50)
+#define CVP_WRAPPER_DEBUG_BRIDGE_LPI_CONTROL	(CVP_WRAPPER_BASE_OFFS + 0x54)
+#define CVP_WRAPPER_DEBUG_BRIDGE_LPI_STATUS	(CVP_WRAPPER_BASE_OFFS + 0x58)
+#define CVP_WRAPPER_CORE_CLOCK_CONFIG		(CVP_WRAPPER_BASE_OFFS + 0x88)
+
+#define CVP_CTRL_INIT		CVP_CPU_CS_SCIACMD
+
+#define CVP_CTRL_STATUS	CVP_CPU_CS_SCIACMDARG0
+#define CVP_CTRL_INIT_STATUS__M \
+		CVP_CPU_CS_SCIACMDARG0_HFI_CTRL_INIT_STATUS_BMSK
+#define CVP_CTRL_ERROR_STATUS__M \
+		CVP_CPU_CS_SCIACMDARG0_HFI_CTRL_ERROR_STATUS_BMSK
+#define CVP_CTRL_INIT_IDLE_MSG_BMSK \
+		CVP_CPU_CS_SCIACMDARG0_HFI_CTRL_INIT_IDLE_MSG_BMSK
+#define CVP_CTRL_STATUS_PC_READY \
+		CVP_CPU_CS_SCIACMDARG0_HFI_CTRL_PC_READY
+
+#define CVP_QTBL_INFO		CVP_CPU_CS_SCIACMDARG1
+
+#define CVP_QTBL_ADDR		CVP_CPU_CS_SCIACMDARG2
+
+#define CVP_VERSION_INFO	CVP_CPU_CS_SCIACMDARG3
+
+#define CVP_SFR_ADDR		CVP_CPU_CS_SCIBCMD
+#define CVP_MMAP_ADDR		CVP_CPU_CS_SCIBCMDARG0
+#define CVP_UC_REGION_ADDR	CVP_CPU_CS_SCIBARG1
+#define CVP_UC_REGION_SIZE	CVP_CPU_CS_SCIBARG2
+
+/* HFI_DSP_QTBL_ADDR
+ * 31:3 - HFI_DSP_QTBL_ADDR
+ *        4-byte aligned Address
+ */
+#define HFI_DSP_QTBL_ADDR	CVP_CPU_CS_VMIMSG
+
+/* HFI_DSP_UC_REGION_ADDR
+ * 31:20 - HFI_DSP_UC_REGION_ADDR
+ *         1MB aligned address.
+ *         Uncached Region start Address. This region covers
+ *         HFI DSP QTable,
+ *         HFI DSP Queue Headers,
+ *         HFI DSP Queues,
+ */
+#define HFI_DSP_UC_REGION_ADDR	CVP_CPU_CS_VMIMSGAG0
+
+/* HFI_DSP_UC_REGION_SIZE
+ * 31:20 - HFI_DSP_UC_REGION_SIZE
+ *         Multiples of 1MB.
+ *         Size of the DSP_UC_REGION Uncached Region
+ */
+#define HFI_DSP_UC_REGION_SIZE	CVP_CPU_CS_VMIMSGAG1
+
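+/*
+ * Illustrative programming sequence (reviewer sketch; reg_base and the
+ * example addresses are placeholders, not driver symbols): because
+ * bits 31:20 carry the value, a 1 MB-aligned region can be written
+ * verbatim, e.g.
+ *   __raw_writel(0x90100000, reg_base + HFI_DSP_UC_REGION_ADDR);
+ *   __raw_writel(0x00200000, reg_base + HFI_DSP_UC_REGION_SIZE);
+ * the alignment requirement guarantees the low 20 bits are zero.
+ */
+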
+/*
+ * --------------------------------------------------------------------------
+ * MODULE: vcodec noc error log registers
+ * --------------------------------------------------------------------------
+ */
+#define CVP_NOC_BASE_OFFS		0x000D0000
+#define CVP_NOC_ERR_SWID_LOW_OFFS	(CVP_NOC_BASE_OFFS + 0x80)
+#define CVP_NOC_ERR_SWID_HIGH_OFFS	(CVP_NOC_BASE_OFFS + 0x84)
+#define CVP_NOC_ERR_MAINCTL_LOW_OFFS	(CVP_NOC_BASE_OFFS + 0x88)
+#define CVP_NOC_ERR_ERRVLD_LOW_OFFS	(CVP_NOC_BASE_OFFS + 0x90)
+#define CVP_NOC_ERR_ERRCLR_LOW_OFFS	(CVP_NOC_BASE_OFFS + 0x98)
+#define CVP_NOC_ERR_ERRLOG0_LOW_OFFS	(CVP_NOC_BASE_OFFS + 0xA0)
+#define CVP_NOC_ERR_ERRLOG0_HIGH_OFFS	(CVP_NOC_BASE_OFFS + 0xA4)
+#define CVP_NOC_ERR_ERRLOG1_LOW_OFFS	(CVP_NOC_BASE_OFFS + 0xA8)
+#define CVP_NOC_ERR_ERRLOG1_HIGH_OFFS	(CVP_NOC_BASE_OFFS + 0xAC)
+#define CVP_NOC_ERR_ERRLOG2_LOW_OFFS	(CVP_NOC_BASE_OFFS + 0xB0)
+#define CVP_NOC_ERR_ERRLOG2_HIGH_OFFS	(CVP_NOC_BASE_OFFS + 0xB4)
+#define CVP_NOC_ERR_ERRLOG3_LOW_OFFS	(CVP_NOC_BASE_OFFS + 0xB8)
+#define CVP_NOC_ERR_ERRLOG3_HIGH_OFFS	(CVP_NOC_BASE_OFFS + 0xBC)
+#define CVP_NOC_SBM_SENSELN0_LOW	(CVP_NOC_BASE_OFFS + 0x300)
+
+#define CVP_NOC_CORE_BASE_OFFS			0x00010000
+#define CVP_NOC_MAIN_SIDEBANDMANAGER_SENSELN0_LOW \
+		(CVP_NOC_CORE_BASE_OFFS + 0x1100)
+#define CVP_NOC_MAIN_SIDEBANDMANAGER_SENSELN1_HIGH \
+		(CVP_NOC_CORE_BASE_OFFS + 0x110C)
+#define CVP_NOC_CORE_ERR_SWID_LOW_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x1200)
+#define CVP_NOC_CORE_ERR_SWID_HIGH_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x1204)
+#define CVP_NOC_CORE_ERR_MAINCTL_LOW_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x1208)
+#define CVP_NOC_CORE_ERR_ERRVLD_LOW_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x1210)
+#define CVP_NOC_CORE_ERR_ERRCLR_LOW_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x1218)
+#define CVP_NOC_CORE_ERR_ERRLOG0_LOW_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x1220)
+#define CVP_NOC_CORE_ERR_ERRLOG0_HIGH_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x1224)
+#define CVP_NOC_CORE_ERR_ERRLOG1_LOW_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x1228)
+#define CVP_NOC_CORE_ERR_ERRLOG1_HIGH_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x122C)
+#define CVP_NOC_CORE_ERR_ERRLOG2_LOW_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x1230)
+#define CVP_NOC_CORE_ERR_ERRLOG2_HIGH_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x1234)
+#define CVP_NOC_CORE_ERR_ERRLOG3_LOW_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x1238)
+#define CVP_NOC_CORE_ERR_ERRLOG3_HIGH_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x123C)
+
+#define CVP_AON_WRAPPER_MVP_NOC_LPI_CONTROL	(CVP_AON_BASE_OFFS)
+#define CVP_AON_WRAPPER_MVP_NOC_LPI_STATUS	(CVP_AON_BASE_OFFS + 0x4)
+
+#define CVP_CC_BASE_OFFS			0x000F0000
+#define CVP_CC_MVS0C_GDSCR			(CVP_CC_BASE_OFFS + 0xBF8)
+#define CVP_CC_MVS1C_GDSCR			(CVP_CC_BASE_OFFS + 0xC98)
+#define CVP_CC_MVS1C_CBCR			(CVP_CC_BASE_OFFS + 0xCD4)
+#endif

+ 18 - 0
msm/eva/cvp_private.h

@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _MSM_V4L2_PRIVATE_H_
+#define _MSM_V4L2_PRIVATE_H_
+
+#include <media/msm_cvp_private.h>
+#include "msm_cvp_debug.h"
+
+long cvp_unblocked_ioctl(struct file *filp,
+		unsigned int cmd, unsigned long arg);
+
+long cvp_compat_ioctl(struct file *filp,
+		unsigned int cmd, unsigned long arg);
+
+#endif

+ 462 - 0
msm/eva/hfi_packetization.c

@@ -0,0 +1,462 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include "hfi_packetization.h"
+#include "msm_cvp_debug.h"
+
+/* Set up look-up tables to convert HAL_* to HFI_*.
+ *
+ * The tables below take advantage of the fact that most HAL_* types
+ * are defined as bit masks. Indexing them by raw value would produce
+ * huge, mostly empty arrays, so we apply log2 to each value first to
+ * obtain a compact index.
+ */
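+
+/*
+ * Illustrative example (reviewer sketch; hal_to_hfi, HFI_X and HFI_Y
+ * are placeholder names, not real symbols): a bitwise type taking the
+ * values 0x1/0x2/0x4/0x8 would need a 9-entry array if indexed
+ * directly, but ilog2() maps it onto indices 0..3:
+ *   static const u32 hal_to_hfi[] = {
+ *           [ilog2(0x1)] = HFI_X,
+ *           [ilog2(0x2)] = HFI_Y,
+ *   };
+ *   hfi_val = hal_to_hfi[ilog2(hal_val)];
+ */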
+
+int cvp_create_pkt_cmd_sys_init(struct cvp_hfi_cmd_sys_init_packet *pkt,
+			   u32 arch_type)
+{
+	int rc = 0;
+
+	if (!pkt)
+		return -EINVAL;
+
+	pkt->packet_type = HFI_CMD_SYS_INIT;
+	pkt->size = sizeof(struct cvp_hfi_cmd_sys_init_packet);
+	pkt->arch_type = arch_type;
+	return rc;
+}
+
+int cvp_create_pkt_cmd_sys_pc_prep(struct cvp_hfi_cmd_sys_pc_prep_packet *pkt)
+{
+	int rc = 0;
+
+	if (!pkt)
+		return -EINVAL;
+
+	pkt->packet_type = HFI_CMD_SYS_PC_PREP;
+	pkt->size = sizeof(struct cvp_hfi_cmd_sys_pc_prep_packet);
+	return rc;
+}
+
+int cvp_create_pkt_cmd_sys_debug_config(
+	struct cvp_hfi_cmd_sys_set_property_packet *pkt,
+	u32 mode)
+{
+	struct cvp_hfi_debug_config *hfi;
+
+	if (!pkt)
+		return -EINVAL;
+
+	pkt->size = sizeof(struct cvp_hfi_cmd_sys_set_property_packet) +
+		sizeof(struct cvp_hfi_debug_config) + sizeof(u32);
+	pkt->packet_type = HFI_CMD_SYS_SET_PROPERTY;
+	pkt->num_properties = 1;
+	pkt->rg_property_data[0] = HFI_PROPERTY_SYS_DEBUG_CONFIG;
+	hfi = (struct cvp_hfi_debug_config *) &pkt->rg_property_data[1];
+	hfi->debug_config = mode;
+	hfi->debug_mode = HFI_DEBUG_MODE_QUEUE;
+	if (msm_cvp_fw_debug_mode
+			<= (HFI_DEBUG_MODE_QUEUE | HFI_DEBUG_MODE_QDSS))
+		hfi->debug_mode = msm_cvp_fw_debug_mode;
+	return 0;
+}
+
+int cvp_create_pkt_cmd_sys_coverage_config(
+	struct cvp_hfi_cmd_sys_set_property_packet *pkt,
+	u32 mode)
+{
+	if (!pkt) {
+		dprintk(CVP_ERR, "In %s(), No input packet\n", __func__);
+		return -EINVAL;
+	}
+
+	pkt->size = sizeof(struct cvp_hfi_cmd_sys_set_property_packet) +
+		sizeof(u32);
+	pkt->packet_type = HFI_CMD_SYS_SET_PROPERTY;
+	pkt->num_properties = 1;
+	pkt->rg_property_data[0] = HFI_PROPERTY_SYS_CONFIG_COVERAGE;
+	pkt->rg_property_data[1] = mode;
+	dprintk(CVP_PKT, "Firmware coverage mode %d\n",
+			pkt->rg_property_data[1]);
+	return 0;
+}
+
+int cvp_create_pkt_cmd_sys_set_idle_indicator(
+	struct cvp_hfi_cmd_sys_set_property_packet *pkt,
+	u32 mode)
+{
+	if (!pkt) {
+		dprintk(CVP_ERR, "In %s(), No input packet\n", __func__);
+		return -EINVAL;
+	}
+
+	pkt->size = sizeof(struct cvp_hfi_cmd_sys_set_property_packet) +
+		sizeof(u32);
+	pkt->packet_type = HFI_CMD_SYS_SET_PROPERTY;
+	pkt->num_properties = 1;
+	pkt->rg_property_data[0] = HFI_PROPERTY_SYS_IDLE_INDICATOR;
+	pkt->rg_property_data[1] = mode;
+	dprintk(CVP_PKT, "Firmware idle indicator mode %d\n",
+			pkt->rg_property_data[1]);
+	return 0;
+}
+
+int cvp_create_pkt_cmd_sys_set_resource(
+		struct cvp_hfi_cmd_sys_set_resource_packet *pkt,
+		struct cvp_resource_hdr *res_hdr,
+		void *res_value)
+{
+	int rc = 0;
+	u32 i = 0;
+
+	if (!pkt || !res_hdr || !res_value) {
+		dprintk(CVP_ERR,
+			"Invalid paramas pkt %pK res_hdr %pK res_value %pK\n",
+				pkt, res_hdr, res_value);
+		return -EINVAL;
+	}
+
+	pkt->packet_type = HFI_CMD_SYS_SET_RESOURCE;
+	pkt->size = sizeof(struct cvp_hfi_cmd_sys_set_resource_packet);
+	pkt->resource_handle = hash32_ptr(res_hdr->resource_handle);
+
+	switch (res_hdr->resource_id) {
+	case CVP_RESOURCE_SYSCACHE:
+	{
+		struct cvp_hfi_resource_syscache_info_type *res_sc_info =
+			(struct cvp_hfi_resource_syscache_info_type *)res_value;
+		struct cvp_hfi_resource_subcache_type *res_sc =
+			(struct cvp_hfi_resource_subcache_type *)
+				&(res_sc_info->rg_subcache_entries[0]);
+
+		struct cvp_hfi_resource_syscache_info_type *hfi_sc_info =
+			(struct cvp_hfi_resource_syscache_info_type *)
+				&pkt->rg_resource_data[0];
+
+		struct cvp_hfi_resource_subcache_type *hfi_sc =
+			(struct cvp_hfi_resource_subcache_type *)
+			&(hfi_sc_info->rg_subcache_entries[0]);
+
+		pkt->resource_type = HFI_RESOURCE_SYSCACHE;
+		hfi_sc_info->num_entries = res_sc_info->num_entries;
+
+		pkt->size += (sizeof(struct cvp_hfi_resource_subcache_type))
+				 * hfi_sc_info->num_entries;
+
+		for (i = 0; i < hfi_sc_info->num_entries; i++) {
+			hfi_sc[i] = res_sc[i];
+			dprintk(CVP_PKT, "entry hfi#%d, sc_id %d, size %d\n",
+					i, hfi_sc[i].sc_id, hfi_sc[i].size);
+		}
+		break;
+	}
+	default:
+		dprintk(CVP_ERR,
+			"Invalid resource_id %d\n", res_hdr->resource_id);
+		rc = -ENOTSUPP;
+	}
+
+	return rc;
+}
+
+int cvp_create_pkt_cmd_sys_release_resource(
+		struct cvp_hfi_cmd_sys_release_resource_packet *pkt,
+		struct cvp_resource_hdr *res_hdr)
+{
+	int rc = 0;
+
+	if (!pkt || !res_hdr) {
+		dprintk(CVP_ERR,
+			"Invalid paramas pkt %pK res_hdr %pK\n",
+				pkt, res_hdr);
+		return -EINVAL;
+	}
+
+	pkt->size = sizeof(struct cvp_hfi_cmd_sys_release_resource_packet);
+	pkt->packet_type = HFI_CMD_SYS_RELEASE_RESOURCE;
+	pkt->resource_handle = hash32_ptr(res_hdr->resource_handle);
+
+	switch (res_hdr->resource_id) {
+	case CVP_RESOURCE_SYSCACHE:
+		pkt->resource_type = HFI_RESOURCE_SYSCACHE;
+		break;
+	default:
+		dprintk(CVP_ERR,
+			 "Invalid resource_id %d\n", res_hdr->resource_id);
+		rc = -ENOTSUPP;
+	}
+
+	dprintk(CVP_PKT,
+		"rel_res: pkt_type 0x%x res_type 0x%x prepared\n",
+		pkt->packet_type, pkt->resource_type);
+
+	return rc;
+}
+
+inline int cvp_create_pkt_cmd_sys_session_init(
+		struct cvp_hfi_cmd_sys_session_init_packet *pkt,
+		struct cvp_hal_session *session)
+{
+	int rc = 0;
+	struct msm_cvp_inst *inst = session->session_id;
+
+	if (!pkt || !inst)
+		return -EINVAL;
+
+	pkt->size = sizeof(struct cvp_hfi_cmd_sys_session_init_packet);
+	pkt->packet_type = HFI_CMD_SYS_SESSION_INIT;
+	pkt->session_id = hash32_ptr(session);
+	pkt->session_type = inst->prop.type;
+	pkt->session_kmask = inst->prop.kernel_mask;
+	pkt->session_prio = inst->prop.priority;
+	pkt->is_secure = inst->prop.is_secure;
+	pkt->dsp_ac_mask = inst->prop.dsp_mask;
+
+	return rc;
+}
+
+static int create_pkt_cmd_sys_ubwc_config(
+		struct cvp_hfi_cmd_sys_set_property_packet *pkt,
+		struct msm_cvp_ubwc_config_data *ubwc_config)
+{
+	int rc = 0;
+	struct cvp_hfi_cmd_sys_set_ubwc_config_packet_type *hfi;
+
+	if (!pkt)
+		return -EINVAL;
+
+	pkt->size = sizeof(struct cvp_hfi_cmd_sys_set_property_packet) +
+		sizeof(struct cvp_hfi_cmd_sys_set_ubwc_config_packet_type)
+		+ sizeof(u32);
+
+	pkt->packet_type = HFI_CMD_SYS_SET_PROPERTY;
+	pkt->num_properties = 1;
+	pkt->rg_property_data[0] = HFI_PROPERTY_SYS_UBWC_CONFIG;
+	hfi = (struct cvp_hfi_cmd_sys_set_ubwc_config_packet_type *)
+		&pkt->rg_property_data[1];
+
+	hfi->max_channels = ubwc_config->max_channels;
+	hfi->override_bit_info.max_channel_override =
+		ubwc_config->override_bit_info.max_channel_override;
+
+	hfi->mal_length = ubwc_config->mal_length;
+	hfi->override_bit_info.mal_length_override =
+		ubwc_config->override_bit_info.mal_length_override;
+
+	hfi->highest_bank_bit = ubwc_config->highest_bank_bit;
+	hfi->override_bit_info.hb_override =
+		ubwc_config->override_bit_info.hb_override;
+
+	hfi->bank_swzl_level = ubwc_config->bank_swzl_level;
+	hfi->override_bit_info.bank_swzl_level_override =
+		ubwc_config->override_bit_info.bank_swzl_level_override;
+
+	hfi->bank_spreading = ubwc_config->bank_spreading;
+	hfi->override_bit_info.bank_spreading_override =
+		ubwc_config->override_bit_info.bank_spreading_override;
+	hfi->size = sizeof(struct cvp_hfi_cmd_sys_set_ubwc_config_packet_type);
+
+	return rc;
+}
+
+int cvp_create_pkt_cmd_session_cmd(struct cvp_hal_session_cmd_pkt *pkt,
+			int pkt_type, struct cvp_hal_session *session)
+{
+	int rc = 0;
+
+	if (!pkt)
+		return -EINVAL;
+
+	pkt->size = sizeof(struct cvp_hal_session_cmd_pkt);
+	pkt->packet_type = pkt_type;
+	pkt->session_id = hash32_ptr(session);
+
+	return rc;
+}
+
+int cvp_create_pkt_cmd_sys_power_control(
+	struct cvp_hfi_cmd_sys_set_property_packet *pkt, u32 enable)
+{
+	struct cvp_hfi_enable *hfi;
+
+	if (!pkt) {
+		dprintk(CVP_ERR, "No input packet\n");
+		return -EINVAL;
+	}
+
+	pkt->size = sizeof(struct cvp_hfi_cmd_sys_set_property_packet) +
+		sizeof(struct cvp_hfi_enable) + sizeof(u32);
+	pkt->packet_type = HFI_CMD_SYS_SET_PROPERTY;
+	pkt->num_properties = 1;
+	pkt->rg_property_data[0] = HFI_PROPERTY_SYS_CODEC_POWER_PLANE_CTRL;
+	hfi = (struct cvp_hfi_enable *) &pkt->rg_property_data[1];
+	hfi->enable = enable;
+	return 0;
+}
+
+int cvp_create_pkt_cmd_session_set_buffers(
+		void *cmd,
+		struct cvp_hal_session *session,
+		u32 iova,
+		u32 size)
+{
+	int rc = 0;
+	struct cvp_hfi_cmd_session_set_buffers_packet *pkt;
+
+	if (!cmd || !session)
+		return -EINVAL;
+
+	pkt = (struct cvp_hfi_cmd_session_set_buffers_packet *)cmd;
+	pkt->packet_type = HFI_CMD_SESSION_CVP_SET_BUFFERS;
+	pkt->session_id = hash32_ptr(session);
+	pkt->buf_type.iova = iova;
+	pkt->buf_type.size = size;
+	pkt->size = sizeof(struct cvp_hfi_cmd_session_set_buffers_packet);
+
+	return rc;
+}
+
+int cvp_create_pkt_cmd_session_release_buffers(
+		void *cmd,
+		struct cvp_hal_session *session)
+{
+	struct cvp_session_release_buffers_packet *pkt;
+
+	if (!cmd || !session)
+		return -EINVAL;
+
+	pkt = (struct cvp_session_release_buffers_packet *)cmd;
+	pkt->packet_type = HFI_CMD_SESSION_CVP_RELEASE_BUFFERS;
+	pkt->session_id = hash32_ptr(session);
+	pkt->num_buffers = 1;
+	pkt->buffer_type = 0;
+	pkt->size = sizeof(struct cvp_session_release_buffers_packet) +
+			((pkt->num_buffers - 1) * sizeof(u32));
+
+	return 0;
+}
+
+int cvp_create_pkt_cmd_session_send(
+		struct cvp_kmd_hfi_packet *out_pkt,
+		struct cvp_hal_session *session,
+		struct cvp_kmd_hfi_packet *in_pkt)
+{
+	int def_idx;
+	struct cvp_hal_session_cmd_pkt *ptr =
+		(struct cvp_hal_session_cmd_pkt *)in_pkt;
+
+	if (!out_pkt || !in_pkt || !session)
+		return -EINVAL;
+
+	if (ptr->size > MAX_HFI_PKT_SIZE * sizeof(unsigned int))
+		goto error_hfi_packet;
+
+	if (ptr->session_id != hash32_ptr(session))
+		goto error_hfi_packet;
+
+	def_idx = get_pkt_index(ptr);
+	if (def_idx < 0) {
+		memcpy(out_pkt, in_pkt, ptr->size);
+		return 0;
+	}
+
+	if (cvp_hfi_defs[def_idx].type != ptr->packet_type)
+		goto error_hfi_packet;
+
+	memcpy(out_pkt, in_pkt, ptr->size);
+
+	return 0;
+
+error_hfi_packet:
+	dprintk(CVP_ERR, "%s incorrect packet: size=%d type=%d sessionid=%d\n",
+		__func__, ptr->size, ptr->packet_type, ptr->session_id);
+
+	return -EINVAL;
+}
+
+static int get_hfi_ssr_type(enum hal_ssr_trigger_type type)
+{
+	int rc = HFI_TEST_SSR_HW_WDOG_IRQ;
+
+	switch (type) {
+	case SSR_ERR_FATAL:
+		rc = HFI_TEST_SSR_SW_ERR_FATAL;
+		break;
+	case SSR_SW_DIV_BY_ZERO:
+		rc = HFI_TEST_SSR_SW_DIV_BY_ZERO;
+		break;
+	case SSR_HW_WDOG_IRQ:
+		rc = HFI_TEST_SSR_HW_WDOG_IRQ;
+		break;
+	default:
+		dprintk(CVP_WARN,
+			"SSR trigger type not recognized, using WDOG.\n");
+	}
+	return rc;
+}
+
+int cvp_create_pkt_ssr_cmd(enum hal_ssr_trigger_type type,
+		struct cvp_hfi_cmd_sys_test_ssr_packet *pkt)
+{
+	if (!pkt) {
+		dprintk(CVP_ERR, "Invalid params, device: %pK\n", pkt);
+		return -EINVAL;
+	}
+	pkt->size = sizeof(struct cvp_hfi_cmd_sys_test_ssr_packet);
+	pkt->packet_type = HFI_CMD_SYS_TEST_SSR;
+	pkt->trigger_type = get_hfi_ssr_type(type);
+	return 0;
+}
+
+int cvp_create_pkt_cmd_sys_image_version(
+		struct cvp_hfi_cmd_sys_get_property_packet *pkt)
+{
+	if (!pkt) {
+		dprintk(CVP_ERR, "%s invalid param :%pK\n", __func__, pkt);
+		return -EINVAL;
+	}
+	pkt->size = sizeof(struct cvp_hfi_cmd_sys_get_property_packet);
+	pkt->packet_type = HFI_CMD_SYS_GET_PROPERTY;
+	pkt->num_properties = 1;
+	pkt->rg_property_data[0] = HFI_PROPERTY_SYS_IMAGE_VERSION;
+	return 0;
+}
+
+static struct cvp_hfi_packetization_ops hfi_default = {
+	.sys_init = cvp_create_pkt_cmd_sys_init,
+	.sys_pc_prep = cvp_create_pkt_cmd_sys_pc_prep,
+	.sys_power_control = cvp_create_pkt_cmd_sys_power_control,
+	.sys_set_resource = cvp_create_pkt_cmd_sys_set_resource,
+	.sys_debug_config = cvp_create_pkt_cmd_sys_debug_config,
+	.sys_coverage_config = cvp_create_pkt_cmd_sys_coverage_config,
+	.sys_set_idle_indicator = cvp_create_pkt_cmd_sys_set_idle_indicator,
+	.sys_release_resource = cvp_create_pkt_cmd_sys_release_resource,
+	.sys_image_version = cvp_create_pkt_cmd_sys_image_version,
+	.sys_ubwc_config = create_pkt_cmd_sys_ubwc_config,
+	.ssr_cmd = cvp_create_pkt_ssr_cmd,
+	.session_init = cvp_create_pkt_cmd_sys_session_init,
+	.session_cmd = cvp_create_pkt_cmd_session_cmd,
+	.session_set_buffers =
+		cvp_create_pkt_cmd_session_set_buffers,
+	.session_release_buffers =
+		cvp_create_pkt_cmd_session_release_buffers,
+	.session_send = cvp_create_pkt_cmd_session_send,
+};
+
+struct cvp_hfi_packetization_ops *cvp_hfi_get_pkt_ops_handle(
+			enum hfi_packetization_type type)
+{
+	dprintk(CVP_HFI, "%s selected\n",
+		type == HFI_PACKETIZATION_4XX ?
+		"4xx packetization" : "Unknown hfi");
+
+	switch (type) {
+	case HFI_PACKETIZATION_4XX:
+		return &hfi_default;
+	}
+
+	return NULL;
+}

+ 75 - 0
msm/eva/hfi_packetization.h

@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+#ifndef __HFI_PACKETIZATION__
+#define __HFI_PACKETIZATION__
+
+#include <linux/types.h>
+#include "cvp_hfi_helper.h"
+#include "cvp_hfi.h"
+#include "cvp_hfi_api.h"
+
+#define call_hfi_pkt_op(q, op, ...)			\
+	(((q) && (q)->pkt_ops && (q)->pkt_ops->op) ?	\
+	((q)->pkt_ops->op(__VA_ARGS__)) : 0)
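+
+/*
+ * Illustrative usage (reviewer sketch; dev and the arch argument are
+ * placeholders): the macro NULL-guards both the ops table and the
+ * individual hook, so a missing implementation degrades to a 0 return
+ * instead of a NULL dereference, e.g.
+ *   rc = call_hfi_pkt_op(dev, sys_init, &pkt, arch_type);
+ */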
+
+enum hfi_packetization_type {
+	HFI_PACKETIZATION_4XX,
+};
+
+struct cvp_hfi_packetization_ops {
+	int (*sys_init)(struct cvp_hfi_cmd_sys_init_packet *pkt, u32 arch_type);
+	int (*sys_pc_prep)(struct cvp_hfi_cmd_sys_pc_prep_packet *pkt);
+	int (*sys_power_control)(
+		struct cvp_hfi_cmd_sys_set_property_packet *pkt,
+		u32 enable);
+	int (*sys_set_resource)(
+		struct cvp_hfi_cmd_sys_set_resource_packet *pkt,
+		struct cvp_resource_hdr *resource_hdr,
+		void *resource_value);
+	int (*sys_debug_config)(struct cvp_hfi_cmd_sys_set_property_packet *pkt,
+			u32 mode);
+	int (*sys_coverage_config)(
+			struct cvp_hfi_cmd_sys_set_property_packet *pkt,
+			u32 mode);
+	int (*sys_set_idle_indicator)(
+		struct cvp_hfi_cmd_sys_set_property_packet *pkt,
+		u32 mode);
+	int (*sys_release_resource)(
+		struct cvp_hfi_cmd_sys_release_resource_packet *pkt,
+		struct cvp_resource_hdr *resource_hdr);
+	int (*sys_image_version)(
+			struct cvp_hfi_cmd_sys_get_property_packet *pkt);
+	int (*sys_ubwc_config)(struct cvp_hfi_cmd_sys_set_property_packet *pkt,
+		struct msm_cvp_ubwc_config_data *ubwc_config);
+	int (*ssr_cmd)(enum hal_ssr_trigger_type type,
+		struct cvp_hfi_cmd_sys_test_ssr_packet *pkt);
+	int (*session_init)(
+		struct cvp_hfi_cmd_sys_session_init_packet *pkt,
+		struct cvp_hal_session *session);
+	int (*session_cmd)(struct cvp_hal_session_cmd_pkt *pkt,
+		int pkt_type, struct cvp_hal_session *session);
+	int (*session_set_buffers)(
+		void *pkt,
+		struct cvp_hal_session *session,
+		u32 iova,
+		u32 size);
+	int (*session_release_buffers)(
+		void *pkt,
+		struct cvp_hal_session *session);
+	int (*session_get_buf_req)(
+		struct cvp_hfi_cmd_session_get_property_packet *pkt,
+		struct cvp_hal_session *session);
+	int (*session_sync_process)(
+		struct cvp_hfi_cmd_session_sync_process_packet *pkt,
+		struct cvp_hal_session *session);
+	int (*session_send)(
+			struct cvp_kmd_hfi_packet *out_pkt,
+			struct cvp_hal_session *session,
+			struct cvp_kmd_hfi_packet *in_pkt);
+};
+
+struct cvp_hfi_packetization_ops *cvp_hfi_get_pkt_ops_handle(
+		enum hfi_packetization_type);
+#endif

+ 676 - 0
msm/eva/hfi_response_handler.c

@@ -0,0 +1,676 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/hash.h>
+#include <linux/soc/qcom/smem.h>
+#include "cvp_hfi_helper.h"
+#include "cvp_hfi_io.h"
+#include "msm_cvp_debug.h"
+#include "cvp_hfi.h"
+#include "msm_cvp_common.h"
+
+extern struct msm_cvp_drv *cvp_driver;
+
+static enum cvp_status hfi_map_err_status(u32 hfi_err)
+{
+	enum cvp_status cvp_err;
+
+	switch (hfi_err) {
+	case HFI_ERR_NONE:
+		cvp_err = CVP_ERR_NONE;
+		break;
+	case HFI_ERR_SYS_FATAL:
+		cvp_err = CVP_ERR_HW_FATAL;
+		break;
+	case HFI_ERR_SYS_NOC_ERROR:
+		cvp_err = CVP_ERR_NOC_ERROR;
+		break;
+	case HFI_ERR_SYS_VERSION_MISMATCH:
+	case HFI_ERR_SYS_INVALID_PARAMETER:
+	case HFI_ERR_SYS_SESSION_ID_OUT_OF_RANGE:
+	case HFI_ERR_SESSION_INVALID_PARAMETER:
+	case HFI_ERR_SESSION_INVALID_SESSION_ID:
+	case HFI_ERR_SESSION_INVALID_STREAM_ID:
+		cvp_err = CVP_ERR_BAD_PARAM;
+		break;
+	case HFI_ERR_SYS_INSUFFICIENT_RESOURCES:
+	case HFI_ERR_SYS_UNSUPPORTED_DOMAIN:
+	case HFI_ERR_SYS_UNSUPPORTED_CODEC:
+	case HFI_ERR_SESSION_UNSUPPORTED_PROPERTY:
+	case HFI_ERR_SESSION_UNSUPPORTED_SETTING:
+	case HFI_ERR_SESSION_INSUFFICIENT_RESOURCES:
+	case HFI_ERR_SESSION_UNSUPPORTED_STREAM:
+		cvp_err = CVP_ERR_NOT_SUPPORTED;
+		break;
+	case HFI_ERR_SYS_MAX_SESSIONS_REACHED:
+		cvp_err = CVP_ERR_MAX_CLIENTS;
+		break;
+	case HFI_ERR_SYS_SESSION_IN_USE:
+		cvp_err = CVP_ERR_CLIENT_PRESENT;
+		break;
+	case HFI_ERR_SESSION_FATAL:
+		cvp_err = CVP_ERR_CLIENT_FATAL;
+		break;
+	case HFI_ERR_SESSION_BAD_POINTER:
+		cvp_err = CVP_ERR_BAD_PARAM;
+		break;
+	case HFI_ERR_SESSION_INCORRECT_STATE_OPERATION:
+		cvp_err = CVP_ERR_BAD_STATE;
+		break;
+	default:
+		cvp_err = CVP_ERR_FAIL;
+		break;
+	}
+	return cvp_err;
+}
+
+static int hfi_process_sys_error(u32 device_id,
+	struct cvp_hfi_msg_event_notify_packet *pkt,
+	struct msm_cvp_cb_info *info)
+{
+	struct msm_cvp_cb_cmd_done cmd_done = {0};
+
+	cmd_done.device_id = device_id;
+	cmd_done.status = hfi_map_err_status(pkt->event_data1);
+
+	info->response_type = HAL_SYS_ERROR;
+	info->response.cmd = cmd_done;
+
+	return 0;
+}
+
+static int hfi_process_session_error(u32 device_id,
+		struct cvp_hfi_msg_event_notify_packet *pkt,
+		struct msm_cvp_cb_info *info)
+{
+	struct msm_cvp_cb_cmd_done cmd_done = {0};
+
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
+	cmd_done.status = hfi_map_err_status(pkt->event_data1);
+	info->response.cmd = cmd_done;
+	dprintk(CVP_INFO, "Received: SESSION_ERROR with event id : %#x %#x\n",
+		pkt->event_data1, pkt->event_data2);
+	switch (pkt->event_data1) {
+	/* Ignore below errors */
+	case HFI_ERR_SESSION_INVALID_SCALE_FACTOR:
+	case HFI_ERR_SESSION_UPSCALE_NOT_SUPPORTED:
+		dprintk(CVP_INFO, "Non Fatal: HFI_EVENT_SESSION_ERROR\n");
+		info->response_type = HAL_RESPONSE_UNUSED;
+		break;
+	default:
+		dprintk(CVP_ERR,
+			"%s: session %x data1 %#x, data2 %#x\n", __func__,
+			pkt->session_id, pkt->event_data1, pkt->event_data2);
+		info->response_type = HAL_RESPONSE_UNUSED;
+		break;
+	}
+
+	return 0;
+}
+
+static int hfi_process_event_notify(u32 device_id,
+		void *hdr, struct msm_cvp_cb_info *info)
+{
+	struct cvp_hfi_msg_event_notify_packet *pkt =
+			(struct cvp_hfi_msg_event_notify_packet *)hdr;
+
+	dprintk(CVP_HFI, "Received: EVENT_NOTIFY\n");
+
+	if (pkt->size < sizeof(struct cvp_hfi_msg_event_notify_packet)) {
+		dprintk(CVP_ERR, "Invalid Params\n");
+		return -E2BIG;
+	}
+
+	switch (pkt->event_id) {
+	case HFI_EVENT_SYS_ERROR:
+		dprintk(CVP_ERR, "HFI_EVENT_SYS_ERROR: %d, %#x\n",
+			pkt->event_data1, pkt->event_data2);
+		return hfi_process_sys_error(device_id, pkt, info);
+
+	case HFI_EVENT_SESSION_ERROR:
+		dprintk(CVP_INFO, "HFI_EVENT_SESSION_ERROR[%#x]\n",
+				pkt->session_id);
+		return hfi_process_session_error(device_id, pkt, info);
+
+	default:
+		*info = (struct msm_cvp_cb_info) {
+			.response_type =  HAL_RESPONSE_UNUSED,
+		};
+
+		return 0;
+	}
+}
+
+static int hfi_process_sys_init_done(u32 device_id,
+		void *hdr, struct msm_cvp_cb_info *info)
+{
+	struct cvp_hfi_msg_sys_init_done_packet *pkt =
+			(struct cvp_hfi_msg_sys_init_done_packet *)hdr;
+	struct msm_cvp_cb_cmd_done cmd_done = {0};
+	enum cvp_status status = CVP_ERR_NONE;
+
+	dprintk(CVP_CORE, "RECEIVED: SYS_INIT_DONE\n");
+	if (sizeof(struct cvp_hfi_msg_sys_init_done_packet) > pkt->size) {
+		dprintk(CVP_ERR, "%s: bad_pkt_size: %d\n", __func__,
+				pkt->size);
+		return -E2BIG;
+	}
+	if (!pkt->num_properties) {
+		dprintk(CVP_CORE,
+				"hal_process_sys_init_done: no_properties\n");
+		goto err_no_prop;
+	}
+
+	status = hfi_map_err_status(pkt->error_type);
+	if (status) {
+		dprintk(CVP_ERR, "%s: status %#x\n",
+			__func__, status);
+		goto err_no_prop;
+	}
+
+err_no_prop:
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = NULL;
+	cmd_done.status = (u32)status;
+	cmd_done.size = sizeof(struct cvp_hal_sys_init_done);
+
+	info->response_type = HAL_SYS_INIT_DONE;
+	info->response.cmd = cmd_done;
+
+	return 0;
+}
+
+enum cvp_status cvp_hfi_process_sys_init_done_prop_read(
+	struct cvp_hfi_msg_sys_init_done_packet *pkt,
+	struct cvp_hal_sys_init_done *sys_init_done)
+{
+	enum cvp_status status = CVP_ERR_NONE;
+	u32 rem_bytes, num_properties;
+	u8 *data_ptr;
+
+	if (!pkt || !sys_init_done) {
+		dprintk(CVP_ERR,
+			"hfi_msg_sys_init_done: Invalid input\n");
+		return CVP_ERR_FAIL;
+	}
+
+	rem_bytes = pkt->size + sizeof(u32) -
+			sizeof(struct cvp_hfi_msg_sys_init_done_packet);
+
+	if (!rem_bytes) {
+		dprintk(CVP_ERR,
+			"hfi_msg_sys_init_done: missing_prop_info\n");
+		return CVP_ERR_FAIL;
+	}
+
+	status = hfi_map_err_status(pkt->error_type);
+	if (status) {
+		dprintk(CVP_ERR, "%s: status %#x\n", __func__, status);
+		return status;
+	}
+
+	data_ptr = (u8 *) &pkt->rg_property_data[0];
+	num_properties = pkt->num_properties;
+	dprintk(CVP_HFI,
+		"%s: data_start %pK, num_properties %#x\n",
+		__func__, data_ptr, num_properties);
+
+	sys_init_done->capabilities = NULL;
+	return status;
+}
+
+static int hfi_process_session_init_done(u32 device_id,
+		void *hdr, struct msm_cvp_cb_info *info)
+{
+	struct cvp_hfi_msg_sys_session_init_done_packet *pkt =
+			(struct cvp_hfi_msg_sys_session_init_done_packet *)hdr;
+	struct msm_cvp_cb_cmd_done cmd_done = {0};
+	struct cvp_hal_session_init_done session_init_done = { {0} };
+
+	dprintk(CVP_SESS, "RECEIVED: SESSION_INIT_DONE[%x]\n", pkt->session_id);
+
+	if (sizeof(struct cvp_hfi_msg_sys_session_init_done_packet)
+			> pkt->size) {
+		dprintk(CVP_ERR,
+			"hal_process_session_init_done: bad_pkt_size\n");
+		return -E2BIG;
+	}
+
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
+	cmd_done.status = hfi_map_err_status(pkt->error_type);
+	cmd_done.data.session_init_done = session_init_done;
+	cmd_done.size = sizeof(struct cvp_hal_session_init_done);
+
+	info->response_type = HAL_SESSION_INIT_DONE;
+	info->response.cmd = cmd_done;
+
+	return 0;
+}
+
+static int hfi_process_session_end_done(u32 device_id,
+		void *hdr, struct msm_cvp_cb_info *info)
+{
+	struct cvp_hfi_msg_sys_session_end_done_packet *pkt =
+			(struct cvp_hfi_msg_sys_session_end_done_packet *)hdr;
+	struct msm_cvp_cb_cmd_done cmd_done = {0};
+
+	dprintk(CVP_SESS, "RECEIVED: SESSION_END_DONE[%#x]\n", pkt->session_id);
+
+	if (!pkt || pkt->size !=
+		sizeof(struct cvp_hfi_msg_sys_session_end_done_packet)) {
+		dprintk(CVP_ERR, "%s: bad packet/packet size\n", __func__);
+		return -E2BIG;
+	}
+
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
+	cmd_done.status = hfi_map_err_status(pkt->error_type);
+	cmd_done.size = 0;
+
+	info->response_type = HAL_SESSION_END_DONE;
+	info->response.cmd = cmd_done;
+
+	return 0;
+}
+
+static int hfi_process_session_abort_done(u32 device_id,
+		void *hdr, struct msm_cvp_cb_info *info)
+{
+	struct cvp_hfi_msg_sys_session_abort_done_packet *pkt =
+		(struct cvp_hfi_msg_sys_session_abort_done_packet *)hdr;
+	struct msm_cvp_cb_cmd_done cmd_done = {0};
+
+	dprintk(CVP_SESS, "RECEIVED: SESSION_ABORT_DONE[%#x]\n",
+			pkt->session_id);
+
+	if (!pkt || pkt->size !=
+		sizeof(struct cvp_hfi_msg_sys_session_abort_done_packet)) {
+		dprintk(CVP_ERR, "%s: bad packet/packet size: %d\n",
+				__func__, pkt ? pkt->size : 0);
+		return -E2BIG;
+	}
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
+	cmd_done.status = hfi_map_err_status(pkt->error_type);
+	cmd_done.size = 0;
+
+	info->response_type = HAL_SESSION_ABORT_DONE;
+	info->response.cmd = cmd_done;
+
+	return 0;
+}
+
+static int hfi_process_session_set_buf_done(u32 device_id,
+		void *hdr, struct msm_cvp_cb_info *info)
+{
+	struct cvp_hfi_msg_session_hdr *pkt =
+			(struct cvp_hfi_msg_session_hdr *)hdr;
+	struct msm_cvp_cb_cmd_done cmd_done = {0};
+	unsigned int pkt_size = get_msg_size();
+
+	if (!pkt || pkt->size < pkt_size) {
+		dprintk(CVP_ERR, "bad packet/packet size %d\n",
+				pkt ? pkt->size : 0);
+		return -E2BIG;
+	}
+	dprintk(CVP_SESS, "RECEIVED:CVP_SET_BUFFER_DONE[%#x]\n",
+			pkt->session_id);
+
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = (void *)(uintptr_t)get_msg_session_id(pkt);
+	cmd_done.status = hfi_map_err_status(get_msg_errorcode(pkt));
+	cmd_done.size = 0;
+
+	info->response_type = HAL_SESSION_SET_BUFFER_DONE;
+	info->response.cmd = cmd_done;
+
+	return 0;
+}
+
+static int hfi_process_session_flush_done(u32 device_id,
+		void *hdr, struct msm_cvp_cb_info *info)
+{
+	struct cvp_hfi_msg_sys_session_flush_done_packet *pkt =
+		(struct cvp_hfi_msg_sys_session_flush_done_packet *)hdr;
+	struct msm_cvp_cb_cmd_done cmd_done = {0};
+
+	dprintk(CVP_SESS, "RECEIVED: SESSION_FLUSH_DONE[%#x]\n",
+			pkt->session_id);
+
+	if (!pkt || pkt->size <
+		sizeof(struct cvp_hfi_msg_sys_session_flush_done_packet)) {
+		dprintk(CVP_ERR, "%s: bad packet/packet size: %d\n",
+				__func__, pkt ? pkt->size : 0);
+		return -E2BIG;
+	}
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
+	cmd_done.status = hfi_map_err_status(pkt->error_type);
+	cmd_done.size = 0;
+
+	info->response_type = HAL_SESSION_FLUSH_DONE;
+	info->response.cmd = cmd_done;
+
+	return 0;
+}
+
+static int hfi_process_session_rel_buf_done(u32 device_id,
+		void *hdr, struct msm_cvp_cb_info *info)
+{
+	struct cvp_hfi_msg_session_hdr *pkt =
+			(struct cvp_hfi_msg_session_hdr *)hdr;
+	struct msm_cvp_cb_cmd_done cmd_done = {0};
+	unsigned int pkt_size = get_msg_size();
+
+	if (!pkt || pkt->size < pkt_size) {
+		dprintk(CVP_ERR, "bad packet/packet size %d\n",
+				pkt ? pkt->size : 0);
+		return -E2BIG;
+	}
+	dprintk(CVP_SESS, "RECEIVED:CVP_RELEASE_BUFFER_DONE[%#x]\n",
+			pkt->session_id);
+
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = (void *)(uintptr_t)get_msg_session_id(pkt);
+	cmd_done.status = hfi_map_err_status(get_msg_errorcode(pkt));
+	cmd_done.size = 0;
+
+	info->response_type = HAL_SESSION_RELEASE_BUFFER_DONE;
+	info->response.cmd = cmd_done;
+
+	return 0;
+}
+
+static int hfi_process_session_cvp_operation_config(u32 device_id,
+		void *hdr, struct msm_cvp_cb_info *info)
+{
+	struct cvp_hfi_msg_session_op_cfg_packet *pkt =
+		(struct cvp_hfi_msg_session_op_cfg_packet *)hdr;
+	struct msm_cvp_cb_cmd_done cmd_done = {0};
+	int signal;
+	unsigned int conf_id, session_id, error_type;
+
+	if (!pkt) {
+		dprintk(CVP_ERR, "%s: invalid param\n", __func__);
+		return -EINVAL;
+	} else if (pkt->size < get_msg_size()) {
+		dprintk(CVP_ERR,
+				"%s: bad_pkt_size\n", __func__);
+		return -E2BIG;
+	}
+
+	get_msg_opconfigs(pkt, &session_id, &error_type, &conf_id);
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = (void *)(uintptr_t)session_id;
+	cmd_done.status = hfi_map_err_status(error_type);
+	cmd_done.size = 0;
+
+	dprintk(CVP_HFI,
+		"%s: device_id=%d status=%d, sessionid=%pK config=%x\n",
+		__func__, device_id, cmd_done.status,
+		cmd_done.session_id, pkt->op_conf_id);
+
+	if (pkt->packet_type == HFI_MSG_SESSION_CVP_SET_PERSIST_BUFFERS)
+		signal = get_signal_from_pkt_type(
+				HFI_CMD_SESSION_CVP_SET_PERSIST_BUFFERS);
+	else if (pkt->packet_type ==
+			HFI_MSG_SESSION_CVP_RELEASE_PERSIST_BUFFERS)
+		signal = get_signal_from_pkt_type(
+			HFI_CMD_SESSION_CVP_RELEASE_PERSIST_BUFFERS);
+	else if (pkt->packet_type == HFI_MSG_SESSION_CVP_SET_MODEL_BUFFERS)
+		signal = get_signal_from_pkt_type(
+				HFI_CMD_SESSION_CVP_SET_MODEL_BUFFERS);
+	else
+		signal = get_signal_from_pkt_type(conf_id);
+
+	if (signal < 0) {
+		dprintk(CVP_ERR, "%s Invalid op config id\n", __func__);
+		return -EINVAL;
+	}
+
+	info->response_type = signal;
+	info->response.cmd = cmd_done;
+	return 0;
+}
+
+static struct msm_cvp_inst *cvp_get_inst_from_id(struct msm_cvp_core *core,
+	unsigned int session_id)
+{
+	struct msm_cvp_inst *inst = NULL;
+	bool match = false;
+	int count = 0;
+
+	if (!core || !session_id)
+		return NULL;
+
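+	/*
+	 * Reviewer note: core->lock can be held for long stretches by
+	 * session teardown, so poll with mutex_trylock() and back off
+	 * with usleep_range() instead of blocking the message-handling
+	 * path; give up after ~1000 attempts (roughly 100-200 ms).
+	 */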
+retry:
+	if (mutex_trylock(&core->lock)) {
+		list_for_each_entry(inst, &core->instances, list) {
+			if (hash32_ptr(inst->session) == session_id) {
+				match = true;
+				break;
+			}
+		}
+
+		inst = match ? inst : NULL;
+		mutex_unlock(&core->lock);
+	} else {
+		if (core->state == CVP_CORE_UNINIT)
+			return NULL;
+		usleep_range(100, 200);
+		count++;
+		if (count < 1000)
+			goto retry;
+		else
+			dprintk(CVP_ERR, "timeout locking core mutex\n");
+	}
+
+	return inst;
+
+}
+
+static int hfi_process_session_cvp_msg(u32 device_id,
+		void *hdr, struct msm_cvp_cb_info *info)
+{
+	struct cvp_hfi_msg_session_hdr *pkt =
+			(struct cvp_hfi_msg_session_hdr *)hdr;
+	struct cvp_session_msg *sess_msg;
+	struct msm_cvp_inst *inst = NULL;
+	struct msm_cvp_core *core;
+	void *session_id;
+	struct cvp_session_queue *sq;
+
+	if (!pkt) {
+		dprintk(CVP_ERR, "%s: invalid param\n", __func__);
+		return -EINVAL;
+	} else if (pkt->size > MAX_HFI_PKT_SIZE * sizeof(unsigned int)) {
+		dprintk(CVP_ERR, "%s: bad_pkt_size %d\n", __func__, pkt->size);
+		return -E2BIG;
+	}
+	session_id = (void *)(uintptr_t)get_msg_session_id(pkt);
+	core = list_first_entry(&cvp_driver->cores, struct msm_cvp_core, list);
+	inst = cvp_get_inst_from_id(core, (unsigned int)(uintptr_t)session_id);
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s: invalid session\n", __func__);
+		return -EINVAL;
+	}
+
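+	/*
+	 * Reviewer note: responses carrying FENCE_BIT in the kernel data
+	 * belong to synx-fenced commands and are routed to a dedicated
+	 * queue so they cannot stall ordinary session messages.
+	 */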
+	if (pkt->client_data.kdata & FENCE_BIT)
+		sq = &inst->session_queue_fence;
+	else
+		sq = &inst->session_queue;
+
+	sess_msg = kmem_cache_alloc(cvp_driver->msg_cache, GFP_KERNEL);
+	if (sess_msg == NULL) {
+		dprintk(CVP_ERR, "%s runs out msg cache memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	memcpy(&sess_msg->pkt, pkt, get_msg_size());
+
+	dprintk(CVP_HFI,
+		"%s: Received msg %x cmd_done.status=%d sessionid=%x\n",
+		__func__, pkt->packet_type,
+		hfi_map_err_status(get_msg_errorcode(pkt)), session_id);
+
+	spin_lock(&sq->lock);
+	if (sq->msg_count >= MAX_NUM_MSGS_PER_SESSION) {
+		dprintk(CVP_ERR, "Reached session queue size limit\n");
+		goto error_handle_msg;
+	}
+	list_add_tail(&sess_msg->node, &sq->msgs);
+	sq->msg_count++;
+	spin_unlock(&sq->lock);
+
+	wake_up_all(&sq->wq);
+
+	info->response_type = HAL_NO_RESP;
+
+	return 0;
+
+error_handle_msg:
+	spin_unlock(&sq->lock);
+	kmem_cache_free(cvp_driver->msg_cache, sess_msg);
+	return -ENOMEM;
+}
+
+static void hfi_process_sys_get_prop_image_version(
+		struct cvp_hfi_msg_sys_property_info_packet *pkt)
+{
+	int i = 0;
+	const u32 version_string_size = 128;
+	u8 *str_image_version;
+	int req_bytes;
+
+	req_bytes = pkt->size - sizeof(*pkt);
+	if (req_bytes < version_string_size ||
+			!pkt->rg_property_data[1] ||
+			pkt->num_properties > 1) {
+		dprintk(CVP_ERR, "%s: bad_pkt: %d\n", __func__, req_bytes);
+		return;
+	}
+	str_image_version = (u8 *)&pkt->rg_property_data[1];
+	/*
+	 * The version string returned by firmware includes null
+	 * characters at the start and in between. Replace the null
+	 * characters with space, to print the version info.
+	 */
+	for (i = 0; i < version_string_size; i++) {
+		if (str_image_version[i] != '\0')
+			cvp_driver->fw_version[i] = str_image_version[i];
+		else
+			cvp_driver->fw_version[i] = ' ';
+	}
+	cvp_driver->fw_version[i - 1] = '\0';
+	dprintk(CVP_HFI, "F/W version: %s\n", cvp_driver->fw_version);
+}
+
+static int hfi_process_sys_property_info(u32 device_id,
+		void *hdr, struct msm_cvp_cb_info *info)
+{
+	struct cvp_hfi_msg_sys_property_info_packet *pkt =
+			(struct cvp_hfi_msg_sys_property_info_packet *)hdr;
+
+	if (!pkt) {
+		dprintk(CVP_ERR, "%s: invalid param\n", __func__);
+		return -EINVAL;
+	} else if (pkt->size > sizeof(*pkt)) {
+		dprintk(CVP_ERR,
+				"%s: bad_pkt_size %d\n", __func__, pkt->size);
+		return -E2BIG;
+	} else if (!pkt->num_properties) {
+		dprintk(CVP_WARN,
+				"%s: no_properties\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (pkt->rg_property_data[0]) {
+	case HFI_PROPERTY_SYS_IMAGE_VERSION:
+		hfi_process_sys_get_prop_image_version(pkt);
+
+		*info = (struct msm_cvp_cb_info) {
+			.response_type =  HAL_RESPONSE_UNUSED,
+		};
+		return 0;
+	default:
+		dprintk(CVP_WARN,
+				"%s: unknown_prop_id: %x\n",
+				__func__, pkt->rg_property_data[0]);
+		return -ENOTSUPP;
+	}
+
+}
+
+int cvp_hfi_process_msg_packet(u32 device_id, void *hdr,
+			struct msm_cvp_cb_info *info)
+{
+	typedef int (*pkt_func_def)(u32, void *, struct msm_cvp_cb_info *info);
+	pkt_func_def pkt_func = NULL;
+	struct cvp_hal_msg_pkt_hdr *msg_hdr = (struct cvp_hal_msg_pkt_hdr *)hdr;
+
+	if (!info || !msg_hdr || msg_hdr->size < CVP_IFACEQ_MIN_PKT_SIZE) {
+		dprintk(CVP_ERR, "%s: bad packet/packet size\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	dprintk(CVP_HFI, "Received HFI MSG with type %#x\n", msg_hdr->packet);
+	switch (msg_hdr->packet) {
+	case HFI_MSG_EVENT_NOTIFY:
+		pkt_func = (pkt_func_def)hfi_process_event_notify;
+		break;
+	case  HFI_MSG_SYS_INIT_DONE:
+		pkt_func = (pkt_func_def)hfi_process_sys_init_done;
+		break;
+	case HFI_MSG_SYS_SESSION_INIT_DONE:
+		pkt_func = (pkt_func_def)hfi_process_session_init_done;
+		break;
+	case HFI_MSG_SYS_PROPERTY_INFO:
+		pkt_func = (pkt_func_def)hfi_process_sys_property_info;
+		break;
+	case HFI_MSG_SYS_SESSION_END_DONE:
+		pkt_func = (pkt_func_def)hfi_process_session_end_done;
+		break;
+	case HFI_MSG_SESSION_CVP_SET_BUFFERS:
+		pkt_func = (pkt_func_def) hfi_process_session_set_buf_done;
+		break;
+	case HFI_MSG_SESSION_CVP_RELEASE_BUFFERS:
+		pkt_func = (pkt_func_def)hfi_process_session_rel_buf_done;
+		break;
+	case HFI_MSG_SYS_SESSION_ABORT_DONE:
+		pkt_func = (pkt_func_def)hfi_process_session_abort_done;
+		break;
+	case HFI_MSG_SESSION_CVP_FLUSH:
+	case HFI_MSG_SESSION_CVP_FLUSH_DEPRECATE:
+		pkt_func = (pkt_func_def)hfi_process_session_flush_done;
+		break;
+	case HFI_MSG_SESSION_CVP_OPERATION_CONFIG:
+	case HFI_MSG_SESSION_CVP_SET_PERSIST_BUFFERS:
+	case HFI_MSG_SESSION_CVP_RELEASE_PERSIST_BUFFERS:
+	case HFI_MSG_SESSION_CVP_SET_MODEL_BUFFERS:
+		pkt_func =
+			(pkt_func_def)hfi_process_session_cvp_operation_config;
+		break;
+	case HFI_MSG_SESSION_CVP_DS:
+	case HFI_MSG_SESSION_CVP_DFS:
+	case HFI_MSG_SESSION_CVP_DME:
+	case HFI_MSG_SESSION_CVP_FD:
+		pkt_func = (pkt_func_def)hfi_process_session_cvp_msg;
+		break;
+	default:
+		dprintk(CVP_HFI, "Use default msg handler: %#x\n",
+				msg_hdr->packet);
+		pkt_func = (pkt_func_def)hfi_process_session_cvp_msg;
+		break;
+	}
+
+	return pkt_func ?
+		pkt_func(device_id, hdr, info) : -ENOTSUPP;
+}

+ 1646 - 0
msm/eva/msm_cvp.c

@@ -0,0 +1,1646 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include "msm_cvp.h"
+#include "cvp_hfi.h"
+#include "cvp_core_hfi.h"
+#include "msm_cvp_buf.h"
+
+struct cvp_power_level {
+	unsigned long core_sum;
+	unsigned long op_core_sum;
+	unsigned long bw_sum;
+};
+
+static int msm_cvp_get_session_info(struct msm_cvp_inst *inst,
+		struct cvp_kmd_session_info *session)
+{
+	int rc = 0;
+	struct msm_cvp_inst *s;
+
+	if (!inst || !inst->core || !session) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	s = cvp_get_inst_validate(inst->core, inst);
+	if (!s)
+		return -ECONNRESET;
+
+	s->cur_cmd_type = CVP_KMD_GET_SESSION_INFO;
+	session->session_id = hash32_ptr(inst->session);
+	dprintk(CVP_SESS, "%s: id 0x%x\n", __func__, session->session_id);
+
+	s->cur_cmd_type = 0;
+	cvp_put_inst(s);
+	return rc;
+}
+
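+/*
+ * Reviewer note: with ktid == NULL this dequeues the oldest message;
+ * with a ktid it dequeues only the message whose client_data.kdata
+ * matches that transaction id. Returning true with *msg == NULL
+ * signals that the session queue is being torn down.
+ */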
+static bool cvp_msg_pending(struct cvp_session_queue *sq,
+				struct cvp_session_msg **msg, u64 *ktid)
+{
+	struct cvp_session_msg *mptr, *dummy;
+	bool result = false;
+
+	mptr = NULL;
+	spin_lock(&sq->lock);
+	if (sq->state != QUEUE_ACTIVE) {
+		/* The session is being deleted */
+		spin_unlock(&sq->lock);
+		*msg = NULL;
+		return true;
+	}
+	result = list_empty(&sq->msgs);
+	if (!result) {
+		if (!ktid) {
+			mptr =
+			list_first_entry(&sq->msgs, struct cvp_session_msg,
+					node);
+			list_del_init(&mptr->node);
+			sq->msg_count--;
+		} else {
+			result = true;
+			list_for_each_entry_safe(mptr, dummy, &sq->msgs, node) {
+				if (*ktid == mptr->pkt.client_data.kdata) {
+					list_del_init(&mptr->node);
+					sq->msg_count--;
+					result = false;
+					break;
+				}
+			}
+			if (result)
+				mptr = NULL;
+		}
+	}
+	spin_unlock(&sq->lock);
+	*msg = mptr;
+	return !result;
+}
+
+static int cvp_wait_process_message(struct msm_cvp_inst *inst,
+				struct cvp_session_queue *sq, u64 *ktid,
+				unsigned long timeout,
+				struct cvp_kmd_hfi_packet *out)
+{
+	struct cvp_session_msg *msg = NULL;
+	struct cvp_hfi_msg_session_hdr *hdr;
+	int rc = 0;
+
+	if (wait_event_timeout(sq->wq,
+		cvp_msg_pending(sq, &msg, ktid), timeout) == 0) {
+		dprintk(CVP_WARN, "session queue wait timeout\n");
+		rc = -ETIMEDOUT;
+		goto exit;
+	}
+
+	if (msg == NULL) {
+		dprintk(CVP_WARN, "%s: queue state %d, msg cnt %d\n", __func__,
+					sq->state, sq->msg_count);
+
+		if (inst->state >= MSM_CVP_CLOSE_DONE ||
+				sq->state != QUEUE_ACTIVE) {
+			rc = -ECONNRESET;
+			goto exit;
+		}
+
+		msm_cvp_comm_kill_session(inst);
+		goto exit;
+	}
+
+	if (out) {
+		memcpy(out, &msg->pkt, sizeof(struct cvp_hfi_msg_session_hdr));
+		hdr = (struct cvp_hfi_msg_session_hdr *)out;
+		msm_cvp_unmap_frame(inst, hdr->client_data.kdata);
+	}
+
+	kmem_cache_free(cvp_driver->msg_cache, msg);
+
+exit:
+	return rc;
+}
+
+static int msm_cvp_session_receive_hfi(struct msm_cvp_inst *inst,
+			struct cvp_kmd_hfi_packet *out_pkt)
+{
+	unsigned long wait_time;
+	struct cvp_session_queue *sq;
+	struct msm_cvp_inst *s;
+	int rc = 0;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s invalid session\n", __func__);
+		return -EINVAL;
+	}
+
+	s = cvp_get_inst_validate(inst->core, inst);
+	if (!s)
+		return -ECONNRESET;
+
+	s->cur_cmd_type = CVP_KMD_RECEIVE_MSG_PKT;
+	wait_time = msecs_to_jiffies(CVP_MAX_WAIT_TIME);
+	sq = &inst->session_queue;
+
+	rc = cvp_wait_process_message(inst, sq, NULL, wait_time, out_pkt);
+
+	s->cur_cmd_type = 0;
+	cvp_put_inst(inst);
+	return rc;
+}
+
+static int msm_cvp_session_process_hfi(
+	struct msm_cvp_inst *inst,
+	struct cvp_kmd_hfi_packet *in_pkt,
+	unsigned int in_offset,
+	unsigned int in_buf_num)
+{
+	int pkt_idx, pkt_type, rc = 0;
+	struct cvp_hfi_device *hdev;
+	unsigned int offset, buf_num, signal;
+	struct cvp_session_queue *sq;
+	struct msm_cvp_inst *s;
+
+	if (!inst || !inst->core || !in_pkt) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	s = cvp_get_inst_validate(inst->core, inst);
+	if (!s)
+		return -ECONNRESET;
+
+	inst->cur_cmd_type = CVP_KMD_SEND_CMD_PKT;
+	hdev = inst->core->device;
+
+	pkt_idx = get_pkt_index((struct cvp_hal_session_cmd_pkt *)in_pkt);
+	if (pkt_idx < 0) {
+		dprintk(CVP_ERR, "%s incorrect packet %d, %x\n", __func__,
+				in_pkt->pkt_data[0],
+				in_pkt->pkt_data[1]);
+		offset = in_offset;
+		buf_num = in_buf_num;
+		signal = HAL_NO_RESP;
+	} else {
+		offset = cvp_hfi_defs[pkt_idx].buf_offset;
+		buf_num = cvp_hfi_defs[pkt_idx].buf_num;
+		signal = cvp_hfi_defs[pkt_idx].resp;
+	}
+	if (signal == HAL_NO_RESP) {
+		/* Frame packets are not allowed before session starts */
+		sq = &inst->session_queue;
+		spin_lock(&sq->lock);
+		if (sq->state != QUEUE_ACTIVE) {
+			spin_unlock(&sq->lock);
+			dprintk(CVP_ERR, "%s: invalid queue state\n", __func__);
+			rc = -EINVAL;
+			goto exit;
+		}
+		spin_unlock(&sq->lock);
+	}
+
+	if (in_offset && in_buf_num) {
+		offset = in_offset;
+		buf_num = in_buf_num;
+	}
+	if (!is_buf_param_valid(buf_num, offset)) {
+		dprintk(CVP_ERR, "Incorrect buffer num and offset in cmd\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+	pkt_type = in_pkt->pkt_data[1];
+	if (pkt_type == HFI_CMD_SESSION_CVP_SET_PERSIST_BUFFERS ||
+		pkt_type == HFI_CMD_SESSION_CVP_SET_MODEL_BUFFERS)
+		rc = msm_cvp_map_user_persist(inst, in_pkt, offset, buf_num);
+	else if (pkt_type == HFI_CMD_SESSION_CVP_RELEASE_PERSIST_BUFFERS)
+		rc = msm_cvp_mark_user_persist(inst, in_pkt, offset, buf_num);
+	else
+		rc = msm_cvp_map_frame(inst, in_pkt, offset, buf_num);
+
+	if (rc)
+		goto exit;
+
+	rc = call_hfi_op(hdev, session_send, (void *)inst->session, in_pkt);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"%s: Failed in call_hfi_op %d, %x\n",
+			__func__, in_pkt->pkt_data[0], in_pkt->pkt_data[1]);
+		goto exit;
+	}
+
+	if (signal != HAL_NO_RESP) {
+		rc = wait_for_sess_signal_receipt(inst, signal);
+		if (rc) {
+			dprintk(CVP_ERR,
+				"%s: wait for signal failed, rc %d %d, %x %d\n",
+				__func__, rc,
+				in_pkt->pkt_data[0],
+				in_pkt->pkt_data[1],
+				signal);
+			goto exit;
+		}
+		if (pkt_type == HFI_CMD_SESSION_CVP_RELEASE_PERSIST_BUFFERS)
+			rc = msm_cvp_unmap_user_persist(inst, in_pkt,
+					offset, buf_num);
+
+	}
+exit:
+	inst->cur_cmd_type = 0;
+	cvp_put_inst(s);
+	return rc;
+}
+
+static bool cvp_fence_wait(struct cvp_fence_queue *q,
+			struct cvp_fence_command **fence,
+			enum queue_state *state)
+{
+	struct cvp_fence_command *f;
+
+	*fence = NULL;
+	mutex_lock(&q->lock);
+	*state = q->state;
+	if (*state != QUEUE_ACTIVE) {
+		mutex_unlock(&q->lock);
+		return true;
+	}
+
+	if (list_empty(&q->wait_list)) {
+		mutex_unlock(&q->lock);
+		return false;
+	}
+
+	f = list_first_entry(&q->wait_list, struct cvp_fence_command, list);
+	list_del_init(&f->list);
+	list_add_tail(&f->list, &q->sched_list);
+
+	mutex_unlock(&q->lock);
+	*fence = f;
+
+	return true;
+}
+
+static int cvp_fence_proc(struct msm_cvp_inst *inst,
+			struct cvp_fence_command *fc,
+			struct cvp_hfi_cmd_session_hdr *pkt)
+{
+	int rc = 0;
+	unsigned long timeout;
+	u64 ktid;
+	int synx_state = SYNX_STATE_SIGNALED_SUCCESS;
+	struct cvp_hfi_device *hdev;
+	struct cvp_session_queue *sq;
+	u32 hfi_err = HFI_ERR_NONE;
+	struct cvp_hfi_msg_session_hdr *hdr;
+
+	dprintk(CVP_SYNX, "%s %s\n", current->comm, __func__);
+
+	hdev = inst->core->device;
+	sq = &inst->session_queue_fence;
+	ktid = pkt->client_data.kdata;
+
+	if (cvp_synx_ops(inst, CVP_INPUT_SYNX, fc, &synx_state)) {
+		msm_cvp_unmap_frame(inst, pkt->client_data.kdata);
+		goto exit;
+	}
+
+	rc = call_hfi_op(hdev, session_send, (void *)inst->session,
+			(struct cvp_kmd_hfi_packet *)pkt);
+	if (rc) {
+		dprintk(CVP_ERR, "%s %s: Failed in call_hfi_op %d, %x\n",
+			current->comm, __func__, pkt->size, pkt->packet_type);
+		synx_state = SYNX_STATE_SIGNALED_ERROR;
+		goto exit;
+	}
+
+	timeout = msecs_to_jiffies(CVP_MAX_WAIT_TIME);
+	rc = cvp_wait_process_message(inst, sq, &ktid, timeout,
+				(struct cvp_kmd_hfi_packet *)pkt);
+	hdr = (struct cvp_hfi_msg_session_hdr *)pkt;
+	hfi_err = hdr->error_type;
+	if (rc) {
+		dprintk(CVP_ERR, "%s %s: cvp_wait_process_message rc %d\n",
+			current->comm, __func__, rc);
+		synx_state = SYNX_STATE_SIGNALED_ERROR;
+		goto exit;
+	}
+	if (hfi_err == HFI_ERR_SESSION_FLUSHED) {
+		dprintk(CVP_SYNX, "%s %s: cvp_wait_process_message flushed\n",
+			current->comm, __func__);
+		synx_state = SYNX_STATE_SIGNALED_CANCEL;
+	} else if (hfi_err == HFI_ERR_SESSION_STREAM_CORRUPT) {
+		dprintk(CVP_WARN, "%s %s: cvp_wait_process_msg non-fatal %d\n",
+		current->comm, __func__, hfi_err);
+		synx_state = SYNX_STATE_SIGNALED_SUCCESS;
+	} else if (hfi_err != HFI_ERR_NONE) {
+		dprintk(CVP_ERR, "%s %s: cvp_wait_process_message hfi err %d\n",
+			current->comm, __func__, hfi_err);
+		synx_state = SYNX_STATE_SIGNALED_CANCEL;
+	}
+
+exit:
+	rc = cvp_synx_ops(inst, CVP_OUTPUT_SYNX, fc, &synx_state);
+
+	return rc;
+}
+
+static int cvp_alloc_fence_data(struct cvp_fence_command **f, u32 size)
+{
+	struct cvp_fence_command *fcmd;
+
+	fcmd = kzalloc(sizeof(struct cvp_fence_command), GFP_KERNEL);
+	if (!fcmd)
+		return -ENOMEM;
+
+	fcmd->pkt = kzalloc(size, GFP_KERNEL);
+	if (!fcmd->pkt) {
+		kfree(fcmd);
+		return -ENOMEM;
+	}
+
+	*f = fcmd;
+	return 0;
+}
+
+static void cvp_free_fence_data(struct cvp_fence_command *f)
+{
+	kfree(f->pkt);
+	f->pkt = NULL;
+	kfree(f);
+	f = NULL;
+}
+
+static int cvp_fence_thread(void *data)
+{
+	int rc = 0;
+	struct msm_cvp_inst *inst;
+	struct cvp_fence_queue *q;
+	enum queue_state state;
+	struct cvp_fence_command *f;
+	struct cvp_hfi_cmd_session_hdr *pkt;
+	u32 *synx;
+	u64 ktid;
+
+	dprintk(CVP_SYNX, "Enter %s\n", current->comm);
+
+	inst = (struct msm_cvp_inst *)data;
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(CVP_ERR, "%s invalid inst %pK\n", current->comm, inst);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	q = &inst->fence_cmd_queue;
+
+wait:
+	dprintk(CVP_SYNX, "%s starts wait\n", current->comm);
+
+	f = NULL;
+	wait_event_interruptible(q->wq, cvp_fence_wait(q, &f, &state));
+	if (state != QUEUE_ACTIVE)
+		goto exit;
+
+	if (!f)
+		goto wait;
+
+	pkt = f->pkt;
+	synx = (u32 *)f->synx;
+
+	ktid = pkt->client_data.kdata & (FENCE_BIT - 1);
+	dprintk(CVP_SYNX, "%s pkt type %d on ktid %llu frameID %llu\n",
+		current->comm, pkt->packet_type, ktid, f->frame_id);
+
+	rc = cvp_fence_proc(inst, f, pkt);
+
+	mutex_lock(&q->lock);
+	cvp_release_synx(inst, f);
+	list_del_init(&f->list);
+	mutex_unlock(&q->lock);
+
+	dprintk(CVP_SYNX, "%s done with %d ktid %llu frameID %llu rc %d\n",
+		current->comm, pkt->packet_type, ktid, f->frame_id, rc);
+
+	cvp_free_fence_data(f);
+
+	goto wait;
+
+exit:
+	dprintk(CVP_SYNX, "%s exit\n", current->comm);
+	cvp_put_inst(inst);
+	do_exit(rc);
+}
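
The thread body above is the classic worker-loop shape: wait on the queue,
exit once the queue leaves QUEUE_ACTIVE, re-wait when woken without work,
and process each popped command outside the lock. A compact userspace
sketch of the same shape, with pthreads in place of the kernel wait queue
(illustrative types, not the driver's):

	#include <pthread.h>
	#include <stdbool.h>
	#include <stddef.h>

	struct fence_cmd { struct fence_cmd *next; };

	struct fence_queue {
		pthread_mutex_t lock;
		pthread_cond_t wq;
		bool active;
		struct fence_cmd *wait_list;
	};

	static void *fence_worker(void *data)
	{
		struct fence_queue *q = data;
		struct fence_cmd *f;

		for (;;) {
			pthread_mutex_lock(&q->lock);
			while (q->active && !q->wait_list)
				pthread_cond_wait(&q->wq, &q->lock);
			if (!q->active) {	/* queue stopped: exit */
				pthread_mutex_unlock(&q->lock);
				return NULL;
			}
			f = q->wait_list;	/* pop one command */
			q->wait_list = f->next;
			pthread_mutex_unlock(&q->lock);

			/* process f outside the lock, as the driver does
			 * via cvp_fence_proc() */
		}
	}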
+
+static int msm_cvp_session_process_hfi_fence(struct msm_cvp_inst *inst,
+					struct cvp_kmd_arg *arg)
+{
+	int rc = 0;
+	int idx;
+	struct cvp_kmd_hfi_fence_packet *fence_pkt;
+	struct cvp_kmd_hfi_synx_packet *synx_pkt;
+	struct cvp_kmd_fence_ctrl *kfc;
+	struct cvp_hfi_cmd_session_hdr *pkt;
+	unsigned int offset, buf_num, in_offset, in_buf_num;
+	struct msm_cvp_inst *s;
+	struct cvp_fence_command *f;
+	struct cvp_fence_queue *q;
+	u32 *fence;
+	enum op_mode mode;
+
+	if (!inst || !inst->core || !arg || !inst->core->device) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	s = cvp_get_inst_validate(inst->core, inst);
+	if (!s)
+		return -ECONNRESET;
+
+	q = &inst->fence_cmd_queue;
+
+	mutex_lock(&q->lock);
+	mode = q->mode;
+	mutex_unlock(&q->lock);
+
+	if (mode == OP_DRAINING) {
+		dprintk(CVP_SYNX, "%s: flush in progress\n", __func__);
+		rc = -EBUSY;
+		goto exit;
+	}
+
+	in_offset = arg->buf_offset;
+	in_buf_num = arg->buf_num;
+
+	fence_pkt = &arg->data.hfi_fence_pkt;
+	pkt = (struct cvp_hfi_cmd_session_hdr *)&fence_pkt->pkt_data;
+	idx = get_pkt_index((struct cvp_hal_session_cmd_pkt *)pkt);
+
+	if (idx < 0 || pkt->size > MAX_HFI_FENCE_OFFSET) {
+		dprintk(CVP_ERR, "%s incorrect packet %d %#x\n", __func__,
+				pkt->size, pkt->packet_type);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (in_offset && in_buf_num) {
+		offset = in_offset;
+		buf_num = in_buf_num;
+	} else {
+		offset = cvp_hfi_defs[idx].buf_offset;
+		buf_num = cvp_hfi_defs[idx].buf_num;
+	}
+
+	if (!is_buf_param_valid(buf_num, offset)) {
+		dprintk(CVP_ERR, "Incorrect buf num and offset in cmd\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+	rc = msm_cvp_map_frame(inst, (struct cvp_kmd_hfi_packet *)pkt, offset,
+				buf_num);
+	if (rc)
+		goto exit;
+
+	rc = cvp_alloc_fence_data(&f, pkt->size);
+	if (rc)
+		goto exit;
+
+	f->type = cvp_hfi_defs[idx].type;
+	f->mode = OP_NORMAL;
+
+	synx_pkt = &arg->data.hfi_synx_pkt;
+	if (synx_pkt->fence_data[0] != 0xFEEDFACE) {
+		dprintk(CVP_ERR, "%s deprecated synx path\n", __func__);
+		cvp_free_fence_data(f);
+		msm_cvp_unmap_frame(inst, pkt->client_data.kdata);
+		rc = -EINVAL;
+		goto exit;
+	} else {
+		kfc = &synx_pkt->fc;
+		fence = (u32 *)&kfc->fences;
+		f->frame_id = kfc->frame_id;
+		f->signature = 0xFEEDFACE;
+		f->num_fences = kfc->num_fences;
+		f->output_index = kfc->output_index;
+	}
+
+	dprintk(CVP_SYNX, "%s: frameID %llu ktid %llu\n",
+			__func__, f->frame_id, pkt->client_data.kdata);
+
+	memcpy(f->pkt, pkt, pkt->size);
+
+	f->pkt->client_data.kdata |= FENCE_BIT;
+
+	rc = cvp_import_synx(inst, f, fence);
+	if (rc) {
+		msm_cvp_unmap_frame(inst, pkt->client_data.kdata);
+		cvp_free_fence_data(f);
+		goto exit;
+	}
+
+	mutex_lock(&q->lock);
+	list_add_tail(&f->list, &inst->fence_cmd_queue.wait_list);
+	mutex_unlock(&q->lock);
+
+	wake_up(&inst->fence_cmd_queue.wq);
+
+exit:
+	cvp_put_inst(s);
+	return rc;
+}
+
+static inline int div_by_1dot5(unsigned int a)
+{
+	unsigned long i = (unsigned long)a << 1;
+
+	return (unsigned int) i / 3;
+}
+
+static inline int max_3(unsigned int a, unsigned int b, unsigned int c)
+{
+	return (a >= b) ? ((a >= c) ? a : c) : ((b >= c) ? b : c);
+}
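
div_by_1dot5() avoids floating point by using a / 1.5 == (2 * a) / 3; the
widening to unsigned long before the shift matters because a << 1 would
overflow a 32-bit value for a > 2^31. A quick standalone check of the
identity, assuming a 64-bit unsigned long as on the arm64 targets this
driver builds for:

	#include <assert.h>

	int main(void)
	{
		unsigned int a = 3000000000u;	/* would overflow a u32 shift */
		unsigned long i = (unsigned long)a << 1;

		assert(i / 3 == 2000000000u);	/* 3e9 / 1.5 == 2e9 */
		return 0;
	}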
+
+static bool is_subblock_profile_existed(struct msm_cvp_inst *inst)
+{
+	return (inst->prop.od_cycles ||
+			inst->prop.mpu_cycles ||
+			inst->prop.fdu_cycles ||
+			inst->prop.ica_cycles);
+}
+
+static void aggregate_power_update(struct msm_cvp_core *core,
+	struct cvp_power_level *nrt_pwr,
+	struct cvp_power_level *rt_pwr,
+	unsigned int max_clk_rate)
+{
+	struct msm_cvp_inst *inst;
+	int i;
+	unsigned long fdu_sum[2] = {0}, od_sum[2] = {0}, mpu_sum[2] = {0};
+	unsigned long ica_sum[2] = {0}, fw_sum[2] = {0};
+	unsigned long op_fdu_max[2] = {0}, op_od_max[2] = {0};
+	unsigned long op_mpu_max[2] = {0}, op_ica_max[2] = {0};
+	unsigned long op_fw_max[2] = {0}, bw_sum[2] = {0}, op_bw_max[2] = {0};
+
+	list_for_each_entry(inst, &core->instances, list) {
+		if (inst->state == MSM_CVP_CORE_INVALID ||
+			inst->state == MSM_CVP_CORE_UNINIT ||
+			!is_subblock_profile_existed(inst))
+			continue;
+		if (inst->prop.priority <= CVP_RT_PRIO_THRESHOLD) {
+			/* Non-realtime sessions use index 0 */
+			i = 0;
+		} else {
+			i = 1;
+		}
+		dprintk(CVP_PROF, "pwrUpdate %pK fdu %u od %u mpu %u ica %u\n",
+			inst,
+			inst->prop.fdu_cycles,
+			inst->prop.od_cycles,
+			inst->prop.mpu_cycles,
+			inst->prop.ica_cycles);
+
+		dprintk(CVP_PROF, "pwrUpdate fw %u fdu_o %u od_o %u mpu_o %u\n",
+			inst->prop.fw_cycles,
+			inst->prop.fdu_op_cycles,
+			inst->prop.od_op_cycles,
+			inst->prop.mpu_op_cycles);
+
+		dprintk(CVP_PROF, "pwrUpdate ica_o %u fw_o %u bw %u bw_o %u\n",
+			inst->prop.ica_op_cycles,
+			inst->prop.fw_op_cycles,
+			inst->prop.ddr_bw,
+			inst->prop.ddr_op_bw);
+
+		fdu_sum[i] += inst->prop.fdu_cycles;
+		od_sum[i] += inst->prop.od_cycles;
+		mpu_sum[i] += inst->prop.mpu_cycles;
+		ica_sum[i] += inst->prop.ica_cycles;
+		fw_sum[i] += inst->prop.fw_cycles;
+		op_fdu_max[i] =
+			(op_fdu_max[i] >= inst->prop.fdu_op_cycles) ?
+			op_fdu_max[i] : inst->prop.fdu_op_cycles;
+		op_od_max[i] =
+			(op_od_max[i] >= inst->prop.od_op_cycles) ?
+			op_od_max[i] : inst->prop.od_op_cycles;
+		op_mpu_max[i] =
+			(op_mpu_max[i] >= inst->prop.mpu_op_cycles) ?
+			op_mpu_max[i] : inst->prop.mpu_op_cycles;
+		op_ica_max[i] =
+			(op_ica_max[i] >= inst->prop.ica_op_cycles) ?
+			op_ica_max[i] : inst->prop.ica_op_cycles;
+		op_fw_max[i] =
+			(op_fw_max[i] >= inst->prop.fw_op_cycles) ?
+			op_fw_max[i] : inst->prop.fw_op_cycles;
+		bw_sum[i] += inst->prop.ddr_bw;
+		op_bw_max[i] =
+			(op_bw_max[i] >= inst->prop.ddr_op_bw) ?
+			op_bw_max[i] : inst->prop.ddr_op_bw;
+	}
+
+	for (i = 0; i < 2; i++) {
+		fdu_sum[i] = max_3(fdu_sum[i], od_sum[i], mpu_sum[i]);
+		fdu_sum[i] = max_3(fdu_sum[i], ica_sum[i], fw_sum[i]);
+
+		op_fdu_max[i] = max_3(op_fdu_max[i], op_od_max[i],
+			op_mpu_max[i]);
+		op_fdu_max[i] = max_3(op_fdu_max[i],
+			op_ica_max[i], op_fw_max[i]);
+		op_fdu_max[i] =
+			(op_fdu_max[i] > max_clk_rate) ?
+			max_clk_rate : op_fdu_max[i];
+		bw_sum[i] = (bw_sum[i] >= op_bw_max[i]) ?
+			bw_sum[i] : op_bw_max[i];
+	}
+
+	nrt_pwr->core_sum += fdu_sum[0];
+	nrt_pwr->op_core_sum = (nrt_pwr->op_core_sum >= op_fdu_max[0]) ?
+			nrt_pwr->op_core_sum : op_fdu_max[0];
+	nrt_pwr->bw_sum += bw_sum[0];
+	rt_pwr->core_sum += fdu_sum[1];
+	rt_pwr->op_core_sum = (rt_pwr->op_core_sum >= op_fdu_max[1]) ?
+			rt_pwr->op_core_sum : op_fdu_max[1];
+	rt_pwr->bw_sum += bw_sum[1];
+}
+
+/**
+ * adjust_bw_freqs(): calculate the CVP clock freq and bw required to sustain
+ * the requested use case.
+ * The bandwidth vote is best-effort: no error is returned if the requested
+ * b/w exceeds the max limit.
+ * Clock votes from non-realtime sessions are also best-effort: no error is
+ * returned if the aggregated session clock request exceeds the max limit.
+ * Clock votes from realtime sessions are hard requests: if the aggregated
+ * session clock request exceeds the max limit, the function returns an
+ * error.
+ */
+static int adjust_bw_freqs(void)
+{
+	struct msm_cvp_core *core;
+	struct iris_hfi_device *hdev;
+	struct bus_info *bus;
+	struct clock_set *clocks;
+	struct clock_info *cl;
+	struct allowed_clock_rates_table *tbl = NULL;
+	unsigned int tbl_size;
+	unsigned int cvp_min_rate, cvp_max_rate, max_bw, min_bw;
+	struct cvp_power_level rt_pwr = {0}, nrt_pwr = {0};
+	unsigned long tmp, core_sum, op_core_sum, bw_sum;
+	int i, rc = 0;
+
+	core = list_first_entry(&cvp_driver->cores, struct msm_cvp_core, list);
+
+	hdev = core->device->hfi_device_data;
+	clocks = &core->resources.clock_set;
+	cl = &clocks->clock_tbl[clocks->count - 1];
+	tbl = core->resources.allowed_clks_tbl;
+	tbl_size = core->resources.allowed_clks_tbl_size;
+	cvp_min_rate = tbl[0].clock_rate;
+	cvp_max_rate = tbl[tbl_size - 1].clock_rate;
+	bus = &core->resources.bus_set.bus_tbl[1];
+	max_bw = bus->range[1];
+	min_bw = max_bw/10;
+
+	aggregate_power_update(core, &nrt_pwr, &rt_pwr, cvp_max_rate);
+	dprintk(CVP_PROF, "PwrUpdate nrt %lu %lu rt %lu %lu\n",
+		nrt_pwr.core_sum, nrt_pwr.op_core_sum,
+		rt_pwr.core_sum, rt_pwr.op_core_sum);
+
+	if (rt_pwr.core_sum > cvp_max_rate) {
+		dprintk(CVP_WARN, "%s clk vote out of range %lu\n",
+			__func__, rt_pwr.core_sum);
+		return -ENOTSUPP;
+	}
+
+	core_sum = rt_pwr.core_sum + nrt_pwr.core_sum;
+	op_core_sum = (rt_pwr.op_core_sum >= nrt_pwr.op_core_sum) ?
+		rt_pwr.op_core_sum : nrt_pwr.op_core_sum;
+
+	core_sum = (core_sum >= op_core_sum) ?
+		core_sum : op_core_sum;
+
+	if (core_sum > cvp_max_rate) {
+		core_sum = cvp_max_rate;
+	} else if (core_sum < cvp_min_rate) {
+		core_sum = cvp_min_rate;
+	} else {
+		for (i = 1; i < tbl_size; i++)
+			if (core_sum <= tbl[i].clock_rate)
+				break;
+		core_sum = tbl[i].clock_rate;
+	}
+
+	bw_sum = rt_pwr.bw_sum + nrt_pwr.bw_sum;
+	bw_sum = bw_sum >> 10;
+	bw_sum = (bw_sum > max_bw) ? max_bw : bw_sum;
+	bw_sum = (bw_sum < min_bw) ? min_bw : bw_sum;
+
+	dprintk(CVP_PROF, "%s %lu %lu\n", __func__,
+		core_sum, bw_sum);
+	if (!cl->has_scaling) {
+		dprintk(CVP_ERR, "Cannot scale CVP clock\n");
+		return -EINVAL;
+	}
+
+	tmp = core->curr_freq;
+	core->curr_freq = core_sum;
+	rc = msm_cvp_set_clocks(core);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"%s: Failed to set clock rate %lu on %s: %d\n",
+			__func__, core_sum, cl->name, rc);
+		core->curr_freq = tmp;
+		return rc;
+	}
+	hdev->clk_freq = core->curr_freq;
+	rc = icc_set_bw(bus->client, bw_sum, 0);
+	if (rc)
+		dprintk(CVP_ERR, "Failed voting bus %s to ab %lu\n",
+			bus->name, bw_sum);
+
+	return rc;
+}
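
The clock selection above clamps the aggregate to the table's range and
otherwise rounds it up to the next supported rate. The same policy as a
standalone sketch over an ascending rate table (illustrative values, not
the driver's tables):

	/* Round 'want' up to the next entry of an ascending rate table,
	 * clamping to the first and last entries. */
	unsigned long pick_rate(const unsigned long *tbl, int n,
				unsigned long want)
	{
		int i;

		if (want <= tbl[0])
			return tbl[0];
		if (want >= tbl[n - 1])
			return tbl[n - 1];
		for (i = 1; i < n; i++)
			if (want <= tbl[i])
				break;
		return tbl[i];
	}

For example, pick_rate() over {100, 200, 400} with want == 250 returns 400.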
+
+static int msm_cvp_update_power(struct msm_cvp_inst *inst)
+{
+	int rc = 0;
+	struct msm_cvp_core *core;
+	struct msm_cvp_inst *s;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	s = cvp_get_inst_validate(inst->core, inst);
+	if (!s)
+		return -ECONNRESET;
+
+	inst->cur_cmd_type = CVP_KMD_UPDATE_POWER;
+	core = inst->core;
+
+	mutex_lock(&core->lock);
+	rc = adjust_bw_freqs();
+	mutex_unlock(&core->lock);
+	inst->cur_cmd_type = 0;
+	cvp_put_inst(s);
+
+	return rc;
+}
+
+static int msm_cvp_register_buffer(struct msm_cvp_inst *inst,
+		struct cvp_kmd_buffer *buf)
+{
+	struct cvp_hfi_device *hdev;
+	struct cvp_hal_session *session;
+	struct msm_cvp_inst *s;
+	int rc = 0;
+
+	if (!inst || !inst->core || !buf) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!buf->index)
+		return 0;
+
+	s = cvp_get_inst_validate(inst->core, inst);
+	if (!s)
+		return -ECONNRESET;
+
+	inst->cur_cmd_type = CVP_KMD_REGISTER_BUFFER;
+	session = (struct cvp_hal_session *)inst->session;
+	if (!session) {
+		dprintk(CVP_ERR, "%s: invalid session\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+	hdev = inst->core->device;
+	print_client_buffer(CVP_HFI, "register", inst, buf);
+
+	rc = msm_cvp_map_buf_dsp(inst, buf);
+exit:
+	inst->cur_cmd_type = 0;
+	cvp_put_inst(s);
+	return rc;
+}
+
+static int msm_cvp_unregister_buffer(struct msm_cvp_inst *inst,
+		struct cvp_kmd_buffer *buf)
+{
+	struct msm_cvp_inst *s;
+	int rc = 0;
+
+	if (!inst || !inst->core || !buf) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!buf->index)
+		return 0;
+
+	s = cvp_get_inst_validate(inst->core, inst);
+	if (!s)
+		return -ECONNRESET;
+
+	inst->cur_cmd_type = CVP_KMD_UNREGISTER_BUFFER;
+	print_client_buffer(CVP_HFI, "unregister", inst, buf);
+
+	rc = msm_cvp_unmap_buf_dsp(inst, buf);
+	inst->cur_cmd_type = 0;
+	cvp_put_inst(s);
+	return rc;
+}
+
+static int msm_cvp_session_create(struct msm_cvp_inst *inst)
+{
+	int rc = 0;
+	struct synx_initialization_params params = {0};
+
+	if (!inst || !inst->core)
+		return -EINVAL;
+
+	if (inst->state >= MSM_CVP_CLOSE_DONE)
+		return -ECONNRESET;
+
+	if (inst->state < MSM_CVP_CORE_INIT_DONE ||
+		inst->state > MSM_CVP_OPEN_DONE) {
+		dprintk(CVP_ERR,
+			"%s Incorrect CVP state %d to create session\n",
+			__func__, inst->state);
+		return -EINVAL;
+	}
+
+	rc = msm_cvp_comm_try_state(inst, MSM_CVP_OPEN_DONE);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"Failed to move instance to open done state\n");
+		goto fail_init;
+	}
+
+	rc = cvp_comm_set_arp_buffers(inst);
+	if (rc) {
+		dprintk(CVP_ERR,
+				"Failed to set ARP buffers\n");
+		goto fail_init;
+	}
+
+	params.name = "cvp-kernel-client";
+	if (synx_initialize(&inst->synx_session_id, &params)) {
+		dprintk(CVP_ERR, "%s synx_initialize failed\n", __func__);
+		rc = -EFAULT;
+	}
+
+fail_init:
+	return rc;
+}
+
+static int session_state_check_init(struct msm_cvp_inst *inst)
+{
+	mutex_lock(&inst->lock);
+	if (inst->state == MSM_CVP_OPEN || inst->state == MSM_CVP_OPEN_DONE) {
+		mutex_unlock(&inst->lock);
+		return 0;
+	}
+	mutex_unlock(&inst->lock);
+
+	return msm_cvp_session_create(inst);
+}
+
+static int cvp_fence_thread_start(struct msm_cvp_inst *inst)
+{
+	u32 tnum = 0;
+	u32 i = 0;
+	int rc = 0;
+	char tname[16];
+	struct task_struct *thread;
+	struct cvp_fence_queue *q;
+	struct cvp_session_queue *sq;
+
+	if (!inst->prop.fthread_nr)
+		return 0;
+
+	q = &inst->fence_cmd_queue;
+	mutex_lock(&q->lock);
+	q->state = QUEUE_ACTIVE;
+	mutex_unlock(&q->lock);
+
+	for (i = 0; i < inst->prop.fthread_nr; ++i) {
+		if (!cvp_get_inst_validate(inst->core, inst)) {
+			rc = -ECONNRESET;
+			goto exit;
+		}
+
+		snprintf(tname, sizeof(tname), "fthread_%d", tnum++);
+		thread = kthread_run(cvp_fence_thread, inst, tname);
+		if (IS_ERR(thread)) {
+			dprintk(CVP_ERR, "%s create %s fail\n", __func__, tname);
+			cvp_put_inst(inst);
+			rc = -ECHILD;
+			goto exit;
+		}
+	}
+
+	sq = &inst->session_queue_fence;
+	spin_lock(&sq->lock);
+	sq->state = QUEUE_ACTIVE;
+	spin_unlock(&sq->lock);
+
+exit:
+	if (rc) {
+		mutex_lock(&q->lock);
+		q->state = QUEUE_STOP;
+		mutex_unlock(&q->lock);
+		wake_up_all(&q->wq);
+	}
+	return rc;
+}
+
+static int cvp_fence_thread_stop(struct msm_cvp_inst *inst)
+{
+	struct cvp_fence_queue *q;
+	struct cvp_session_queue *sq;
+
+	if (!inst->prop.fthread_nr)
+		return 0;
+
+	q = &inst->fence_cmd_queue;
+
+	mutex_lock(&q->lock);
+	q->state = QUEUE_STOP;
+	mutex_unlock(&q->lock);
+
+	sq = &inst->session_queue_fence;
+	spin_lock(&sq->lock);
+	sq->state = QUEUE_STOP;
+	spin_unlock(&sq->lock);
+
+	wake_up_all(&q->wq);
+	wake_up_all(&sq->wq);
+
+	return 0;
+}
+
+static int msm_cvp_session_start(struct msm_cvp_inst *inst,
+		struct cvp_kmd_arg *arg)
+{
+	struct cvp_session_queue *sq;
+
+	sq = &inst->session_queue;
+	spin_lock(&sq->lock);
+	if (sq->msg_count) {
+		dprintk(CVP_ERR, "session start failed: queue not empty %d\n",
+			sq->msg_count);
+		spin_unlock(&sq->lock);
+		return -EINVAL;
+	}
+	sq->state = QUEUE_ACTIVE;
+	spin_unlock(&sq->lock);
+
+	return cvp_fence_thread_start(inst);
+}
+
+static int msm_cvp_session_stop(struct msm_cvp_inst *inst,
+		struct cvp_kmd_arg *arg)
+{
+	struct cvp_session_queue *sq;
+	struct cvp_kmd_session_control *sc = &arg->data.session_ctrl;
+
+	sq = &inst->session_queue;
+
+	spin_lock(&sq->lock);
+	if (sq->msg_count) {
+		dprintk(CVP_ERR, "session stop incorrect: queue not empty %d\n",
+			sq->msg_count);
+		sc->ctrl_data[0] = sq->msg_count;
+		spin_unlock(&sq->lock);
+		return -EUCLEAN;
+	}
+	sq->state = QUEUE_STOP;
+
+	pr_info(CVP_DBG_TAG "Stop session: %pK session_id = %d\n",
+		"sess", inst, hash32_ptr(inst->session));
+	spin_unlock(&sq->lock);
+
+	wake_up_all(&inst->session_queue.wq);
+
+	return cvp_fence_thread_stop(inst);
+}
+
+int msm_cvp_session_queue_stop(struct msm_cvp_inst *inst)
+{
+	struct cvp_session_queue *sq;
+
+	sq = &inst->session_queue;
+
+	spin_lock(&sq->lock);
+
+	if (sq->state == QUEUE_STOP) {
+		spin_unlock(&sq->lock);
+		return 0;
+	}
+
+	sq->state = QUEUE_STOP;
+
+	dprintk(CVP_SESS, "Stop session queue: %pK session_id = %d\n",
+			inst, hash32_ptr(inst->session));
+	spin_unlock(&sq->lock);
+
+	wake_up_all(&inst->session_queue.wq);
+
+	return cvp_fence_thread_stop(inst);
+}
+
+static int msm_cvp_session_ctrl(struct msm_cvp_inst *inst,
+		struct cvp_kmd_arg *arg)
+{
+	struct cvp_kmd_session_control *ctrl = &arg->data.session_ctrl;
+	int rc = 0;
+	unsigned int ctrl_type;
+
+	ctrl_type = ctrl->ctrl_type;
+
+	if (!inst && ctrl_type != SESSION_CREATE) {
+		dprintk(CVP_ERR, "%s invalid session\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (ctrl_type) {
+	case SESSION_STOP:
+		rc = msm_cvp_session_stop(inst, arg);
+		break;
+	case SESSION_START:
+		rc = msm_cvp_session_start(inst, arg);
+		break;
+	case SESSION_CREATE:
+		rc = msm_cvp_session_create(inst);
+		break;
+	case SESSION_DELETE:
+		break;
+	case SESSION_INFO:
+	default:
+		dprintk(CVP_ERR, "%s Unsupported session ctrl %d\n",
+			__func__, ctrl->ctrl_type);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+static int msm_cvp_get_sysprop(struct msm_cvp_inst *inst,
+		struct cvp_kmd_arg *arg)
+{
+	struct cvp_kmd_sys_properties *props = &arg->data.sys_properties;
+	struct cvp_hfi_device *hdev;
+	struct iris_hfi_device *hfi;
+	int i, rc = 0;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	hdev = inst->core->device;
+	hfi = hdev->hfi_device_data;
+
+	for (i = 0; i < props->prop_num; i++) {
+		switch (props->prop_data[i].prop_type) {
+		case CVP_KMD_PROP_HFI_VERSION:
+		{
+			props->prop_data[i].data = hfi->version;
+			break;
+		}
+		default:
+			dprintk(CVP_ERR, "unrecognized sys property %d\n",
+				props->prop_data[i].prop_type);
+			rc = -EFAULT;
+		}
+	}
+	return rc;
+}
+
+static int msm_cvp_set_sysprop(struct msm_cvp_inst *inst,
+		struct cvp_kmd_arg *arg)
+{
+	struct cvp_kmd_sys_properties *props = &arg->data.sys_properties;
+	struct cvp_kmd_sys_property *prop_array;
+	struct cvp_session_prop *session_prop;
+	int i, rc = 0;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	if (props->prop_num >= MAX_KMD_PROP_NUM) {
+		dprintk(CVP_ERR, "Too many properties %d to set\n",
+			props->prop_num);
+		return -E2BIG;
+	}
+
+	prop_array = &arg->data.sys_properties.prop_data[0];
+	session_prop = &inst->prop;
+
+	for (i = 0; i < props->prop_num; i++) {
+		switch (prop_array[i].prop_type) {
+		case CVP_KMD_PROP_SESSION_TYPE:
+			session_prop->type = prop_array[i].data;
+			break;
+		case CVP_KMD_PROP_SESSION_KERNELMASK:
+			session_prop->kernel_mask = prop_array[i].data;
+			break;
+		case CVP_KMD_PROP_SESSION_PRIORITY:
+			session_prop->priority = prop_array[i].data;
+			break;
+		case CVP_KMD_PROP_SESSION_SECURITY:
+			session_prop->is_secure = prop_array[i].data;
+			break;
+		case CVP_KMD_PROP_SESSION_DSPMASK:
+			session_prop->dsp_mask = prop_array[i].data;
+			break;
+		case CVP_KMD_PROP_PWR_FDU:
+			session_prop->fdu_cycles = prop_array[i].data;
+			break;
+		case CVP_KMD_PROP_PWR_ICA:
+			session_prop->ica_cycles =
+				div_by_1dot5(prop_array[i].data);
+			break;
+		case CVP_KMD_PROP_PWR_OD:
+			session_prop->od_cycles = prop_array[i].data;
+			break;
+		case CVP_KMD_PROP_PWR_MPU:
+			session_prop->mpu_cycles = prop_array[i].data;
+			break;
+		case CVP_KMD_PROP_PWR_FW:
+			session_prop->fw_cycles =
+				div_by_1dot5(prop_array[i].data);
+			break;
+		case CVP_KMD_PROP_PWR_DDR:
+			session_prop->ddr_bw = prop_array[i].data;
+			break;
+		case CVP_KMD_PROP_PWR_SYSCACHE:
+			session_prop->ddr_cache = prop_array[i].data;
+			break;
+		case CVP_KMD_PROP_PWR_FDU_OP:
+			session_prop->fdu_op_cycles = prop_array[i].data;
+			break;
+		case CVP_KMD_PROP_PWR_ICA_OP:
+			session_prop->ica_op_cycles =
+				div_by_1dot5(prop_array[i].data);
+			break;
+		case CVP_KMD_PROP_PWR_OD_OP:
+			session_prop->od_op_cycles = prop_array[i].data;
+			break;
+		case CVP_KMD_PROP_PWR_MPU_OP:
+			session_prop->mpu_op_cycles = prop_array[i].data;
+			break;
+		case CVP_KMD_PROP_PWR_FW_OP:
+			session_prop->fw_op_cycles =
+				div_by_1dot5(prop_array[i].data);
+			break;
+		case CVP_KMD_PROP_PWR_DDR_OP:
+			session_prop->ddr_op_bw = prop_array[i].data;
+			break;
+		case CVP_KMD_PROP_PWR_SYSCACHE_OP:
+			session_prop->ddr_op_cache = prop_array[i].data;
+			break;
+		default:
+			dprintk(CVP_ERR,
+				"unrecognized sys property to set %d\n",
+				prop_array[i].prop_type);
+			rc = -EFAULT;
+		}
+	}
+	return rc;
+}
+
+static int cvp_drain_fence_cmd_queue_partial(struct msm_cvp_inst *inst)
+{
+	unsigned long wait_time;
+	struct cvp_fence_queue *q;
+	struct cvp_fence_command *f;
+	int rc = 0;
+	int count = 0, max_count = 0;
+
+	q = &inst->fence_cmd_queue;
+
+	mutex_lock(&q->lock);
+
+	list_for_each_entry(f, &q->sched_list, list) {
+		if (f->mode == OP_FLUSH)
+			continue;
+		++count;
+	}
+
+	list_for_each_entry(f, &q->wait_list, list) {
+		if (f->mode == OP_FLUSH)
+			continue;
+		++count;
+	}
+
+	mutex_unlock(&q->lock);
+	wait_time = count * CVP_MAX_WAIT_TIME * 1000;
+
+	dprintk(CVP_SYNX, "%s: wait %lu us for %d fence commands\n",
+			__func__, wait_time, count);
+
+	count = 0;
+	max_count = wait_time / 100;
+
+retry:
+	mutex_lock(&q->lock);
+	f = list_first_entry_or_null(&q->sched_list,
+			struct cvp_fence_command, list);
+	/* Wait for all normal frames to finish before return */
+	if ((f && f->mode == OP_FLUSH) ||
+		(list_empty(&q->sched_list) && list_empty(&q->wait_list))) {
+		mutex_unlock(&q->lock);
+		return rc;
+	}
+
+	mutex_unlock(&q->lock);
+	usleep_range(100, 200);
+	++count;
+	if (count < max_count) {
+		goto retry;
+	} else {
+		rc = -ETIMEDOUT;
+		dprintk(CVP_ERR, "%s: timed out!\n", __func__);
+	}
+
+	return rc;
+}
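
The drain above is a bounded poll: the budget is derived from the number of
outstanding commands, and the queue is re-checked every ~100 us until it
empties or the budget runs out. A userspace model of that loop (the
predicate and names are placeholders):

	#include <stdbool.h>
	#include <unistd.h>

	/* Poll 'done' every ~100us; give up after budget_us, mirroring
	 * the -ETIMEDOUT path of the drain helpers. */
	int drain(bool (*done)(void *), void *ctx, unsigned long budget_us)
	{
		unsigned long spent = 0;

		while (!done(ctx)) {
			if (spent >= budget_us)
				return -1;	/* timed out */
			usleep(100);
			spent += 100;
		}
		return 0;
	}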
+
+static int cvp_drain_fence_sched_list(struct msm_cvp_inst *inst)
+{
+	unsigned long wait_time;
+	struct cvp_fence_queue *q;
+	struct cvp_fence_command *f;
+	int rc = 0;
+	int count = 0, max_count = 0;
+	u64 ktid;
+
+	q = &inst->fence_cmd_queue;
+
+	mutex_lock(&q->lock);
+	list_for_each_entry(f, &q->sched_list, list) {
+		ktid = f->pkt->client_data.kdata & (FENCE_BIT - 1);
+		dprintk(CVP_SYNX, "%s: frame %llu is in sched_list\n",
+			__func__, ktid);
+		dprintk(CVP_SYNX, "%s: frameID %llu is in sched_list\n",
+			__func__, f->frame_id);
+		++count;
+	}
+	mutex_unlock(&q->lock);
+	wait_time = count * CVP_MAX_WAIT_TIME * 1000;
+
+	dprintk(CVP_SYNX, "%s: wait %lu us for %d fence commands\n",
+			__func__, wait_time, count);
+
+	count = 0;
+	max_count = wait_time / 100;
+
+retry:
+	mutex_lock(&q->lock);
+	if (list_empty(&q->sched_list)) {
+		mutex_unlock(&q->lock);
+		return rc;
+	}
+
+	mutex_unlock(&q->lock);
+	usleep_range(100, 200);
+	++count;
+	if (count < max_count) {
+		goto retry;
+	} else {
+		rc = -ETIMEDOUT;
+		dprintk(CVP_ERR, "%s: timed out!\n", __func__);
+	}
+
+	return rc;
+}
+
+static int cvp_flush_all(struct msm_cvp_inst *inst)
+{
+	int rc = 0;
+	struct msm_cvp_inst *s;
+	struct cvp_fence_queue *q;
+	struct cvp_fence_command *f, *d;
+	struct cvp_hfi_device *hdev;
+	u64 ktid;
+
+	if (!inst || !inst->core) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	s = cvp_get_inst_validate(inst->core, inst);
+	if (!s)
+		return -ECONNRESET;
+
+	q = &inst->fence_cmd_queue;
+	hdev = inst->core->device;
+
+	mutex_lock(&q->lock);
+	q->mode = OP_DRAINING;
+
+	list_for_each_entry_safe(f, d, &q->wait_list, list) {
+		ktid = f->pkt->client_data.kdata & (FENCE_BIT - 1);
+
+		dprintk(CVP_SESS, "%s: flush frame %llu from wait_list\n",
+			__func__, ktid);
+		dprintk(CVP_SESS, "%s: flush frameID %llu from wait_list\n",
+			__func__, f->frame_id);
+
+		list_del_init(&f->list);
+		msm_cvp_unmap_frame(inst, f->pkt->client_data.kdata);
+		cvp_cancel_synx(inst, CVP_OUTPUT_SYNX, f);
+		cvp_release_synx(inst, f);
+		cvp_free_fence_data(f);
+	}
+
+	list_for_each_entry(f, &q->sched_list, list) {
+		ktid = f->pkt->client_data.kdata & (FENCE_BIT - 1);
+
+		dprintk(CVP_SESS, "%s: flush frame %llu from sched_list\n",
+			__func__, ktid);
+		dprintk(CVP_SESS, "%s: flush frameID %llu from sched_list\n",
+			__func__, f->frame_id);
+		cvp_cancel_synx(inst, CVP_INPUT_SYNX, f);
+	}
+
+	mutex_unlock(&q->lock);
+
+	dprintk(CVP_SESS, "%s: send flush to fw\n", __func__);
+
+	/* Send flush to FW */
+	rc = call_hfi_op(hdev, session_flush, (void *)inst->session);
+	if (rc) {
+		dprintk(CVP_WARN, "%s: continue flush without fw. rc %d\n",
+		__func__, rc);
+		goto exit;
+	}
+
+	/* Wait for FW response */
+	rc = wait_for_sess_signal_receipt(inst, HAL_SESSION_FLUSH_DONE);
+	if (rc)
+		dprintk(CVP_WARN, "%s: wait for signal failed, rc %d\n",
+		__func__, rc);
+
+	dprintk(CVP_SESS, "%s: received flush from fw\n", __func__);
+
+exit:
+	rc = cvp_drain_fence_sched_list(inst);
+
+	mutex_lock(&q->lock);
+	q->mode = OP_NORMAL;
+	mutex_unlock(&q->lock);
+
+	cvp_put_inst(s);
+	return rc;
+}
+
+static void cvp_mark_fence_command(struct msm_cvp_inst *inst, u64 frame_id)
+{
+	bool found = false;
+	struct cvp_fence_queue *q;
+	struct cvp_fence_command *f;
+
+	q = &inst->fence_cmd_queue;
+
+	list_for_each_entry(f, &q->sched_list, list) {
+		if (found) {
+			f->mode = OP_FLUSH;
+			continue;
+		}
+
+		if (f->frame_id >= frame_id) {
+			found = true;
+			f->mode = OP_FLUSH;
+		}
+	}
+
+	list_for_each_entry(f, &q->wait_list, list) {
+		if (found) {
+			f->mode = OP_FLUSH;
+			continue;
+		}
+
+		if (f->frame_id >= frame_id) {
+			found = true;
+			f->mode = OP_FLUSH;
+		}
+	}
+}
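
cvp_mark_fence_command() is deliberately order-sensitive: the first command
whose frame_id reaches the target flips 'found', and every command after it
in queue order is marked OP_FLUSH as well, even if its own frame_id is
smaller. The same sweep on a plain singly linked list (illustrative types):

	struct cmd { struct cmd *next; unsigned long long frame_id; int flush; };

	static void mark_from(struct cmd *head, unsigned long long target)
	{
		int found = 0;
		struct cmd *c;

		for (c = head; c; c = c->next) {
			if (!found && c->frame_id >= target)
				found = 1;
			if (found)
				c->flush = 1;	/* this and all later cmds */
		}
	}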
+
+static int cvp_flush_frame(struct msm_cvp_inst *inst, u64 frame_id)
+{
+	int rc = 0;
+	struct msm_cvp_inst *s;
+	struct cvp_fence_queue *q;
+	struct cvp_fence_command *f, *d;
+	u64 ktid;
+
+	if (!inst || !inst->core) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	s = cvp_get_inst_validate(inst->core, inst);
+	if (!s)
+		return -ECONNRESET;
+
+	q = &inst->fence_cmd_queue;
+
+	mutex_lock(&q->lock);
+	q->mode = OP_DRAINING;
+
+	cvp_mark_fence_command(inst, frame_id);
+
+	list_for_each_entry_safe(f, d, &q->wait_list, list) {
+		if (f->mode != OP_FLUSH)
+			continue;
+
+		ktid = f->pkt->client_data.kdata & (FENCE_BIT - 1);
+
+		dprintk(CVP_SESS, "%s: flush frame %llu from wait_list\n",
+			__func__, ktid);
+		dprintk(CVP_SESS, "%s: flush frameID %llu from wait_list\n",
+			__func__, f->frame_id);
+
+		list_del_init(&f->list);
+		msm_cvp_unmap_frame(inst, f->pkt->client_data.kdata);
+		cvp_cancel_synx(inst, CVP_OUTPUT_SYNX, f);
+		cvp_release_synx(inst, f);
+		cvp_free_fence_data(f);
+	}
+
+	list_for_each_entry(f, &q->sched_list, list) {
+		if (f->mode != OP_FLUSH)
+			continue;
+
+		ktid = f->pkt->client_data.kdata & (FENCE_BIT - 1);
+
+		dprintk(CVP_SESS, "%s: flush frame %llu from sched_list\n",
+			__func__, ktid);
+		dprintk(CVP_SESS, "%s: flush frameID %llu from sched_list\n",
+			__func__, f->frame_id);
+		cvp_cancel_synx(inst, CVP_INPUT_SYNX, f);
+	}
+
+	mutex_unlock(&q->lock);
+
+	rc = cvp_drain_fence_cmd_queue_partial(inst);
+	if (rc)
+		dprintk(CVP_WARN, "%s: continue flush. rc %d\n",
+		__func__, rc);
+
+	rc = cvp_flush_all(inst);
+
+	cvp_put_inst(s);
+	return rc;
+}
+
+int msm_cvp_handle_syscall(struct msm_cvp_inst *inst, struct cvp_kmd_arg *arg)
+{
+	int rc = 0;
+
+	if (!inst || !arg) {
+		dprintk(CVP_ERR, "%s: invalid args\n", __func__);
+		return -EINVAL;
+	}
+	dprintk(CVP_HFI, "%s: arg->type = %x\n", __func__, arg->type);
+
+	if (arg->type != CVP_KMD_SESSION_CONTROL &&
+		arg->type != CVP_KMD_SET_SYS_PROPERTY &&
+		arg->type != CVP_KMD_GET_SYS_PROPERTY) {
+
+		rc = session_state_check_init(inst);
+		if (rc) {
+			dprintk(CVP_ERR,
+				"Incorrect session state %d for command %#x\n",
+				inst->state, arg->type);
+			return rc;
+		}
+	}
+
+	switch (arg->type) {
+	case CVP_KMD_GET_SESSION_INFO:
+	{
+		struct cvp_kmd_session_info *session =
+			(struct cvp_kmd_session_info *)&arg->data.session;
+
+		rc = msm_cvp_get_session_info(inst, session);
+		break;
+	}
+	case CVP_KMD_UPDATE_POWER:
+	{
+		rc = msm_cvp_update_power(inst);
+		break;
+	}
+	case CVP_KMD_REGISTER_BUFFER:
+	{
+		struct cvp_kmd_buffer *buf =
+			(struct cvp_kmd_buffer *)&arg->data.regbuf;
+
+		rc = msm_cvp_register_buffer(inst, buf);
+		break;
+	}
+	case CVP_KMD_UNREGISTER_BUFFER:
+	{
+		struct cvp_kmd_buffer *buf =
+			(struct cvp_kmd_buffer *)&arg->data.unregbuf;
+
+		rc = msm_cvp_unregister_buffer(inst, buf);
+		break;
+	}
+	case CVP_KMD_RECEIVE_MSG_PKT:
+	{
+		struct cvp_kmd_hfi_packet *out_pkt =
+			(struct cvp_kmd_hfi_packet *)&arg->data.hfi_pkt;
+		rc = msm_cvp_session_receive_hfi(inst, out_pkt);
+		break;
+	}
+	case CVP_KMD_SEND_CMD_PKT:
+	{
+		struct cvp_kmd_hfi_packet *in_pkt =
+			(struct cvp_kmd_hfi_packet *)&arg->data.hfi_pkt;
+
+		rc = msm_cvp_session_process_hfi(inst, in_pkt,
+				arg->buf_offset, arg->buf_num);
+		break;
+	}
+	case CVP_KMD_SEND_FENCE_CMD_PKT:
+	{
+		rc = msm_cvp_session_process_hfi_fence(inst, arg);
+		break;
+	}
+	case CVP_KMD_SESSION_CONTROL:
+		rc = msm_cvp_session_ctrl(inst, arg);
+		break;
+	case CVP_KMD_GET_SYS_PROPERTY:
+		rc = msm_cvp_get_sysprop(inst, arg);
+		break;
+	case CVP_KMD_SET_SYS_PROPERTY:
+		rc = msm_cvp_set_sysprop(inst, arg);
+		break;
+	case CVP_KMD_FLUSH_ALL:
+		rc = cvp_flush_all(inst);
+		break;
+	case CVP_KMD_FLUSH_FRAME:
+		rc = cvp_flush_frame(inst, arg->data.frame_id);
+		break;
+	default:
+		dprintk(CVP_HFI, "%s: unknown arg type %#x\n",
+				__func__, arg->type);
+		rc = -ENOTSUPP;
+		break;
+	}
+
+	return rc;
+}
+
+int msm_cvp_session_deinit(struct msm_cvp_inst *inst)
+{
+	int rc = 0;
+	struct cvp_hal_session *session;
+
+	if (!inst || !inst->core) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+	dprintk(CVP_SESS, "%s: inst %pK (%#x)\n", __func__,
+		inst, hash32_ptr(inst->session));
+
+	session = (struct cvp_hal_session *)inst->session;
+	if (!session)
+		return rc;
+
+	rc = msm_cvp_comm_try_state(inst, MSM_CVP_CLOSE_DONE);
+	if (rc)
+		dprintk(CVP_ERR, "%s: close failed\n", __func__);
+
+	rc = msm_cvp_session_deinit_buffers(inst);
+	return rc;
+}
+
+int msm_cvp_session_init(struct msm_cvp_inst *inst)
+{
+	int rc = 0;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	dprintk(CVP_SESS, "%s: inst %pK (%#x)\n", __func__,
+		inst, hash32_ptr(inst->session));
+
+	/* set default frequency */
+	inst->clk_data.core_id = 0;
+	inst->clk_data.min_freq = 1000;
+	inst->clk_data.ddr_bw = 1000;
+	inst->clk_data.sys_cache_bw = 1000;
+
+	inst->prop.type = HFI_SESSION_CV;
+	if (inst->session_type == MSM_CVP_KERNEL)
+		inst->prop.type = HFI_SESSION_DME;
+
+	inst->prop.kernel_mask = 0xFFFFFFFF;
+	inst->prop.priority = 0;
+	inst->prop.is_secure = 0;
+	inst->prop.dsp_mask = 0;
+	inst->prop.fthread_nr = 2;
+
+	return rc;
+}

+ 36 - 0
msm/eva/msm_cvp.h

@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _MSM_CVP_H_
+#define _MSM_CVP_H_
+
+#include "msm_cvp_internal.h"
+#include "msm_cvp_common.h"
+#include "msm_cvp_clocks.h"
+#include "msm_cvp_debug.h"
+#include "msm_cvp_dsp.h"
+
+static inline bool is_buf_param_valid(u32 buf_num, u32 offset)
+{
+	int max_buf_num;
+
+	max_buf_num = sizeof(struct cvp_kmd_hfi_packet) /
+			sizeof(struct cvp_buf_type);
+
+	if (buf_num > max_buf_num)
+		return false;
+
+	/* offset counts 32-bit words into pkt_data[] */
+	if ((offset * sizeof(u32) + buf_num * sizeof(struct cvp_buf_type)) >
+			sizeof(struct cvp_kmd_hfi_packet))
+		return false;
+
+	return true;
+}
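
Because offset indexes 32-bit words of pkt_data[] while the struct sizes
are in bytes, the bound has to scale the offset before comparing. A worked
standalone version with hypothetical sizes (1024-byte packet, 16-byte
buffer descriptor; the real sizeof values differ):

	#include <assert.h>
	#include <stdbool.h>

	#define PKT_BYTES 1024u	/* stand-in for sizeof(struct cvp_kmd_hfi_packet) */
	#define BUF_BYTES 16u	/* stand-in for sizeof(struct cvp_buf_type) */

	static bool param_ok(unsigned int buf_num, unsigned int offset_words)
	{
		if (buf_num > PKT_BYTES / BUF_BYTES)
			return false;
		return offset_words * 4u + buf_num * BUF_BYTES <= PKT_BYTES;
	}

	int main(void)
	{
		assert(param_ok(2, 10));	/* 40 + 32 <= 1024 */
		assert(!param_ok(65, 0));	/* more buffers than fit */
		return 0;
	}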
+
+int msm_cvp_handle_syscall(struct msm_cvp_inst *inst, struct cvp_kmd_arg *arg);
+int msm_cvp_session_init(struct msm_cvp_inst *inst);
+int msm_cvp_session_deinit(struct msm_cvp_inst *inst);
+int msm_cvp_session_queue_stop(struct msm_cvp_inst *inst);
+#endif

+ 936 - 0
msm/eva/msm_cvp_buf.c

@@ -0,0 +1,936 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include "msm_cvp_common.h"
+#include "cvp_hfi_api.h"
+#include "msm_cvp_debug.h"
+#include "msm_cvp_core.h"
+#include "msm_cvp_dsp.h"
+
+void print_smem(u32 tag, const char *str, struct msm_cvp_inst *inst,
+		struct msm_cvp_smem *smem)
+{
+	if (!(tag & msm_cvp_debug) || !inst || !smem)
+		return;
+
+	if (smem->dma_buf) {
+		dprintk(tag, "%s: %x : %s size %d flags %#x iova %#x", str,
+			hash32_ptr(inst->session), smem->dma_buf->name,
+			smem->size, smem->flags, smem->device_addr);
+	}
+}
+
+static void print_internal_buffer(u32 tag, const char *str,
+		struct msm_cvp_inst *inst, struct cvp_internal_buf *cbuf)
+{
+	if (!(tag & msm_cvp_debug) || !inst || !cbuf)
+		return;
+
+	if (cbuf->smem->dma_buf) {
+		dprintk(tag,
+		"%s: %x : fd %d off %d %s size %d iova %#x",
+		str, hash32_ptr(inst->session), cbuf->fd,
+		cbuf->offset, cbuf->smem->dma_buf->name, cbuf->size,
+		cbuf->smem->device_addr);
+	} else {
+		dprintk(tag,
+		"%s: %x : idx %2d fd %d off %d size %d iova %#x",
+		str, hash32_ptr(inst->session), cbuf->index, cbuf->fd,
+		cbuf->offset, cbuf->size, cbuf->smem->device_addr);
+	}
+}
+
+void print_cvp_buffer(u32 tag, const char *str, struct msm_cvp_inst *inst,
+		struct cvp_internal_buf *cbuf)
+{
+	dprintk(tag, "%s addr: %x size %u\n", str,
+			cbuf->smem->device_addr, cbuf->size);
+}
+
+void print_client_buffer(u32 tag, const char *str,
+		struct msm_cvp_inst *inst, struct cvp_kmd_buffer *cbuf)
+{
+	if (!(tag & msm_cvp_debug) || !inst || !cbuf)
+		return;
+
+	dprintk(tag,
+		"%s: %x : idx %2d fd %d off %d size %d type %d flags 0x%x\n",
+		str, hash32_ptr(inst->session), cbuf->index, cbuf->fd,
+		cbuf->offset, cbuf->size, cbuf->type, cbuf->flags);
+}
+
+int msm_cvp_map_buf_dsp(struct msm_cvp_inst *inst, struct cvp_kmd_buffer *buf)
+{
+	int rc = 0;
+	bool found = false;
+	struct cvp_internal_buf *cbuf;
+	struct msm_cvp_smem *smem = NULL;
+	struct cvp_hal_session *session;
+	struct dma_buf *dma_buf = NULL;
+
+	if (!inst || !inst->core || !buf) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	if (buf->fd < 0) {
+		dprintk(CVP_ERR, "%s: Invalid fd = %d", __func__, buf->fd);
+		return 0;
+	}
+
+	if (buf->offset) {
+		dprintk(CVP_ERR,
+			"%s: offset is deprecated, set to 0.\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	session = (struct cvp_hal_session *)inst->session;
+
+	mutex_lock(&inst->cvpdspbufs.lock);
+	list_for_each_entry(cbuf, &inst->cvpdspbufs.list, list) {
+		if (cbuf->fd == buf->fd) {
+			if (cbuf->size != buf->size) {
+				dprintk(CVP_ERR, "%s: buf size mismatch\n",
+					__func__);
+				mutex_unlock(&inst->cvpdspbufs.lock);
+				return -EINVAL;
+			}
+			found = true;
+			break;
+		}
+	}
+	mutex_unlock(&inst->cvpdspbufs.lock);
+	if (found) {
+		print_internal_buffer(CVP_ERR, "duplicate", inst, cbuf);
+		return -EINVAL;
+	}
+
+	dma_buf = msm_cvp_smem_get_dma_buf(buf->fd);
+	if (!dma_buf) {
+		dprintk(CVP_ERR, "%s: Invalid fd = %d", __func__, buf->fd);
+		return 0;
+	}
+
+	cbuf = kmem_cache_zalloc(cvp_driver->buf_cache, GFP_KERNEL);
+	if (!cbuf) {
+		msm_cvp_smem_put_dma_buf(dma_buf);
+		return -ENOMEM;
+	}
+
+	smem = kmem_cache_zalloc(cvp_driver->smem_cache, GFP_KERNEL);
+	if (!smem) {
+		kmem_cache_free(cvp_driver->buf_cache, cbuf);
+		msm_cvp_smem_put_dma_buf(dma_buf);
+		return -ENOMEM;
+	}
+
+	smem->dma_buf = dma_buf;
+	smem->bitmap_index = MAX_DMABUF_NUMS;
+	dprintk(CVP_DSP, "%s: dma_buf = %pK\n", __func__, dma_buf);
+	rc = msm_cvp_map_smem(inst, smem, "map dsp");
+	if (rc) {
+		print_client_buffer(CVP_ERR, "map failed", inst, buf);
+		goto exit;
+	}
+
+	if (buf->index) {
+		rc = cvp_dsp_register_buffer(hash32_ptr(session), buf->fd,
+			smem->dma_buf->size, buf->size, buf->offset,
+			buf->index, (uint32_t)smem->device_addr);
+		if (rc) {
+			dprintk(CVP_ERR,
+				"%s: failed dsp registration for fd=%d rc=%d",
+				__func__, buf->fd, rc);
+			goto exit;
+		}
+	} else {
+		dprintk(CVP_ERR, "%s: buf index is 0 fd=%d", __func__, buf->fd);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	cbuf->smem = smem;
+	cbuf->fd = buf->fd;
+	cbuf->size = buf->size;
+	cbuf->offset = buf->offset;
+	cbuf->ownership = CLIENT;
+	cbuf->index = buf->index;
+
+	mutex_lock(&inst->cvpdspbufs.lock);
+	list_add_tail(&cbuf->list, &inst->cvpdspbufs.list);
+	mutex_unlock(&inst->cvpdspbufs.lock);
+
+	return rc;
+
+exit:
+	if (smem->device_addr)
+		msm_cvp_unmap_smem(inst, smem, "unmap dsp");
+	msm_cvp_smem_put_dma_buf(smem->dma_buf);
+	kmem_cache_free(cvp_driver->buf_cache, cbuf);
+	kmem_cache_free(cvp_driver->smem_cache, smem);
+	return rc;
+}
+
+int msm_cvp_unmap_buf_dsp(struct msm_cvp_inst *inst, struct cvp_kmd_buffer *buf)
+{
+	int rc = 0;
+	bool found;
+	struct cvp_internal_buf *cbuf;
+	struct cvp_hal_session *session;
+
+	if (!inst || !inst->core || !buf) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	session = (struct cvp_hal_session *)inst->session;
+	if (!session) {
+		dprintk(CVP_ERR, "%s: invalid session\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&inst->cvpdspbufs.lock);
+	found = false;
+	list_for_each_entry(cbuf, &inst->cvpdspbufs.list, list) {
+		if (cbuf->fd == buf->fd) {
+			found = true;
+			break;
+		}
+	}
+	mutex_unlock(&inst->cvpdspbufs.lock);
+	if (!found) {
+		print_client_buffer(CVP_ERR, "invalid", inst, buf);
+		return -EINVAL;
+	}
+
+	if (buf->index) {
+		rc = cvp_dsp_deregister_buffer(hash32_ptr(session), buf->fd,
+			cbuf->smem->dma_buf->size, buf->size, buf->offset,
+			buf->index, (uint32_t)cbuf->smem->device_addr);
+		if (rc) {
+			dprintk(CVP_ERR,
+				"%s: failed dsp deregistration fd=%d rc=%d",
+				__func__, buf->fd, rc);
+			return rc;
+		}
+	}
+
+	if (cbuf->smem->device_addr) {
+		msm_cvp_unmap_smem(inst, cbuf->smem, "unmap dsp");
+		msm_cvp_smem_put_dma_buf(cbuf->smem->dma_buf);
+	}
+
+	mutex_lock(&inst->cvpdspbufs.lock);
+	list_del(&cbuf->list);
+	mutex_unlock(&inst->cvpdspbufs.lock);
+
+	kmem_cache_free(cvp_driver->smem_cache, cbuf->smem);
+	kmem_cache_free(cvp_driver->buf_cache, cbuf);
+	return rc;
+}
+
+void msm_cvp_cache_operations(struct msm_cvp_smem *smem, u32 type,
+				u32 offset, u32 size)
+{
+	enum smem_cache_ops cache_op;
+
+	if (msm_cvp_cacheop_disabled)
+		return;
+
+	if (!smem) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return;
+	}
+
+	switch (type) {
+	case CVP_KMD_BUFTYPE_INPUT:
+		cache_op = SMEM_CACHE_CLEAN;
+		break;
+	case CVP_KMD_BUFTYPE_OUTPUT:
+		cache_op = SMEM_CACHE_INVALIDATE;
+		break;
+	default:
+		cache_op = SMEM_CACHE_CLEAN_INVALIDATE;
+	}
+
+	dprintk(CVP_MEM,
+		"%s: cache operation enabled for dma_buf: %pK, cache_op: %d, offset: %d, size: %d\n",
+		__func__, smem->dma_buf, cache_op, offset, size);
+	msm_cvp_smem_cache_operations(smem->dma_buf, cache_op, offset, size);
+}
+
+static struct msm_cvp_smem *msm_cvp_session_find_smem(struct msm_cvp_inst *inst,
+				struct dma_buf *dma_buf)
+{
+	struct msm_cvp_smem *smem;
+	int i;
+
+	if (inst->dma_cache.nr > MAX_DMABUF_NUMS)
+		return NULL;
+
+	mutex_lock(&inst->dma_cache.lock);
+	for (i = 0; i < inst->dma_cache.nr; i++)
+		if (inst->dma_cache.entries[i]->dma_buf == dma_buf) {
+			set_bit(i, &inst->dma_cache.usage_bitmap);
+			smem = inst->dma_cache.entries[i];
+			smem->bitmap_index = i;
+			atomic_inc(&smem->refcount);
+			/*
+			 * The caller just took an extra dma_buf reference
+			 * via msm_cvp_smem_get_dma_buf(); the cache already
+			 * holds one, so put the extra reference to avoid a
+			 * double increment.
+			 */
+			msm_cvp_smem_put_dma_buf(smem->dma_buf);
+			mutex_unlock(&inst->dma_cache.lock);
+			print_smem(CVP_MEM, "found", inst, smem);
+			return smem;
+		}
+
+	mutex_unlock(&inst->dma_cache.lock);
+
+	return NULL;
+}
+
+static int msm_cvp_session_add_smem(struct msm_cvp_inst *inst,
+				struct msm_cvp_smem *smem)
+{
+	unsigned int i;
+	struct msm_cvp_smem *smem2;
+
+	mutex_lock(&inst->dma_cache.lock);
+	if (inst->dma_cache.nr < MAX_DMABUF_NUMS) {
+		inst->dma_cache.entries[inst->dma_cache.nr] = smem;
+		set_bit(inst->dma_cache.nr, &inst->dma_cache.usage_bitmap);
+		smem->bitmap_index = inst->dma_cache.nr;
+		inst->dma_cache.nr++;
+		i = smem->bitmap_index;
+	} else {
+		i = find_first_zero_bit(&inst->dma_cache.usage_bitmap,
+				MAX_DMABUF_NUMS);
+		if (i < MAX_DMABUF_NUMS) {
+			smem2 = inst->dma_cache.entries[i];
+			msm_cvp_unmap_smem(inst, smem2, "unmap cpu");
+			msm_cvp_smem_put_dma_buf(smem2->dma_buf);
+			kmem_cache_free(cvp_driver->smem_cache, smem2);
+
+			inst->dma_cache.entries[i] = smem;
+			smem->bitmap_index = i;
+			set_bit(i, &inst->dma_cache.usage_bitmap);
+		} else {
+			dprintk(CVP_WARN, "%s: not enough memory\n", __func__);
+			mutex_unlock(&inst->dma_cache.lock);
+			return -ENOMEM;
+		}
+	}
+
+	atomic_inc(&smem->refcount);
+	mutex_unlock(&inst->dma_cache.lock);
+	dprintk(CVP_MEM, "Add entry %d into cache\n", i);
+
+	return 0;
+}
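
The dma_cache therefore grows until MAX_DMABUF_NUMS entries exist and then
recycles the first slot whose usage bit is clear; a slot whose smem is
still referenced keeps its bit set and is never stolen. A userspace model
of that slot policy with a plain bitmask (illustrative; the driver's
eviction additionally unmaps and releases the old smem):

	#define NSLOTS 16	/* stand-in for MAX_DMABUF_NUMS */

	struct cache {
		void *entries[NSLOTS];
		unsigned long used;	/* one usage bit per slot */
		int nr;
	};

	static int cache_add(struct cache *c, void *e)
	{
		int i;

		if (c->nr < NSLOTS) {			/* still growing */
			i = c->nr++;
		} else {				/* recycle a clear slot */
			for (i = 0; i < NSLOTS; i++)
				if (!(c->used & (1UL << i)))
					break;
			if (i == NSLOTS)
				return -1;		/* every slot pinned */
		}
		c->entries[i] = e;
		c->used |= 1UL << i;
		return i;
	}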
+
+static struct msm_cvp_smem *msm_cvp_session_get_smem(struct msm_cvp_inst *inst,
+						struct cvp_buf_type *buf)
+{
+	int rc = 0, found = 1;
+	struct msm_cvp_smem *smem = NULL;
+	struct dma_buf *dma_buf = NULL;
+
+	if (buf->fd < 0) {
+		dprintk(CVP_ERR, "%s: Invalid fd = %d", __func__, buf->fd);
+		return NULL;
+	}
+
+	dma_buf = msm_cvp_smem_get_dma_buf(buf->fd);
+	if (!dma_buf) {
+		dprintk(CVP_ERR, "%s: Invalid fd = %d", __func__, buf->fd);
+		return NULL;
+	}
+
+	smem = msm_cvp_session_find_smem(inst, dma_buf);
+	if (!smem) {
+		found = 0;
+		smem = kmem_cache_zalloc(cvp_driver->smem_cache, GFP_KERNEL);
+		if (!smem)
+			return NULL;
+
+		smem->dma_buf = dma_buf;
+		smem->bitmap_index = MAX_DMABUF_NUMS;
+		rc = msm_cvp_map_smem(inst, smem, "map cpu");
+		if (rc)
+			goto exit;
+
+		rc = msm_cvp_session_add_smem(inst, smem);
+		if (rc && rc != -ENOMEM)
+			goto exit2;
+	}
+
+	if (buf->size > smem->size || buf->size > smem->size - buf->offset) {
+		dprintk(CVP_ERR, "%s: invalid offset %d or size %d\n",
+			__func__, buf->offset, buf->size);
+		if (found) {
+			mutex_lock(&inst->dma_cache.lock);
+			atomic_dec(&smem->refcount);
+			mutex_unlock(&inst->dma_cache.lock);
+			return NULL;
+		}
+		goto exit2;
+	}
+
+	return smem;
+
+exit2:
+	msm_cvp_unmap_smem(inst, smem, "unmap cpu");
+exit:
+	msm_cvp_smem_put_dma_buf(dma_buf);
+	kmem_cache_free(cvp_driver->smem_cache, smem);
+	return NULL;
+}
+
+static u32 msm_cvp_map_user_persist_buf(struct msm_cvp_inst *inst,
+				struct cvp_buf_type *buf)
+{
+	u32 iova = 0;
+	struct msm_cvp_smem *smem = NULL;
+	struct cvp_internal_buf *pbuf;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return 0;
+	}
+
+	pbuf = kmem_cache_zalloc(cvp_driver->buf_cache, GFP_KERNEL);
+	if (!pbuf)
+		return 0;
+
+	smem = msm_cvp_session_get_smem(inst, buf);
+	if (!smem)
+		goto exit;
+
+	pbuf->smem = smem;
+	pbuf->fd = buf->fd;
+	pbuf->size = buf->size;
+	pbuf->offset = buf->offset;
+	pbuf->ownership = CLIENT;
+
+	mutex_lock(&inst->persistbufs.lock);
+	list_add_tail(&pbuf->list, &inst->persistbufs.list);
+	mutex_unlock(&inst->persistbufs.lock);
+
+	print_internal_buffer(CVP_MEM, "map persist", inst, pbuf);
+
+	iova = smem->device_addr + buf->offset;
+
+	return iova;
+
+exit:
+	kmem_cache_free(cvp_driver->buf_cache, pbuf);
+	return 0;
+}
+
+u32 msm_cvp_map_frame_buf(struct msm_cvp_inst *inst,
+			struct cvp_buf_type *buf,
+			struct msm_cvp_frame *frame)
+{
+	u32 iova = 0;
+	struct msm_cvp_smem *smem = NULL;
+	u32 nr;
+	u32 type;
+
+	if (!inst || !frame) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return 0;
+	}
+
+	nr = frame->nr;
+	if (nr == MAX_FRAME_BUFFER_NUMS) {
+		dprintk(CVP_ERR, "%s: max frame buffer reached\n", __func__);
+		return 0;
+	}
+
+	smem = msm_cvp_session_get_smem(inst, buf);
+	if (!smem)
+		return 0;
+
+	frame->bufs[nr].fd = buf->fd;
+	frame->bufs[nr].smem = smem;
+	frame->bufs[nr].size = buf->size;
+	frame->bufs[nr].offset = buf->offset;
+
+	print_internal_buffer(CVP_MEM, "map cpu", inst, &frame->bufs[nr]);
+
+	frame->nr++;
+
+	type = CVP_KMD_BUFTYPE_INPUT | CVP_KMD_BUFTYPE_OUTPUT;
+	msm_cvp_cache_operations(smem, type, buf->offset, buf->size);
+
+	iova = smem->device_addr + buf->offset;
+
+	return iova;
+}
+
+static void msm_cvp_unmap_frame_buf(struct msm_cvp_inst *inst,
+			struct msm_cvp_frame *frame)
+{
+	u32 i;
+	u32 type;
+	struct msm_cvp_smem *smem = NULL;
+	struct cvp_internal_buf *buf;
+
+	type = CVP_KMD_BUFTYPE_OUTPUT;
+
+	for (i = 0; i < frame->nr; ++i) {
+		buf = &frame->bufs[i];
+		smem = buf->smem;
+		msm_cvp_cache_operations(smem, type, buf->offset, buf->size);
+
+		if (smem->bitmap_index >= MAX_DMABUF_NUMS) {
+			/* smem not in dmamap cache */
+			msm_cvp_unmap_smem(inst, smem, "unmap cpu");
+			dma_buf_put(smem->dma_buf);
+			kmem_cache_free(cvp_driver->smem_cache, smem);
+			buf->smem = NULL;
+		} else if (atomic_dec_and_test(&smem->refcount)) {
+			clear_bit(smem->bitmap_index,
+				&inst->dma_cache.usage_bitmap);
+		}
+	}
+
+	kmem_cache_free(cvp_driver->frame_cache, frame);
+}
+
+void msm_cvp_unmap_frame(struct msm_cvp_inst *inst, u64 ktid)
+{
+	struct msm_cvp_frame *frame, *dummy1;
+	bool found;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return;
+	}
+
+	ktid &= (FENCE_BIT - 1);
+	dprintk(CVP_MEM, "%s: unmap frame %llu\n", __func__, ktid);
+
+	found = false;
+	mutex_lock(&inst->frames.lock);
+	list_for_each_entry_safe(frame, dummy1, &inst->frames.list, list) {
+		if (frame->ktid == ktid) {
+			found = true;
+			list_del(&frame->list);
+			break;
+		}
+	}
+	mutex_unlock(&inst->frames.lock);
+
+	if (found)
+		msm_cvp_unmap_frame_buf(inst, frame);
+	else
+		dprintk(CVP_WARN, "%s frame %llu not found!\n", __func__, ktid);
+}
+
+int msm_cvp_unmap_user_persist(struct msm_cvp_inst *inst,
+				struct cvp_kmd_hfi_packet *in_pkt,
+				unsigned int offset, unsigned int buf_num)
+{
+	struct cvp_hfi_cmd_session_hdr *cmd_hdr;
+	struct cvp_internal_buf *pbuf, *dummy;
+	u64 ktid;
+	int rc = 0;
+	struct msm_cvp_smem *smem = NULL;
+
+	if (!offset || !buf_num)
+		return 0;
+
+	cmd_hdr = (struct cvp_hfi_cmd_session_hdr *)in_pkt;
+	ktid = cmd_hdr->client_data.kdata & (FENCE_BIT - 1);
+
+	mutex_lock(&inst->persistbufs.lock);
+	list_for_each_entry_safe(pbuf, dummy, &inst->persistbufs.list, list) {
+		if (pbuf->ktid == ktid && pbuf->ownership == CLIENT) {
+			list_del(&pbuf->list);
+			smem = pbuf->smem;
+
+			dprintk(CVP_MEM, "unmap persist: %x %d %d %#x",
+				hash32_ptr(inst->session), pbuf->fd,
+				pbuf->size, smem->device_addr);
+
+			if (smem->bitmap_index >= MAX_DMABUF_NUMS) {
+				/* smem not in dmamap cache */
+				msm_cvp_unmap_smem(inst, smem,
+						"unmap cpu");
+				dma_buf_put(smem->dma_buf);
+				kmem_cache_free(
+					cvp_driver->smem_cache,
+					smem);
+				pbuf->smem = NULL;
+			} else if (atomic_dec_and_test(
+						&smem->refcount)) {
+				clear_bit(smem->bitmap_index,
+					&inst->dma_cache.usage_bitmap);
+			}
+
+			kmem_cache_free(cvp_driver->buf_cache, pbuf);
+		}
+	}
+	mutex_unlock(&inst->persistbufs.lock);
+	return rc;
+}
+
+int msm_cvp_mark_user_persist(struct msm_cvp_inst *inst,
+			struct cvp_kmd_hfi_packet *in_pkt,
+			unsigned int offset, unsigned int buf_num)
+{
+	struct cvp_hfi_cmd_session_hdr *cmd_hdr;
+	struct cvp_internal_buf *pbuf, *dummy;
+	u64 ktid;
+	struct cvp_buf_type *buf;
+	int i, rc = 0;
+
+	if (!offset || !buf_num)
+		return 0;
+
+	cmd_hdr = (struct cvp_hfi_cmd_session_hdr *)in_pkt;
+	ktid = atomic64_inc_return(&inst->core->kernel_trans_id);
+	ktid &= (FENCE_BIT - 1);
+	cmd_hdr->client_data.kdata = ktid;
+
+	for (i = 0; i < buf_num; i++) {
+		buf = (struct cvp_buf_type *)&in_pkt->pkt_data[offset];
+		offset += sizeof(*buf) >> 2;
+
+		if (buf->fd < 0 || !buf->size)
+			continue;
+
+		mutex_lock(&inst->persistbufs.lock);
+		list_for_each_entry_safe(pbuf, dummy, &inst->persistbufs.list,
+				list) {
+			if (pbuf->ownership == CLIENT &&
+				pbuf->fd == buf->fd &&
+				pbuf->size == buf->size) {
+				buf->fd = pbuf->smem->device_addr;
+				rc = 1;
+				break;
+			}
+		}
+		mutex_unlock(&inst->persistbufs.lock);
+		if (!rc) {
+			dprintk(CVP_ERR, "%s No persist buf %d found\n",
+				__func__, buf->fd);
+			rc = -EFAULT;
+			break;
+		}
+		pbuf->ktid = ktid;
+		rc = 0;
+	}
+	return rc;
+}
+
+int msm_cvp_map_user_persist(struct msm_cvp_inst *inst,
+			struct cvp_kmd_hfi_packet *in_pkt,
+			unsigned int offset, unsigned int buf_num)
+{
+	struct cvp_buf_type *buf;
+	int i;
+	u32 iova;
+
+	if (!offset || !buf_num)
+		return 0;
+
+	for (i = 0; i < buf_num; i++) {
+		buf = (struct cvp_buf_type *)&in_pkt->pkt_data[offset];
+		offset += sizeof(*buf) >> 2;
+
+		if (buf->fd < 0 || !buf->size)
+			continue;
+
+		iova = msm_cvp_map_user_persist_buf(inst, buf);
+		if (!iova) {
+			dprintk(CVP_ERR,
+				"%s: buf %d register failed.\n",
+				__func__, i);
+
+			return -EINVAL;
+		}
+		buf->fd = iova;
+	}
+	return 0;
+}
+
+int msm_cvp_map_frame(struct msm_cvp_inst *inst,
+		struct cvp_kmd_hfi_packet *in_pkt,
+		unsigned int offset, unsigned int buf_num)
+{
+	struct cvp_buf_type *buf;
+	int i;
+	u32 iova;
+	u64 ktid;
+	struct msm_cvp_frame *frame;
+	struct cvp_hfi_cmd_session_hdr *cmd_hdr;
+
+	if (!offset || !buf_num)
+		return 0;
+
+	cmd_hdr = (struct cvp_hfi_cmd_session_hdr *)in_pkt;
+	ktid = atomic64_inc_return(&inst->core->kernel_trans_id);
+	ktid &= (FENCE_BIT - 1);
+	cmd_hdr->client_data.kdata = ktid;
+
+	frame = kmem_cache_zalloc(cvp_driver->frame_cache, GFP_KERNEL);
+	if (!frame)
+		return -ENOMEM;
+
+	frame->ktid = ktid;
+	frame->nr = 0;
+	frame->pkt_type = cmd_hdr->packet_type;
+
+	for (i = 0; i < buf_num; i++) {
+		buf = (struct cvp_buf_type *)&in_pkt->pkt_data[offset];
+		offset += sizeof(*buf) >> 2;
+
+		if (buf->fd < 0 || !buf->size)
+			continue;
+
+		iova = msm_cvp_map_frame_buf(inst, buf, frame);
+		if (!iova) {
+			dprintk(CVP_ERR,
+				"%s: buf %d register failed.\n",
+				__func__, i);
+
+			msm_cvp_unmap_frame_buf(inst, frame);
+			return -EINVAL;
+		}
+		buf->fd = iova;
+	}
+
+	mutex_lock(&inst->frames.lock);
+	list_add_tail(&frame->list, &inst->frames.list);
+	mutex_unlock(&inst->frames.lock);
+	dprintk(CVP_MEM, "%s: map frame %llu\n", __func__, ktid);
+
+	return 0;
+}
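
Every submission thus carries a kernel transaction id taken from a global
counter and masked to the bits below FENCE_BIT; fenced submissions later OR
FENCE_BIT into the same field (see msm_cvp_session_process_hfi_fence()),
and consumers strip it again with & (FENCE_BIT - 1). A sketch of that
encoding, assuming FENCE_BIT is a single high bit, as the masks imply:

	#include <stdint.h>

	#define FENCE_BIT (1ULL << 63)	/* assumed value: one high flag bit */

	static uint64_t make_ktid(uint64_t counter, int fenced)
	{
		uint64_t ktid = counter & (FENCE_BIT - 1);	/* id payload */

		return fenced ? (ktid | FENCE_BIT) : ktid;
	}

	/* Consumers recover the id with: ktid & (FENCE_BIT - 1). */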
+
+int msm_cvp_session_deinit_buffers(struct msm_cvp_inst *inst)
+{
+	int rc = 0, i;
+	struct cvp_internal_buf *cbuf, *dummy;
+	struct msm_cvp_frame *frame, *dummy1;
+	struct msm_cvp_smem *smem;
+	struct cvp_hal_session *session;
+
+	session = (struct cvp_hal_session *)inst->session;
+
+	mutex_lock(&inst->frames.lock);
+	list_for_each_entry_safe(frame, dummy1, &inst->frames.list, list) {
+		list_del(&frame->list);
+		msm_cvp_unmap_frame_buf(inst, frame);
+	}
+	mutex_unlock(&inst->frames.lock);
+
+	mutex_lock(&inst->dma_cache.lock);
+	for (i = 0; i < inst->dma_cache.nr; i++) {
+		smem = inst->dma_cache.entries[i];
+		if (atomic_read(&smem->refcount) == 0)
+			print_smem(CVP_MEM, "free", inst, smem);
+		else
+			print_smem(CVP_WARN, "in use", inst, smem);
+		msm_cvp_unmap_smem(inst, smem, "unmap cpu");
+		msm_cvp_smem_put_dma_buf(smem->dma_buf);
+		kmem_cache_free(cvp_driver->smem_cache, smem);
+		inst->dma_cache.entries[i] = NULL;
+	}
+	mutex_unlock(&inst->dma_cache.lock);
+
+	mutex_lock(&inst->cvpdspbufs.lock);
+	list_for_each_entry_safe(cbuf, dummy, &inst->cvpdspbufs.list,
+			list) {
+		print_internal_buffer(CVP_MEM, "remove dspbufs", inst, cbuf);
+		rc = cvp_dsp_deregister_buffer(hash32_ptr(session),
+			cbuf->fd, cbuf->smem->dma_buf->size, cbuf->size,
+			cbuf->offset, cbuf->index,
+			(uint32_t)cbuf->smem->device_addr);
+		if (rc)
+			dprintk(CVP_ERR,
+				"%s: failed dsp deregistration fd=%d rc=%d",
+				__func__, cbuf->fd, rc);
+
+		msm_cvp_unmap_smem(inst, cbuf->smem, "unmap dsp");
+		msm_cvp_smem_put_dma_buf(cbuf->smem->dma_buf);
+		list_del(&cbuf->list);
+		kmem_cache_free(cvp_driver->buf_cache, cbuf);
+	}
+	mutex_unlock(&inst->cvpdspbufs.lock);
+
+	return rc;
+}
+
+void msm_cvp_print_inst_bufs(struct msm_cvp_inst *inst)
+{
+	struct cvp_internal_buf *buf;
+	int i;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s - invalid param %pK\n",
+			__func__, inst);
+		return;
+	}
+
+	dprintk(CVP_ERR, "active session cmd %d\n", inst->cur_cmd_type);
+	dprintk(CVP_ERR,
+			"---Buffer details for inst: %pK of type: %d---\n",
+			inst, inst->session_type);
+	mutex_lock(&inst->dma_cache.lock);
+	dprintk(CVP_ERR, "dma cache:\n");
+	if (inst->dma_cache.nr <= MAX_DMABUF_NUMS)
+		for (i = 0; i < inst->dma_cache.nr; i++)
+			print_smem(CVP_ERR, "bufdump", inst,
+					inst->dma_cache.entries[i]);
+	mutex_unlock(&inst->dma_cache.lock);
+
+	mutex_lock(&inst->cvpdspbufs.lock);
+	dprintk(CVP_ERR, "dsp buffer list:\n");
+	list_for_each_entry(buf, &inst->cvpdspbufs.list, list)
+		print_cvp_buffer(CVP_ERR, "bufdump", inst, buf);
+	mutex_unlock(&inst->cvpdspbufs.lock);
+
+	mutex_lock(&inst->persistbufs.lock);
+	dprintk(CVP_ERR, "persist buffer list:\n");
+	list_for_each_entry(buf, &inst->persistbufs.list, list)
+		print_cvp_buffer(CVP_ERR, "bufdump", inst, buf);
+	mutex_unlock(&inst->persistbufs.lock);
+}
+
+struct cvp_internal_buf *cvp_allocate_arp_bufs(struct msm_cvp_inst *inst,
+			u32 buffer_size)
+{
+	struct cvp_internal_buf *buf;
+	struct msm_cvp_list *buf_list;
+	u32 smem_flags = SMEM_UNCACHED;
+	int rc = 0;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s Invalid input\n", __func__);
+		return NULL;
+	}
+
+	buf_list = &inst->persistbufs;
+
+	if (!buffer_size)
+		return NULL;
+
+	/* PERSIST buffer requires secure mapping */
+	smem_flags |= SMEM_SECURE | SMEM_NON_PIXEL;
+
+	buf = kmem_cache_zalloc(cvp_driver->buf_cache, GFP_KERNEL);
+	if (!buf) {
+		dprintk(CVP_ERR, "%s Out of memory\n", __func__);
+		goto fail_kzalloc;
+	}
+
+	buf->smem = kmem_cache_zalloc(cvp_driver->smem_cache, GFP_KERNEL);
+	if (!buf->smem) {
+		dprintk(CVP_ERR, "%s Out of memory\n", __func__);
+		goto fail_kzalloc;
+	}
+
+	rc = msm_cvp_smem_alloc(buffer_size, 1, smem_flags, 0,
+			&(inst->core->resources), buf->smem);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to allocate ARP memory\n");
+		goto err_no_mem;
+	}
+
+	buf->size = buf->smem->size;
+	buf->type = HFI_BUFFER_INTERNAL_PERSIST_1;
+	buf->ownership = DRIVER;
+
+	mutex_lock(&buf_list->lock);
+	list_add_tail(&buf->list, &buf_list->list);
+	mutex_unlock(&buf_list->lock);
+	return buf;
+
+err_no_mem:
+	kmem_cache_free(cvp_driver->buf_cache, buf);
+fail_kzalloc:
+	return NULL;
+}
+
+int cvp_release_arp_buffers(struct msm_cvp_inst *inst)
+{
+	struct msm_cvp_smem *smem;
+	struct list_head *ptr, *next;
+	struct cvp_internal_buf *buf;
+	int rc = 0;
+	struct msm_cvp_core *core;
+	struct cvp_hfi_device *hdev;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "Invalid instance pointer = %pK\n", inst);
+		return -EINVAL;
+	}
+
+	core = inst->core;
+	if (!core) {
+		dprintk(CVP_ERR, "Invalid core pointer = %pK\n", core);
+		return -EINVAL;
+	}
+	hdev = core->device;
+	if (!hdev) {
+		dprintk(CVP_ERR, "Invalid device pointer = %pK\n", hdev);
+		return -EINVAL;
+	}
+
+	dprintk(CVP_MEM, "release persist buffer!\n");
+
+	mutex_lock(&inst->persistbufs.lock);
+	/* Workaround for FW: release buffer means release all */
+	if (inst->state <= MSM_CVP_CLOSE_DONE) {
+		rc = call_hfi_op(hdev, session_release_buffers,
+				(void *)inst->session);
+		if (!rc) {
+			mutex_unlock(&inst->persistbufs.lock);
+			rc = wait_for_sess_signal_receipt(inst,
+				HAL_SESSION_RELEASE_BUFFER_DONE);
+			if (rc)
+				dprintk(CVP_WARN,
+				"%s: wait for signal failed, rc %d\n",
+				__func__, rc);
+			mutex_lock(&inst->persistbufs.lock);
+		} else {
+			dprintk(CVP_WARN, "Fail to send Rel prst buf\n");
+		}
+	}
+
+	list_for_each_safe(ptr, next, &inst->persistbufs.list) {
+		buf = list_entry(ptr, struct cvp_internal_buf, list);
+		smem = buf->smem;
+		if (!smem) {
+			dprintk(CVP_ERR, "%s invalid smem\n", __func__);
+			mutex_unlock(&inst->persistbufs.lock);
+			return -EINVAL;
+		}
+
+		list_del(&buf->list);
+
+		if (buf->ownership == DRIVER) {
+			dprintk(CVP_MEM,
+				"%s: %x : fd %d %s size %d\n",
+				"free arp", hash32_ptr(inst->session), buf->fd,
+				smem->dma_buf->name, buf->size);
+			msm_cvp_smem_free(smem);
+			kmem_cache_free(cvp_driver->smem_cache, smem);
+		}
+		buf->smem = NULL;
+		kmem_cache_free(cvp_driver->buf_cache, buf);
+	}
+	mutex_unlock(&inst->persistbufs.lock);
+	return rc;
+}
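
Reviewer note: this file consistently splits buffers by ownership: DRIVER
means the smem was allocated by the kernel (ARP) and is freed in
cvp_release_arp_buffers(), while CLIENT means the buffer is dma-buf fd-backed
and is only mapped/unmapped, never freed here. A hypothetical helper
summarizing the convention observed above:

    static const char *cvp_owner_desc_example(enum buffer_owner o)
    {
        switch (o) {
        case DRIVER:
            return "driver-allocated (ARP), freed by the driver";
        case CLIENT:
            return "user dma-buf fd, mapped/unmapped only";
        case FIRMWARE:
            return "firmware-owned, unused in this patch";
        default:
            return "unknown";
        }
    }
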

+ 198 - 0
msm/eva/msm_cvp_buf.h

@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _MSM_CVP_BUF_H_
+#define _MSM_CVP_BUF_H_
+
+#include <linux/poll.h>
+#include <linux/types.h>
+#include <linux/dma-buf.h>
+#include <linux/ion.h>
+#include <linux/msm_ion.h>
+#include <linux/refcount.h>
+#include <media/msm_cvp_private.h>
+
+#define MAX_FRAME_BUFFER_NUMS 30
+#define MAX_DMABUF_NUMS 64
+
+struct msm_cvp_inst;
+struct msm_cvp_platform_resources;
+struct msm_cvp_list;
+
+enum smem_cache_ops {
+	SMEM_CACHE_CLEAN,
+	SMEM_CACHE_INVALIDATE,
+	SMEM_CACHE_CLEAN_INVALIDATE,
+};
+
+enum smem_prop {
+	SMEM_UNCACHED = 0x1,
+	SMEM_CACHED = 0x2,
+	SMEM_SECURE = 0x4,
+	SMEM_ADSP = 0x8,
+	SMEM_NON_PIXEL = 0x10
+};
+
+struct msm_cvp_list {
+	struct list_head list;
+	struct mutex lock;
+};
+
+static inline void INIT_MSM_CVP_LIST(struct msm_cvp_list *mlist)
+{
+	mutex_init(&mlist->lock);
+	INIT_LIST_HEAD(&mlist->list);
+}
+
+static inline void DEINIT_MSM_CVP_LIST(struct msm_cvp_list *mlist)
+{
+	mutex_destroy(&mlist->lock);
+}
+
+struct cvp_dma_mapping_info {
+	struct device *dev;
+	struct iommu_domain *domain;
+	struct sg_table *table;
+	struct dma_buf_attachment *attach;
+	struct dma_buf *buf;
+	void *cb_info;
+};
+
+struct msm_cvp_smem {
+	struct list_head list;
+	atomic_t refcount;
+	struct dma_buf *dma_buf;
+	void *kvaddr;
+	u32 device_addr;
+	dma_addr_t dma_handle;
+	u32 size;
+	u32 bitmap_index;
+	u32 flags;
+	u32 ion_flags;
+	struct cvp_dma_mapping_info mapping_info;
+};
+
+struct cvp_dmamap_cache {
+	unsigned long usage_bitmap;
+	struct mutex lock;
+	struct msm_cvp_smem *entries[MAX_DMABUF_NUMS];
+	unsigned int nr;
+};
+
+static inline void INIT_DMAMAP_CACHE(struct cvp_dmamap_cache *cache)
+{
+	mutex_init(&cache->lock);
+	cache->usage_bitmap = 0;
+	cache->nr = 0;
+}
+
+static inline void DEINIT_DMAMAP_CACHE(struct cvp_dmamap_cache *cache)
+{
+	mutex_destroy(&cache->lock);
+}
+
+struct cvp_buf_type {
+	s32 fd;
+	u32 size;
+	u32 offset;
+	u32 flags;
+	union {
+		struct dma_buf *dbuf;
+		struct {
+			u32 reserved1;
+			u32 reserved2;
+		};
+	};
+};
+
+enum buffer_owner {
+	DRIVER,
+	FIRMWARE,
+	CLIENT,
+	MAX_OWNER
+};
+
+struct cvp_internal_buf {
+	struct list_head list;
+	s32 fd;
+	u32 size;
+	u32 offset;
+	u32 type;
+	u32 index;
+	u64 ktid;
+	enum buffer_owner ownership;
+	struct msm_cvp_smem *smem;
+};
+
+struct msm_cvp_frame {
+	struct list_head list;
+	struct cvp_internal_buf bufs[MAX_FRAME_BUFFER_NUMS];
+	u32 nr;
+	u64 ktid;
+	u32 pkt_type;
+};
+
+void print_cvp_buffer(u32 tag, const char *str,
+		struct msm_cvp_inst *inst,
+		struct cvp_internal_buf *cbuf);
+void print_client_buffer(u32 tag, const char *str,
+		struct msm_cvp_inst *inst,
+		struct cvp_kmd_buffer *cbuf);
+void print_smem(u32 tag, const char *str,
+		struct msm_cvp_inst *inst,
+		struct msm_cvp_smem *smem);
+
+/*Kernel DMA buffer and IOMMU mapping functions*/
+int msm_cvp_smem_alloc(size_t size, u32 align, u32 flags, int map_kernel,
+			void  *res, struct msm_cvp_smem *smem);
+int msm_cvp_smem_free(struct msm_cvp_smem *smem);
+struct context_bank_info *msm_cvp_smem_get_context_bank(bool is_secure,
+				struct msm_cvp_platform_resources *res,
+				unsigned long ion_flags);
+int msm_cvp_map_smem(struct msm_cvp_inst *inst,
+			struct msm_cvp_smem *smem,
+			const char *str);
+int msm_cvp_unmap_smem(struct msm_cvp_inst *inst,
+			struct msm_cvp_smem *smem,
+			const char *str);
+struct dma_buf *msm_cvp_smem_get_dma_buf(int fd);
+void msm_cvp_smem_put_dma_buf(void *dma_buf);
+int msm_cvp_smem_cache_operations(struct dma_buf *dbuf,
+				enum smem_cache_ops cache_op,
+				unsigned long offset,
+				unsigned long size);
+
+/* CVP driver internal buffer management functions*/
+struct cvp_internal_buf *cvp_allocate_arp_bufs(struct msm_cvp_inst *inst,
+					u32 buffer_size);
+int cvp_release_arp_buffers(struct msm_cvp_inst *inst);
+int msm_cvp_map_buf_dsp(struct msm_cvp_inst *inst,
+			struct cvp_kmd_buffer *buf);
+int msm_cvp_unmap_buf_dsp(struct msm_cvp_inst *inst,
+			struct cvp_kmd_buffer *buf);
+void msm_cvp_cache_operations(struct msm_cvp_smem *smem,
+			u32 type, u32 offset, u32 size);
+u32 msm_cvp_map_frame_buf(struct msm_cvp_inst *inst,
+			struct cvp_buf_type *buf,
+			struct msm_cvp_frame *frame);
+int msm_cvp_mark_user_persist(struct msm_cvp_inst *inst,
+			struct cvp_kmd_hfi_packet *in_pkt,
+			unsigned int offset, unsigned int buf_num);
+int msm_cvp_map_user_persist(struct msm_cvp_inst *inst,
+			struct cvp_kmd_hfi_packet *in_pkt,
+			unsigned int offset, unsigned int buf_num);
+int msm_cvp_unmap_user_persist(struct msm_cvp_inst *inst,
+			struct cvp_kmd_hfi_packet *in_pkt,
+			unsigned int offset, unsigned int buf_num);
+int msm_cvp_map_frame(struct msm_cvp_inst *inst,
+		struct cvp_kmd_hfi_packet *in_pkt,
+		unsigned int offset, unsigned int buf_num);
+void msm_cvp_unmap_frame(struct msm_cvp_inst *inst, u64 ktid);
+int msm_cvp_session_deinit_buffers(struct msm_cvp_inst *inst);
+void msm_cvp_print_inst_bufs(struct msm_cvp_inst *inst);
+#endif
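
Reviewer note: the msm_cvp_list wrapper above is the one concurrency
primitive this header exports, and every list in the driver (persistbufs,
cvpdspbufs, frames) follows the same lock-around-list discipline. A minimal
usage sketch with a hypothetical function name:

    static void cvp_list_usage_example(struct msm_cvp_list *mlist,
            struct cvp_internal_buf *buf)
    {
        INIT_MSM_CVP_LIST(mlist);

        mutex_lock(&mlist->lock);
        list_add_tail(&buf->list, &mlist->list);
        mutex_unlock(&mlist->lock);

        /* ... consume and unlink entries under the same lock ... */

        DEINIT_MSM_CVP_LIST(mlist);
    }
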

+ 25 - 0
msm/eva/msm_cvp_clocks.c

@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include "msm_cvp_common.h"
+#include "cvp_hfi_api.h"
+#include "msm_cvp_debug.h"
+#include "msm_cvp_clocks.h"
+
+int msm_cvp_set_clocks(struct msm_cvp_core *core)
+{
+	struct cvp_hfi_device *hdev;
+	int rc;
+
+	if (!core || !core->device) {
+		dprintk(CVP_ERR, "%s Invalid args: %pK\n", __func__, core);
+		return -EINVAL;
+	}
+
+	hdev = core->device;
+	rc = call_hfi_op(hdev, scale_clocks,
+		hdev->hfi_device_data, core->curr_freq);
+	return rc;
+}
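
Reviewer note: call_hfi_op() is used here and throughout the driver; its
definition lives in cvp_hfi_api.h, outside this patch. In the msm video/cvp
drivers it is conventionally a NULL-guarded indirect call along these lines
(shown for context only; the exact form may differ):

    #define call_hfi_op(q, op, args...)			\
        (((q) && (q)->op) ? ((q)->op(args)) : 0)
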

+ 12 - 0
msm/eva/msm_cvp_clocks.h

@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+
+#ifndef _MSM_CVP_CLOCKS_H_
+#define _MSM_CVP_CLOCKS_H_
+#include "msm_cvp_internal.h"
+
+int msm_cvp_set_clocks(struct msm_cvp_core *core);
+#endif

+ 1610 - 0
msm/eva/msm_cvp_common.c

@@ -0,0 +1,1610 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <asm/div64.h>
+#include "msm_cvp_common.h"
+#include "cvp_hfi_api.h"
+#include "msm_cvp_debug.h"
+#include "msm_cvp_clocks.h"
+#include "msm_cvp.h"
+#include "cvp_core_hfi.h"
+
+#define IS_ALREADY_IN_STATE(__p, __d) (\
+	(__p >= __d)\
+)
+
+static void handle_session_error(enum hal_command_response cmd, void *data);
+
+static void dump_hfi_queue(struct iris_hfi_device *device)
+{
+	struct cvp_hfi_queue_header *queue;
+	struct cvp_iface_q_info *qinfo;
+	int i;
+	u32 *read_ptr, read_idx;
+
+	dprintk(CVP_ERR, "HFI queues in order of cmd(rd, wr), msg and dbg:\n");
+
+	/*
+	 * mb() to ensure driver reads the updated header values from
+	 * main memory.
+	 */
+	mb();
+	for (i = 0; i <= CVP_IFACEQ_DBGQ_IDX; i++) {
+		qinfo = &device->iface_queues[i];
+		queue = (struct cvp_hfi_queue_header *)qinfo->q_hdr;
+		if (!queue) {
+			dprintk(CVP_ERR, "HFI queue not init, fail to dump\n");
+			return;
+		}
+		dprintk(CVP_ERR, "queue details: %d %d\n",
+				queue->qhdr_read_idx, queue->qhdr_write_idx);
+		if (queue->qhdr_read_idx != queue->qhdr_write_idx) {
+			read_idx = queue->qhdr_read_idx;
+			read_ptr = (u32 *)((qinfo->q_array.align_virtual_addr) +
+				(read_idx << 2));
+			dprintk(CVP_ERR, "queue payload: %x %x %x %x\n",
+				read_ptr[0], read_ptr[1],
+				read_ptr[2], read_ptr[3]);
+		}
+	}
+}
+
+struct msm_cvp_core *get_cvp_core(int core_id)
+{
+	struct msm_cvp_core *core;
+	int found = 0;
+
+	if (core_id > MSM_CVP_CORES_MAX) {
+		dprintk(CVP_ERR, "Core id = %d is greater than max = %d\n",
+			core_id, MSM_CVP_CORES_MAX);
+		return NULL;
+	}
+	mutex_lock(&cvp_driver->lock);
+	list_for_each_entry(core, &cvp_driver->cores, list) {
+		if (core->id == core_id) {
+			found = 1;
+			break;
+		}
+	}
+	mutex_unlock(&cvp_driver->lock);
+	if (found)
+		return core;
+	return NULL;
+}
+
+static void handle_sys_init_done(enum hal_command_response cmd, void *data)
+{
+	struct msm_cvp_cb_cmd_done *response = data;
+	struct msm_cvp_core *core;
+	struct cvp_hal_sys_init_done *sys_init_msg;
+	u32 index;
+
+	if (!IS_HAL_SYS_CMD(cmd)) {
+		dprintk(CVP_ERR, "%s - invalid cmd\n", __func__);
+		return;
+	}
+
+	index = SYS_MSG_INDEX(cmd);
+
+	if (!response) {
+		dprintk(CVP_ERR,
+			"Failed to get valid response for sys init\n");
+		return;
+	}
+	core = get_cvp_core(response->device_id);
+	if (!core) {
+		dprintk(CVP_ERR, "Wrong device_id received\n");
+		return;
+	}
+	sys_init_msg = &response->data.sys_init_done;
+	if (!sys_init_msg) {
+		dprintk(CVP_ERR, "sys_init_done message not proper\n");
+		return;
+	}
+
+	/* This should come from sys_init_done */
+	core->resources.max_inst_count =
+		sys_init_msg->max_sessions_supported ?
+		min_t(u32, sys_init_msg->max_sessions_supported,
+		MAX_SUPPORTED_INSTANCES) : MAX_SUPPORTED_INSTANCES;
+
+	core->resources.max_secure_inst_count =
+		core->resources.max_secure_inst_count ?
+		core->resources.max_secure_inst_count :
+		core->resources.max_inst_count;
+
+	memcpy(core->capabilities, sys_init_msg->capabilities,
+		sys_init_msg->codec_count * sizeof(struct msm_cvp_capability));
+
+	dprintk(CVP_CORE,
+		"%s: max_inst_count %d, max_secure_inst_count %d\n",
+		__func__, core->resources.max_inst_count,
+		core->resources.max_secure_inst_count);
+
+	complete(&(core->completions[index]));
+}
+
+static void put_inst_helper(struct kref *kref)
+{
+	struct msm_cvp_inst *inst = container_of(kref,
+			struct msm_cvp_inst, kref);
+
+	msm_cvp_destroy(inst);
+}
+
+void cvp_put_inst(struct msm_cvp_inst *inst)
+{
+	if (!inst)
+		return;
+
+	kref_put(&inst->kref, put_inst_helper);
+}
+
+struct msm_cvp_inst *cvp_get_inst(struct msm_cvp_core *core,
+		void *session_id)
+{
+	struct msm_cvp_inst *inst = NULL;
+	bool matches = false;
+
+	if (!core || !session_id)
+		return NULL;
+
+	mutex_lock(&core->lock);
+	/*
+	 * This is as good as !list_empty(&inst->list), but at this point
+	 * we don't really know if inst was kfree'd via close syscall before
+	 * hardware could respond.  So manually walk through the list of
+	 * active sessions.
+	 */
+	list_for_each_entry(inst, &core->instances, list) {
+		if (inst == session_id) {
+			/*
+			 * Even if the instance is valid, we really shouldn't
+			 * be receiving or handling callbacks when we've deleted
+			 * our session with HFI
+			 */
+			matches = !!inst->session;
+			break;
+		}
+	}
+
+	/*
+	 * kref_* is atomic_int backed, so no need for inst->lock.  But we can
+	 * always acquire inst->lock and release it in cvp_put_inst
+	 * for a stronger locking system.
+	 */
+	inst = (matches && kref_get_unless_zero(&inst->kref)) ? inst : NULL;
+	mutex_unlock(&core->lock);
+
+	return inst;
+}
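
Reviewer note: the lookup above is the standard "search under lock, then
kref_get_unless_zero()" idiom: taking the reference while core->lock is still
held guarantees the instance cannot reach refcount zero (and be freed by
put_inst_helper()) between the match and the get. Generic, self-contained
form of the pattern, with hypothetical types:

    #include <linux/kref.h>
    #include <linux/list.h>
    #include <linux/mutex.h>

    struct obj {
        struct list_head node;
        struct kref kref;
        int id;
    };

    static struct obj *obj_lookup(struct list_head *table,
            struct mutex *lock, int id)
    {
        struct obj *it, *found = NULL;

        mutex_lock(lock);
        list_for_each_entry(it, table, node) {
            if (it->id == id && kref_get_unless_zero(&it->kref)) {
                found = it;	/* ref taken before the lock drops */
                break;
            }
        }
        mutex_unlock(lock);
        return found;
    }
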
+
+struct msm_cvp_inst *cvp_get_inst_validate(struct msm_cvp_core *core,
+		void *session_id)
+{
+	int rc = 0;
+	struct cvp_hfi_device *hdev;
+	struct msm_cvp_inst *s;
+
+	s = cvp_get_inst(core, session_id);
+	if (!s) {
+		dprintk(CVP_ERR, "%s session doesn't exit\n",
+			__builtin_return_address(0));
+		return NULL;
+	}
+
+	hdev = s->core->device;
+	rc = call_hfi_op(hdev, validate_session, s->session, __func__);
+	if (rc) {
+		cvp_put_inst(s);
+		s = NULL;
+	}
+
+	return s;
+}
+
+static void cvp_handle_session_cmd_done(enum hal_command_response cmd,
+	void *data)
+{
+	struct msm_cvp_cb_cmd_done *response = data;
+	struct msm_cvp_inst *inst;
+
+	if (!response) {
+		dprintk(CVP_ERR, "%s: Invalid release_buf_done response\n",
+			__func__);
+		return;
+	}
+
+	inst = cvp_get_inst(get_cvp_core(response->device_id),
+			response->session_id);
+	if (!inst) {
+		dprintk(CVP_ERR, "%s: Got response for an inactive session\n",
+			__func__);
+		return;
+	}
+
+	if (response->status)
+		dprintk(CVP_ERR, "HFI MSG error %d cmd response %d\n",
+			response->status, cmd);
+
+	dprintk(CVP_SESS, "%s: inst=%pK\n", __func__, inst);
+
+	if (IS_HAL_SESSION_CMD(cmd)) {
+		dprintk(CVP_INFO, "%s: calling completion for index = %d",
+			__func__, SESSION_MSG_INDEX(cmd));
+		complete(&inst->completions[SESSION_MSG_INDEX(cmd)]);
+	} else
+		dprintk(CVP_ERR,
+			"%s: Invalid inst cmd response: %d\n", __func__, cmd);
+	cvp_put_inst(inst);
+}
+
+static void handle_session_set_buf_done(enum hal_command_response cmd,
+	void *data)
+{
+	struct msm_cvp_cb_cmd_done *response = data;
+	struct msm_cvp_inst *inst;
+
+	if (!response) {
+		dprintk(CVP_ERR, "Invalid set_buf_done response\n");
+		return;
+	}
+
+	inst = cvp_get_inst(get_cvp_core(response->device_id),
+			response->session_id);
+	if (!inst) {
+		dprintk(CVP_WARN, "set_buf_done has an inactive session\n");
+		return;
+	}
+
+	if (response->status) {
+		dprintk(CVP_ERR,
+			"set ARP buffer error from FW : %#x\n",
+			response->status);
+	}
+
+	if (IS_HAL_SESSION_CMD(cmd))
+		complete(&inst->completions[SESSION_MSG_INDEX(cmd)]);
+	else
+		dprintk(CVP_ERR, "set_buf_done: invalid cmd: %d\n", cmd);
+	cvp_put_inst(inst);
+}
+
+static void handle_session_release_buf_done(enum hal_command_response cmd,
+	void *data)
+{
+	struct msm_cvp_cb_cmd_done *response = data;
+	struct msm_cvp_inst *inst;
+	struct cvp_internal_buf *buf;
+	struct list_head *ptr, *next;
+	u32 buf_found = false;
+	u32 address;
+
+	if (!response) {
+		dprintk(CVP_ERR, "Invalid release_buf_done response\n");
+		return;
+	}
+
+	inst = cvp_get_inst(get_cvp_core(response->device_id),
+			response->session_id);
+	if (!inst) {
+		dprintk(CVP_WARN,
+			"%s: Got a response for an inactive session\n",
+			__func__);
+		return;
+	}
+
+	address = response->data.buffer_addr;
+
+	mutex_lock(&inst->persistbufs.lock);
+	list_for_each_safe(ptr, next, &inst->persistbufs.list) {
+		buf = list_entry(ptr, struct cvp_internal_buf, list);
+		if (address == buf->smem->device_addr + buf->offset) {
+			dprintk(CVP_SESS, "releasing persist: %#x\n",
+					buf->smem->device_addr);
+			buf_found = true;
+		}
+	}
+	mutex_unlock(&inst->persistbufs.lock);
+
+	if (IS_HAL_SESSION_CMD(cmd))
+		complete(&inst->completions[SESSION_MSG_INDEX(cmd)]);
+	else
+		dprintk(CVP_ERR, "Invalid inst cmd response: %d\n", cmd);
+
+	cvp_put_inst(inst);
+}
+
+static void handle_sys_release_res_done(
+		enum hal_command_response cmd, void *data)
+{
+	struct msm_cvp_cb_cmd_done *response = data;
+	struct msm_cvp_core *core;
+
+	if (!response) {
+		dprintk(CVP_ERR,
+			"Failed to get valid response for sys init\n");
+		return;
+	}
+	core = get_cvp_core(response->device_id);
+	if (!core) {
+		dprintk(CVP_ERR, "Wrong device_id received\n");
+		return;
+	}
+	complete(&core->completions[
+			SYS_MSG_INDEX(HAL_SYS_RELEASE_RESOURCE_DONE)]);
+}
+
+void change_cvp_inst_state(struct msm_cvp_inst *inst, enum instance_state state)
+{
+	if (!inst) {
+		dprintk(CVP_ERR, "Invalid parameter %s\n", __func__);
+		return;
+	}
+	mutex_lock(&inst->lock);
+	if (inst->state == MSM_CVP_CORE_INVALID) {
+		dprintk(CVP_SESS,
+			"Inst: %pK is in bad state can't change state to %d\n",
+			inst, state);
+		goto exit;
+	}
+	dprintk(CVP_SESS, "Moved inst: %pK from state: %d to state: %d\n",
+		   inst, inst->state, state);
+	inst->state = state;
+exit:
+	mutex_unlock(&inst->lock);
+}
+
+static int signal_session_msg_receipt(enum hal_command_response cmd,
+		struct msm_cvp_inst *inst)
+{
+	if (!inst) {
+		dprintk(CVP_ERR, "Invalid(%pK) instance id\n", inst);
+		return -EINVAL;
+	}
+	if (IS_HAL_SESSION_CMD(cmd)) {
+		complete(&inst->completions[SESSION_MSG_INDEX(cmd)]);
+	} else {
+		dprintk(CVP_ERR, "Invalid inst cmd response: %d\n", cmd);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int wait_for_sess_signal_receipt(struct msm_cvp_inst *inst,
+	enum hal_command_response cmd)
+{
+	int rc = 0;
+	struct cvp_hfi_device *hdev;
+
+	if (!IS_HAL_SESSION_CMD(cmd)) {
+		dprintk(CVP_ERR, "Invalid inst cmd response: %d\n", cmd);
+		return -EINVAL;
+	}
+	hdev = (struct cvp_hfi_device *)(inst->core->device);
+	rc = wait_for_completion_timeout(
+		&inst->completions[SESSION_MSG_INDEX(cmd)],
+		msecs_to_jiffies(
+			inst->core->resources.msm_cvp_hw_rsp_timeout));
+	if (!rc) {
+		dprintk(CVP_WARN, "Wait interrupted or timed out: %d\n",
+				SESSION_MSG_INDEX(cmd));
+		call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
+		dump_hfi_queue(hdev->hfi_device_data);
+		rc = -EIO;
+	} else {
+		rc = 0;
+	}
+	return rc;
+}
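
Reviewer note: the `if (!rc)` branch above is the timeout path because
wait_for_completion_timeout() returns 0 when the timeout elapses and the
remaining jiffies (> 0) when the completion fires, hence rc being normalized
to -EIO or 0. Minimal illustration of that convention:

    /* Kernel context: <linux/completion.h>, <linux/jiffies.h>. */
    static int wait_done_example(struct completion *done)
    {
        unsigned long left;

        left = wait_for_completion_timeout(done, msecs_to_jiffies(100));
        if (!left)
            return -ETIMEDOUT;	/* 0 => the 100 ms elapsed */
        return 0;		/* > 0 => jiffies left on the timer */
    }
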
+
+static int wait_for_state(struct msm_cvp_inst *inst,
+	enum instance_state flipped_state,
+	enum instance_state desired_state,
+	enum hal_command_response hal_cmd)
+{
+	int rc = 0;
+
+	if (IS_ALREADY_IN_STATE(flipped_state, desired_state)) {
+		dprintk(CVP_INFO, "inst: %pK is already in state: %d\n",
+						inst, inst->state);
+		goto err_same_state;
+	}
+	dprintk(CVP_SESS, "Waiting for hal_cmd: %d\n", hal_cmd);
+	rc = wait_for_sess_signal_receipt(inst, hal_cmd);
+	if (!rc)
+		change_cvp_inst_state(inst, desired_state);
+err_same_state:
+	return rc;
+}
+
+void msm_cvp_notify_event(struct msm_cvp_inst *inst, int event_type)
+{
+}
+
+static void msm_cvp_comm_generate_max_clients_error(struct msm_cvp_inst *inst)
+{
+	enum hal_command_response cmd = HAL_SESSION_ERROR;
+	struct msm_cvp_cb_cmd_done response = {0};
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s: invalid input parameters\n", __func__);
+		return;
+	}
+	dprintk(CVP_ERR, "%s: Too many clients\n", __func__);
+	response.session_id = inst;
+	response.status = CVP_ERR_MAX_CLIENTS;
+	handle_session_error(cmd, (void *)&response);
+}
+
+static void handle_session_init_done(enum hal_command_response cmd, void *data)
+{
+	struct msm_cvp_cb_cmd_done *response = data;
+	struct msm_cvp_inst *inst = NULL;
+
+	if (!response) {
+		dprintk(CVP_ERR,
+				"Failed to get valid response for session init\n");
+		return;
+	}
+
+	inst = cvp_get_inst(get_cvp_core(response->device_id),
+		response->session_id);
+
+	if (!inst) {
+		dprintk(CVP_WARN, "%s:Got a response for an inactive session\n",
+				__func__);
+		return;
+	}
+
+	if (response->status) {
+		dprintk(CVP_ERR,
+			"Session init response from FW : %#x\n",
+			response->status);
+		if (response->status == CVP_ERR_MAX_CLIENTS)
+			msm_cvp_comm_generate_max_clients_error(inst);
+		else
+			msm_cvp_comm_generate_session_error(inst);
+
+		signal_session_msg_receipt(cmd, inst);
+		cvp_put_inst(inst);
+		return;
+	}
+
+	dprintk(CVP_SESS, "%s: cvp session %#x\n", __func__,
+		hash32_ptr(inst->session));
+
+	signal_session_msg_receipt(cmd, inst);
+	cvp_put_inst(inst);
+}
+
+static void handle_event_change(enum hal_command_response cmd, void *data)
+{
+	dprintk(CVP_WARN, "%s is not supported on CVP!\n", __func__);
+}
+
+static void handle_release_res_done(enum hal_command_response cmd, void *data)
+{
+	struct msm_cvp_cb_cmd_done *response = data;
+	struct msm_cvp_inst *inst;
+
+	if (!response) {
+		dprintk(CVP_ERR,
+			"Failed to get valid response for release resource\n");
+		return;
+	}
+
+	inst = cvp_get_inst(get_cvp_core(response->device_id),
+			response->session_id);
+	if (!inst) {
+		dprintk(CVP_WARN, "%s:Got a response for an inactive session\n",
+				__func__);
+		return;
+	}
+
+	signal_session_msg_receipt(cmd, inst);
+	cvp_put_inst(inst);
+}
+
+static void handle_session_flush(enum hal_command_response cmd, void *data)
+{
+	struct msm_cvp_cb_cmd_done *response = data;
+	struct msm_cvp_inst *inst;
+
+	if (!response) {
+		dprintk(CVP_ERR,
+			"Failed to get valid response for release resource\n");
+		return;
+	}
+
+	inst = cvp_get_inst(get_cvp_core(response->device_id),
+			response->session_id);
+	if (!inst) {
+		dprintk(CVP_WARN, "%s:Got a response for an inactive session\n",
+				__func__);
+		return;
+	}
+
+	signal_session_msg_receipt(cmd, inst);
+	cvp_put_inst(inst);
+}
+
+static void handle_session_error(enum hal_command_response cmd, void *data)
+{
+	struct msm_cvp_cb_cmd_done *response = data;
+	struct cvp_hfi_device *hdev = NULL;
+	struct msm_cvp_inst *inst = NULL;
+	int event = CVP_SYS_ERROR_EVENT;
+
+	if (!response) {
+		dprintk(CVP_ERR,
+			"Failed to get valid response for session error\n");
+		return;
+	}
+
+	inst = cvp_get_inst(get_cvp_core(response->device_id),
+			response->session_id);
+	if (!inst) {
+		dprintk(CVP_WARN, "%s: response for an inactive session\n",
+				__func__);
+		return;
+	}
+
+	hdev = inst->core->device;
+	dprintk(CVP_ERR, "Session error received for inst %pK session %x\n",
+		inst, hash32_ptr(inst->session));
+
+	if (response->status == CVP_ERR_MAX_CLIENTS) {
+		dprintk(CVP_WARN, "Too many clients, rejecting %pK", inst);
+		event = CVP_MAX_CLIENTS_EVENT;
+
+		/*
+		 * Clean the HFI session now. Since inst->state is moved to
+		 * INVALID, forward thread doesn't know FW has valid session
+		 * or not. This is the last place driver knows that there is
+		 * no session in FW. Hence clean HFI session now.
+		 */
+
+		msm_cvp_comm_session_clean(inst);
+	} else if (response->status == CVP_ERR_NOT_SUPPORTED) {
+		dprintk(CVP_WARN, "Unsupported bitstream in %pK", inst);
+		event = CVP_HW_UNSUPPORTED_EVENT;
+	} else {
+		dprintk(CVP_WARN, "Unknown session error (%d) for %pK\n",
+				response->status, inst);
+		event = CVP_SYS_ERROR_EVENT;
+	}
+
+	/* change state before sending error to client */
+	change_cvp_inst_state(inst, MSM_CVP_CORE_INVALID);
+	msm_cvp_notify_event(inst, event);
+	cvp_put_inst(inst);
+}
+
+static void msm_comm_clean_notify_client(struct msm_cvp_core *core)
+{
+	struct msm_cvp_inst *inst = NULL;
+
+	if (!core) {
+		dprintk(CVP_ERR, "%s: Invalid params\n", __func__);
+		return;
+	}
+
+	dprintk(CVP_WARN, "%s: Core %pK\n", __func__, core);
+	mutex_lock(&core->lock);
+
+	list_for_each_entry(inst, &core->instances, list) {
+		mutex_lock(&inst->lock);
+		inst->state = MSM_CVP_CORE_INVALID;
+		mutex_unlock(&inst->lock);
+		dprintk(CVP_WARN,
+			"%s Send sys error for inst %pK\n", __func__, inst);
+		msm_cvp_notify_event(inst,
+				CVP_SYS_ERROR_EVENT);
+	}
+	mutex_unlock(&core->lock);
+}
+
+static void handle_sys_error(enum hal_command_response cmd, void *data)
+{
+	struct msm_cvp_cb_cmd_done *response = data;
+	struct msm_cvp_core *core = NULL;
+	struct cvp_hfi_device *hdev = NULL;
+	struct msm_cvp_inst *inst = NULL;
+	int rc = 0;
+	unsigned long flags = 0;
+	enum cvp_core_state cur_state;
+
+	subsystem_crashed("evass");
+	if (!response) {
+		dprintk(CVP_ERR,
+			"Failed to get valid response for sys error\n");
+		return;
+	}
+
+	core = get_cvp_core(response->device_id);
+	if (!core) {
+		dprintk(CVP_ERR,
+				"Got SYS_ERR but unable to identify core\n");
+		return;
+	}
+	hdev = core->device;
+
+	mutex_lock(&core->lock);
+	if (core->state == CVP_CORE_UNINIT) {
+		dprintk(CVP_ERR,
+			"%s: Core %pK already moved to state %d\n",
+			 __func__, core, core->state);
+		mutex_unlock(&core->lock);
+		return;
+	}
+
+	cur_state = core->state;
+	core->state = CVP_CORE_UNINIT;
+	dprintk(CVP_WARN, "SYS_ERROR received for core %pK\n", core);
+	msm_cvp_noc_error_info(core);
+	call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
+	list_for_each_entry(inst, &core->instances, list) {
+		dprintk(CVP_WARN,
+			"%s: sys error inst %#x kref %x, cmd %x state %x\n",
+				__func__, inst, kref_read(&inst->kref),
+				inst->cur_cmd_type, inst->state);
+		if (inst->state != MSM_CVP_CORE_INVALID) {
+			change_cvp_inst_state(inst, MSM_CVP_CORE_INVALID);
+			spin_lock_irqsave(&inst->event_handler.lock, flags);
+			inst->event_handler.event = CVP_SSR_EVENT;
+			spin_unlock_irqrestore(
+				&inst->event_handler.lock, flags);
+			wake_up_all(&inst->event_handler.wq);
+		}
+
+		if (!core->trigger_ssr)
+			msm_cvp_print_inst_bufs(inst);
+	}
+
+	/* handle the hw error before core released to get full debug info */
+	msm_cvp_handle_hw_error(core);
+	if (response->status == CVP_ERR_NOC_ERROR) {
+		dprintk(CVP_WARN, "Got NOC error");
+		MSM_CVP_ERROR(true);
+	}
+
+	dprintk(CVP_CORE, "Calling core_release\n");
+	rc = call_hfi_op(hdev, core_release, hdev->hfi_device_data);
+	if (rc) {
+		dprintk(CVP_ERR, "core_release failed\n");
+		core->state = cur_state;
+		mutex_unlock(&core->lock);
+		return;
+	}
+	mutex_unlock(&core->lock);
+
+	dprintk(CVP_WARN, "SYS_ERROR handled.\n");
+}
+
+void msm_cvp_comm_session_clean(struct msm_cvp_inst *inst)
+{
+	int rc = 0;
+	struct cvp_hfi_device *hdev = NULL;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(CVP_ERR, "%s invalid params\n", __func__);
+		return;
+	}
+	if (!inst->session) {
+		dprintk(CVP_SESS, "%s: inst %pK session already cleaned\n",
+			__func__, inst);
+		return;
+	}
+
+	hdev = inst->core->device;
+	mutex_lock(&inst->lock);
+	dprintk(CVP_SESS, "%s: inst %pK\n", __func__, inst);
+	rc = call_hfi_op(hdev, session_clean,
+			(void *)inst->session);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"Session clean failed :%pK\n", inst);
+	}
+	inst->session = NULL;
+	mutex_unlock(&inst->lock);
+}
+
+static void handle_session_close(enum hal_command_response cmd, void *data)
+{
+	struct msm_cvp_cb_cmd_done *response = data;
+	struct msm_cvp_inst *inst;
+
+	if (!response) {
+		dprintk(CVP_ERR,
+			"Failed to get valid response for session close\n");
+		return;
+	}
+
+	inst = cvp_get_inst(get_cvp_core(response->device_id),
+			response->session_id);
+	if (!inst) {
+		dprintk(CVP_WARN, "%s: response for an inactive session\n",
+				__func__);
+		return;
+	}
+
+	signal_session_msg_receipt(cmd, inst);
+	show_stats(inst);
+	cvp_put_inst(inst);
+}
+
+void cvp_handle_cmd_response(enum hal_command_response cmd, void *data)
+{
+	dprintk(CVP_HFI, "Command response = %d\n", cmd);
+	switch (cmd) {
+	case HAL_SYS_INIT_DONE:
+		handle_sys_init_done(cmd, data);
+		break;
+	case HAL_SYS_RELEASE_RESOURCE_DONE:
+		handle_sys_release_res_done(cmd, data);
+		break;
+	case HAL_SESSION_INIT_DONE:
+		handle_session_init_done(cmd, data);
+		break;
+	case HAL_SESSION_RELEASE_RESOURCE_DONE:
+		handle_release_res_done(cmd, data);
+		break;
+	case HAL_SESSION_END_DONE:
+	case HAL_SESSION_ABORT_DONE:
+		handle_session_close(cmd, data);
+		break;
+	case HAL_SESSION_EVENT_CHANGE:
+		handle_event_change(cmd, data);
+		break;
+	case HAL_SESSION_FLUSH_DONE:
+		handle_session_flush(cmd, data);
+		break;
+	case HAL_SYS_WATCHDOG_TIMEOUT:
+	case HAL_SYS_ERROR:
+		handle_sys_error(cmd, data);
+		break;
+	case HAL_SESSION_ERROR:
+		handle_session_error(cmd, data);
+		break;
+	case HAL_SESSION_SET_BUFFER_DONE:
+		handle_session_set_buf_done(cmd, data);
+		break;
+	case HAL_SESSION_RELEASE_BUFFER_DONE:
+		handle_session_release_buf_done(cmd, data);
+		break;
+	case HAL_SESSION_DFS_CONFIG_CMD_DONE:
+	case HAL_SESSION_DFS_FRAME_CMD_DONE:
+	case HAL_SESSION_DME_CONFIG_CMD_DONE:
+	case HAL_SESSION_DME_BASIC_CONFIG_CMD_DONE:
+	case HAL_SESSION_DME_FRAME_CMD_DONE:
+	case HAL_SESSION_PERSIST_SET_DONE:
+	case HAL_SESSION_PERSIST_REL_DONE:
+	case HAL_SESSION_TME_CONFIG_CMD_DONE:
+	case HAL_SESSION_ODT_CONFIG_CMD_DONE:
+	case HAL_SESSION_OD_CONFIG_CMD_DONE:
+	case HAL_SESSION_NCC_CONFIG_CMD_DONE:
+	case HAL_SESSION_ICA_CONFIG_CMD_DONE:
+	case HAL_SESSION_HCD_CONFIG_CMD_DONE:
+	case HAL_SESSION_DCM_CONFIG_CMD_DONE:
+	case HAL_SESSION_DC_CONFIG_CMD_DONE:
+	case HAL_SESSION_PYS_HCD_CONFIG_CMD_DONE:
+	case HAL_SESSION_FD_CONFIG_CMD_DONE:
+	case HAL_SESSION_MODEL_BUF_CMD_DONE:
+	case HAL_SESSION_ICA_FRAME_CMD_DONE:
+	case HAL_SESSION_FD_FRAME_CMD_DONE:
+		cvp_handle_session_cmd_done(cmd, data);
+		break;
+	default:
+		dprintk(CVP_HFI, "response unhandled: %d\n", cmd);
+		break;
+	}
+}
+
+static inline enum msm_cvp_thermal_level msm_comm_cvp_thermal_level(int level)
+{
+	switch (level) {
+	case 0:
+		return CVP_THERMAL_NORMAL;
+	case 1:
+		return CVP_THERMAL_LOW;
+	case 2:
+		return CVP_THERMAL_HIGH;
+	default:
+		return CVP_THERMAL_CRITICAL;
+	}
+}
+
+static bool is_core_turbo(struct msm_cvp_core *core, unsigned long freq)
+{
+	int i = 0;
+	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
+	u32 max_freq = 0;
+
+	allowed_clks_tbl = core->resources.allowed_clks_tbl;
+	for (i = 0; i < core->resources.allowed_clks_tbl_size; i++) {
+		if (max_freq < allowed_clks_tbl[i].clock_rate)
+			max_freq = allowed_clks_tbl[i].clock_rate;
+	}
+	return freq >= max_freq;
+}
+
+static bool is_thermal_permissible(struct msm_cvp_core *core)
+{
+	enum msm_cvp_thermal_level tl;
+	unsigned long freq = 0;
+	bool is_turbo = false;
+
+	if (!core->resources.thermal_mitigable)
+		return true;
+
+	if (msm_cvp_thermal_mitigation_disabled) {
+		dprintk(CVP_CORE,
+			"Thermal mitigation not enabled. debugfs %d\n",
+			msm_cvp_thermal_mitigation_disabled);
+		return true;
+	}
+
+	tl = msm_comm_cvp_thermal_level(cvp_driver->thermal_level);
+	freq = core->curr_freq;
+
+	is_turbo = is_core_turbo(core, freq);
+	dprintk(CVP_CORE,
+		"Core freq %ld Thermal level %d Turbo mode %d\n",
+		freq, tl, is_turbo);
+
+	if (is_turbo && tl >= CVP_THERMAL_LOW) {
+		dprintk(CVP_ERR,
+			"CVP session not allowed. Turbo mode %d Thermal level %d\n",
+			is_turbo, tl);
+		return false;
+	}
+	return true;
+}
+
+static int msm_comm_session_abort(struct msm_cvp_inst *inst)
+{
+	int rc = 0, abort_completion = 0;
+	struct cvp_hfi_device *hdev;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(CVP_ERR, "%s invalid params\n", __func__);
+		return -EINVAL;
+	}
+	hdev = inst->core->device;
+	abort_completion = SESSION_MSG_INDEX(HAL_SESSION_ABORT_DONE);
+
+	dprintk(CVP_WARN, "%s: inst %pK session %x\n", __func__,
+		inst, hash32_ptr(inst->session));
+	rc = call_hfi_op(hdev, session_abort, (void *)inst->session);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"%s session_abort failed rc: %d\n", __func__, rc);
+		goto exit;
+	}
+	rc = wait_for_completion_timeout(
+			&inst->completions[abort_completion],
+			msecs_to_jiffies(
+				inst->core->resources.msm_cvp_hw_rsp_timeout));
+	if (!rc) {
+		dprintk(CVP_ERR, "%s: inst %pK session %x abort timed out\n",
+				__func__, inst, hash32_ptr(inst->session));
+		call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
+		dump_hfi_queue(hdev->hfi_device_data);
+		msm_cvp_comm_generate_sys_error(inst);
+		rc = -EBUSY;
+	} else {
+		rc = 0;
+	}
+exit:
+	return rc;
+}
+
+static void handle_thermal_event(struct msm_cvp_core *core)
+{
+	int rc = 0;
+	struct msm_cvp_inst *inst;
+
+	if (!core || !core->device) {
+		dprintk(CVP_ERR, "%s Invalid params\n", __func__);
+		return;
+	}
+	mutex_lock(&core->lock);
+	list_for_each_entry(inst, &core->instances, list) {
+		if (!inst->session)
+			continue;
+
+		mutex_unlock(&core->lock);
+		if (inst->state >= MSM_CVP_OPEN_DONE &&
+			inst->state < MSM_CVP_CLOSE_DONE) {
+			dprintk(CVP_WARN, "%s: abort inst %pK\n",
+				__func__, inst);
+			rc = msm_comm_session_abort(inst);
+			if (rc) {
+				dprintk(CVP_ERR,
+					"%s session_abort failed rc: %d\n",
+					__func__, rc);
+				goto err_sess_abort;
+			}
+			change_cvp_inst_state(inst, MSM_CVP_CORE_INVALID);
+			dprintk(CVP_WARN,
+				"%s Send sys error for inst %pK\n",
+				__func__, inst);
+			msm_cvp_notify_event(inst,
+					CVP_SYS_ERROR_EVENT);
+		} else {
+			msm_cvp_comm_generate_session_error(inst);
+		}
+		mutex_lock(&core->lock);
+	}
+	mutex_unlock(&core->lock);
+	return;
+
+err_sess_abort:
+	msm_comm_clean_notify_client(core);
+}
+
+void msm_cvp_comm_handle_thermal_event(void)
+{
+	struct msm_cvp_core *core;
+
+	list_for_each_entry(core, &cvp_driver->cores, list) {
+		if (!is_thermal_permissible(core)) {
+			dprintk(CVP_WARN,
+				"Thermal level critical, stop all active sessions!\n");
+			handle_thermal_event(core);
+		}
+	}
+}
+
+int msm_cvp_comm_check_core_init(struct msm_cvp_core *core)
+{
+	int rc = 0;
+	struct cvp_hfi_device *hdev;
+
+	mutex_lock(&core->lock);
+	if (core->state >= CVP_CORE_INIT_DONE) {
+		dprintk(CVP_INFO, "CVP core: %d is already in state: %d\n",
+				core->id, core->state);
+		goto exit;
+	}
+	dprintk(CVP_CORE, "Waiting for SYS_INIT_DONE\n");
+	rc = wait_for_completion_timeout(
+		&core->completions[SYS_MSG_INDEX(HAL_SYS_INIT_DONE)],
+		msecs_to_jiffies(core->resources.msm_cvp_hw_rsp_timeout));
+	if (!rc) {
+		dprintk(CVP_ERR, "%s: Wait interrupted or timed out: %d\n",
+				__func__, SYS_MSG_INDEX(HAL_SYS_INIT_DONE));
+		hdev = core->device;
+		call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
+		dump_hfi_queue(hdev->hfi_device_data);
+		rc = -EIO;
+		goto exit;
+	} else {
+		core->state = CVP_CORE_INIT_DONE;
+		rc = 0;
+	}
+	dprintk(CVP_CORE, "SYS_INIT_DONE!!!\n");
+exit:
+	mutex_unlock(&core->lock);
+	return rc;
+}
+
+static int msm_comm_init_core_done(struct msm_cvp_inst *inst)
+{
+	int rc = 0;
+
+	rc = msm_cvp_comm_check_core_init(inst->core);
+	if (rc) {
+		dprintk(CVP_ERR, "%s - failed to initialize core\n", __func__);
+		msm_cvp_comm_generate_sys_error(inst);
+		return rc;
+	}
+	change_cvp_inst_state(inst, MSM_CVP_CORE_INIT_DONE);
+	return rc;
+}
+
+static int msm_comm_init_core(struct msm_cvp_inst *inst)
+{
+	int rc = 0;
+	struct cvp_hfi_device *hdev;
+	struct msm_cvp_core *core;
+
+	if (!inst || !inst->core || !inst->core->device)
+		return -EINVAL;
+
+	core = inst->core;
+	hdev = core->device;
+	mutex_lock(&core->lock);
+	if (core->state >= CVP_CORE_INIT) {
+		dprintk(CVP_CORE, "CVP core: %d is already in state: %d\n",
+				core->id, core->state);
+		goto core_already_inited;
+	}
+	if (!core->capabilities) {
+		core->capabilities = kcalloc(CVP_MAX_SESSIONS,
+				sizeof(struct msm_cvp_capability), GFP_KERNEL);
+		if (!core->capabilities) {
+			dprintk(CVP_ERR,
+				"%s: failed to allocate capabilities\n",
+				__func__);
+			rc = -ENOMEM;
+			goto fail_cap_alloc;
+		}
+	} else {
+		dprintk(CVP_WARN,
+			"%s: capabilities memory is expected to be freed\n",
+			__func__);
+	}
+	dprintk(CVP_CORE, "%s: core %pK\n", __func__, core);
+	rc = call_hfi_op(hdev, core_init, hdev->hfi_device_data);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to init core, id = %d\n",
+				core->id);
+		goto fail_core_init;
+	}
+	core->state = CVP_CORE_INIT;
+	core->smmu_fault_handled = false;
+	core->trigger_ssr = false;
+
+core_already_inited:
+	change_cvp_inst_state(inst, MSM_CVP_CORE_INIT);
+	mutex_unlock(&core->lock);
+
+	return rc;
+
+fail_core_init:
+	kfree(core->capabilities);
+fail_cap_alloc:
+	core->capabilities = NULL;
+	core->state = CVP_CORE_UNINIT;
+	mutex_unlock(&core->lock);
+	return rc;
+}
+
+int msm_cvp_deinit_core(struct msm_cvp_inst *inst)
+{
+	struct msm_cvp_core *core;
+	struct cvp_hfi_device *hdev;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(CVP_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	core = inst->core;
+	hdev = core->device;
+
+	mutex_lock(&core->lock);
+	if (core->state == CVP_CORE_UNINIT) {
+		dprintk(CVP_INFO, "CVP core: %d is already in state: %d\n",
+				core->id, core->state);
+		goto core_already_uninited;
+	}
+
+	if (!core->resources.never_unload_fw) {
+		cancel_delayed_work(&core->fw_unload_work);
+
+		/*
+		 * Delay unloading of firmware. This is useful
+		 * in avoiding firmware download delays in cases where we
+		 * will have a burst of back to back cvp sessions
+		 */
+		schedule_delayed_work(&core->fw_unload_work,
+			msecs_to_jiffies(core->state == CVP_CORE_INIT_DONE ?
+			core->resources.msm_cvp_firmware_unload_delay : 0));
+
+		dprintk(CVP_CORE, "firmware unload delayed by %u ms\n",
+			core->state == CVP_CORE_INIT_DONE ?
+			core->resources.msm_cvp_firmware_unload_delay : 0);
+	}
+
+core_already_uninited:
+	change_cvp_inst_state(inst, MSM_CVP_CORE_UNINIT);
+	mutex_unlock(&core->lock);
+	return 0;
+}
+
+static int msm_comm_session_init_done(int flipped_state,
+	struct msm_cvp_inst *inst)
+{
+	int rc;
+
+	dprintk(CVP_SESS, "inst %pK: waiting for session init done\n", inst);
+	rc = wait_for_state(inst, flipped_state, MSM_CVP_OPEN_DONE,
+			HAL_SESSION_INIT_DONE);
+	if (rc) {
+		dprintk(CVP_ERR, "Session init failed for inst %pK\n", inst);
+		msm_cvp_comm_generate_sys_error(inst);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int msm_comm_session_init(int flipped_state,
+	struct msm_cvp_inst *inst)
+{
+	int rc = 0;
+	struct cvp_hfi_device *hdev;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(CVP_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+	hdev = inst->core->device;
+
+	if (IS_ALREADY_IN_STATE(flipped_state, MSM_CVP_OPEN)) {
+		dprintk(CVP_INFO, "inst: %pK is already in state: %d\n",
+						inst, inst->state);
+		goto exit;
+	}
+
+	dprintk(CVP_SESS, "%s: inst %pK\n", __func__, inst);
+	rc = call_hfi_op(hdev, session_init, hdev->hfi_device_data,
+			inst, &inst->session);
+
+	if (rc || !inst->session) {
+		dprintk(CVP_ERR,
+			"Failed to call session init for: %pK, %pK, %d\n",
+			inst->core->device, inst, inst->session_type);
+		rc = -EINVAL;
+		goto exit;
+	}
+	change_cvp_inst_state(inst, MSM_CVP_OPEN);
+
+exit:
+	return rc;
+}
+
+static int msm_comm_session_close(int flipped_state,
+			struct msm_cvp_inst *inst)
+{
+	int rc = 0;
+	struct cvp_hfi_device *hdev;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(CVP_ERR, "%s invalid params\n", __func__);
+		return -EINVAL;
+	}
+	if (IS_ALREADY_IN_STATE(flipped_state, MSM_CVP_CLOSE)) {
+		dprintk(CVP_INFO,
+			"inst: %pK is already in state: %d\n",
+						inst, inst->state);
+		goto exit;
+	}
+	hdev = inst->core->device;
+	dprintk(CVP_SESS, "%s: inst %pK\n", __func__, inst);
+	rc = call_hfi_op(hdev, session_end, (void *) inst->session);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"Failed to send close\n");
+		goto exit;
+	}
+	change_cvp_inst_state(inst, MSM_CVP_CLOSE);
+exit:
+	return rc;
+}
+
+int msm_cvp_comm_suspend(int core_id)
+{
+	struct cvp_hfi_device *hdev;
+	struct msm_cvp_core *core;
+	int rc = 0;
+
+	core = get_cvp_core(core_id);
+	if (!core) {
+		dprintk(CVP_ERR,
+			"%s: Failed to find core for core_id = %d\n",
+			__func__, core_id);
+		return -EINVAL;
+	}
+
+	hdev = (struct cvp_hfi_device *)core->device;
+	if (!hdev) {
+		dprintk(CVP_ERR, "%s Invalid device handle\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = call_hfi_op(hdev, suspend, hdev->hfi_device_data);
+	if (rc)
+		dprintk(CVP_WARN, "Failed to suspend\n");
+
+	return rc;
+}
+
+static int get_flipped_state(int present_state, int desired_state)
+{
+	int flipped_state = present_state;
+
+	if (flipped_state < MSM_CVP_CLOSE && desired_state > MSM_CVP_CLOSE) {
+		flipped_state = MSM_CVP_CLOSE + (MSM_CVP_CLOSE - flipped_state);
+		flipped_state &= 0xFFFE;
+		flipped_state = flipped_state - 1;
+	} else if (flipped_state > MSM_CVP_CLOSE
+			&& desired_state < MSM_CVP_CLOSE) {
+		flipped_state = MSM_CVP_CLOSE -
+			(flipped_state - MSM_CVP_CLOSE + 1);
+		flipped_state &= 0xFFFE;
+		flipped_state = flipped_state - 1;
+	}
+	return flipped_state;
+}
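
Reviewer note: get_flipped_state() mirrors the current state across
MSM_CVP_CLOSE so that the single switch ladder in msm_cvp_comm_try_state()
serves both bring-up and teardown. A worked trace with hypothetical enum
values (the real values live in msm_cvp_internal.h, outside this patch):

    /*
     * Assume MSM_CVP_OPEN = 4, MSM_CVP_CLOSE = 8, MSM_CVP_CORE_UNINIT = 12.
     *
     * Teardown (present 4, desired 12):
     *   flipped = 8 + (8 - 4) = 12;  12 & 0xFFFE = 12;  12 - 1 = 11
     *
     * Bring-up (present 12, desired 4):
     *   flipped = 8 - (12 - 8 + 1) = 3;  3 & 0xFFFE = 2;  2 - 1 = 1
     *
     * The mirrored, odd-adjusted value picks the entry case of the ladder.
     */
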
+
+int msm_cvp_comm_try_state(struct msm_cvp_inst *inst, int state)
+{
+	int rc = 0;
+	int flipped_state;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s: invalid params %pK", __func__, inst);
+		return -EINVAL;
+	}
+	dprintk(CVP_SESS,
+		"Trying to move inst: %pK (%#x) from: %#x to %#x\n",
+		inst, hash32_ptr(inst->session), inst->state, state);
+
+	mutex_lock(&inst->sync_lock);
+	if (inst->state == MSM_CVP_CORE_INVALID) {
+		dprintk(CVP_ERR, "%s: inst %pK is in invalid\n",
+			__func__, inst);
+		mutex_unlock(&inst->sync_lock);
+		return -EINVAL;
+	}
+
+	flipped_state = get_flipped_state(inst->state, state);
+	dprintk(CVP_SESS,
+		"inst: %pK (%#x) flipped_state = %#x %x\n",
+		inst, hash32_ptr(inst->session), flipped_state, state);
+	switch (flipped_state) {
+	case MSM_CVP_CORE_UNINIT_DONE:
+	case MSM_CVP_CORE_INIT:
+		rc = msm_comm_init_core(inst);
+		if (rc || state <= get_flipped_state(inst->state, state))
+			break;
+	case MSM_CVP_CORE_INIT_DONE:
+		rc = msm_comm_init_core_done(inst);
+		if (rc || state <= get_flipped_state(inst->state, state))
+			break;
+	case MSM_CVP_OPEN:
+		rc = msm_comm_session_init(flipped_state, inst);
+		if (rc || state <= get_flipped_state(inst->state, state))
+			break;
+	case MSM_CVP_OPEN_DONE:
+		rc = msm_comm_session_init_done(flipped_state, inst);
+		if (rc || state <= get_flipped_state(inst->state, state))
+			break;
+	case MSM_CVP_CLOSE:
+		dprintk(CVP_INFO, "to CVP_CLOSE state\n");
+		rc = msm_comm_session_close(flipped_state, inst);
+		if (rc || state <= get_flipped_state(inst->state, state))
+			break;
+	case MSM_CVP_CLOSE_DONE:
+		dprintk(CVP_INFO, "to CVP_CLOSE_DONE state\n");
+		rc = wait_for_state(inst, flipped_state, MSM_CVP_CLOSE_DONE,
+				HAL_SESSION_END_DONE);
+		if (rc || state <= get_flipped_state(inst->state, state))
+			break;
+		msm_cvp_comm_session_clean(inst);
+	case MSM_CVP_CORE_UNINIT:
+	case MSM_CVP_CORE_INVALID:
+		dprintk(CVP_INFO, "Sending core uninit\n");
+		rc = msm_cvp_deinit_core(inst);
+		if (rc || state <= get_flipped_state(inst->state, state))
+			break;
+	default:
+		dprintk(CVP_ERR, "State not recognized\n");
+		rc = -EINVAL;
+		break;
+	}
+
+	mutex_unlock(&inst->sync_lock);
+
+	if (rc) {
+		dprintk(CVP_ERR,
+				"Failed to move from state: %d to %d\n",
+				inst->state, state);
+		msm_cvp_comm_kill_session(inst);
+	}
+	return rc;
+}
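
Reviewer note: the missing breaks between cases in the switch above are
deliberate; each case advances one step and falls through to the next until
the target state is reached or an error breaks out. On kernels that provide
the `fallthrough` pseudo-keyword (and build with -Wimplicit-fallthrough), the
ladder would want explicit annotations, e.g. this fragment of the same
ladder:

    case MSM_CVP_OPEN:
        rc = msm_comm_session_init(flipped_state, inst);
        if (rc || state <= get_flipped_state(inst->state, state))
            break;
        fallthrough;	/* keep stepping toward the target state */
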
+
+int msm_cvp_noc_error_info(struct msm_cvp_core *core)
+{
+	struct cvp_hfi_device *hdev;
+
+	if (!core || !core->device) {
+		dprintk(CVP_WARN, "%s: Invalid parameters: %pK\n",
+			__func__, core);
+		return -EINVAL;
+	}
+
+	if (!core->resources.non_fatal_pagefaults)
+		return 0;
+
+	if (!core->smmu_fault_handled)
+		return 0;
+
+	hdev = core->device;
+	call_hfi_op(hdev, noc_error_info, hdev->hfi_device_data);
+
+	return 0;
+}
+
+int msm_cvp_trigger_ssr(struct msm_cvp_core *core,
+	enum hal_ssr_trigger_type type)
+{
+	if (!core) {
+		dprintk(CVP_WARN, "%s: Invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+	core->ssr_type = type;
+	schedule_work(&core->ssr_work);
+	return 0;
+}
+
+void msm_cvp_ssr_handler(struct work_struct *work)
+{
+	int rc;
+	struct msm_cvp_core *core;
+	struct cvp_hfi_device *hdev;
+
+	core = container_of(work, struct msm_cvp_core, ssr_work);
+	if (!core || !core->device) {
+		dprintk(CVP_ERR, "%s: Invalid params\n", __func__);
+		return;
+	}
+	hdev = core->device;
+
+	if (core->ssr_type == SSR_SESSION_ABORT) {
+		struct msm_cvp_inst *inst = NULL, *s;
+
+		dprintk(CVP_ERR, "Session abort triggered\n");
+		list_for_each_entry(inst, &core->instances, list) {
+			dprintk(CVP_WARN,
+				"Session to abort: inst %#x cmd %x ref %x\n",
+				inst, inst->cur_cmd_type,
+				kref_read(&inst->kref));
+			break;
+		}
+
+		if (inst != NULL) {
+			s = cvp_get_inst_validate(inst->core, inst);
+			if (!s)
+				return;
+
+			call_hfi_op(hdev, flush_debug_queue,
+				hdev->hfi_device_data);
+			dump_hfi_queue(hdev->hfi_device_data);
+			msm_cvp_comm_kill_session(inst);
+			cvp_put_inst(s);
+		} else {
+			dprintk(CVP_WARN, "No active CVP session to abort\n");
+		}
+
+		return;
+	}
+
+send_again:
+	mutex_lock(&core->lock);
+	if (core->state == CVP_CORE_INIT_DONE) {
+		dprintk(CVP_WARN, "%s: ssr type %d\n", __func__,
+			core->ssr_type);
+		/*
+		 * In current implementation user-initiated SSR triggers
+		 * a fatal error from hardware. However, there is no way
+		 * to know if fatal error is due to SSR or not. Handle
+		 * user SSR as non-fatal.
+		 */
+		core->trigger_ssr = true;
+		rc = call_hfi_op(hdev, core_trigger_ssr,
+				hdev->hfi_device_data, core->ssr_type);
+		if (rc) {
+			if (rc == -EAGAIN) {
+				core->trigger_ssr = false;
+				mutex_unlock(&core->lock);
+				usleep_range(500, 1000);
+				dprintk(CVP_WARN, "Retry ssr\n");
+				goto send_again;
+			}
+			dprintk(CVP_ERR, "%s: trigger_ssr failed\n",
+				__func__);
+			core->trigger_ssr = false;
+		}
+	} else {
+		dprintk(CVP_WARN, "%s: cvp core %pK not initialized\n",
+			__func__, core);
+	}
+	mutex_unlock(&core->lock);
+}
+
+void msm_cvp_comm_generate_session_error(struct msm_cvp_inst *inst)
+{
+	enum hal_command_response cmd = HAL_SESSION_ERROR;
+	struct msm_cvp_cb_cmd_done response = {0};
+
+	if (!inst || !inst->core) {
+		dprintk(CVP_ERR, "%s: invalid input parameters\n", __func__);
+		return;
+	}
+	dprintk(CVP_WARN, "%s: inst %pK\n", __func__, inst);
+	response.session_id = inst;
+	response.status = CVP_ERR_FAIL;
+	handle_session_error(cmd, (void *)&response);
+}
+
+void msm_cvp_comm_generate_sys_error(struct msm_cvp_inst *inst)
+{
+	struct msm_cvp_core *core;
+	enum hal_command_response cmd = HAL_SYS_ERROR;
+	struct msm_cvp_cb_cmd_done response  = {0};
+
+	if (!inst || !inst->core) {
+		dprintk(CVP_ERR, "%s: invalid input parameters\n", __func__);
+		return;
+	}
+	dprintk(CVP_WARN, "%s: inst %pK\n", __func__, inst);
+	core = inst->core;
+	response.device_id = (u32) core->id;
+	handle_sys_error(cmd, (void *) &response);
+}
+
+int msm_cvp_comm_kill_session(struct msm_cvp_inst *inst)
+{
+	int rc = 0;
+	unsigned long flags = 0;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(CVP_ERR, "%s: invalid input parameters\n", __func__);
+		return -EINVAL;
+	} else if (!inst->session) {
+		dprintk(CVP_ERR, "%s: no session to kill for inst %pK\n",
+			__func__, inst);
+		return 0;
+	}
+	dprintk(CVP_WARN, "%s: inst %pK, session %x state %d\n", __func__,
+		inst, hash32_ptr(inst->session), inst->state);
+	/*
+	 * We're internally forcibly killing the session, if fw is aware of
+	 * the session send session_abort to firmware to clean up and release
+	 * the session, else just kill the session inside the driver.
+	 */
+	if (inst->state >= MSM_CVP_OPEN_DONE &&
+			inst->state < MSM_CVP_CLOSE_DONE) {
+		rc = msm_comm_session_abort(inst);
+		if (rc) {
+			dprintk(CVP_ERR,
+				"%s: inst %pK session %x abort failed\n",
+				__func__, inst, hash32_ptr(inst->session));
+			change_cvp_inst_state(inst, MSM_CVP_CORE_INVALID);
+		} else {
+			change_cvp_inst_state(inst, MSM_CVP_CORE_UNINIT);
+		}
+	}
+
+	if (inst->state >= MSM_CVP_CORE_UNINIT) {
+		spin_lock_irqsave(&inst->event_handler.lock, flags);
+		inst->event_handler.event = CVP_SSR_EVENT;
+		spin_unlock_irqrestore(&inst->event_handler.lock, flags);
+		wake_up_all(&inst->event_handler.wq);
+	}
+
+	return rc;
+}
+
+void msm_cvp_fw_unload_handler(struct work_struct *work)
+{
+	struct msm_cvp_core *core = NULL;
+	struct cvp_hfi_device *hdev = NULL;
+	int rc = 0;
+
+	core = container_of(work, struct msm_cvp_core, fw_unload_work.work);
+	if (!core || !core->device) {
+		dprintk(CVP_ERR, "%s - invalid work or core handle\n",
+				__func__);
+		return;
+	}
+
+	hdev = core->device;
+
+	mutex_lock(&core->lock);
+	if (list_empty(&core->instances) &&
+		core->state != CVP_CORE_UNINIT) {
+		if (core->state > CVP_CORE_INIT) {
+			dprintk(CVP_CORE, "Calling cvp_hal_core_release\n");
+			rc = call_hfi_op(hdev, core_release,
+					hdev->hfi_device_data);
+			if (rc) {
+				dprintk(CVP_ERR,
+					"Failed to release core, id = %d\n",
+					core->id);
+				mutex_unlock(&core->lock);
+				return;
+			}
+		}
+		core->state = CVP_CORE_UNINIT;
+		kfree(core->capabilities);
+		core->capabilities = NULL;
+	}
+	mutex_unlock(&core->lock);
+}
+
+static int set_internal_buf_on_fw(struct msm_cvp_inst *inst,
+				struct msm_cvp_smem *handle)
+{
+	struct cvp_hfi_device *hdev;
+	int rc = 0;
+	u32 iova;
+	u32 size;
+
+	if (!inst || !inst->core || !inst->core->device || !handle) {
+		dprintk(CVP_ERR, "%s - invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	hdev = inst->core->device;
+
+	iova = handle->device_addr;
+	size = handle->size;
+
+	dprintk(CVP_SESS, "%s: allocated ARP buffer : %x\n", __func__, iova);
+
+	rc = call_hfi_op(hdev, session_set_buffers,
+			(void *) inst->session, iova, size);
+	if (rc) {
+		dprintk(CVP_ERR, "cvp_session_set_buffers failed\n");
+		return rc;
+	}
+	return 0;
+}
+
+/* Set ARP buffer for CVP firmware to handle concurrency */
+int cvp_comm_set_arp_buffers(struct msm_cvp_inst *inst)
+{
+	int rc = 0;
+	struct cvp_internal_buf *buf;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(CVP_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	buf = cvp_allocate_arp_bufs(inst, ARP_BUF_SIZE);
+	if (!buf) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	rc = set_internal_buf_on_fw(inst, buf->smem);
+	if (rc)
+		goto error;
+
+	rc = wait_for_sess_signal_receipt(inst, HAL_SESSION_SET_BUFFER_DONE);
+	if (rc) {
+		dprintk(CVP_WARN, "wait for set_buffer_done timeout %d\n", rc);
+		goto error;
+	}
+
+	return rc;
+
+error:
+	cvp_release_arp_buffers(inst);
+	return rc;
+}
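
Reviewer note: the ARP handshake is allocate -> session_set_buffers -> wait
for HAL_SESSION_SET_BUFFER_DONE, and every failure funnels through
cvp_release_arp_buffers(), which is safe for both the allocation-failure and
firmware-timeout cases because, per the FW workaround above, release means
release-all. A hypothetical call site during session bring-up:

    rc = cvp_comm_set_arp_buffers(inst);
    if (rc)
        dprintk(CVP_ERR, "ARP setup failed %d, session unusable\n", rc);
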

+ 34 - 0
msm/eva/msm_cvp_common.h

@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+
+#ifndef _MSM_CVP_COMMON_H_
+#define _MSM_CVP_COMMON_H_
+#include "msm_cvp_internal.h"
+
+void cvp_put_inst(struct msm_cvp_inst *inst);
+struct msm_cvp_inst *cvp_get_inst(struct msm_cvp_core *core,
+		void *session_id);
+struct msm_cvp_inst *cvp_get_inst_validate(struct msm_cvp_core *core,
+		void *session_id);
+void cvp_change_inst_state(struct msm_cvp_inst *inst,
+		enum instance_state state);
+struct msm_cvp_core *get_cvp_core(int core_id);
+int msm_cvp_comm_try_state(struct msm_cvp_inst *inst, int state);
+int msm_cvp_deinit_core(struct msm_cvp_inst *inst);
+int msm_cvp_comm_suspend(int core_id);
+void msm_cvp_comm_session_clean(struct msm_cvp_inst *inst);
+int msm_cvp_comm_kill_session(struct msm_cvp_inst *inst);
+void msm_cvp_comm_generate_session_error(struct msm_cvp_inst *inst);
+void msm_cvp_comm_generate_sys_error(struct msm_cvp_inst *inst);
+int msm_cvp_comm_smem_cache_operations(struct msm_cvp_inst *inst,
+		struct msm_cvp_smem *mem, enum smem_cache_ops cache_ops);
+int msm_cvp_comm_check_core_init(struct msm_cvp_core *core);
+int wait_for_sess_signal_receipt(struct msm_cvp_inst *inst,
+	enum hal_command_response cmd);
+int cvp_comm_set_arp_buffers(struct msm_cvp_inst *inst);
+int cvp_comm_release_persist_buffers(struct msm_cvp_inst *inst);
+int msm_cvp_noc_error_info(struct msm_cvp_core *core);
+#endif

+ 394 - 0
msm/eva/msm_cvp_core.c

@@ -0,0 +1,394 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/dma-direction.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include "msm_cvp_core.h"
+#include "msm_cvp_internal.h"
+#include "msm_cvp_debug.h"
+#include "msm_cvp.h"
+#include "msm_cvp_common.h"
+#include <linux/delay.h>
+#include "cvp_hfi_api.h"
+#include "msm_cvp_clocks.h"
+#include <linux/dma-buf.h>
+#include <media/msm_media_info.h>
+
+#define MAX_EVENTS 30
+#define NUM_CYCLES16X16_HCD_FRAME 95
+#define NUM_CYCLES16X16_DME_FRAME 600
+#define NUM_CYCLES16X16_NCC_FRAME 400
+#define NUM_CYCLES16X16_DS_FRAME  80
+#define NUM_CYCLESFW_FRAME  1680000
+#define NUM_DME_MAX_FEATURE_POINTS 500
+#define CYCLES_MARGIN_IN_POWEROF2 3
+
+int msm_cvp_poll(void *instance, struct file *filp,
+		struct poll_table_struct *wait)
+{
+	return 0;
+}
+EXPORT_SYMBOL(msm_cvp_poll);
+
+int msm_cvp_private(void *cvp_inst, unsigned int cmd,
+		struct cvp_kmd_arg *arg)
+{
+	int rc = 0;
+	struct msm_cvp_inst *inst = (struct msm_cvp_inst *)cvp_inst;
+
+	if (!inst || !arg) {
+		dprintk(CVP_ERR, "%s: invalid args\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = msm_cvp_handle_syscall(inst, arg);
+
+	return rc;
+}
+EXPORT_SYMBOL(msm_cvp_private);
+
+static bool msm_cvp_check_for_inst_overload(struct msm_cvp_core *core)
+{
+	u32 instance_count = 0;
+	u32 secure_instance_count = 0;
+	struct msm_cvp_inst *inst = NULL;
+	bool overload = false;
+
+	mutex_lock(&core->lock);
+	list_for_each_entry(inst, &core->instances, list) {
+		instance_count++;
+		/* This flag is not updated yet for the current instance */
+		if (inst->flags & CVP_SECURE)
+			secure_instance_count++;
+	}
+	mutex_unlock(&core->lock);
+
+	/* Instance count includes current instance as well. */
+
+	if ((instance_count >= core->resources.max_inst_count) ||
+		(secure_instance_count >=
+			core->resources.max_secure_inst_count))
+		overload = true;
+	return overload;
+}
+
+static int __init_session_queue(struct msm_cvp_inst *inst)
+{
+	spin_lock_init(&inst->session_queue.lock);
+	INIT_LIST_HEAD(&inst->session_queue.msgs);
+	inst->session_queue.msg_count = 0;
+	init_waitqueue_head(&inst->session_queue.wq);
+	inst->session_queue.state = QUEUE_ACTIVE;
+	return 0;
+}
+
+static void __init_fence_queue(struct msm_cvp_inst *inst)
+{
+	mutex_init(&inst->fence_cmd_queue.lock);
+	INIT_LIST_HEAD(&inst->fence_cmd_queue.wait_list);
+	INIT_LIST_HEAD(&inst->fence_cmd_queue.sched_list);
+	init_waitqueue_head(&inst->fence_cmd_queue.wq);
+	inst->fence_cmd_queue.state = QUEUE_ACTIVE;
+	inst->fence_cmd_queue.mode = OP_NORMAL;
+
+	spin_lock_init(&inst->session_queue_fence.lock);
+	INIT_LIST_HEAD(&inst->session_queue_fence.msgs);
+	inst->session_queue_fence.msg_count = 0;
+	init_waitqueue_head(&inst->session_queue_fence.wq);
+	inst->session_queue_fence.state = QUEUE_ACTIVE;
+}
+
+static void __deinit_fence_queue(struct msm_cvp_inst *inst)
+{
+	mutex_destroy(&inst->fence_cmd_queue.lock);
+	inst->fence_cmd_queue.state = QUEUE_INVALID;
+	inst->fence_cmd_queue.mode = OP_INVALID;
+}
+
+static void __deinit_session_queue(struct msm_cvp_inst *inst)
+{
+	struct cvp_session_msg *msg, *tmpmsg;
+
+	/* free all messages */
+	spin_lock(&inst->session_queue.lock);
+	list_for_each_entry_safe(msg, tmpmsg, &inst->session_queue.msgs, node) {
+		list_del_init(&msg->node);
+		kmem_cache_free(cvp_driver->msg_cache, msg);
+	}
+	inst->session_queue.msg_count = 0;
+	inst->session_queue.state = QUEUE_STOP;
+	spin_unlock(&inst->session_queue.lock);
+
+	wake_up_all(&inst->session_queue.wq);
+}
+
+void *msm_cvp_open(int core_id, int session_type)
+{
+	struct msm_cvp_inst *inst = NULL;
+	struct msm_cvp_core *core = NULL;
+	int rc = 0;
+	int i = 0;
+
+	if (core_id >= MSM_CVP_CORES_MAX ||
+			session_type >= MSM_CVP_MAX_DEVICES) {
+		dprintk(CVP_ERR, "Invalid input, core_id = %d, session = %d\n",
+			core_id, session_type);
+		goto err_invalid_core;
+	}
+	core = get_cvp_core(core_id);
+	if (!core) {
+		dprintk(CVP_ERR,
+			"Failed to find core for core_id = %d\n", core_id);
+		goto err_invalid_core;
+	}
+
+	if (!core->resources.auto_pil && session_type == MSM_CVP_BOOT) {
+		dprintk(CVP_SESS, "Auto PIL disabled, bypass CVP init at boot");
+		goto err_invalid_core;
+	}
+
+	core->resources.max_inst_count = MAX_SUPPORTED_INSTANCES;
+	if (msm_cvp_check_for_inst_overload(core)) {
+		dprintk(CVP_ERR, "Instance num reached Max, rejecting session");
+		mutex_lock(&core->lock);
+		list_for_each_entry(inst, &core->instances, list)
+			dprintk(CVP_ERR, "inst %pK, cmd %d id %d\n",
+				inst, inst->cur_cmd_type,
+				hash32_ptr(inst->session));
+		mutex_unlock(&core->lock);
+
+		return NULL;
+	}
+
+	inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+	if (!inst) {
+		dprintk(CVP_ERR, "Failed to allocate memory\n");
+		rc = -ENOMEM;
+		goto err_invalid_core;
+	}
+
+	pr_info(CVP_DBG_TAG "Opening cvp instance: %pK\n", "sess", inst);
+	mutex_init(&inst->sync_lock);
+	mutex_init(&inst->lock);
+	spin_lock_init(&inst->event_handler.lock);
+
+	INIT_MSM_CVP_LIST(&inst->persistbufs);
+	INIT_DMAMAP_CACHE(&inst->dma_cache);
+	INIT_MSM_CVP_LIST(&inst->cvpdspbufs);
+	INIT_MSM_CVP_LIST(&inst->frames);
+
+	init_waitqueue_head(&inst->event_handler.wq);
+
+	kref_init(&inst->kref);
+
+	inst->session_type = session_type;
+	inst->state = MSM_CVP_CORE_UNINIT_DONE;
+	inst->core = core;
+	inst->clk_data.min_freq = 0;
+	inst->clk_data.curr_freq = 0;
+	inst->clk_data.ddr_bw = 0;
+	inst->clk_data.sys_cache_bw = 0;
+	inst->clk_data.bitrate = 0;
+	inst->clk_data.core_id = 0;
+
+	for (i = SESSION_MSG_INDEX(SESSION_MSG_START);
+		i <= SESSION_MSG_INDEX(SESSION_MSG_END); i++) {
+		init_completion(&inst->completions[i]);
+	}
+
+	msm_cvp_session_init(inst);
+
+	mutex_lock(&core->lock);
+	list_add_tail(&inst->list, &core->instances);
+	mutex_unlock(&core->lock);
+
+	__init_fence_queue(inst);
+
+	rc = __init_session_queue(inst);
+	if (rc)
+		goto fail_init;
+
+	rc = msm_cvp_comm_try_state(inst, MSM_CVP_CORE_INIT_DONE);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"Failed to move cvp instance to init state\n");
+		goto fail_init;
+	}
+
+	inst->debugfs_root =
+		msm_cvp_debugfs_init_inst(inst, core->debugfs_root);
+
+	return inst;
+fail_init:
+	__deinit_session_queue(inst);
+	mutex_lock(&core->lock);
+	list_del(&inst->list);
+	mutex_unlock(&core->lock);
+	mutex_destroy(&inst->sync_lock);
+	mutex_destroy(&inst->lock);
+
+	DEINIT_MSM_CVP_LIST(&inst->persistbufs);
+	DEINIT_DMAMAP_CACHE(&inst->dma_cache);
+	DEINIT_MSM_CVP_LIST(&inst->cvpdspbufs);
+	DEINIT_MSM_CVP_LIST(&inst->frames);
+
+	kfree(inst);
+	inst = NULL;
+err_invalid_core:
+	return inst;
+}
+EXPORT_SYMBOL(msm_cvp_open);
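+
+/*
+ * Illustrative pairing (hypothetical caller, not part of this change):
+ *
+ *	void *inst = msm_cvp_open(MSM_CORE_CVP, MSM_CVP_KERNEL);
+ *
+ *	if (inst)
+ *		msm_cvp_close(inst);
+ */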
+
+static void msm_cvp_clean_sess_queue(struct msm_cvp_inst *inst,
+		struct cvp_session_queue *sq)
+{
+	struct cvp_session_msg *mptr, *dummy;
+	u64 ktid;
+
+	spin_lock(&sq->lock);
+	if (sq->msg_count && sq->state != QUEUE_ACTIVE) {
+		list_for_each_entry_safe(mptr, dummy, &sq->msgs, node) {
+			ktid = mptr->pkt.client_data.kdata;
+			if (ktid) {
+				list_del_init(&mptr->node);
+				sq->msg_count--;
+				msm_cvp_unmap_frame(inst, ktid);
+				kmem_cache_free(cvp_driver->msg_cache, mptr);
+			}
+		}
+	}
+	spin_unlock(&sq->lock);
+}
+
+static void msm_cvp_cleanup_instance(struct msm_cvp_inst *inst)
+{
+	bool empty;
+	int max_retries;
+	struct msm_cvp_frame *frame;
+	struct cvp_session_queue *sq, *sqf;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return;
+	}
+
+	sqf = &inst->session_queue_fence;
+	sq = &inst->session_queue;
+
+	max_retries =  inst->core->resources.msm_cvp_hw_rsp_timeout >> 1;
+	msm_cvp_session_queue_stop(inst);
+
+wait:
+	mutex_lock(&inst->frames.lock);
+	empty = list_empty(&inst->frames.list);
+	if (!empty && max_retries > 0) {
+		mutex_unlock(&inst->frames.lock);
+		usleep_range(1000, 2000);
+		msm_cvp_clean_sess_queue(inst, sqf);
+		msm_cvp_clean_sess_queue(inst, sq);
+		max_retries--;
+		goto wait;
+	}
+	mutex_unlock(&inst->frames.lock);
+
+	if (!empty) {
+		dprintk(CVP_WARN,
+			"Failed to process frames before session close\n");
+		mutex_lock(&inst->frames.lock);
+		list_for_each_entry(frame, &inst->frames.list, list)
+			dprintk(CVP_WARN, "Unprocessed frame %d\n",
+				frame->pkt_type);
+		mutex_unlock(&inst->frames.lock);
+		cvp_dump_fence_queue(inst);
+	}
+
+	if (cvp_release_arp_buffers(inst))
+		dprintk(CVP_ERR,
+			"Failed to release persist buffers\n");
+}
+
+int msm_cvp_destroy(struct msm_cvp_inst *inst)
+{
+	struct msm_cvp_core *core;
+
+	if (!inst || !inst->core) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	core = inst->core;
+
+	mutex_lock(&core->lock);
+	/* inst->list lives in core->instances */
+	list_del(&inst->list);
+	mutex_unlock(&core->lock);
+
+	DEINIT_MSM_CVP_LIST(&inst->persistbufs);
+	DEINIT_DMAMAP_CACHE(&inst->dma_cache);
+	DEINIT_MSM_CVP_LIST(&inst->cvpdspbufs);
+	DEINIT_MSM_CVP_LIST(&inst->frames);
+
+	mutex_destroy(&inst->sync_lock);
+	mutex_destroy(&inst->lock);
+
+	msm_cvp_debugfs_deinit_inst(inst);
+
+	__deinit_session_queue(inst);
+	__deinit_fence_queue(inst);
+	synx_uninitialize(inst->synx_session_id);
+
+	pr_info(CVP_DBG_TAG "Closed cvp instance: %pK session_id = %d\n",
+		"sess", inst, hash32_ptr(inst->session));
+	if (inst->cur_cmd_type)
+		dprintk(CVP_ERR, "deleted instance has pending cmd %d\n",
+				inst->cur_cmd_type);
+	inst->session = (void *)0xdeadbeef;
+	kfree(inst);
+	return 0;
+}
+
+static void close_helper(struct kref *kref)
+{
+	struct msm_cvp_inst *inst = container_of(kref,
+			struct msm_cvp_inst, kref);
+
+	msm_cvp_destroy(inst);
+}
+
+int msm_cvp_close(void *instance)
+{
+	struct msm_cvp_inst *inst = instance;
+	int rc = 0;
+
+	if (!inst || !inst->core) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	if (inst->session_type != MSM_CVP_BOOT) {
+		msm_cvp_cleanup_instance(inst);
+		msm_cvp_session_deinit(inst);
+	}
+
+	rc = msm_cvp_comm_try_state(inst, MSM_CVP_CORE_UNINIT);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"Failed to move inst %pK to uninit state\n", inst);
+		rc = msm_cvp_deinit_core(inst);
+	}
+
+	msm_cvp_comm_session_clean(inst);
+
+	kref_put(&inst->kref, close_helper);
+	return 0;
+}
+EXPORT_SYMBOL(msm_cvp_close);
+
+int msm_cvp_suspend(int core_id)
+{
+	return msm_cvp_comm_suspend(core_id);
+}
+EXPORT_SYMBOL(msm_cvp_suspend);

+ 40 - 0
msm/eva/msm_cvp_core.h

@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _MSM_CVP_CORE_H_
+#define _MSM_CVP_CORE_H_
+
+#include <linux/poll.h>
+#include <linux/types.h>
+#include <linux/dma-buf.h>
+#include <linux/ion.h>
+#include <linux/msm_ion.h>
+#include <linux/refcount.h>
+#include <media/msm_cvp_private.h>
+#include "msm_cvp_buf.h"
+#include "msm_cvp_synx.h"
+
+enum core_id {
+	MSM_CORE_CVP = 0,
+	MSM_CVP_CORES_MAX,
+};
+
+enum session_type {
+	MSM_CVP_USER = 1,
+	MSM_CVP_KERNEL,
+	MSM_CVP_BOOT,
+	MSM_CVP_UNKNOWN,
+	MSM_CVP_MAX_DEVICES = MSM_CVP_UNKNOWN,
+};
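+
+/*
+ * Note: MSM_CVP_MAX_DEVICES aliases MSM_CVP_UNKNOWN, so a valid
+ * session_type passed to msm_cvp_open() must be strictly below
+ * MSM_CVP_UNKNOWN; the open path rejects anything at or above it.
+ */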
+
+void *msm_cvp_open(int core_id, int session_type);
+int msm_cvp_close(void *instance);
+int msm_cvp_suspend(int core_id);
+int msm_cvp_poll(void *instance, struct file *filp,
+		struct poll_table_struct *pt);
+int msm_cvp_private(void *cvp_inst, unsigned int cmd,
+		struct cvp_kmd_arg *arg);
+
+#endif

+ 540 - 0
msm/eva/msm_cvp_debug.c

@@ -0,0 +1,540 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include "msm_cvp_debug.h"
+#include "msm_cvp_common.h"
+#include "cvp_core_hfi.h"
+#include "cvp_hfi_api.h"
+
+#define CREATE_TRACE_POINTS
+#define MAX_SSR_STRING_LEN 10
+int msm_cvp_debug = CVP_ERR | CVP_WARN | CVP_FW;
+EXPORT_SYMBOL(msm_cvp_debug);
+
+int msm_cvp_debug_out = CVP_OUT_PRINTK;
+EXPORT_SYMBOL(msm_cvp_debug_out);
+
+int msm_cvp_fw_debug = 0x18;
+int msm_cvp_fw_debug_mode = 1;
+int msm_cvp_fw_low_power_mode = 1;
+bool msm_cvp_fw_coverage = !true;
+bool msm_cvp_thermal_mitigation_disabled = !true;
+bool msm_cvp_cacheop_disabled = true;
+int msm_cvp_clock_voting = !1;
+bool msm_cvp_syscache_disable = !true;
+bool msm_cvp_dsp_disable = !true;
+
+#define MAX_DBG_BUF_SIZE 4096
+
+struct cvp_core_inst_pair {
+	struct msm_cvp_core *core;
+	struct msm_cvp_inst *inst;
+};
+
+static int core_info_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	dprintk(CVP_INFO, "%s: Enter\n", __func__);
+	return 0;
+}
+
+static u32 write_str(char *buffer,
+		size_t size, const char *fmt, ...)
+{
+	va_list args;
+	u32 len;
+
+	va_start(args, fmt);
+	len = vscnprintf(buffer, size, fmt, args);
+	va_end(args);
+	return len;
+}
+
+static ssize_t core_info_read(struct file *file, char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	struct msm_cvp_core *core = file->private_data;
+	struct cvp_hfi_device *hdev;
+	struct cvp_hal_fw_info fw_info = { {0} };
+	char *dbuf, *cur, *end;
+	int i = 0, rc = 0;
+	ssize_t len = 0;
+
+	if (!core || !core->device) {
+		dprintk(CVP_ERR, "Invalid params, core: %pK\n", core);
+		return 0;
+	}
+
+	dbuf = kzalloc(MAX_DBG_BUF_SIZE, GFP_KERNEL);
+	if (!dbuf) {
+		dprintk(CVP_ERR, "%s: Allocation failed!\n", __func__);
+		return -ENOMEM;
+	}
+	cur = dbuf;
+	end = cur + MAX_DBG_BUF_SIZE;
+	hdev = core->device;
+
+	cur += write_str(cur, end - cur, "===============================\n");
+	cur += write_str(cur, end - cur, "CORE %d: %pK\n", core->id, core);
+	cur += write_str(cur, end - cur, "===============================\n");
+	cur += write_str(cur, end - cur, "Core state: %d\n", core->state);
+	rc = call_hfi_op(hdev, get_fw_info, hdev->hfi_device_data, &fw_info);
+	if (rc) {
+		dprintk(CVP_WARN, "Failed to read FW info\n");
+		goto err_fw_info;
+	}
+
+	cur += write_str(cur, end - cur,
+		"FW version : %s\n", &fw_info.version);
+	cur += write_str(cur, end - cur,
+		"base addr: 0x%x\n", fw_info.base_addr);
+	cur += write_str(cur, end - cur,
+		"register_base: 0x%x\n", fw_info.register_base);
+	cur += write_str(cur, end - cur,
+		"register_size: %u\n", fw_info.register_size);
+	cur += write_str(cur, end - cur, "irq: %u\n", fw_info.irq);
+
+err_fw_info:
+	for (i = SYS_MSG_START; i < SYS_MSG_END; i++) {
+		cur += write_str(cur, end - cur, "completions[%d]: %s\n", i,
+			completion_done(&core->completions[SYS_MSG_INDEX(i)]) ?
+			"pending" : "done");
+	}
+	len = simple_read_from_buffer(buf, count, ppos,
+			dbuf, cur - dbuf);
+
+	kfree(dbuf);
+	return len;
+}
+
+static const struct file_operations core_info_fops = {
+	.open = core_info_open,
+	.read = core_info_read,
+};
+
+static int trigger_ssr_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	dprintk(CVP_INFO, "%s: Enter\n", __func__);
+	return 0;
+}
+
+static ssize_t trigger_ssr_write(struct file *filp, const char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	unsigned long ssr_trigger_val = 0;
+	int rc = 0;
+	struct msm_cvp_core *core = filp->private_data;
+	size_t size = MAX_SSR_STRING_LEN;
+	char kbuf[MAX_SSR_STRING_LEN + 1] = {0};
+
+	if (!buf)
+		return -EINVAL;
+
+	if (!count)
+		goto exit;
+
+	if (count < size)
+		size = count;
+
+	if (copy_from_user(kbuf, buf, size)) {
+		dprintk(CVP_WARN, "%s User memory fault\n", __func__);
+		rc = -EFAULT;
+		goto exit;
+	}
+
+	rc = kstrtoul(kbuf, 0, &ssr_trigger_val);
+	if (rc) {
+		dprintk(CVP_WARN, "returning error err %d\n", rc);
+		rc = -EINVAL;
+	} else {
+		msm_cvp_trigger_ssr(core, ssr_trigger_val);
+		rc = count;
+	}
+exit:
+	return rc;
+}
+
+static const struct file_operations ssr_fops = {
+	.open = trigger_ssr_open,
+	.write = trigger_ssr_write,
+};
+
+static int cvp_power_get(void *data, u64 *val)
+{
+	struct cvp_hfi_device *hfi_ops;
+	struct msm_cvp_core *core;
+	struct iris_hfi_device *hfi_device;
+
+	core = list_first_entry(&cvp_driver->cores, struct msm_cvp_core, list);
+	if (!core)
+		return 0;
+	hfi_ops = core->device;
+	if (!hfi_ops)
+		return 0;
+
+	hfi_device = hfi_ops->hfi_device_data;
+	if (!hfi_device)
+		return 0;
+
+	*val = hfi_device->power_enabled;
+	return 0;
+}
+
+#define MIN_PC_INTERVAL 1000
+#define MAX_PC_INTERVAL 1000000
+
+static int cvp_power_set(void *data, u64 val)
+{
+	struct cvp_hfi_device *hfi_ops;
+	struct msm_cvp_core *core;
+	struct iris_hfi_device *hfi_device;
+	int rc = 0;
+
+	core = list_first_entry(&cvp_driver->cores, struct msm_cvp_core, list);
+	if (!core)
+		return -EINVAL;
+
+	hfi_ops = core->device;
+	if (!hfi_ops)
+		return -EINVAL;
+
+	hfi_device = hfi_ops->hfi_device_data;
+	if (!hfi_device)
+		return -EINVAL;
+
+	if (val >= MAX_PC_INTERVAL) {
+		hfi_device->res->sw_power_collapsible = 0;
+	} else if (val > MIN_PC_INTERVAL) {
+		hfi_device->res->sw_power_collapsible = 1;
+		hfi_device->res->msm_cvp_pwr_collapse_delay =
+			(unsigned int)val;
+	}
+
+	if (core->state == CVP_CORE_UNINIT)
+		return -EINVAL;
+
+	if (val > 0) {
+		rc = call_hfi_op(hfi_ops, resume, hfi_ops->hfi_device_data);
+		if (rc)
+			dprintk(CVP_ERR, "debugfs fail to power on cvp\n");
+	}
+	return rc;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(cvp_pwr_fops, cvp_power_get, cvp_power_set, "%llu\n");
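+
+/*
+ * The cvp_power node (created in msm_cvp_debugfs_init_drv below) tunes
+ * software power collapse: a write at or above MAX_PC_INTERVAL disables
+ * it, a write between MIN_PC_INTERVAL and MAX_PC_INTERVAL sets the
+ * collapse delay, and any non-zero write also attempts to resume the
+ * core. Reads report whether power is currently enabled.
+ */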
+
+struct dentry *msm_cvp_debugfs_init_drv(void)
+{
+	bool ok = false;
+	struct dentry *dir = NULL;
+
+	dir = debugfs_create_dir("msm_cvp", NULL);
+	if (IS_ERR_OR_NULL(dir)) {
+		dir = NULL;
+		goto failed_create_dir;
+	}
+
+#define __debugfs_create(__type, __name, __value) ({                          \
+	struct dentry *f = debugfs_create_##__type(__name, 0644,	\
+		dir, __value);                                                \
+	if (IS_ERR_OR_NULL(f)) {                                              \
+		dprintk(CVP_ERR, "Failed creating debugfs file '%pd/%s'\n",  \
+			dir, __name);                                         \
+		f = NULL;                                                     \
+	}                                                                     \
+	f;                                                                    \
+})
+
+	ok =
+	__debugfs_create(x32, "debug_level", &msm_cvp_debug) &&
+	__debugfs_create(x32, "fw_level", &msm_cvp_fw_debug) &&
+	__debugfs_create(u32, "fw_debug_mode", &msm_cvp_fw_debug_mode) &&
+	__debugfs_create(bool, "fw_coverage", &msm_cvp_fw_coverage) &&
+	__debugfs_create(u32, "fw_low_power_mode",
+			&msm_cvp_fw_low_power_mode) &&
+	__debugfs_create(u32, "debug_output", &msm_cvp_debug_out) &&
+	__debugfs_create(bool, "disable_thermal_mitigation",
+			&msm_cvp_thermal_mitigation_disabled) &&
+	__debugfs_create(bool, "disable_cacheop",
+			&msm_cvp_cacheop_disabled) &&
+	__debugfs_create(bool, "disable_cvp_syscache",
+			&msm_cvp_syscache_disable);
+
+#undef __debugfs_create
+
+	if (!ok)
+		goto failed_create_dir;
+
+	debugfs_create_file("cvp_power", 0644, dir, NULL, &cvp_pwr_fops);
+
+	return dir;
+
+failed_create_dir:
+	if (dir)
+		debugfs_remove_recursive(cvp_driver->debugfs_root);
+
+	return NULL;
+}
+
+static int _clk_rate_set(void *data, u64 val)
+{
+	struct msm_cvp_core *core;
+	struct cvp_hfi_device *dev;
+	struct allowed_clock_rates_table *tbl = NULL;
+	unsigned int tbl_size, i;
+
+	core = list_first_entry(&cvp_driver->cores, struct msm_cvp_core, list);
+	dev = core->device;
+	tbl = core->resources.allowed_clks_tbl;
+	tbl_size = core->resources.allowed_clks_tbl_size;
+
+	if (val == 0) {
+		struct iris_hfi_device *hdev = dev->hfi_device_data;
+
+		msm_cvp_clock_voting = 0;
+		call_hfi_op(dev, scale_clocks, hdev, hdev->clk_freq);
+		return 0;
+	}
+
+	for (i = 0; i < tbl_size; i++)
+		if (val <= tbl[i].clock_rate)
+			break;
+
+	if (i == tbl_size)
+		msm_cvp_clock_voting = tbl[tbl_size-1].clock_rate;
+	else
+		msm_cvp_clock_voting = tbl[i].clock_rate;
+
+	dprintk(CVP_WARN, "Override cvp_clk_rate with %d\n",
+			msm_cvp_clock_voting);
+
+	call_hfi_op(dev, scale_clocks, dev->hfi_device_data,
+		msm_cvp_clock_voting);
+
+	return 0;
+}
+
+static int _clk_rate_get(void *data, u64 *val)
+{
+	struct msm_cvp_core *core;
+	struct iris_hfi_device *hdev;
+
+	core = list_first_entry(&cvp_driver->cores, struct msm_cvp_core, list);
+	hdev = core->device->hfi_device_data;
+	if (msm_cvp_clock_voting)
+		*val = msm_cvp_clock_voting;
+	else
+		*val = hdev->clk_freq;
+
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(clk_rate_fops, _clk_rate_get, _clk_rate_set, "%llu\n");
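+
+/*
+ * Illustrative debugfs usage (path assumes the default debugfs mount;
+ * the 466 MHz figure is a hypothetical operating point):
+ *
+ *	echo 466000000 > /sys/kernel/debug/msm_cvp/core0/clock_rate
+ *	cat /sys/kernel/debug/msm_cvp/core0/clock_rate
+ *
+ * Writing 0 clears the override and restores driver-managed scaling; a
+ * non-zero request is mapped onto the allowed clock-rate table before
+ * voting.
+ */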
+
+struct dentry *msm_cvp_debugfs_init_core(struct msm_cvp_core *core,
+		struct dentry *parent)
+{
+	struct dentry *dir = NULL;
+	char debugfs_name[MAX_DEBUGFS_NAME];
+
+	if (!core) {
+		dprintk(CVP_ERR, "Invalid params, core: %pK\n", core);
+		goto failed_create_dir;
+	}
+
+	snprintf(debugfs_name, MAX_DEBUGFS_NAME, "core%d", core->id);
+	dir = debugfs_create_dir(debugfs_name, parent);
+	if (IS_ERR_OR_NULL(dir)) {
+		dir = NULL;
+		dprintk(CVP_ERR, "Failed to create debugfs for msm_cvp\n");
+		goto failed_create_dir;
+	}
+	if (!debugfs_create_file("info", 0444, dir, core, &core_info_fops)) {
+		dprintk(CVP_ERR, "debugfs_create_file: fail\n");
+		goto failed_create_dir;
+	}
+	if (!debugfs_create_file("trigger_ssr", 0200,
+			dir, core, &ssr_fops)) {
+		dprintk(CVP_ERR, "debugfs_create_file: fail\n");
+		goto failed_create_dir;
+	}
+	if (!debugfs_create_file("clock_rate", 0644, dir,
+			NULL, &clk_rate_fops)) {
+		dprintk(CVP_ERR, "debugfs_create_file: clock_rate fail\n");
+		goto failed_create_dir;
+	}
+
+failed_create_dir:
+	return dir;
+}
+
+static int inst_info_open(struct inode *inode, struct file *file)
+{
+	dprintk(CVP_INFO, "Open inode ptr: %pK\n", inode->i_private);
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static int publish_unreleased_reference(struct msm_cvp_inst *inst,
+		char **dbuf, char *end)
+{
+	dprintk(CVP_SESS, "%s deprecated function\n", __func__);
+	return 0;
+}
+
+static void put_inst_helper(struct kref *kref)
+{
+	struct msm_cvp_inst *inst = container_of(kref,
+			struct msm_cvp_inst, kref);
+
+	msm_cvp_destroy(inst);
+}
+
+static ssize_t inst_info_read(struct file *file, char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	struct cvp_core_inst_pair *idata = file->private_data;
+	struct msm_cvp_core *core;
+	struct msm_cvp_inst *inst, *temp = NULL;
+	char *dbuf, *cur, *end;
+	int i;
+	ssize_t len = 0;
+
+	if (!idata || !idata->core || !idata->inst) {
+		dprintk(CVP_ERR, "%s: Invalid params\n", __func__);
+		return 0;
+	}
+
+	core = idata->core;
+	inst = idata->inst;
+
+	mutex_lock(&core->lock);
+	list_for_each_entry(temp, &core->instances, list) {
+		if (temp == inst)
+			break;
+	}
+	inst = ((temp == inst) && kref_get_unless_zero(&inst->kref)) ?
+		inst : NULL;
+	mutex_unlock(&core->lock);
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s: Instance has become obsolete", __func__);
+		return 0;
+	}
+
+	dbuf = kzalloc(MAX_DBG_BUF_SIZE, GFP_KERNEL);
+	if (!dbuf) {
+		dprintk(CVP_ERR, "%s: Allocation failed!\n", __func__);
+		len = -ENOMEM;
+		goto failed_alloc;
+	}
+	cur = dbuf;
+	end = cur + MAX_DBG_BUF_SIZE;
+
+	cur += write_str(cur, end - cur, "==============================\n");
+	cur += write_str(cur, end - cur, "INSTANCE: %pK (%s)\n", inst,
+		inst->session_type == MSM_CVP_USER ? "User" : "Kernel");
+	cur += write_str(cur, end - cur, "==============================\n");
+	cur += write_str(cur, end - cur, "core: %pK\n", inst->core);
+	cur += write_str(cur, end - cur, "state: %d\n", inst->state);
+	cur += write_str(cur, end - cur, "secure: %d\n",
+		!!(inst->flags & CVP_SECURE));
+	for (i = SESSION_MSG_START; i < SESSION_MSG_END; i++) {
+		cur += write_str(cur, end - cur, "completions[%d]: %s\n", i,
+		completion_done(&inst->completions[SESSION_MSG_INDEX(i)]) ?
+		"pending" : "done");
+	}
+
+	publish_unreleased_reference(inst, &cur, end);
+	len = simple_read_from_buffer(buf, count, ppos,
+		dbuf, cur - dbuf);
+
+	kfree(dbuf);
+failed_alloc:
+	kref_put(&inst->kref, put_inst_helper);
+	return len;
+}
+
+static int inst_info_release(struct inode *inode, struct file *file)
+{
+	dprintk(CVP_INFO, "Release inode ptr: %pK\n", inode->i_private);
+	file->private_data = NULL;
+	return 0;
+}
+
+static const struct file_operations inst_info_fops = {
+	.open = inst_info_open,
+	.read = inst_info_read,
+	.release = inst_info_release,
+};
+
+struct dentry *msm_cvp_debugfs_init_inst(struct msm_cvp_inst *inst,
+		struct dentry *parent)
+{
+	struct dentry *dir = NULL, *info = NULL;
+	char debugfs_name[MAX_DEBUGFS_NAME];
+	struct cvp_core_inst_pair *idata = NULL;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "Invalid params, inst: %pK\n", inst);
+		goto exit;
+	}
+	snprintf(debugfs_name, MAX_DEBUGFS_NAME, "inst_%p", inst);
+
+	idata = kzalloc(sizeof(*idata), GFP_KERNEL);
+	if (!idata) {
+		dprintk(CVP_ERR, "%s: Allocation failed!\n", __func__);
+		goto exit;
+	}
+
+	idata->core = inst->core;
+	idata->inst = inst;
+
+	dir = debugfs_create_dir(debugfs_name, parent);
+	if (IS_ERR_OR_NULL(dir)) {
+		dir = NULL;
+		dprintk(CVP_ERR, "Failed to create debugfs for msm_cvp\n");
+		goto failed_create_dir;
+	}
+
+	info = debugfs_create_file("info", 0444, dir,
+			idata, &inst_info_fops);
+	if (!info) {
+		dprintk(CVP_ERR, "debugfs_create_file: info fail\n");
+		goto failed_create_file;
+	}
+
+	dir->d_inode->i_private = info->d_inode->i_private;
+	inst->debug.pdata[FRAME_PROCESSING].sampling = true;
+	return dir;
+
+failed_create_file:
+	debugfs_remove_recursive(dir);
+	dir = NULL;
+failed_create_dir:
+	kfree(idata);
+exit:
+	return dir;
+}
+
+void msm_cvp_debugfs_deinit_inst(struct msm_cvp_inst *inst)
+{
+	struct dentry *dentry = NULL;
+
+	if (!inst || !inst->debugfs_root)
+		return;
+
+	dentry = inst->debugfs_root;
+	if (dentry->d_inode) {
+		dprintk(CVP_INFO, "Destroy %pK\n", dentry->d_inode->i_private);
+		kfree(dentry->d_inode->i_private);
+		dentry->d_inode->i_private = NULL;
+	}
+	debugfs_remove_recursive(dentry);
+	inst->debugfs_root = NULL;
+}

+ 181 - 0
msm/eva/msm_cvp_debug.h

@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __MSM_CVP_DEBUG__
+#define __MSM_CVP_DEBUG__
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include "msm_cvp_internal.h"
+#include "trace/events/msm_cvp_events.h"
+
+#ifndef CVP_DBG_LABEL
+#define CVP_DBG_LABEL "msm_cvp"
+#endif
+
+#define CVP_DBG_TAG CVP_DBG_LABEL ": %4s: "
+
+/*
+ * To enable messages, OR these values together and
+ * echo the result to the debug_level debugfs file.
+ *
+ * To enable all messages, set debug_level = 0x101F.
+ */
+
+enum cvp_msg_prio {
+	CVP_ERR  = 0x000001,
+	CVP_WARN = 0x000002,
+	CVP_INFO = 0x000004,
+	CVP_PROF = 0x000010,
+	CVP_PKT  = 0x000020,
+	CVP_MEM  = 0x000040,
+	CVP_SYNX = 0x000080,
+	CVP_CORE = 0x000100,
+	CVP_REG  = 0x000200,
+	CVP_PWR  = 0x000400,
+	CVP_DSP  = 0x000800,
+	CVP_FW   = 0x001000,
+	CVP_SESS = 0x002000,
+	CVP_HFI  = 0x004000,
+	CVP_DBG  = CVP_MEM | CVP_SYNX | CVP_CORE | CVP_REG |
+		CVP_PWR | CVP_DSP | CVP_SESS | CVP_HFI,
+};
+
+enum cvp_msg_out {
+	CVP_OUT_PRINTK = 0,
+};
+
+enum msm_cvp_debugfs_event {
+	MSM_CVP_DEBUGFS_EVENT_ETB,
+	MSM_CVP_DEBUGFS_EVENT_EBD,
+	MSM_CVP_DEBUGFS_EVENT_FTB,
+	MSM_CVP_DEBUGFS_EVENT_FBD,
+};
+
+extern int msm_cvp_debug;
+extern int msm_cvp_debug_out;
+extern int msm_cvp_fw_debug;
+extern int msm_cvp_fw_debug_mode;
+extern int msm_cvp_fw_low_power_mode;
+extern bool msm_cvp_fw_coverage;
+extern bool msm_cvp_thermal_mitigation_disabled;
+extern bool msm_cvp_cacheop_disabled;
+extern int msm_cvp_clock_voting;
+extern bool msm_cvp_syscache_disable;
+extern bool msm_cvp_dsp_disable;
+
+#define dprintk(__level, __fmt, arg...)	\
+	do { \
+		if (msm_cvp_debug & __level) { \
+			if (msm_cvp_debug_out == CVP_OUT_PRINTK) { \
+				pr_info(CVP_DBG_TAG __fmt, \
+					get_debug_level_str(__level),	\
+					## arg); \
+			} \
+		} \
+	} while (0)
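+
+/*
+ * Illustrative use: the message below is emitted only when CVP_SESS is
+ * set in the debug_level mask, e.g.
+ *
+ *	dprintk(CVP_SESS, "session %pK opened\n", inst);
+ */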
+
+#define MSM_CVP_ERROR(value)					\
+	do {	if (value)					\
+			dprintk(CVP_ERR, "BugOn");		\
+		WARN_ON(value);					\
+	} while (0)
+
+
+struct dentry *msm_cvp_debugfs_init_drv(void);
+struct dentry *msm_cvp_debugfs_init_core(struct msm_cvp_core *core,
+		struct dentry *parent);
+struct dentry *msm_cvp_debugfs_init_inst(struct msm_cvp_inst *inst,
+		struct dentry *parent);
+void msm_cvp_debugfs_deinit_inst(struct msm_cvp_inst *inst);
+
+static inline char *get_debug_level_str(int level)
+{
+	switch (level) {
+	case CVP_ERR:
+		return "err";
+	case CVP_WARN:
+		return "warn";
+	case CVP_INFO:
+		return "info";
+	case CVP_DBG:
+		return "dbg";
+	case CVP_PROF:
+		return "prof";
+	case CVP_PKT:
+		return "pkt";
+	case CVP_MEM:
+		return "mem";
+	case CVP_SYNX:
+		return "synx";
+	case CVP_CORE:
+		return "core";
+	case CVP_REG:
+		return "reg";
+	case CVP_PWR:
+		return "pwr";
+	case CVP_DSP:
+		return "dsp";
+	case CVP_FW:
+		return "fw";
+	case CVP_SESS:
+		return "sess";
+	case CVP_HFI:
+		return "hfi";
+	default:
+		return "???";
+	}
+}
+
+static inline void show_stats(struct msm_cvp_inst *i)
+{
+	int x;
+
+	for (x = 0; x < MAX_PROFILING_POINTS; x++) {
+		if (i->debug.pdata[x].name[0] &&
+				(msm_cvp_debug & CVP_PROF)) {
+			if (i->debug.samples) {
+				dprintk(CVP_PROF, "%s averaged %d ms/sample\n",
+						i->debug.pdata[x].name,
+						i->debug.pdata[x].cumulative /
+						i->debug.samples);
+			}
+
+			dprintk(CVP_PROF, "%s Samples: %d\n",
+					i->debug.pdata[x].name,
+					i->debug.samples);
+		}
+	}
+}
+
+static inline void msm_cvp_res_handle_fatal_hw_error(
+	struct msm_cvp_platform_resources *resources,
+	bool enable_fatal)
+{
+	enable_fatal &= resources->debug_timeout;
+	MSM_CVP_ERROR(enable_fatal);
+}
+
+static inline void msm_cvp_handle_hw_error(struct msm_cvp_core *core)
+{
+	bool enable_fatal = true;
+
+	/*
+	 * In current implementation user-initiated SSR triggers
+	 * a fatal error from hardware. However, there is no way
+	 * to know if fatal error is due to SSR or not. Handle
+	 * user SSR as non-fatal.
+	 */
+	if (core->trigger_ssr) {
+		core->trigger_ssr = false;
+		enable_fatal = false;
+	}
+
+	/*
+	 * The CVP driver can decide FATAL handling of HW errors
+	 * based on multiple factors. This condition check will
+	 * be enhanced later.
+	 */
+	msm_cvp_res_handle_fatal_hw_error(&core->resources, enable_fatal);
+}
+
+#endif

+ 599 - 0
msm/eva/msm_cvp_dsp.c

@@ -0,0 +1,599 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+#include <linux/module.h>
+#include <linux/rpmsg.h>
+#include <linux/of_platform.h>
+#include <linux/of_fdt.h>
+#include <soc/qcom/secure_buffer.h>
+#include "msm_cvp_dsp.h"
+#include "msm_cvp_internal.h"
+
+struct cvp_dsp_apps gfa_cv;
+static int hlosVM[HLOS_VM_NUM] = {VMID_HLOS};
+static int dspVM[DSP_VM_NUM] = {VMID_HLOS, VMID_CDSP_Q6};
+static int dspVMperm[DSP_VM_NUM] = { PERM_READ | PERM_WRITE | PERM_EXEC,
+				PERM_READ | PERM_WRITE | PERM_EXEC };
+static int hlosVMperm[HLOS_VM_NUM] = { PERM_READ | PERM_WRITE | PERM_EXEC };
+
+static int cvp_dsp_send_cmd(struct cvp_dsp_cmd_msg *cmd, uint32_t len)
+{
+	int rc = 0;
+	struct cvp_dsp_apps *me = &gfa_cv;
+
+	dprintk(CVP_DSP, "%s: cmd = %d\n", __func__, cmd->type);
+
+	if (IS_ERR_OR_NULL(me->chan)) {
+		dprintk(CVP_ERR, "%s: DSP GLink is not ready\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+	rc = rpmsg_send(me->chan->ept, cmd, len);
+	if (rc) {
+		dprintk(CVP_ERR, "%s: DSP rpmsg_send failed rc=%d\n",
+			__func__, rc);
+		goto exit;
+	}
+
+exit:
+	return rc;
+}
+
+static int cvp_dsp_send_cmd_sync(struct cvp_dsp_cmd_msg *cmd, uint32_t len)
+{
+	int rc = 0;
+	struct cvp_dsp_apps *me = &gfa_cv;
+
+	dprintk(CVP_DSP, "%s: cmd = %d\n", __func__, cmd->type);
+
+	me->pending_dsp2cpu_rsp.type = cmd->type;
+	rc = cvp_dsp_send_cmd(cmd, len);
+	if (rc) {
+		dprintk(CVP_ERR, "%s: cvp_dsp_send_cmd failed rc=%d\n",
+			__func__, rc);
+		goto exit;
+	}
+
+	if (!wait_for_completion_timeout(&me->completions[cmd->type],
+			msecs_to_jiffies(CVP_DSP_RESPONSE_TIMEOUT))) {
+		dprintk(CVP_ERR, "%s cmd %d timeout\n", __func__, cmd->type);
+		rc = -ETIMEDOUT;
+		goto exit;
+	}
+
+exit:
+	me->pending_dsp2cpu_rsp.type = CVP_INVALID_RPMSG_TYPE;
+	return rc;
+}
+
+static int cvp_dsp_send_cmd_hfi_queue(phys_addr_t *phys_addr,
+					uint32_t size_in_bytes)
+{
+	int rc = 0;
+	struct cvp_dsp_cmd_msg cmd;
+
+	cmd.type = CPU2DSP_SEND_HFI_QUEUE;
+	cmd.msg_ptr = (uint64_t)phys_addr;
+	cmd.msg_ptr_len = size_in_bytes;
+	cmd.ddr_type = of_fdt_get_ddrtype();
+	if (cmd.ddr_type < 0) {
+		dprintk(CVP_ERR,
+			"%s: Incorrect DDR type value %d\n",
+			__func__, cmd.ddr_type);
+		return -EINVAL;
+	}
+
+	dprintk(CVP_DSP,
+		"%s: address of buffer, PA=0x%pK  size_buff=%d ddr_type=%d\n",
+		__func__, phys_addr, size_in_bytes, cmd.ddr_type);
+
+	rc = cvp_dsp_send_cmd_sync(&cmd, sizeof(struct cvp_dsp_cmd_msg));
+	if (rc) {
+		dprintk(CVP_ERR,
+			"%s: cvp_dsp_send_cmd failed rc = %d\n",
+			__func__, rc);
+		goto exit;
+	}
+exit:
+	return rc;
+}
+
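+/*
+ * The shared HFI queue is handed to the DSP by reassigning its physical
+ * range between the HLOS and CDSP virtual machines; me->hyp_assigned
+ * tracks ownership so each direction is applied at most once.
+ */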
+static int cvp_hyp_assign_to_dsp(uint64_t addr, uint32_t size)
+{
+	int rc = 0;
+	struct cvp_dsp_apps *me = &gfa_cv;
+
+	if (!me->hyp_assigned) {
+		rc = hyp_assign_phys(addr, size, hlosVM, HLOS_VM_NUM, dspVM,
+			dspVMperm, DSP_VM_NUM);
+		if (rc) {
+			dprintk(CVP_ERR, "%s failed. rc=%d\n", __func__, rc);
+			return rc;
+		}
+		me->addr = addr;
+		me->size = size;
+		me->hyp_assigned = true;
+	}
+
+	return rc;
+}
+
+static int cvp_hyp_assign_from_dsp(void)
+{
+	int rc = 0;
+	struct cvp_dsp_apps *me = &gfa_cv;
+
+	if (me->hyp_assigned) {
+		rc = hyp_assign_phys(me->addr, me->size, dspVM, DSP_VM_NUM,
+				hlosVM, hlosVMperm, HLOS_VM_NUM);
+		if (rc) {
+			dprintk(CVP_ERR, "%s failed. rc=%d\n", __func__, rc);
+			return rc;
+		}
+		me->addr = 0;
+		me->size = 0;
+		me->hyp_assigned = false;
+	}
+
+	return rc;
+}
+
+static int cvp_dsp_rpmsg_probe(struct rpmsg_device *rpdev)
+{
+	struct cvp_dsp_apps *me = &gfa_cv;
+
+	if (strcmp(rpdev->dev.parent->of_node->name, "cdsp")) {
+		dprintk(CVP_ERR,
+			"%s: Failed to probe rpmsg device.Node name:%s\n",
+			__func__, rpdev->dev.parent->of_node->name);
+		return -EINVAL;
+	}
+
+	mutex_lock(&me->lock);
+	me->chan = rpdev;
+	me->state = DSP_PROBED;
+	complete(&me->completions[CPU2DSP_MAX_CMD]);
+	mutex_unlock(&me->lock);
+
+	return 0;
+}
+
+static void cvp_dsp_rpmsg_remove(struct rpmsg_device *rpdev)
+{
+	struct cvp_dsp_apps *me = &gfa_cv;
+
+	dprintk(CVP_WARN, "%s: CDSP SSR triggered\n", __func__);
+
+	mutex_lock(&me->lock);
+	cvp_hyp_assign_from_dsp();
+
+	me->chan = NULL;
+	me->state = DSP_UNINIT;
+	mutex_unlock(&me->lock);
+	/* The kernel driver still needs to clean up all DSP sessions here */
+}
+
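+/*
+ * Incoming rpmsg traffic is demultiplexed by type and length: a
+ * cvp_dsp_rsp_msg completes the CPU2DSP command cvp_dsp_send_cmd_sync()
+ * is waiting on, while a cvp_dsp2cpu_cmd_msg is parked in
+ * pending_dsp2cpu_cmd and handed to cvp_dsp_thread through the
+ * CPU2DSP_MAX_CMD completion.
+ */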
+static int cvp_dsp_rpmsg_callback(struct rpmsg_device *rpdev,
+	void *data, int len, void *priv, u32 addr)
+{
+	struct cvp_dsp_rsp_msg *rsp = (struct cvp_dsp_rsp_msg *)data;
+	struct cvp_dsp_apps *me = &gfa_cv;
+
+	dprintk(CVP_DSP, "%s: type = 0x%x ret = 0x%x len = 0x%x\n",
+		__func__, rsp->type, rsp->ret, len);
+
+	if (rsp->type < CPU2DSP_MAX_CMD && len == sizeof(*rsp)) {
+		if (me->pending_dsp2cpu_rsp.type == rsp->type) {
+			memcpy(&me->pending_dsp2cpu_rsp, rsp,
+				sizeof(struct cvp_dsp_rsp_msg));
+			complete(&me->completions[rsp->type]);
+		} else {
+			dprintk(CVP_ERR, "%s: CPU2DSP resp %d, pending %d\n",
+					__func__, rsp->type,
+					me->pending_dsp2cpu_rsp.type);
+			goto exit;
+		}
+	} else if (rsp->type < CVP_DSP_MAX_CMD &&
+			len == sizeof(struct cvp_dsp2cpu_cmd_msg)) {
+		if (me->pending_dsp2cpu_cmd.type != CVP_INVALID_RPMSG_TYPE) {
+			dprintk(CVP_ERR, "%s: DSP2CPU cmd:%d pending %d\n",
+					__func__, rsp->type,
+					me->pending_dsp2cpu_cmd.type);
+			goto exit;
+		}
+		memcpy(&me->pending_dsp2cpu_cmd, rsp,
+			sizeof(struct cvp_dsp2cpu_cmd_msg));
+		complete(&me->completions[CPU2DSP_MAX_CMD]);
+	} else {
+		dprintk(CVP_ERR, "%s: Invalid type: %d\n", __func__, rsp->type);
+		return 0;
+	}
+
+	return 0;
+exit:
+	dprintk(CVP_ERR, "concurrent dsp cmd type = %d, rsp type = %d\n",
+			me->pending_dsp2cpu_cmd.type,
+			me->pending_dsp2cpu_rsp.type);
+	return 0;
+}
+
+int cvp_dsp_suspend(uint32_t session_flag)
+{
+	int rc = 0;
+	struct cvp_dsp_cmd_msg cmd;
+	struct cvp_dsp_apps *me = &gfa_cv;
+
+	cmd.type = CPU2DSP_SUSPEND;
+
+	mutex_lock(&me->lock);
+	if (me->state != DSP_READY)
+		goto exit;
+
+	/* Use cvp_dsp_send_cmd_sync after dsp driver is ready */
+	rc = cvp_dsp_send_cmd_sync(&cmd, sizeof(struct cvp_dsp_cmd_msg));
+	if (rc) {
+		dprintk(CVP_ERR,
+			"%s: cvp_dsp_send_cmd failed rc = %d\n",
+			__func__, rc);
+		goto exit;
+	}
+
+	me->state = DSP_SUSPEND;
+
+exit:
+	mutex_unlock(&me->lock);
+	return rc;
+}
+
+int cvp_dsp_resume(uint32_t session_flag)
+{
+	int rc = 0;
+	struct cvp_dsp_cmd_msg cmd;
+	struct cvp_dsp_apps *me = &gfa_cv;
+
+	cmd.type = CPU2DSP_RESUME;
+
+	mutex_lock(&me->lock);
+	if (me->state != DSP_SUSPEND)
+		goto exit;
+
+	/* Use cvp_dsp_send_cmd_sync after dsp driver is ready */
+	rc = cvp_dsp_send_cmd_sync(&cmd, sizeof(struct cvp_dsp_cmd_msg));
+	if (rc) {
+		dprintk(CVP_ERR,
+			"%s: cvp_dsp_send_cmd failed rc = %d\n",
+			__func__, rc);
+		goto exit;
+	}
+
+	me->state = DSP_READY;
+
+exit:
+	mutex_unlock(&me->lock);
+	return rc;
+}
+
+int cvp_dsp_shutdown(uint32_t session_flag)
+{
+	struct cvp_dsp_apps *me = &gfa_cv;
+	int rc = 0;
+	struct cvp_dsp_cmd_msg cmd;
+
+	cmd.type = CPU2DSP_SHUTDOWN;
+
+	mutex_lock(&me->lock);
+	if (me->state == DSP_INVALID)
+		goto exit;
+
+	me->state = DSP_INACTIVE;
+	rc = cvp_dsp_send_cmd_sync(&cmd, sizeof(struct cvp_dsp_cmd_msg));
+	if (rc) {
+		dprintk(CVP_ERR,
+			"%s: cvp_dsp_send_cmd failed with rc = %d\n",
+			__func__, rc);
+		cvp_hyp_assign_from_dsp();
+		goto exit;
+	}
+
+	rc = cvp_hyp_assign_from_dsp();
+
+exit:
+	mutex_unlock(&me->lock);
+	return rc;
+}
+
+int cvp_dsp_register_buffer(uint32_t session_id, uint32_t buff_fd,
+			uint32_t buff_fd_size, uint32_t buff_size,
+			uint32_t buff_offset, uint32_t buff_index,
+			uint32_t buff_fd_iova)
+{
+	struct cvp_dsp_cmd_msg cmd;
+	int rc;
+	struct cvp_dsp_apps *me = &gfa_cv;
+
+	cmd.type = CPU2DSP_REGISTER_BUFFER;
+	cmd.session_id = session_id;
+	cmd.buff_fd = buff_fd;
+	cmd.buff_fd_size = buff_fd_size;
+	cmd.buff_size = buff_size;
+	cmd.buff_offset = buff_offset;
+	cmd.buff_index = buff_index;
+	cmd.buff_fd_iova = buff_fd_iova;
+
+	dprintk(CVP_DSP,
+		"%s: type=0x%x, buff_fd_iova=0x%x buff_index=0x%x\n",
+		__func__, cmd.type, buff_fd_iova,
+		cmd.buff_index);
+	dprintk(CVP_DSP, "%s: buff_size=0x%x session_id=0x%x\n",
+		__func__, cmd.buff_size, cmd.session_id);
+
+	mutex_lock(&me->lock);
+	rc = cvp_dsp_send_cmd_sync(&cmd, sizeof(struct cvp_dsp_cmd_msg));
+	if (rc) {
+		dprintk(CVP_ERR, "%s send failed rc = %d\n", __func__, rc);
+		goto exit;
+	}
+
+exit:
+	mutex_unlock(&me->lock);
+	return rc;
+}
+
+int cvp_dsp_deregister_buffer(uint32_t session_id, uint32_t buff_fd,
+			uint32_t buff_fd_size, uint32_t buff_size,
+			uint32_t buff_offset, uint32_t buff_index,
+			uint32_t buff_fd_iova)
+{
+	struct cvp_dsp_cmd_msg cmd;
+	int rc;
+	struct cvp_dsp_apps *me = &gfa_cv;
+
+	cmd.type = CPU2DSP_DEREGISTER_BUFFER;
+	cmd.session_id = session_id;
+	cmd.buff_fd = buff_fd;
+	cmd.buff_fd_size = buff_fd_size;
+	cmd.buff_size = buff_size;
+	cmd.buff_offset = buff_offset;
+	cmd.buff_index = buff_index;
+	cmd.buff_fd_iova = buff_fd_iova;
+
+	dprintk(CVP_DSP,
+		"%s: type=0x%x, buff_fd_iova=0x%x buff_index=0x%x\n",
+		__func__, cmd.type, buff_fd_iova,
+		cmd.buff_index);
+	dprintk(CVP_DSP, "%s: buff_size=0x%x session_id=0x%x\n",
+		__func__, cmd.buff_size, cmd.session_id);
+
+	mutex_lock(&me->lock);
+	rc = cvp_dsp_send_cmd_sync(&cmd, sizeof(struct cvp_dsp_cmd_msg));
+	if (rc) {
+		dprintk(CVP_ERR, "%s send failed rc = %d\n", __func__, rc);
+		goto exit;
+	}
+
+exit:
+	mutex_unlock(&me->lock);
+	return rc;
+}
+
+static const struct rpmsg_device_id cvp_dsp_rpmsg_match[] = {
+	{ CVP_APPS_DSP_GLINK_GUID },
+	{ },
+};
+
+static struct rpmsg_driver cvp_dsp_rpmsg_client = {
+	.id_table = cvp_dsp_rpmsg_match,
+	.probe = cvp_dsp_rpmsg_probe,
+	.remove = cvp_dsp_rpmsg_remove,
+	.callback = cvp_dsp_rpmsg_callback,
+	.drv = {
+		.name = "qcom,msm_cvp_dsp_rpmsg",
+	},
+};
+
+void cvp_dsp_send_hfi_queue(void)
+{
+	struct msm_cvp_core *core;
+	struct iris_hfi_device *device;
+	struct cvp_dsp_apps *me = &gfa_cv;
+	uint64_t addr;
+	uint32_t size;
+	int rc;
+
+	core = list_first_entry(&cvp_driver->cores, struct msm_cvp_core, list);
+	if (core && core->device)
+		device = core->device->hfi_device_data;
+	else
+		return;
+
+	if (!device) {
+		dprintk(CVP_ERR, "%s: NULL device\n", __func__);
+		return;
+	}
+
+	dprintk(CVP_DSP, "Entering %s\n", __func__);
+
+	mutex_lock(&device->lock);
+	mutex_lock(&me->lock);
+
+	addr = (uint64_t)device->dsp_iface_q_table.mem_data.dma_handle;
+	size = device->dsp_iface_q_table.mem_data.size;
+
+	if (!addr || !size) {
+		dprintk(CVP_DSP, "%s: HFI queue is not ready\n", __func__);
+		goto exit;
+	}
+
+	if (me->state != DSP_PROBED && me->state != DSP_INACTIVE)
+		goto exit;
+
+	rc = cvp_hyp_assign_to_dsp(addr, size);
+	if (rc) {
+		dprintk(CVP_ERR, "%s: cvp_hyp_assign_to_dsp. rc=%d\n",
+			__func__, rc);
+		goto exit;
+	}
+
+	rc = cvp_dsp_send_cmd_hfi_queue((phys_addr_t *)addr, size);
+	if (rc) {
+		dprintk(CVP_WARN, "%s: Send HFI Queue failed rc = %d\n",
+			__func__, rc);
+
+		goto exit;
+	}
+
+	dprintk(CVP_DSP, "%s: dsp initialized\n", __func__);
+	me->state = DSP_READY;
+
+exit:
+	mutex_unlock(&me->lock);
+	mutex_unlock(&device->lock);
+}
+
+static int cvp_dsp_thread(void *data)
+{
+	int rc = 0, old_state;
+	struct cvp_dsp_apps *me = &gfa_cv;
+	struct cvp_dsp_cmd_msg cmd;
+	struct cvp_hfi_device *hdev;
+	struct msm_cvp_core *core;
+
+	core = list_first_entry(&cvp_driver->cores, struct msm_cvp_core, list);
+	if (!core) {
+		dprintk(CVP_ERR, "%s: Failed to find core\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	hdev = (struct cvp_hfi_device *)core->device;
+	if (!hdev) {
+		dprintk(CVP_ERR, "%s Invalid device handle\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+wait_dsp:
+	rc = wait_for_completion_interruptible(
+			&me->completions[CPU2DSP_MAX_CMD]);
+
+	if (me->state == DSP_INVALID)
+		goto exit;
+
+	if (me->state == DSP_UNINIT)
+		goto wait_dsp;
+
+	if (me->state == DSP_PROBED) {
+		cvp_dsp_send_hfi_queue();
+		goto wait_dsp;
+	}
+
+	cmd.type = me->pending_dsp2cpu_cmd.type;
+
+	if (rc == -ERESTARTSYS) {
+		dprintk(CVP_WARN, "%s received interrupt signal\n", __func__);
+	} else {
+		mutex_lock(&me->lock);
+		switch (me->pending_dsp2cpu_cmd.type) {
+		case DSP2CPU_POWERON:
+		{
+			if (me->state == DSP_READY)
+				break;
+
+			mutex_unlock(&me->lock);
+			old_state = me->state;
+			me->state = DSP_READY;
+			rc = call_hfi_op(hdev, resume, hdev->hfi_device_data);
+			if (rc) {
+				dprintk(CVP_WARN, "%s Failed to resume cvp\n",
+						__func__);
+				mutex_lock(&me->lock);
+				me->state = old_state;
+				cmd.ret = 1;
+				break;
+			}
+			mutex_lock(&me->lock);
+			cmd.ret = 0;
+			break;
+		}
+		case DSP2CPU_POWEROFF:
+		{
+			me->state = DSP_SUSPEND;
+			cmd.ret = 0;
+			break;
+		}
+		default:
+			dprintk(CVP_ERR, "unrecognaized dsp cmds: %d\n",
+					me->pending_dsp2cpu_cmd.type);
+			break;
+		}
+		me->pending_dsp2cpu_cmd.type = CVP_INVALID_RPMSG_TYPE;
+		mutex_unlock(&me->lock);
+	}
+	/* Responds to DSP */
+	rc = cvp_dsp_send_cmd(&cmd, sizeof(struct cvp_dsp_cmd_msg));
+	if (rc)
+		dprintk(CVP_ERR,
+			"%s: cvp_dsp_send_cmd failed rc = %d cmd type=%d\n",
+			__func__, rc, cmd.type);
+	goto wait_dsp;
+exit:
+	dprintk(CVP_DBG, "dsp thread exit\n");
+	do_exit(rc);
+}
+
+int cvp_dsp_device_init(void)
+{
+	struct cvp_dsp_apps *me = &gfa_cv;
+	char tname[16];
+	int rc;
+	int i;
+
+	mutex_init(&me->lock);
+	me->state = DSP_INVALID;
+	me->hyp_assigned = false;
+
+	for (i = 0; i <= CPU2DSP_MAX_CMD; i++)
+		init_completion(&me->completions[i]);
+
+	me->pending_dsp2cpu_cmd.type = CVP_INVALID_RPMSG_TYPE;
+	me->pending_dsp2cpu_rsp.type = CVP_INVALID_RPMSG_TYPE;
+
+	rc = register_rpmsg_driver(&cvp_dsp_rpmsg_client);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"%s : register_rpmsg_driver failed rc = %d\n",
+			__func__, rc);
+		goto register_bail;
+	}
+	snprintf(tname, sizeof(tname), "cvp-dsp-thread");
+	me->state = DSP_UNINIT;
+	me->dsp_thread = kthread_run(cvp_dsp_thread, me, tname);
+	if (!me->dsp_thread) {
+		dprintk(CVP_ERR, "%s create %s fail", __func__, tname);
+		rc = -ECHILD;
+		me->state = DSP_INVALID;
+		goto register_bail;
+	}
+	return 0;
+
+register_bail:
+	return rc;
+}
+
+void cvp_dsp_device_exit(void)
+{
+	struct cvp_dsp_apps *me = &gfa_cv;
+	int i;
+
+	mutex_lock(&me->lock);
+	me->state = DSP_INVALID;
+	mutex_unlock(&me->lock);
+
+	for (i = 0; i <= CPU2DSP_MAX_CMD; i++)
+		complete_all(&me->completions[i]);
+
+	mutex_destroy(&me->lock);
+	unregister_rpmsg_driver(&cvp_dsp_rpmsg_client);
+}

+ 145 - 0
msm/eva/msm_cvp_dsp.h

@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef MSM_CVP_DSP_H
+#define MSM_CVP_DSP_H
+
+#include <linux/types.h>
+#include "msm_cvp_debug.h"
+#include "cvp_core_hfi.h"
+
+#define CVP_APPS_DSP_GLINK_GUID "cvp-glink-apps-dsp"
+#define CVP_APPS_DSP_SMD_GUID "cvp-smd-apps-dsp"
+
+#define VMID_CDSP_Q6 (30)
+#define HLOS_VM_NUM 1
+#define DSP_VM_NUM 2
+#define CVP_DSP_MAX_RESERVED 5
+#define CVP_DSP2CPU_RESERVED 8
+#define CVP_DSP_RESPONSE_TIMEOUT 300
+#define CVP_INVALID_RPMSG_TYPE 0xBADDFACE
+
+int cvp_dsp_device_init(void);
+void cvp_dsp_device_exit(void);
+void cvp_dsp_send_hfi_queue(void);
+
+enum CVP_DSP_COMMAND {
+	CPU2DSP_SEND_HFI_QUEUE = 0,
+	CPU2DSP_SUSPEND = 1,
+	CPU2DSP_RESUME = 2,
+	CPU2DSP_SHUTDOWN = 3,
+	CPU2DSP_REGISTER_BUFFER = 4,
+	CPU2DSP_DEREGISTER_BUFFER = 5,
+	CPU2DSP_MAX_CMD = 6,
+	DSP2CPU_POWERON = 6,
+	DSP2CPU_POWEROFF = 7,
+	CVP_DSP_MAX_CMD = 8,
+};
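+
+/*
+ * CPU2DSP command values index the completions[] array directly; the
+ * DSP2CPU commands share the extra completions[CPU2DSP_MAX_CMD] slot,
+ * which is also signalled when the rpmsg channel is probed.
+ */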
+
+struct cvp_dsp_cmd_msg {
+	uint32_t type;
+	int32_t ret;
+	uint64_t msg_ptr;
+	uint32_t msg_ptr_len;
+	uint32_t buff_fd_iova;
+	uint32_t buff_index;
+	uint32_t buff_size;
+	uint32_t session_id;
+	int32_t ddr_type;
+	uint32_t buff_fd;
+	uint32_t buff_offset;
+	uint32_t buff_fd_size;
+	uint32_t reserved1;
+	uint32_t reserved2;
+};
+
+struct cvp_dsp_rsp_msg {
+	uint32_t type;
+	int32_t ret;
+	uint32_t reserved[CVP_DSP_MAX_RESERVED];
+};
+
+struct cvp_dsp2cpu_cmd_msg {
+	uint32_t type;
+	uint32_t ver;
+	uint32_t len;
+	uint32_t data[CVP_DSP2CPU_RESERVED];
+};
+
+struct cvp_dsp_apps {
+	struct mutex lock;
+	struct rpmsg_device *chan;
+	uint32_t state;
+	bool hyp_assigned;
+	uint64_t addr;
+	uint32_t size;
+	struct completion completions[CPU2DSP_MAX_CMD + 1];
+	struct cvp_dsp2cpu_cmd_msg pending_dsp2cpu_cmd;
+	struct cvp_dsp_rsp_msg pending_dsp2cpu_rsp;
+	struct task_struct *dsp_thread;
+};
+
+extern struct cvp_dsp_apps gfa_cv;
+/*
+ * API for CVP driver to suspend CVP session during
+ * power collapse
+ *
+ * @param session_flag
+ * Flag to share details of session.
+ */
+int cvp_dsp_suspend(uint32_t session_flag);
+
+/*
+ * API for CVP driver to resume CVP session during
+ * power collapse
+ *
+ * @param session_flag
+ * Flag to share details of session.
+ */
+int cvp_dsp_resume(uint32_t session_flag);
+
+/*
+ * API for CVP driver to shutdown CVP session during
+ * cvp subsystem error.
+ *
+ * @param session_flag
+ * Flag to share details of session.
+ */
+int cvp_dsp_shutdown(uint32_t session_flag);
+
+/*
+ * API to register iova buffer address with CDSP
+ *
+ * @session_id:     cvp session id
+ * @buff_fd:        buffer fd
+ * @buff_fd_size:   total size of fd in bytes
+ * @buff_size:      size in bytes of cvp buffer
+ * @buff_offset:    buffer offset
+ * @buff_index:     buffer index
+ * @buff_fd_iova:   IOVA address of the buffer
+ */
+int cvp_dsp_register_buffer(uint32_t session_id, uint32_t buff_fd,
+			uint32_t buff_fd_size, uint32_t buff_size,
+			uint32_t buff_offset, uint32_t buff_index,
+			uint32_t buff_fd_iova);
+
+/*
+ * API to de-register iova buffer address from CDSP
+ *
+ * @session_id:     cvp session id
+ * @buff_fd:        buffer fd
+ * @buff_fd_size:   total size of fd in bytes
+ * @buff_size:      size in bytes of cvp buffer
+ * @buff_offset:    buffer offset
+ * @buff_index:     buffer index
+ * @buff_fd_iova:   IOVA address of the buffer
+ */
+int cvp_dsp_deregister_buffer(uint32_t session_id, uint32_t buff_fd,
+			uint32_t buff_fd_size, uint32_t buff_size,
+			uint32_t buff_offset, uint32_t buff_index,
+			uint32_t buff_fd_iova);
+
+#endif // MSM_CVP_DSP_H
+

+ 315 - 0
msm/eva/msm_cvp_internal.h

@@ -0,0 +1,315 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _MSM_CVP_INTERNAL_H_
+#define _MSM_CVP_INTERNAL_H_
+
+#include <linux/atomic.h>
+#include <linux/list.h>
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/interconnect.h>
+#include <linux/kref.h>
+#include <linux/cdev.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/dma-mapping.h>
+#include "msm_cvp_core.h"
+#include <media/msm_media_info.h>
+#include <media/msm_cvp_private.h>
+#include "cvp_hfi_api.h"
+#include <synx_api.h>
+
+#define MAX_SUPPORTED_INSTANCES 16
+#define MAX_NAME_LENGTH 64
+#define MAX_DEBUGFS_NAME 50
+#define MAX_DSP_INIT_ATTEMPTS 16
+#define FENCE_WAIT_SIGNAL_TIMEOUT 100
+#define FENCE_WAIT_SIGNAL_RETRY_TIMES 20
+#define FENCE_BIT (1ULL << 63)
+
+#define FENCE_DME_ICA_ENABLED_IDX 0
+#define FENCE_DME_DS_IDX 1
+#define FENCE_DME_OUTPUT_IDX 7
+
+#define SYS_MSG_START HAL_SYS_INIT_DONE
+#define SYS_MSG_END HAL_SYS_ERROR
+#define SESSION_MSG_START HAL_SESSION_EVENT_CHANGE
+#define SESSION_MSG_END HAL_SESSION_ERROR
+#define SYS_MSG_INDEX(__msg) (__msg - SYS_MSG_START)
+#define SESSION_MSG_INDEX(__msg) (__msg - SESSION_MSG_START)
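+
+/*
+ * The per-range completion arrays are indexed relative to the first
+ * message in each range, e.g.
+ * completions[SESSION_MSG_INDEX(SESSION_MSG_END)] is the last
+ * session-level slot.
+ */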
+
+#define ARP_BUF_SIZE 0x100000
+
+#define CVP_RT_PRIO_THRESHOLD 1
+
+struct msm_cvp_inst;
+
+enum cvp_core_state {
+	CVP_CORE_UNINIT = 0,
+	CVP_CORE_INIT,
+	CVP_CORE_INIT_DONE,
+};
+
+/*
+ * Do not change the enum values unless
+ * you know what you are doing
+ */
+enum instance_state {
+	MSM_CVP_CORE_UNINIT_DONE = 0x0001,
+	MSM_CVP_CORE_INIT,
+	MSM_CVP_CORE_INIT_DONE,
+	MSM_CVP_OPEN,
+	MSM_CVP_OPEN_DONE,
+	MSM_CVP_CLOSE,
+	MSM_CVP_CLOSE_DONE,
+	MSM_CVP_CORE_UNINIT,
+	MSM_CVP_CORE_INVALID
+};
+
+enum dsp_state {
+	DSP_INVALID,
+	DSP_UNINIT,
+	DSP_PROBED,
+	DSP_READY,
+	DSP_SUSPEND,
+	DSP_INACTIVE,
+};
+
+struct msm_cvp_common_data {
+	char key[128];
+	int value;
+};
+
+enum sku_version {
+	SKU_VERSION_0 = 0,
+	SKU_VERSION_1,
+	SKU_VERSION_2,
+};
+
+enum vpu_version {
+	VPU_VERSION_4 = 1,
+	VPU_VERSION_5,
+};
+
+struct msm_cvp_ubwc_config_data {
+	struct {
+		u32 max_channel_override : 1;
+		u32 mal_length_override : 1;
+		u32 hb_override : 1;
+		u32 bank_swzl_level_override : 1;
+		u32 bank_spreading_override : 1;
+		u32 reserved : 27;
+	} override_bit_info;
+
+	u32 max_channels;
+	u32 mal_length;
+	u32 highest_bank_bit;
+	u32 bank_swzl_level;
+	u32 bank_spreading;
+};
+
+struct msm_cvp_platform_data {
+	struct msm_cvp_common_data *common_data;
+	unsigned int common_data_length;
+	unsigned int sku_version;
+	uint32_t vpu_ver;
+	struct msm_cvp_ubwc_config_data *ubwc_config;
+};
+
+struct msm_cvp_drv {
+	struct mutex lock;
+	struct list_head cores;
+	int num_cores;
+	struct dentry *debugfs_root;
+	int thermal_level;
+	u32 sku_version;
+	struct kmem_cache *msg_cache;
+	struct kmem_cache *frame_cache;
+	struct kmem_cache *buf_cache;
+	struct kmem_cache *smem_cache;
+	char fw_version[CVP_VERSION_LENGTH];
+};
+
+enum profiling_points {
+	SYS_INIT = 0,
+	SESSION_INIT,
+	LOAD_RESOURCES,
+	FRAME_PROCESSING,
+	FW_IDLE,
+	MAX_PROFILING_POINTS,
+};
+
+struct cvp_clock_data {
+	int buffer_counter;
+	int load;
+	int load_low;
+	int load_norm;
+	int load_high;
+	int min_threshold;
+	int max_threshold;
+	unsigned long bitrate;
+	unsigned long min_freq;
+	unsigned long curr_freq;
+	u32 ddr_bw;
+	u32 sys_cache_bw;
+	u32 operating_rate;
+	u32 core_id;
+	bool low_latency_mode;
+	bool turbo_mode;
+};
+
+struct cvp_profile_data {
+	int start;
+	int stop;
+	int cumulative;
+	char name[64];
+	int sampling;
+	int average;
+};
+
+struct msm_cvp_debug {
+	struct cvp_profile_data pdata[MAX_PROFILING_POINTS];
+	int profile;
+	int samples;
+};
+
+enum msm_cvp_modes {
+	CVP_SECURE = BIT(0),
+	CVP_TURBO = BIT(1),
+	CVP_THUMBNAIL = BIT(2),
+	CVP_LOW_POWER = BIT(3),
+	CVP_REALTIME = BIT(4),
+};
+
+#define MAX_NUM_MSGS_PER_SESSION	128
+#define CVP_MAX_WAIT_TIME	2000
+
+struct cvp_session_msg {
+	struct list_head node;
+	struct cvp_hfi_msg_session_hdr pkt;
+};
+
+struct cvp_session_queue {
+	spinlock_t lock;
+	enum queue_state state;
+	unsigned int msg_count;
+	struct list_head msgs;
+	wait_queue_head_t wq;
+};
+
+struct cvp_session_prop {
+	u32 type;
+	u32 kernel_mask;
+	u32 priority;
+	u32 is_secure;
+	u32 dsp_mask;
+	u32 fthread_nr;
+	u32 fdu_cycles;
+	u32 od_cycles;
+	u32 mpu_cycles;
+	u32 ica_cycles;
+	u32 fw_cycles;
+	u32 fdu_op_cycles;
+	u32 od_op_cycles;
+	u32 mpu_op_cycles;
+	u32 ica_op_cycles;
+	u32 fw_op_cycles;
+	u32 ddr_bw;
+	u32 ddr_op_bw;
+	u32 ddr_cache;
+	u32 ddr_op_cache;
+};
+
+enum cvp_event_t {
+	CVP_NO_EVENT,
+	CVP_SSR_EVENT = 1,
+	CVP_SYS_ERROR_EVENT,
+	CVP_MAX_CLIENTS_EVENT,
+	CVP_HW_UNSUPPORTED_EVENT,
+	CVP_INVALID_EVENT,
+};
+
+struct cvp_session_event {
+	spinlock_t lock;
+	enum cvp_event_t event;
+	wait_queue_head_t wq;
+};
+
+struct msm_cvp_core {
+	struct list_head list;
+	struct mutex lock;
+	int id;
+	dev_t dev_num;
+	struct cdev cdev;
+	struct class *class;
+	struct device *dev;
+	struct cvp_hfi_device *device;
+	struct msm_cvp_platform_data *platform_data;
+	struct list_head instances;
+	struct dentry *debugfs_root;
+	enum cvp_core_state state;
+	struct completion completions[SYS_MSG_END - SYS_MSG_START + 1];
+	enum msm_cvp_hfi_type hfi_type;
+	struct msm_cvp_platform_resources resources;
+	struct msm_cvp_capability *capabilities;
+	struct delayed_work fw_unload_work;
+	struct work_struct ssr_work;
+	enum hal_ssr_trigger_type ssr_type;
+	bool smmu_fault_handled;
+	u32 last_fault_addr;
+	bool trigger_ssr;
+	unsigned long curr_freq;
+	atomic64_t kernel_trans_id;
+};
+
+struct msm_cvp_inst {
+	struct list_head list;
+	struct mutex sync_lock, lock;
+	struct msm_cvp_core *core;
+	enum session_type session_type;
+	struct cvp_session_queue session_queue;
+	struct cvp_session_queue session_queue_fence;
+	struct cvp_session_event event_handler;
+	void *session;
+	enum instance_state state;
+	struct msm_cvp_list freqs;
+	struct msm_cvp_list persistbufs;
+	struct cvp_dmamap_cache dma_cache;
+	struct msm_cvp_list cvpdspbufs;
+	struct msm_cvp_list frames;
+	struct completion completions[SESSION_MSG_END - SESSION_MSG_START + 1];
+	struct dentry *debugfs_root;
+	struct msm_cvp_debug debug;
+	struct cvp_clock_data clk_data;
+	enum msm_cvp_modes flags;
+	struct msm_cvp_capability capability;
+	struct kref kref;
+	struct cvp_session_prop prop;
+	u32 cur_cmd_type;
+	struct synx_session synx_session_id;
+	struct cvp_fence_queue fence_cmd_queue;
+};
+
+extern struct msm_cvp_drv *cvp_driver;
+
+void cvp_handle_cmd_response(enum hal_command_response cmd, void *data);
+int msm_cvp_trigger_ssr(struct msm_cvp_core *core,
+	enum hal_ssr_trigger_type type);
+int msm_cvp_noc_error_info(struct msm_cvp_core *core);
+void msm_cvp_comm_handle_thermal_event(void);
+
+void msm_cvp_fw_unload_handler(struct work_struct *work);
+void msm_cvp_ssr_handler(struct work_struct *work);
+/*
+ * XXX: normally should be in msm_cvp_core.h, but that's meant for public APIs,
+ * whereas this is private
+ */
+int msm_cvp_destroy(struct msm_cvp_inst *inst);
+void *cvp_get_drv_data(struct device *dev);
+#endif

+ 650 - 0
msm/eva/msm_cvp_ioctl.c

@@ -0,0 +1,650 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include "cvp_private.h"
+#include "cvp_hfi_api.h"
+
+static int _get_pkt_hdr_from_user(struct cvp_kmd_arg __user *up,
+		struct cvp_hal_session_cmd_pkt *pkt_hdr)
+{
+	struct cvp_kmd_hfi_packet *u;
+
+	u = &up->data.hfi_pkt;
+
+	if (get_user(pkt_hdr->size, &u->pkt_data[0]))
+		return -EFAULT;
+
+	if (get_user(pkt_hdr->packet_type, &u->pkt_data[1]))
+		return -EFAULT;
+
+	if (get_pkt_index(pkt_hdr) < 0) {
+		dprintk(CVP_ERR, "user mode provides incorrect hfi\n");
+		goto set_default_pkt_hdr;
+	}
+
+	if (pkt_hdr->size > MAX_HFI_PKT_SIZE*sizeof(unsigned int)) {
+		dprintk(CVP_ERR, "user HFI packet too large %x\n",
+				pkt_hdr->size);
+		return -EINVAL;
+	}
+
+	return 0;
+
+set_default_pkt_hdr:
+	pkt_hdr->size = get_msg_size();
+	return 0;
+}
+
+static int _get_fence_pkt_hdr_from_user(struct cvp_kmd_arg __user *up,
+		struct cvp_hal_session_cmd_pkt *pkt_hdr)
+{
+	struct cvp_kmd_hfi_synx_packet __user *u;
+
+	u = &up->data.hfi_synx_pkt;
+
+	if (get_user(pkt_hdr->size, &u->pkt_data[0]))
+		return -EFAULT;
+
+	if (get_user(pkt_hdr->packet_type, &u->pkt_data[1]))
+		return -EFAULT;
+
+	if (pkt_hdr->size > (MAX_HFI_PKT_SIZE*sizeof(unsigned int)))
+		return -EINVAL;
+
+	return 0;
+}
+
+/* Size is in units of u32 */
+static int _copy_pkt_from_user(struct cvp_kmd_arg *kp,
+		struct cvp_kmd_arg __user *up,
+		unsigned int size)
+{
+	struct cvp_kmd_hfi_packet *k, *u;
+	int i;
+
+	k = &kp->data.hfi_pkt;
+	u = &up->data.hfi_pkt;
+	for (i = 0; i < size; i++)
+		if (get_user(k->pkt_data[i], &u->pkt_data[i]))
+			return -EFAULT;
+
+	return 0;
+}
+
+static int _copy_synx_data_from_user(
+	struct cvp_kmd_hfi_synx_packet *k,
+	struct cvp_kmd_hfi_synx_packet __user *u)
+{
+	int i;
+
+	for (i = 0; i < MAX_FENCE_DATA_SIZE; i++) {
+		if (get_user(k->fence_data[i], &u->fence_data[i]))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* Size is in units of u32 */
+static int _copy_fence_data_from_user_deprecate(
+	struct cvp_kmd_hfi_fence_packet *k,
+	struct cvp_kmd_hfi_fence_packet __user *u)
+{
+	int i;
+
+	for (i = 0; i < MAX_HFI_FENCE_SIZE; i++) {
+		if (get_user(k->fence_data[i], &u->fence_data[i]))
+			return -EFAULT;
+	}
+
+	if (get_user(k->frame_id, &u->frame_id)) {
+		dprintk(CVP_ERR, "Failed to get frame id from fence pkt\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int _copy_fence_pkt_from_user(struct cvp_kmd_arg *kp,
+		struct cvp_kmd_arg __user *up)
+{
+	struct cvp_kmd_hfi_synx_packet *k;
+	struct cvp_kmd_hfi_synx_packet __user *u;
+	struct cvp_kmd_hfi_fence_packet __user *u1;
+	int i;
+
+	k = &kp->data.hfi_synx_pkt;
+	u = &up->data.hfi_synx_pkt;
+	u1 = &up->data.hfi_fence_pkt;
+
+	for (i = 0; i < MAX_HFI_PKT_SIZE; i++)
+		if (get_user(k->pkt_data[i], &u->pkt_data[i]))
+			return -EFAULT;
+
+	if (get_user(k->fence_data[0], &u->fence_data[0]))
+		return -EFAULT;
+
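+	/*
+	 * The first fence word doubles as a format discriminator: synx-aware
+	 * user space writes the 0xFEEDFACE marker, while legacy clients send
+	 * the deprecated cvp_kmd_hfi_fence_packet layout.
+	 */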
+	if (k->fence_data[0] == 0xFEEDFACE)
+		return _copy_synx_data_from_user(k, u);
+	else
+		return _copy_fence_data_from_user_deprecate(
+				(struct cvp_kmd_hfi_fence_packet *)k, u1);
+}
+
+static int _copy_frameid_from_user(struct cvp_kmd_arg *kp,
+		struct cvp_kmd_arg __user *up)
+{
+	if (get_user(kp->data.frame_id, &up->data.frame_id)) {
+		dprintk(CVP_ERR, "Failed to get frame id from user\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int _copy_sysprop_from_user(struct cvp_kmd_arg *kp,
+		struct cvp_kmd_arg __user *up)
+{
+	struct cvp_kmd_sys_properties *k, *u;
+
+	k = &kp->data.sys_properties;
+	u = &up->data.sys_properties;
+
+	if (get_user(k->prop_num, &u->prop_num))
+		return -EFAULT;
+
+	if (k->prop_num < 1 || k->prop_num > 32) {
+		dprintk(CVP_ERR, "Num of prop out of range %d\n", k->prop_num);
+		return -EINVAL;
+	}
+
+	return _copy_pkt_from_user(kp, up,
+		k->prop_num * ((sizeof(struct cvp_kmd_sys_property) >> 2) + 1));
+}
+
+static int _copy_pkt_to_user(struct cvp_kmd_arg *kp,
+		struct cvp_kmd_arg __user *up,
+		unsigned int size)
+{
+	struct cvp_kmd_hfi_packet *k, *u;
+	int i;
+
+	k = &kp->data.hfi_pkt;
+	u = &up->data.hfi_pkt;
+	for (i = 0; i < size; i++)
+		if (put_user(k->pkt_data[i], &u->pkt_data[i]))
+			return -EFAULT;
+
+	return 0;
+}
+
+static int _copy_fence_pkt_to_user(struct cvp_kmd_arg *kp,
+		struct cvp_kmd_arg __user *up)
+{
+	struct cvp_kmd_hfi_synx_packet *k;
+	struct cvp_kmd_hfi_synx_packet __user *u;
+	int i;
+
+	k = &kp->data.hfi_synx_pkt;
+	u = &up->data.hfi_synx_pkt;
+	for (i = 0; i < MAX_HFI_PKT_SIZE; i++) {
+		if (put_user(k->pkt_data[i], &u->pkt_data[i]))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int _copy_sysprop_to_user(struct cvp_kmd_arg *kp,
+		struct cvp_kmd_arg __user *up)
+{
+	struct cvp_kmd_sys_properties *k;
+	struct cvp_kmd_sys_properties __user *u;
+	int i;
+
+	k = &kp->data.sys_properties;
+	u = &up->data.sys_properties;
+
+	for (i = 0; i < 8; i++)
+		if (put_user(k->prop_data[i].data, &u->prop_data[i].data))
+			return -EFAULT;
+
+	return 0;
+}
+
+static void print_hfi_short(struct cvp_kmd_arg __user *up)
+{
+	struct cvp_kmd_hfi_packet __user *pkt;
+	unsigned int words[5];
+
+	pkt = &up->data.hfi_pkt;
+	if (get_user(words[0], &up->type) ||
+			get_user(words[1], &up->buf_offset) ||
+			get_user(words[2], &up->buf_num) ||
+			get_user(words[3], &pkt->pkt_data[0]) ||
+			get_user(words[4], &pkt->pkt_data[1])) {
+		dprintk(CVP_ERR, "Failed to print ioctl cmd\n");
+		return;
+	}
+
+	dprintk(CVP_HFI, "IOCTL cmd type %#x, offset %d, num %d, pkt %d %#x\n",
+			words[0], words[1], words[2], words[3], words[4]);
+}
+
+static int _copy_session_ctrl_to_user(
+	struct cvp_kmd_session_control *k,
+	struct cvp_kmd_session_control *u)
+{
+	int i;
+
+	if (put_user(k->ctrl_type, &u->ctrl_type))
+		return -EFAULT;
+	for (i = 0; i < 8; i++)
+		if (put_user(k->ctrl_data[i], &u->ctrl_data[i]))
+			return -EFAULT;
+	return 0;
+}
+
+static int _get_session_ctrl_from_user(
+	struct cvp_kmd_session_control *k,
+	struct cvp_kmd_session_control *u)
+{
+	int i;
+
+	if (get_user(k->ctrl_type, &u->ctrl_type))
+		return -EFAULT;
+
+	for (i = 0; i < 8; i++)
+		if (get_user(k->ctrl_data[i], &u->ctrl_data[i]))
+			return -EFAULT;
+	return 0;
+}
+
+static int _get_session_info_from_user(
+	struct cvp_kmd_session_info *k,
+	struct cvp_kmd_session_info __user *u)
+{
+	int i;
+
+	if (get_user(k->session_id, &u->session_id))
+		return -EFAULT;
+
+	for (i = 0; i < 10; i++)
+		if (get_user(k->reserved[i], &u->reserved[i]))
+			return -EFAULT;
+	return 0;
+}
+
+static int convert_from_user(struct cvp_kmd_arg *kp,
+		unsigned long arg,
+		struct msm_cvp_inst *inst)
+{
+	int rc = 0;
+	int i;
+	struct cvp_kmd_arg __user *up = (struct cvp_kmd_arg __user *)arg;
+	struct cvp_hal_session_cmd_pkt pkt_hdr;
+	int pkt_idx;
+
+	if (!kp || !up) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	print_hfi_short(up);
+
+	if (get_user(kp->type, &up->type))
+		return -EFAULT;
+
+	if (get_user(kp->buf_offset, &up->buf_offset) ||
+		get_user(kp->buf_num, &up->buf_num))
+		return -EFAULT;
+
+	switch (kp->type) {
+	case CVP_KMD_GET_SESSION_INFO:
+	{
+		struct cvp_kmd_session_info *k;
+		struct cvp_kmd_session_info __user *u;
+
+		k = &kp->data.session;
+		u = &up->data.session;
+		if (_get_session_info_from_user(k, u)) {
+			dprintk(CVP_ERR, "fail to get sess info\n");
+			return -EFAULT;
+		}
+
+		break;
+	}
+	case CVP_KMD_REGISTER_BUFFER:
+	{
+		struct cvp_kmd_buffer *k, *u;
+
+		k = &kp->data.regbuf;
+		u = &up->data.regbuf;
+		if (get_user(k->type, &u->type) ||
+			get_user(k->index, &u->index) ||
+			get_user(k->fd, &u->fd) ||
+			get_user(k->size, &u->size) ||
+			get_user(k->offset, &u->offset) ||
+			get_user(k->pixelformat, &u->pixelformat) ||
+			get_user(k->flags, &u->flags))
+			return -EFAULT;
+		for (i = 0; i < 5; i++)
+			if (get_user(k->reserved[i], &u->reserved[i]))
+				return -EFAULT;
+		break;
+	}
+	case CVP_KMD_UNREGISTER_BUFFER:
+	{
+		struct cvp_kmd_buffer *k, *u;
+
+		k = &kp->data.unregbuf;
+		u = &up->data.unregbuf;
+		if (get_user(k->type, &u->type) ||
+			get_user(k->index, &u->index) ||
+			get_user(k->fd, &u->fd) ||
+			get_user(k->size, &u->size) ||
+			get_user(k->offset, &u->offset) ||
+			get_user(k->pixelformat, &u->pixelformat) ||
+			get_user(k->flags, &u->flags))
+			return -EFAULT;
+		for (i = 0; i < 5; i++)
+			if (get_user(k->reserved[i], &u->reserved[i]))
+				return -EFAULT;
+		break;
+	}
+	case CVP_KMD_SEND_CMD_PKT:
+	{
+		if (_get_pkt_hdr_from_user(up, &pkt_hdr)) {
+			dprintk(CVP_ERR, "Invalid syscall: %x, %x, %x\n",
+				kp->type, pkt_hdr.size, pkt_hdr.packet_type);
+			return -EFAULT;
+		}
+
+		rc = _copy_pkt_from_user(kp, up, (pkt_hdr.size >> 2));
+		break;
+	}
+	case CVP_KMD_SEND_FENCE_CMD_PKT:
+	{
+		if (_get_fence_pkt_hdr_from_user(up, &pkt_hdr)) {
+			dprintk(CVP_ERR, "Invalid syscall: %x, %x, %x\n",
+				kp->type, pkt_hdr.size, pkt_hdr.packet_type);
+			return -EFAULT;
+		}
+		dprintk(CVP_HFI, "system call cmd pkt: %d 0x%x\n",
+				pkt_hdr.size, pkt_hdr.packet_type);
+
+		pkt_idx = get_pkt_index(&pkt_hdr);
+		if (pkt_idx < 0) {
+			dprintk(CVP_ERR, "%s incorrect packet %d, %x\n",
+				__func__,
+				pkt_hdr.size,
+				pkt_hdr.packet_type);
+			return -EFAULT;
+		}
+
+		rc = _copy_fence_pkt_from_user(kp, up);
+		break;
+	}
+	case CVP_KMD_RECEIVE_MSG_PKT:
+		break;
+	case CVP_KMD_SESSION_CONTROL:
+	{
+		struct cvp_kmd_session_control *k, *u;
+
+		k = &kp->data.session_ctrl;
+		u = &up->data.session_ctrl;
+
+		rc = _get_session_ctrl_from_user(k, u);
+		break;
+	}
+	case CVP_KMD_GET_SYS_PROPERTY:
+	{
+		if (_copy_sysprop_from_user(kp, up)) {
+			dprintk(CVP_ERR, "Failed to get sysprop from user\n");
+			return -EFAULT;
+		}
+		break;
+	}
+	case CVP_KMD_SET_SYS_PROPERTY:
+	{
+		if (_copy_sysprop_from_user(kp, up)) {
+			dprintk(CVP_ERR, "Failed to set sysprop from user\n");
+			return -EFAULT;
+		}
+		break;
+	}
+	case CVP_KMD_FLUSH_ALL:
+	case CVP_KMD_UPDATE_POWER:
+		break;
+	case CVP_KMD_FLUSH_FRAME:
+	{
+		if (_copy_frameid_from_user(kp, up))
+			return -EFAULT;
+		break;
+	}
+	default:
+		dprintk(CVP_ERR, "%s: unknown cmd type 0x%x\n",
+			__func__, kp->type);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+static int _put_user_session_info(
+		struct cvp_kmd_session_info *k,
+		struct cvp_kmd_session_info __user *u)
+{
+	int i;
+
+	if (put_user(k->session_id, &u->session_id))
+		return -EFAULT;
+
+	for (i = 0; i < 10; i++)
+		if (put_user(k->reserved[i], &u->reserved[i]))
+			return -EFAULT;
+
+	return 0;
+}
+
+static int convert_to_user(struct cvp_kmd_arg *kp, unsigned long arg)
+{
+	int rc = 0;
+	int i, size = get_msg_size() >> 2;
+	struct cvp_kmd_arg __user *up = (struct cvp_kmd_arg __user *)arg;
+	struct cvp_hal_session_cmd_pkt pkt_hdr;
+
+	if (!kp || !up) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	if (put_user(kp->type, &up->type))
+		return -EFAULT;
+
+	switch (kp->type) {
+	case CVP_KMD_RECEIVE_MSG_PKT:
+	{
+		struct cvp_kmd_hfi_packet *k, *u;
+
+		k = &kp->data.hfi_pkt;
+		u = &up->data.hfi_pkt;
+		for (i = 0; i < size; i++)
+			if (put_user(k->pkt_data[i], &u->pkt_data[i]))
+				return -EFAULT;
+		break;
+	}
+	case CVP_KMD_GET_SESSION_INFO:
+	{
+		struct cvp_kmd_session_info *k;
+		struct cvp_kmd_session_info __user *u;
+
+		k = &kp->data.session;
+		u = &up->data.session;
+		if (_put_user_session_info(k, u)) {
+			dprintk(CVP_ERR, "fail to copy sess info to user\n");
+			return -EFAULT;
+		}
+
+		break;
+	}
+	case CVP_KMD_REGISTER_BUFFER:
+	{
+		struct cvp_kmd_buffer *k, *u;
+
+		k = &kp->data.regbuf;
+		u = &up->data.regbuf;
+		if (put_user(k->type, &u->type) ||
+			put_user(k->index, &u->index) ||
+			put_user(k->fd, &u->fd) ||
+			put_user(k->size, &u->size) ||
+			put_user(k->offset, &u->offset) ||
+			put_user(k->pixelformat, &u->pixelformat) ||
+			put_user(k->flags, &u->flags))
+			return -EFAULT;
+		for (i = 0; i < 5; i++)
+			if (put_user(k->reserved[i], &u->reserved[i]))
+				return -EFAULT;
+		break;
+	}
+	case CVP_KMD_UNREGISTER_BUFFER:
+	{
+		struct cvp_kmd_buffer *k, *u;
+
+		k = &kp->data.unregbuf;
+		u = &up->data.unregbuf;
+		if (put_user(k->type, &u->type) ||
+			put_user(k->index, &u->index) ||
+			put_user(k->fd, &u->fd) ||
+			put_user(k->size, &u->size) ||
+			put_user(k->offset, &u->offset) ||
+			put_user(k->pixelformat, &u->pixelformat) ||
+			put_user(k->flags, &u->flags))
+			return -EFAULT;
+		for (i = 0; i < 5; i++)
+			if (put_user(k->reserved[i], &u->reserved[i]))
+				return -EFAULT;
+		break;
+	}
+	case CVP_KMD_SEND_CMD_PKT:
+	{
+		if (_get_pkt_hdr_from_user(up, &pkt_hdr))
+			return -EFAULT;
+
+		dprintk(CVP_HFI, "Send user cmd pkt: %d %d\n",
+				pkt_hdr.size, pkt_hdr.packet_type);
+		rc = _copy_pkt_to_user(kp, up, (pkt_hdr.size >> 2));
+		break;
+	}
+	case CVP_KMD_SEND_FENCE_CMD_PKT:
+	{
+		if (_get_fence_pkt_hdr_from_user(up, &pkt_hdr))
+			return -EFAULT;
+
+		dprintk(CVP_HFI, "Send user cmd pkt: %d %d\n",
+				pkt_hdr.size, pkt_hdr.packet_type);
+
+		rc = _copy_fence_pkt_to_user(kp, up);
+		break;
+	}
+	case CVP_KMD_SESSION_CONTROL:
+	{
+		struct cvp_kmd_session_control *k, *u;
+
+		k = &kp->data.session_ctrl;
+		u = &up->data.session_ctrl;
+		rc = _copy_session_ctrl_to_user(k, u);
+		break;
+	}
+	case CVP_KMD_GET_SYS_PROPERTY:
+	{
+		if (_copy_sysprop_to_user(kp, up)) {
+			dprintk(CVP_ERR, "Fail to copy sysprop to user\n");
+			return -EFAULT;
+		}
+		break;
+	}
+	case CVP_KMD_FLUSH_ALL:
+	case CVP_KMD_FLUSH_FRAME:
+	case CVP_KMD_SET_SYS_PROPERTY:
+	case CVP_KMD_UPDATE_POWER:
+		break;
+	default:
+		dprintk(CVP_ERR, "%s: unknown cmd type 0x%x\n",
+			__func__, kp->type);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
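+/*
+ * Common ioctl path: copy the user argument into a kernel-side
+ * cvp_kmd_arg (convert_from_user), dispatch it through
+ * msm_cvp_private(), then write results back (convert_to_user).
+ */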
+static long cvp_ioctl(struct msm_cvp_inst *inst,
+	unsigned int cmd, unsigned long arg)
+{
+	int rc;
+	struct cvp_kmd_arg *karg;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	karg = kzalloc(sizeof(*karg), GFP_KERNEL);
+	if (!karg)
+		return -ENOMEM;
+
+	if (convert_from_user(karg, arg, inst)) {
+		dprintk(CVP_ERR, "%s: failed to get from user cmd %x\n",
+			__func__, karg->type);
+		kfree(karg);
+		return -EFAULT;
+	}
+
+	rc = msm_cvp_private((void *)inst, cmd, karg);
+	if (rc) {
+		dprintk(CVP_ERR, "%s: failed cmd type %x %d\n",
+			__func__, karg->type, rc);
+		kfree(karg);
+		return rc;
+	}
+
+	if (convert_to_user(karg, arg)) {
+		dprintk(CVP_ERR, "%s: failed to copy to user cmd %x\n",
+			__func__, karg->type);
+		kfree(karg);
+		return -EFAULT;
+	}
+
+	kfree(karg);
+	return rc;
+}
+
+long cvp_unblocked_ioctl(struct file *filp,
+		unsigned int cmd, unsigned long arg)
+{
+	struct msm_cvp_inst *inst;
+
+	if (!filp || !filp->private_data) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	inst = filp->private_data;
+	return cvp_ioctl(inst, cmd, arg);
+}
+
+long cvp_compat_ioctl(struct file *filp,
+		unsigned int cmd, unsigned long arg)
+{
+	struct msm_cvp_inst *inst;
+
+	if (!filp || !filp->private_data) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	inst = filp->private_data;
+	return cvp_ioctl(inst, cmd, (unsigned long)compat_ptr(arg));
+}

+ 233 - 0
msm/eva/msm_cvp_platform.c

@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/io.h>
+#include <linux/of_fdt.h>
+#include "msm_cvp_internal.h"
+#include "msm_cvp_debug.h"
+
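+/* DDR types as reported by of_fdt_get_ddrtype() */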
+#define DDR_TYPE_LPDDR4 0x6
+#define DDR_TYPE_LPDDR4X 0x7
+#define DDR_TYPE_LPDDR4Y 0x8
+#define DDR_TYPE_LPDDR5 0x9
+
+#define UBWC_CONFIG(mco, mlo, hbo, bslo, bso, rs, mc, ml, hbb, bsl, bsp) \
+{	\
+	.override_bit_info.max_channel_override = mco,	\
+	.override_bit_info.mal_length_override = mlo,	\
+	.override_bit_info.hb_override = hbo,	\
+	.override_bit_info.bank_swzl_level_override = bslo,	\
+	.override_bit_info.bank_spreading_override = bso,	\
+	.override_bit_info.reserved = rs,	\
+	.max_channels = mc,	\
+	.mal_length = ml,	\
+	.highest_bank_bit = hbb,	\
+	.bank_swzl_level = bsl,	\
+	.bank_spreading = bsp,	\
+}
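+
+/*
+ * Example: UBWC_CONFIG(1, 1, 1, 0, 0, 0, 8, 32, 16, 0, 0) marks the
+ * max-channel, MAL-length and highest-bank-bit overrides as valid and
+ * programs max_channels = 8, mal_length = 32, highest_bank_bit = 16
+ * (see kona_ubwc_data below).
+ */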
+
+static struct msm_cvp_common_data default_common_data[] = {
+	{
+		.key = "qcom,never-unload-fw",
+		.value = 1,
+	},
+};
+
+static struct msm_cvp_common_data sm8250_common_data[] = {
+	{
+		.key = "qcom,never-unload-fw",
+		.value = 1,
+	},
+	{
+		.key = "qcom,sw-power-collapse",
+		.value = 1,
+	},
+	{
+		.key = "qcom,domain-attr-non-fatal-faults",
+		.value = 1,
+	},
+	{
+		.key = "qcom,max-secure-instances",
+		.value = 2,             /*
+					 * As per design, the driver allows a
+					 * 3rd instance as well, since the
+					 * secure flags are updated later for
+					 * the current instance. Hence the
+					 * total number of secure sessions is
+					 * max-secure-instances + 1.
+					 */
+	},
+	{
+		.key = "qcom,max-hw-load",
+		.value = 3916800,       /*
+					 * 1920x1088/256 MBs@480fps. It is less
+					 * than any other usecase (ex:
+					 * 3840x2160@120fps, 4096x2160@96fps,
+					 * 7680x4320@30fps)
+					 */
+	},
+	{
+		.key = "qcom,power-collapse-delay",
+		.value = 3000,
+	},
+	{
+		.key = "qcom,hw-resp-timeout",
+		.value = 2000,
+	},
+	{
+		.key = "qcom,dsp-resp-timeout",
+		.value = 1000
+	},
+	{
+		.key = "qcom,debug-timeout",
+		.value = 0,
+	}
+};
+
+static struct msm_cvp_common_data sm8350_common_data[] = {
+	{
+		.key = "qcom,auto-pil",
+		.value = 1,
+	},
+	{
+		.key = "qcom,never-unload-fw",
+		.value = 1,
+	},
+	{
+		.key = "qcom,sw-power-collapse",
+		.value = 1,
+	},
+	{
+		.key = "qcom,domain-attr-non-fatal-faults",
+		.value = 1,
+	},
+	{
+		.key = "qcom,max-secure-instances",
+		.value = 2,             /*
+					 * As per design, the driver allows a
+					 * 3rd instance as well, since the
+					 * secure flags are updated later for
+					 * the current instance. Hence the
+					 * total number of secure sessions is
+					 * max-secure-instances + 1.
+					 */
+	},
+	{
+		.key = "qcom,max-hw-load",
+		.value = 3916800,       /*
+					 * 1920x1088/256 MBs@480fps. It is less
+					 * than any other usecase (ex:
+					 * 3840x2160@120fps, 4096x2160@96fps,
+					 * 7680x4320@30fps)
+					 */
+	},
+	{
+		.key = "qcom,power-collapse-delay",
+		.value = 3000,
+	},
+	{
+		.key = "qcom,hw-resp-timeout",
+		.value = 2000,
+	},
+	{
+		.key = "qcom,dsp-resp-timeout",
+		.value = 1000
+	},
+	{
+		.key = "qcom,debug-timeout",
+		.value = 0,
+	}
+};
+
+
+/* Default UBWC config for LPDDR5 */
+static struct msm_cvp_ubwc_config_data kona_ubwc_data[] = {
+	UBWC_CONFIG(1, 1, 1, 0, 0, 0, 8, 32, 16, 0, 0),
+};
+
+
+static struct msm_cvp_platform_data default_data = {
+	.common_data = default_common_data,
+	.common_data_length = ARRAY_SIZE(default_common_data),
+	.sku_version = 0,
+	.vpu_ver = VPU_VERSION_5,
+	.ubwc_config = NULL,
+};
+
+static struct msm_cvp_platform_data sm8250_data = {
+	.common_data = sm8250_common_data,
+	.common_data_length = ARRAY_SIZE(sm8250_common_data),
+	.sku_version = 0,
+	.vpu_ver = VPU_VERSION_5,
+	.ubwc_config = kona_ubwc_data,
+};
+
+static struct msm_cvp_platform_data sm8350_data = {
+	.common_data = sm8350_common_data,
+	.common_data_length = ARRAY_SIZE(sm8350_common_data),
+	.sku_version = 0,
+	.vpu_ver = VPU_VERSION_5,
+	.ubwc_config = kona_ubwc_data,
+};
+
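+/*
+ * Hypothetical devicetree node matched by this table:
+ *
+ *   msm_cvp: qcom,msm-cvp@ab00000 {
+ *       compatible = "qcom,lahaina-cvp";
+ *   };
+ */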
+static const struct of_device_id msm_cvp_dt_match[] = {
+	{
+		.compatible = "qcom,lahaina-cvp",
+		.data = &sm8350_data,
+	},
+	{
+		.compatible = "qcom,kona-cvp",
+		.data = &sm8250_data,
+	},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, msm_cvp_dt_match);
+
+void *cvp_get_drv_data(struct device *dev)
+{
+	struct msm_cvp_platform_data *driver_data;
+	const struct of_device_id *match;
+	int ddr_type = DDR_TYPE_LPDDR5;
+
+	driver_data = &default_data;
+
+	if (!IS_ENABLED(CONFIG_OF) || !dev->of_node)
+		goto exit;
+
+	match = of_match_node(msm_cvp_dt_match, dev->of_node);
+
+	if (!match)
+		return NULL;
+
+	driver_data = (struct msm_cvp_platform_data *)match->data;
+
+	if (!strcmp(match->compatible, "qcom,kona-cvp")) {
+		ddr_type = of_fdt_get_ddrtype();
+		if (ddr_type == -ENOENT) {
+			dprintk(CVP_ERR,
+				"Failed to get ddr type, use LPDDR5\n");
+		}
+
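+		/*
+		 * The default UBWC config above targets LPDDR5 (highest bank
+		 * bit 16); LPDDR4-class parts need it lowered to 15.
+		 */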
+		if (driver_data->ubwc_config &&
+			(ddr_type == DDR_TYPE_LPDDR4 ||
+			ddr_type == DDR_TYPE_LPDDR4X ||
+			ddr_type == DDR_TYPE_LPDDR4Y))
+			driver_data->ubwc_config->highest_bank_bit = 15;
+	}
+exit:
+	return driver_data;
+}

+ 1073 - 0
msm/eva/msm_cvp_res_parse.c

@@ -0,0 +1,1073 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/iommu.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/of_reserved_mem.h>
+#include "msm_cvp_debug.h"
+#include "msm_cvp_resources.h"
+#include "msm_cvp_res_parse.h"
+#include "soc/qcom/secure_buffer.h"
+
+enum clock_properties {
+	CLOCK_PROP_HAS_SCALING = 1 << 0,
+	CLOCK_PROP_HAS_MEM_RETENTION    = 1 << 1,
+};
+
+#define PERF_GOV "performance"
+
+static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
+{
+	return NULL;
+}
+
+static size_t get_u32_array_num_elements(struct device_node *np,
+					char *name)
+{
+	int len;
+	size_t num_elements = 0;
+
+	if (!of_get_property(np, name, &len)) {
+		dprintk(CVP_ERR, "Failed to read %s from device tree\n",
+			name);
+		goto fail_read;
+	}
+
+	num_elements = len / sizeof(u32);
+	if (!num_elements) {
+		dprintk(CVP_ERR, "%s not specified in device tree\n",
+			name);
+		goto fail_read;
+	}
+	return num_elements;
+
+fail_read:
+	return 0;
+}
+
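+/*
+ * Most of the tables freed below are devm-allocated, so the helpers only
+ * clear the pointers; the bus table is krealloc'd in msm_cvp_populate_bus()
+ * and is the one that needs an explicit kfree.
+ */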
+static inline void msm_cvp_free_allowed_clocks_table(
+		struct msm_cvp_platform_resources *res)
+{
+	res->allowed_clks_tbl = NULL;
+}
+
+static inline void msm_cvp_free_cycles_per_mb_table(
+		struct msm_cvp_platform_resources *res)
+{
+	res->clock_freq_tbl.clk_prof_entries = NULL;
+}
+
+static inline void msm_cvp_free_reg_table(
+			struct msm_cvp_platform_resources *res)
+{
+	res->reg_set.reg_tbl = NULL;
+}
+
+static inline void msm_cvp_free_qdss_addr_table(
+			struct msm_cvp_platform_resources *res)
+{
+	res->qdss_addr_set.addr_tbl = NULL;
+}
+
+static inline void msm_cvp_free_bus_vectors(
+			struct msm_cvp_platform_resources *res)
+{
+	kfree(res->bus_set.bus_tbl);
+	res->bus_set.bus_tbl = NULL;
+	res->bus_set.count = 0;
+}
+
+static inline void msm_cvp_free_regulator_table(
+			struct msm_cvp_platform_resources *res)
+{
+	int c = 0;
+
+	for (c = 0; c < res->regulator_set.count; ++c) {
+		struct regulator_info *rinfo =
+			&res->regulator_set.regulator_tbl[c];
+
+		rinfo->name = NULL;
+	}
+
+	res->regulator_set.regulator_tbl = NULL;
+	res->regulator_set.count = 0;
+}
+
+static inline void msm_cvp_free_clock_table(
+			struct msm_cvp_platform_resources *res)
+{
+	res->clock_set.clock_tbl = NULL;
+	res->clock_set.count = 0;
+}
+
+void msm_cvp_free_platform_resources(
+			struct msm_cvp_platform_resources *res)
+{
+	msm_cvp_free_clock_table(res);
+	msm_cvp_free_regulator_table(res);
+	msm_cvp_free_allowed_clocks_table(res);
+	msm_cvp_free_reg_table(res);
+	msm_cvp_free_qdss_addr_table(res);
+	msm_cvp_free_bus_vectors(res);
+}
+
+static int msm_cvp_load_reg_table(struct msm_cvp_platform_resources *res)
+{
+	struct reg_set *reg_set;
+	struct platform_device *pdev = res->pdev;
+	int i;
+	int rc = 0;
+
+	if (!of_find_property(pdev->dev.of_node, "qcom,reg-presets", NULL)) {
+		/*
+		 * qcom,reg-presets is an optional property.  It likely won't be
+		 * present if we don't have any register settings to program
+		 */
+		dprintk(CVP_CORE, "qcom,reg-presets not found\n");
+		return 0;
+	}
+
+	reg_set = &res->reg_set;
+	reg_set->count = get_u32_array_num_elements(pdev->dev.of_node,
+			"qcom,reg-presets");
+	reg_set->count /= sizeof(*reg_set->reg_tbl) / sizeof(u32);
+
+	if (!reg_set->count) {
+		dprintk(CVP_CORE, "no elements in reg set\n");
+		return rc;
+	}
+
+	reg_set->reg_tbl = devm_kzalloc(&pdev->dev, reg_set->count *
+			sizeof(*(reg_set->reg_tbl)), GFP_KERNEL);
+	if (!reg_set->reg_tbl) {
+		dprintk(CVP_ERR, "%s Failed to alloc register table\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	if (of_property_read_u32_array(pdev->dev.of_node, "qcom,reg-presets",
+		(u32 *)reg_set->reg_tbl, reg_set->count * 2)) {
+		dprintk(CVP_ERR, "Failed to read register table\n");
+		msm_cvp_free_reg_table(res);
+		return -EINVAL;
+	}
+	for (i = 0; i < reg_set->count; i++) {
+		dprintk(CVP_CORE,
+			"reg = %x, value = %x\n",
+			reg_set->reg_tbl[i].reg,
+			reg_set->reg_tbl[i].value
+		);
+	}
+	return rc;
+}
+
+static int msm_cvp_load_qdss_table(struct msm_cvp_platform_resources *res)
+{
+	struct addr_set *qdss_addr_set;
+	struct platform_device *pdev = res->pdev;
+	int i;
+	int rc = 0;
+
+	if (!of_find_property(pdev->dev.of_node, "qcom,qdss-presets", NULL)) {
+		/*
+		 * qcom,qdss-presets is an optional property. It likely won't be
+		 * present if we don't have any QDSS addresses to program
+		 */
+		dprintk(CVP_CORE, "qcom,qdss-presets not found\n");
+		return rc;
+	}
+
+	qdss_addr_set = &res->qdss_addr_set;
+	qdss_addr_set->count = get_u32_array_num_elements(pdev->dev.of_node,
+					"qcom,qdss-presets");
+	qdss_addr_set->count /= sizeof(*qdss_addr_set->addr_tbl) / sizeof(u32);
+
+	if (!qdss_addr_set->count) {
+		dprintk(CVP_CORE, "no elements in qdss reg set\n");
+		return rc;
+	}
+
+	qdss_addr_set->addr_tbl = devm_kzalloc(&pdev->dev,
+			qdss_addr_set->count * sizeof(*qdss_addr_set->addr_tbl),
+			GFP_KERNEL);
+	if (!qdss_addr_set->addr_tbl) {
+		dprintk(CVP_ERR, "%s Failed to alloc register table\n",
+			__func__);
+		rc = -ENOMEM;
+		goto err_qdss_addr_tbl;
+	}
+
+	rc = of_property_read_u32_array(pdev->dev.of_node, "qcom,qdss-presets",
+		(u32 *)qdss_addr_set->addr_tbl, qdss_addr_set->count * 2);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to read qdss address table\n");
+		msm_cvp_free_qdss_addr_table(res);
+		rc = -EINVAL;
+		goto err_qdss_addr_tbl;
+	}
+
+	for (i = 0; i < qdss_addr_set->count; i++) {
+		dprintk(CVP_CORE, "qdss addr = %x, value = %x\n",
+				qdss_addr_set->addr_tbl[i].start,
+				qdss_addr_set->addr_tbl[i].size);
+	}
+err_qdss_addr_tbl:
+	return rc;
+}
+
+static int msm_cvp_load_subcache_info(struct msm_cvp_platform_resources *res)
+{
+	int rc = 0, num_subcaches = 0, c;
+	struct platform_device *pdev = res->pdev;
+	struct subcache_set *subcaches = &res->subcache_set;
+
+	num_subcaches = of_property_count_strings(pdev->dev.of_node,
+		"cache-slice-names");
+	if (num_subcaches <= 0) {
+		dprintk(CVP_CORE, "No subcaches found\n");
+		goto err_load_subcache_table_fail;
+	}
+
+	subcaches->subcache_tbl = devm_kzalloc(&pdev->dev,
+		sizeof(*subcaches->subcache_tbl) * num_subcaches, GFP_KERNEL);
+	if (!subcaches->subcache_tbl) {
+		dprintk(CVP_ERR,
+			"Failed to allocate memory for subcache tbl\n");
+		rc = -ENOMEM;
+		goto err_load_subcache_table_fail;
+	}
+
+	subcaches->count = num_subcaches;
+	dprintk(CVP_CORE, "Found %d subcaches\n", num_subcaches);
+
+	for (c = 0; c < num_subcaches; ++c) {
+		struct subcache_info *vsc = &res->subcache_set.subcache_tbl[c];
+
+		of_property_read_string_index(pdev->dev.of_node,
+			"cache-slice-names", c, &vsc->name);
+	}
+
+	res->sys_cache_present = true;
+
+	return 0;
+
+err_load_subcache_table_fail:
+	res->sys_cache_present = false;
+	subcaches->count = 0;
+	subcaches->subcache_tbl = NULL;
+
+	return rc;
+}
+
+/**
+ * msm_cvp_load_u32_table() - load dtsi table entries
+ * @pdev: A pointer to the platform device.
+ * @of_node:      A pointer to the device node.
+ * @table_name:   A pointer to the dtsi table entry name.
+ * @struct_size:  The size of a single entry in the dtsi table.
+ * @table:        A pointer to the table pointer which needs to be
+ *                filled by the dtsi table entries.
+ * @num_elements: Number of elements pointer which needs to be filled
+ *                with the number of elements in the table.
+ *
+ * This is a generic implementation to load single or multiple array
+ * table from dtsi. The array elements should be of size equal to u32.
+ *
+ * Return:        Return '0' for success else appropriate error value.
+ */
+int msm_cvp_load_u32_table(struct platform_device *pdev,
+		struct device_node *of_node, char *table_name, int struct_size,
+		u32 **table, u32 *num_elements)
+{
+	int rc = 0, num_elemts = 0;
+	u32 *ptbl = NULL;
+
+	if (!of_find_property(of_node, table_name, NULL)) {
+		dprintk(CVP_CORE, "%s not found\n", table_name);
+		return 0;
+	}
+
+	num_elemts = get_u32_array_num_elements(of_node, table_name);
+	if (!num_elemts) {
+		dprintk(CVP_ERR, "no elements in %s\n", table_name);
+		return 0;
+	}
+	num_elemts /= struct_size / sizeof(u32);
+
+	ptbl = devm_kzalloc(&pdev->dev, num_elemts * struct_size, GFP_KERNEL);
+	if (!ptbl) {
+		dprintk(CVP_ERR, "Failed to alloc table %s\n", table_name);
+		return -ENOMEM;
+	}
+
+	if (of_property_read_u32_array(of_node, table_name, ptbl,
+			num_elemts * struct_size / sizeof(u32))) {
+		dprintk(CVP_ERR, "Failed to read %s\n", table_name);
+		return -EINVAL;
+	}
+
+	*table = ptbl;
+	if (num_elements)
+		*num_elements = num_elemts;
+
+	return rc;
+}
+EXPORT_SYMBOL(msm_cvp_load_u32_table);
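+
+/*
+ * Usage sketch: a dtsi property such as (rates hypothetical)
+ *
+ *   qcom,allowed-clock-rates = <239999999 338000000 444000000>;
+ *
+ * is loaded with
+ *
+ *   msm_cvp_load_u32_table(pdev, pdev->dev.of_node,
+ *           "qcom,allowed-clock-rates",
+ *           sizeof(*res->allowed_clks_tbl),
+ *           (u32 **)&res->allowed_clks_tbl,
+ *           &res->allowed_clks_tbl_size);
+ *
+ * as done by msm_cvp_load_allowed_clocks_table() below.
+ */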
+
+/* A comparator to compare loads (needed later on) */
+static int cmp(const void *a, const void *b)
+{
+	return ((struct allowed_clock_rates_table *)a)->clock_rate -
+		((struct allowed_clock_rates_table *)b)->clock_rate;
+}
+
+static int msm_cvp_load_allowed_clocks_table(
+		struct msm_cvp_platform_resources *res)
+{
+	int rc = 0;
+	struct platform_device *pdev = res->pdev;
+
+	if (!of_find_property(pdev->dev.of_node,
+			"qcom,allowed-clock-rates", NULL)) {
+		dprintk(CVP_CORE, "qcom,allowed-clock-rates not found\n");
+		return 0;
+	}
+
+	rc = msm_cvp_load_u32_table(pdev, pdev->dev.of_node,
+				"qcom,allowed-clock-rates",
+				sizeof(*res->allowed_clks_tbl),
+				(u32 **)&res->allowed_clks_tbl,
+				&res->allowed_clks_tbl_size);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"%s: failed to read allowed clocks table\n", __func__);
+		return rc;
+	}
+
+	sort(res->allowed_clks_tbl, res->allowed_clks_tbl_size,
+		 sizeof(*res->allowed_clks_tbl), cmp, NULL);
+
+	return 0;
+}
+
+static int msm_cvp_populate_mem_cdsp(struct device *dev,
+		struct msm_cvp_platform_resources *res)
+{
+	struct device_node *mem_node;
+	int ret;
+
+	mem_node = of_parse_phandle(dev->of_node, "memory-region", 0);
+	if (mem_node) {
+		ret = of_reserved_mem_device_init_by_idx(dev,
+				dev->of_node, 0);
+		of_node_put(mem_node);
+		if (ret) {
+			dprintk(CVP_ERR,
+				"Failed to initialize reserved mem, ret %d\n",
+				ret);
+			return ret;
+		}
+	}
+	res->mem_cdsp.dev = dev;
+
+	return 0;
+}
+
+static int msm_cvp_populate_bus(struct device *dev,
+		struct msm_cvp_platform_resources *res)
+{
+	struct bus_set *buses = &res->bus_set;
+	const char *temp_name = NULL;
+	struct bus_info *bus = NULL, *temp_table;
+	u32 range[2];
+	int rc = 0;
+
+	temp_table = krealloc(buses->bus_tbl, sizeof(*temp_table) *
+			(buses->count + 1), GFP_KERNEL);
+	if (!temp_table) {
+		dprintk(CVP_ERR, "%s: Failed to allocate memory", __func__);
+		rc = -ENOMEM;
+		goto err_bus;
+	}
+
+	buses->bus_tbl = temp_table;
+	bus = &buses->bus_tbl[buses->count];
+
+	memset(bus, 0x0, sizeof(struct bus_info));
+
+	rc = of_property_read_string(dev->of_node, "label", &temp_name);
+	if (rc) {
+		dprintk(CVP_ERR, "'label' not found in node\n");
+		goto err_bus;
+	}
+	/* need a non-const version of name, hence copying it over */
+	bus->name = devm_kstrdup(dev, temp_name, GFP_KERNEL);
+	if (!bus->name) {
+		rc = -ENOMEM;
+		goto err_bus;
+	}
+
+	rc = of_property_read_u32(dev->of_node, "qcom,bus-master",
+			&bus->master);
+	if (rc) {
+		dprintk(CVP_ERR, "'qcom,bus-master' not found in node\n");
+		goto err_bus;
+	}
+
+	rc = of_property_read_u32(dev->of_node, "qcom,bus-slave", &bus->slave);
+	if (rc) {
+		dprintk(CVP_ERR, "'qcom,bus-slave' not found in node\n");
+		goto err_bus;
+	}
+
+	rc = of_property_read_string(dev->of_node, "qcom,bus-governor",
+			&bus->governor);
+	if (rc) {
+		rc = 0;
+		dprintk(CVP_CORE,
+				"'qcom,bus-governor' not found, default to performance governor\n");
+		bus->governor = PERF_GOV;
+	}
+
+	if (!strcmp(bus->governor, PERF_GOV))
+		bus->is_prfm_gov_used = true;
+
+	rc = of_property_read_u32_array(dev->of_node, "qcom,bus-range-kbps",
+			range, ARRAY_SIZE(range));
+	if (rc) {
+		rc = 0;
+		dprintk(CVP_CORE,
+				"'qcom,range' not found defaulting to <0 INT_MAX>\n");
+		range[0] = 0;
+		range[1] = INT_MAX;
+	}
+
+	bus->range[0] = range[0]; /* min */
+	bus->range[1] = range[1]; /* max */
+
+	buses->count++;
+	bus->dev = dev;
+	dprintk(CVP_CORE, "Found bus %s [%d->%d] with governor %s\n",
+			bus->name, bus->master, bus->slave, bus->governor);
+err_bus:
+	return rc;
+}
+
+static int msm_cvp_load_regulator_table(
+		struct msm_cvp_platform_resources *res)
+{
+	int rc = 0;
+	struct platform_device *pdev = res->pdev;
+	struct regulator_set *regulators = &res->regulator_set;
+	struct device_node *domains_parent_node = NULL;
+	struct property *domains_property = NULL;
+	int reg_count = 0;
+
+	regulators->count = 0;
+	regulators->regulator_tbl = NULL;
+
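+	/*
+	 * Regulators are discovered by scanning the node's properties for
+	 * the standard "-supply" suffix; e.g. a (hypothetical) "cvp-supply"
+	 * phandle would yield a regulator named "cvp".
+	 */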
+	domains_parent_node = pdev->dev.of_node;
+	for_each_property_of_node(domains_parent_node, domains_property) {
+		const char *search_string = "-supply";
+		char *supply;
+		bool matched = false;
+
+		/* check if current property is possibly a regulator */
+		supply = strnstr(domains_property->name, search_string,
+				strlen(domains_property->name) + 1);
+		matched = supply && (*(supply + strlen(search_string)) == '\0');
+		if (!matched)
+			continue;
+
+		reg_count++;
+	}
+
+	regulators->regulator_tbl = devm_kzalloc(&pdev->dev,
+			sizeof(*regulators->regulator_tbl) *
+			reg_count, GFP_KERNEL);
+
+	if (!regulators->regulator_tbl) {
+		rc = -ENOMEM;
+		dprintk(CVP_ERR,
+			"Failed to alloc memory for regulator table\n");
+		goto err_reg_tbl_alloc;
+	}
+
+	for_each_property_of_node(domains_parent_node, domains_property) {
+		const char *search_string = "-supply";
+		char *supply;
+		bool matched = false;
+		struct device_node *regulator_node = NULL;
+		struct regulator_info *rinfo = NULL;
+
+		/* check if current property is possibly a regulator */
+		supply = strnstr(domains_property->name, search_string,
+				strlen(domains_property->name) + 1);
+		matched = supply && (supply[strlen(search_string)] == '\0');
+		if (!matched)
+			continue;
+
+		/* make sure prop isn't being misused */
+		regulator_node = of_parse_phandle(domains_parent_node,
+				domains_property->name, 0);
+		if (IS_ERR(regulator_node)) {
+			dprintk(CVP_WARN, "%s is not a phandle\n",
+					domains_property->name);
+			continue;
+		}
+		regulators->count++;
+
+		/* populate regulator info */
+		rinfo = &regulators->regulator_tbl[regulators->count - 1];
+		rinfo->name = devm_kzalloc(&pdev->dev,
+			(supply - domains_property->name) + 1, GFP_KERNEL);
+		if (!rinfo->name) {
+			rc = -ENOMEM;
+			dprintk(CVP_ERR,
+					"Failed to alloc memory for regulator name\n");
+			goto err_reg_name_alloc;
+		}
+		strlcpy(rinfo->name, domains_property->name,
+			(supply - domains_property->name) + 1);
+
+		rinfo->has_hw_power_collapse = of_property_read_bool(
+			regulator_node, "qcom,support-hw-trigger");
+
+		dprintk(CVP_CORE, "Found regulator %s: h/w collapse = %s\n",
+				rinfo->name,
+				rinfo->has_hw_power_collapse ? "yes" : "no");
+	}
+
+	if (!regulators->count)
+		dprintk(CVP_CORE, "No regulators found");
+
+	return 0;
+
+err_reg_name_alloc:
+err_reg_tbl_alloc:
+	msm_cvp_free_regulator_table(res);
+	return rc;
+}
+
+static int msm_cvp_load_clock_table(
+		struct msm_cvp_platform_resources *res)
+{
+	int rc = 0, num_clocks = 0, c = 0;
+	struct platform_device *pdev = res->pdev;
+	int *clock_props = NULL;
+	struct clock_set *clocks = &res->clock_set;
+
+	num_clocks = of_property_count_strings(pdev->dev.of_node,
+				"clock-names");
+	if (num_clocks <= 0) {
+		dprintk(CVP_CORE, "No clocks found\n");
+		clocks->count = 0;
+		rc = 0;
+		goto err_load_clk_table_fail;
+	}
+
+	clock_props = devm_kzalloc(&pdev->dev, num_clocks *
+			sizeof(*clock_props), GFP_KERNEL);
+	if (!clock_props) {
+		dprintk(CVP_ERR, "No memory to read clock properties\n");
+		rc = -ENOMEM;
+		goto err_load_clk_table_fail;
+	}
+
+	rc = of_property_read_u32_array(pdev->dev.of_node,
+				"qcom,clock-configs", clock_props,
+				num_clocks);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to read clock properties: %d\n", rc);
+		goto err_load_clk_prop_fail;
+	}
+
+	clocks->clock_tbl = devm_kzalloc(&pdev->dev, sizeof(*clocks->clock_tbl)
+			* num_clocks, GFP_KERNEL);
+	if (!clocks->clock_tbl) {
+		dprintk(CVP_ERR, "Failed to allocate memory for clock tbl\n");
+		rc = -ENOMEM;
+		goto err_load_clk_prop_fail;
+	}
+
+	clocks->count = num_clocks;
+	dprintk(CVP_CORE, "Found %d clocks\n", num_clocks);
+
+	for (c = 0; c < num_clocks; ++c) {
+		struct clock_info *vc = &res->clock_set.clock_tbl[c];
+
+		of_property_read_string_index(pdev->dev.of_node,
+				"clock-names", c, &vc->name);
+
+		if (clock_props[c] & CLOCK_PROP_HAS_SCALING) {
+			vc->has_scaling = true;
+		} else {
+			vc->count = 0;
+			vc->has_scaling = false;
+		}
+
+		if (clock_props[c] & CLOCK_PROP_HAS_MEM_RETENTION)
+			vc->has_mem_retention = true;
+		else
+			vc->has_mem_retention = false;
+
+		dprintk(CVP_CORE, "Found clock %s: scale-able = %s\n", vc->name,
+			vc->count ? "yes" : "no");
+	}
+
+	return 0;
+
+err_load_clk_prop_fail:
+err_load_clk_table_fail:
+	return rc;
+}
+
+#define MAX_CLK_RESETS 5
+
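+/*
+ * Each "reset-names" entry pairs with a "reset-power-status" entry giving
+ * the power state (enum power_state) in which that reset is applied; it is
+ * stored in reset_info.required_state.
+ */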
+static int msm_cvp_load_reset_table(
+		struct msm_cvp_platform_resources *res)
+{
+	struct platform_device *pdev = res->pdev;
+	struct reset_set *rst = &res->reset_set;
+	int num_clocks = 0, c = 0, ret = 0;
+	int pwr_stats[MAX_CLK_RESETS];
+
+	num_clocks = of_property_count_strings(pdev->dev.of_node,
+				"reset-names");
+	if (num_clocks <= 0 || num_clocks > MAX_CLK_RESETS) {
+		dprintk(CVP_ERR, "Num reset clocks out of range\n");
+		rst->count = 0;
+		return 0;
+	}
+
+	rst->reset_tbl = devm_kcalloc(&pdev->dev, num_clocks,
+			sizeof(*rst->reset_tbl), GFP_KERNEL);
+	if (!rst->reset_tbl)
+		return -ENOMEM;
+
+	rst->count = num_clocks;
+	dprintk(CVP_CORE, "Found %d reset clocks\n", num_clocks);
+	ret = of_property_read_u32_array(pdev->dev.of_node,
+				"reset-power-status", pwr_stats,
+				num_clocks);
+	if (ret) {
+		dprintk(CVP_ERR, "Failed to read reset pwr state: %d\n", ret);
+		devm_kfree(&pdev->dev, rst->reset_tbl);
+		return ret;
+	}
+
+	for (c = 0; c < num_clocks; ++c) {
+		struct reset_info *rc = &res->reset_set.reset_tbl[c];
+
+		of_property_read_string_index(pdev->dev.of_node,
+				"reset-names", c, &rc->name);
+		rc->required_state = pwr_stats[c];
+	}
+
+	return 0;
+}
+
+static int find_key_value(struct msm_cvp_platform_data *platform_data,
+	const char *key)
+{
+	int i = 0;
+	struct msm_cvp_common_data *common_data = platform_data->common_data;
+	int size = platform_data->common_data_length;
+
+	for (i = 0; i < size; i++) {
+		if (!strcmp(common_data[i].key, key))
+			return common_data[i].value;
+	}
+	return 0;
+}
+
+int cvp_read_platform_resources_from_drv_data(
+		struct msm_cvp_core *core)
+{
+	struct msm_cvp_platform_data *platform_data;
+	struct msm_cvp_platform_resources *res;
+	int rc = 0;
+
+	if (!core || !core->platform_data) {
+		dprintk(CVP_ERR, "%s Invalid data\n", __func__);
+		return -ENOENT;
+	}
+	platform_data = core->platform_data;
+	res = &core->resources;
+
+	res->sku_version = platform_data->sku_version;
+
+	res->fw_name = "evass";
+
+	dprintk(CVP_CORE, "Firmware filename: %s\n", res->fw_name);
+
+	res->auto_pil = find_key_value(platform_data,
+			"qcom,auto-pil");
+
+	res->max_load = find_key_value(platform_data,
+			"qcom,max-hw-load");
+
+	res->sw_power_collapsible = find_key_value(platform_data,
+			"qcom,sw-power-collapse");
+
+	res->never_unload_fw =  find_key_value(platform_data,
+			"qcom,never-unload-fw");
+
+	res->debug_timeout = find_key_value(platform_data,
+			"qcom,debug-timeout");
+
+	res->pm_qos_latency_us = find_key_value(platform_data,
+			"qcom,pm-qos-latency-us");
+
+	res->max_secure_inst_count = find_key_value(platform_data,
+			"qcom,max-secure-instances");
+
+	res->thermal_mitigable = find_key_value(platform_data,
+			"qcom,enable-thermal-mitigation");
+	res->msm_cvp_pwr_collapse_delay = find_key_value(platform_data,
+			"qcom,power-collapse-delay");
+	res->msm_cvp_firmware_unload_delay = find_key_value(platform_data,
+			"qcom,fw-unload-delay");
+	res->msm_cvp_hw_rsp_timeout = find_key_value(platform_data,
+			"qcom,hw-resp-timeout");
+	res->msm_cvp_dsp_rsp_timeout = find_key_value(platform_data,
+			"qcom,dsp-resp-timeout");
+	res->non_fatal_pagefaults = find_key_value(platform_data,
+			"qcom,domain-attr-non-fatal-faults");
+
+	res->vpu_ver = platform_data->vpu_ver;
+	res->ubwc_config = platform_data->ubwc_config;
+	return rc;
+}
+
+int cvp_read_platform_resources_from_dt(
+		struct msm_cvp_platform_resources *res)
+{
+	struct platform_device *pdev = res->pdev;
+	struct resource *kres = NULL;
+	int rc = 0;
+	uint32_t firmware_base = 0;
+
+	if (!pdev->dev.of_node) {
+		dprintk(CVP_ERR, "DT node not found\n");
+		return -ENOENT;
+	}
+
+	INIT_LIST_HEAD(&res->context_banks);
+
+	res->firmware_base = (phys_addr_t)firmware_base;
+
+	kres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	res->register_base = kres ? kres->start : -1;
+	res->register_size = kres ? (kres->end + 1 - kres->start) : -1;
+
+	kres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	res->irq = kres ? kres->start : -1;
+
+	rc = msm_cvp_load_subcache_info(res);
+	if (rc)
+		dprintk(CVP_WARN, "Failed to load subcache info: %d\n", rc);
+
+	rc = msm_cvp_load_qdss_table(res);
+	if (rc)
+		dprintk(CVP_WARN, "Failed to load qdss reg table: %d\n", rc);
+
+	rc = msm_cvp_load_reg_table(res);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to load reg table: %d\n", rc);
+		goto err_load_reg_table;
+	}
+
+	rc = msm_cvp_load_regulator_table(res);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to load list of regulators %d\n", rc);
+		goto err_load_regulator_table;
+	}
+
+	rc = msm_cvp_load_clock_table(res);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"Failed to load clock table: %d\n", rc);
+		goto err_load_clock_table;
+	}
+
+	rc = msm_cvp_load_allowed_clocks_table(res);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"Failed to load allowed clocks table: %d\n", rc);
+		goto err_load_allowed_clocks_table;
+	}
+
+	rc = msm_cvp_load_reset_table(res);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"Failed to load reset table: %d\n", rc);
+		goto err_load_reset_table;
+	}
+
+	res->use_non_secure_pil = of_property_read_bool(pdev->dev.of_node,
+			"qcom,use-non-secure-pil");
+
+	if (res->use_non_secure_pil || !is_iommu_present(res)) {
+		of_property_read_u32(pdev->dev.of_node, "qcom,fw-bias",
+				&firmware_base);
+		res->firmware_base = (phys_addr_t)firmware_base;
+		dprintk(CVP_CORE,
+				"Using fw-bias : %pa", &res->firmware_base);
+	}
+
+	return rc;
+
+err_load_reset_table:
+	msm_cvp_free_allowed_clocks_table(res);
+err_load_allowed_clocks_table:
+	msm_cvp_free_clock_table(res);
+err_load_clock_table:
+	msm_cvp_free_regulator_table(res);
+err_load_regulator_table:
+	msm_cvp_free_reg_table(res);
+err_load_reg_table:
+	return rc;
+}
+
+static int msm_cvp_setup_context_bank(struct msm_cvp_platform_resources *res,
+		struct context_bank_info *cb, struct device *dev)
+{
+	int rc = 0;
+	struct bus_type *bus;
+
+	if (!dev || !cb || !res) {
+		dprintk(CVP_ERR,
+			"%s: Invalid Input params\n", __func__);
+		return -EINVAL;
+	}
+	cb->dev = dev;
+
+	bus = cb->dev->bus;
+	if (IS_ERR_OR_NULL(bus)) {
+		dprintk(CVP_ERR, "%s - failed to get bus type\n", __func__);
+		rc = PTR_ERR(bus) ?: -ENODEV;
+		goto remove_cb;
+	}
+
+	/*
+	 * configure device segment size and segment boundary to ensure
+	 * iommu mapping returns one mapping (which is required for partial
+	 * cache operations)
+	 */
+	if (!dev->dma_parms)
+		dev->dma_parms =
+			devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
+	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+	dma_set_seg_boundary(dev, DMA_BIT_MASK(64));
+
+	dprintk(CVP_CORE, "Attached %s and created mapping\n", dev_name(dev));
+	dprintk(CVP_CORE,
+		"Context bank name:%s, buffer_type: %#x, is_secure: %d, address range start: %#x, size: %#x, dev: %pK",
+		cb->name, cb->buffer_type, cb->is_secure, cb->addr_range.start,
+		cb->addr_range.size, cb->dev);
+
+	return rc;
+
+remove_cb:
+	return rc;
+}
+
+int msm_cvp_smmu_fault_handler(struct iommu_domain *domain,
+		struct device *dev, unsigned long iova, int flags, void *token)
+{
+	struct msm_cvp_core *core = token;
+	struct msm_cvp_inst *inst;
+	u32 *pfaddr;
+
+	if (!domain || !core) {
+		dprintk(CVP_ERR, "%s - invalid param %pK %pK\n",
+			__func__, domain, core);
+		return -EINVAL;
+	}
+	pfaddr = &core->last_fault_addr;
+
+	if (core->smmu_fault_handled) {
+		if (core->resources.non_fatal_pagefaults) {
+			WARN_ONCE(1, "%s: non-fatal pagefault address: %lx\n",
+					__func__, iova);
+			*pfaddr = (*pfaddr == 0) ? iova : (*pfaddr);
+			return 0;
+		}
+	}
+
+	dprintk(CVP_ERR, "%s - faulting address: %lx\n", __func__, iova);
+
+	mutex_lock(&core->lock);
+	list_for_each_entry(inst, &core->instances, list) {
+		msm_cvp_print_inst_bufs(inst);
+	}
+	core->smmu_fault_handled = true;
+	msm_cvp_noc_error_info(core);
+	mutex_unlock(&core->lock);
+	/*
+	 * Return -EINVAL to elicit the default behaviour of smmu driver.
+	 * If we return -ENOSYS, then smmu driver assumes page fault handler
+	 * is not installed and prints a list of useful debug information like
+	 * FAR, SID etc. This information is not printed if we return 0.
+	 */
+	return -EINVAL;
+}
+
+static int msm_cvp_populate_context_bank(struct device *dev,
+		struct msm_cvp_core *core)
+{
+	int rc = 0;
+	struct context_bank_info *cb = NULL;
+	struct device_node *np = NULL;
+
+	if (!dev || !core) {
+		dprintk(CVP_ERR, "%s - invalid inputs\n", __func__);
+		return -EINVAL;
+	}
+
+	np = dev->of_node;
+	cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL);
+	if (!cb) {
+		dprintk(CVP_ERR, "%s - Failed to allocate cb\n", __func__);
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&cb->list);
+	list_add_tail(&cb->list, &core->resources.context_banks);
+
+	rc = of_property_read_string(np, "label", &cb->name);
+	if (rc) {
+		dprintk(CVP_CORE,
+			"Failed to read cb label from device tree\n");
+		rc = 0;
+	}
+
+	dprintk(CVP_CORE, "%s: context bank has name %s\n", __func__, cb->name);
+	rc = of_property_read_u32_array(np, "qcom,iommu-dma-addr-pool",
+			(u32 *)&cb->addr_range, 2);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"Could not read addr pool for context bank : %s %d\n",
+			cb->name, rc);
+		goto err_setup_cb;
+	}
+
+	cb->is_secure = of_property_read_bool(np, "qcom,iommu-vmid");
+	dprintk(CVP_CORE, "context bank %s : secure = %d\n",
+			cb->name, cb->is_secure);
+
+	/* setup buffer type for each sub device*/
+	rc = of_property_read_u32(np, "buffer-types", &cb->buffer_type);
+	if (rc) {
+		dprintk(CVP_ERR, "failed to load buffer_type info %d\n", rc);
+		rc = -ENOENT;
+		goto err_setup_cb;
+	}
+	dprintk(CVP_CORE,
+		"context bank %s address start = %x address size = %x buffer_type = %x\n",
+		cb->name, cb->addr_range.start,
+		cb->addr_range.size, cb->buffer_type);
+
+	cb->domain = iommu_get_domain_for_dev(dev);
+	if (IS_ERR_OR_NULL(cb->domain)) {
+		dprintk(CVP_ERR, "Create domain failed\n");
+		rc = -ENODEV;
+		goto err_setup_cb;
+	}
+
+	rc = msm_cvp_setup_context_bank(&core->resources, cb, dev);
+	if (rc) {
+		dprintk(CVP_ERR, "Cannot setup context bank %d\n", rc);
+		goto err_setup_cb;
+	}
+
+	iommu_set_fault_handler(cb->domain,
+		msm_cvp_smmu_fault_handler, (void *)core);
+
+	return 0;
+
+err_setup_cb:
+	list_del(&cb->list);
+	return rc;
+}
+
+int cvp_read_context_bank_resources_from_dt(struct platform_device *pdev)
+{
+	struct msm_cvp_core *core;
+	int rc = 0;
+
+	if (!pdev) {
+		dprintk(CVP_ERR, "Invalid platform device\n");
+		return -EINVAL;
+	} else if (!pdev->dev.parent) {
+		dprintk(CVP_ERR, "Failed to find a parent for %s\n",
+				dev_name(&pdev->dev));
+		return -ENODEV;
+	}
+
+	core = dev_get_drvdata(pdev->dev.parent);
+	if (!core) {
+		dprintk(CVP_ERR, "Failed to find cookie in parent device %s",
+				dev_name(pdev->dev.parent));
+		return -EINVAL;
+	}
+
+	rc = msm_cvp_populate_context_bank(&pdev->dev, core);
+	if (rc)
+		dprintk(CVP_ERR, "Failed to probe context bank\n");
+	else
+		dprintk(CVP_CORE, "Successfully probed context bank\n");
+
+	return rc;
+}
+
+int cvp_read_bus_resources_from_dt(struct platform_device *pdev)
+{
+	struct msm_cvp_core *core;
+
+	if (!pdev) {
+		dprintk(CVP_ERR, "Invalid platform device\n");
+		return -EINVAL;
+	} else if (!pdev->dev.parent) {
+		dprintk(CVP_ERR, "Failed to find a parent for %s\n",
+				dev_name(&pdev->dev));
+		return -ENODEV;
+	}
+
+	core = dev_get_drvdata(pdev->dev.parent);
+	if (!core) {
+		dprintk(CVP_ERR, "Failed to find cookie in parent device %s",
+				dev_name(pdev->dev.parent));
+		return -EINVAL;
+	}
+
+	return msm_cvp_populate_bus(&pdev->dev, &core->resources);
+}
+
+int cvp_read_mem_cdsp_resources_from_dt(struct platform_device *pdev)
+{
+	struct msm_cvp_core *core;
+
+	if (!pdev) {
+		dprintk(CVP_ERR, "%s: invalid platform device\n", __func__);
+		return -EINVAL;
+	} else if (!pdev->dev.parent) {
+		dprintk(CVP_ERR, "Failed to find a parent for %s\n",
+				dev_name(&pdev->dev));
+		return -ENODEV;
+	}
+
+	core = dev_get_drvdata(pdev->dev.parent);
+	if (!core) {
+		dprintk(CVP_ERR, "Failed to find cookie in parent device %s",
+				dev_name(pdev->dev.parent));
+		return -EINVAL;
+	}
+
+	return msm_cvp_populate_mem_cdsp(&pdev->dev, &core->resources);
+}

+ 30 - 0
msm/eva/msm_cvp_res_parse.h

@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __MSM_CVP_RES_PARSE_H__
+#define __MSM_CVP_RES_PARSE_H__
+#include <linux/of.h>
+#include "msm_cvp_resources.h"
+#include "msm_cvp_common.h"
+void msm_cvp_free_platform_resources(
+		struct msm_cvp_platform_resources *res);
+
+int read_hfi_type(struct platform_device *pdev);
+
+int cvp_read_platform_resources_from_drv_data(
+		struct msm_cvp_core *core);
+int cvp_read_platform_resources_from_dt(
+		struct msm_cvp_platform_resources *res);
+
+int cvp_read_context_bank_resources_from_dt(struct platform_device *pdev);
+
+int cvp_read_bus_resources_from_dt(struct platform_device *pdev);
+int cvp_read_mem_cdsp_resources_from_dt(struct platform_device *pdev);
+
+int msm_cvp_load_u32_table(struct platform_device *pdev,
+		struct device_node *of_node, char *table_name, int struct_size,
+		u32 **table, u32 *num_elements);
+
+#endif

+ 185 - 0
msm/eva/msm_cvp_resources.h

@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __MSM_CVP_RESOURCES_H__
+#define __MSM_CVP_RESOURCES_H__
+
+#include <linux/devfreq.h>
+#include <linux/platform_device.h>
+#include "msm_cvp_core.h"
+#include <linux/soc/qcom/llcc-qcom.h>
+
+struct reg_value_pair {
+	u32 reg;
+	u32 value;
+};
+
+struct reg_set {
+	struct reg_value_pair *reg_tbl;
+	int count;
+};
+
+struct addr_range {
+	u32 start;
+	u32 size;
+};
+
+struct addr_set {
+	struct addr_range *addr_tbl;
+	int count;
+};
+
+struct context_bank_info {
+	struct list_head list;
+	const char *name;
+	u32 buffer_type;
+	bool is_secure;
+	struct addr_range addr_range;
+	struct device *dev;
+	struct iommu_domain *domain;
+};
+
+struct regulator_info {
+	struct regulator *regulator;
+	bool has_hw_power_collapse;
+	char *name;
+};
+
+struct regulator_set {
+	struct regulator_info *regulator_tbl;
+	u32 count;
+};
+
+struct clock_info {
+	const char *name;
+	struct clk *clk;
+	u32 count;
+	bool has_scaling;
+	bool has_mem_retention;
+};
+
+struct clock_set {
+	struct clock_info *clock_tbl;
+	u32 count;
+};
+
+struct bus_info {
+	char *name;
+	int master;
+	int slave;
+	unsigned int range[2];
+	const char *governor;
+	struct device *dev;
+	struct devfreq_dev_profile devfreq_prof;
+	struct devfreq *devfreq;
+	struct icc_path *client;
+	bool is_prfm_gov_used;
+};
+
+struct bus_set {
+	struct bus_info *bus_tbl;
+	u32 count;
+};
+
+enum power_state {
+	CVP_POWER_INIT,
+	CVP_POWER_ON,
+	CVP_POWER_OFF,
+	CVP_POWER_INVALID,
+};
+
+struct reset_info {
+	struct reset_control *rst;
+	enum power_state required_state;
+	const char *name;
+};
+
+struct reset_set {
+	struct reset_info *reset_tbl;
+	u32 count;
+};
+
+struct allowed_clock_rates_table {
+	u32 clock_rate;
+};
+
+struct clock_profile_entry {
+	u32 codec_mask;
+	u32 vpp_cycles;
+	u32 vsp_cycles;
+	u32 low_power_cycles;
+};
+
+struct clock_freq_table {
+	struct clock_profile_entry *clk_prof_entries;
+	u32 count;
+};
+
+struct subcache_info {
+	const char *name;
+	bool isactive;
+	bool isset;
+	struct llcc_slice_desc *subcache;
+};
+
+struct subcache_set {
+	struct subcache_info *subcache_tbl;
+	u32 count;
+};
+
+struct msm_cvp_mem_cdsp {
+	struct device *dev;
+};
+
+struct msm_cvp_platform_resources {
+	phys_addr_t firmware_base;
+	phys_addr_t register_base;
+	uint32_t register_size;
+	uint32_t irq;
+	uint32_t sku_version;
+	struct allowed_clock_rates_table *allowed_clks_tbl;
+	u32 allowed_clks_tbl_size;
+	struct clock_freq_table clock_freq_tbl;
+	bool sys_cache_present;
+	bool sys_cache_res_set;
+	struct subcache_set subcache_set;
+	struct reg_set reg_set;
+	struct addr_set qdss_addr_set;
+	uint32_t max_load;
+	struct platform_device *pdev;
+	struct regulator_set regulator_set;
+	struct clock_set clock_set;
+	struct bus_set bus_set;
+	struct reset_set reset_set;
+	bool use_non_secure_pil;
+	bool sw_power_collapsible;
+	bool auto_pil;
+	struct list_head context_banks;
+	bool thermal_mitigable;
+	const char *fw_name;
+	const char *hfi_version;
+	bool never_unload_fw;
+	bool debug_timeout;
+	uint32_t pm_qos_latency_us;
+	uint32_t max_inst_count;
+	uint32_t max_secure_inst_count;
+	int msm_cvp_hw_rsp_timeout;
+	int msm_cvp_dsp_rsp_timeout;
+	int msm_cvp_firmware_unload_delay;
+	uint32_t msm_cvp_pwr_collapse_delay;
+	bool non_fatal_pagefaults;
+	struct msm_cvp_mem_cdsp mem_cdsp;
+	uint32_t vpu_ver;
+	uint32_t fw_cycles;
+	struct msm_cvp_ubwc_config_data *ubwc_config;
+};
+
+static inline bool is_iommu_present(struct msm_cvp_platform_resources *res)
+{
+	return !list_empty(&res->context_banks);
+}
+
+#endif

+ 240 - 0
msm/eva/msm_cvp_synx.c

@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include "msm_cvp_common.h"
+#include "cvp_hfi_api.h"
+#include "msm_cvp_debug.h"
+#include "msm_cvp_core.h"
+#include "msm_cvp_dsp.h"
+
+void cvp_dump_fence_queue(struct msm_cvp_inst *inst)
+{
+	struct cvp_fence_queue *q;
+	struct cvp_fence_command *f;
+	struct synx_session ssid;
+	int i;
+
+	q = &inst->fence_cmd_queue;
+	ssid = inst->synx_session_id;
+	mutex_lock(&q->lock);
+	dprintk(CVP_WARN, "inst %x fence q mode %d, ssid %d\n",
+			hash32_ptr(inst->session), q->mode, ssid.client_id);
+
+	dprintk(CVP_WARN, "fence cmdq wait list:\n");
+	list_for_each_entry(f, &q->wait_list, list) {
+		dprintk(CVP_WARN, "frame pkt type 0x%x\n", f->pkt->packet_type);
+		for (i = 0; i < f->output_index; i++)
+			dprintk(CVP_WARN, "idx %d client hdl %d, state %d\n",
+				i, f->synx[i],
+				synx_get_status(ssid, f->synx[i]));
+	}
+
+	dprintk(CVP_WARN, "fence cmdq schedule list:\n");
+	list_for_each_entry(f, &q->sched_list, list) {
+		dprintk(CVP_WARN, "frame pkt type 0x%x\n", f->pkt->packet_type);
+		for (i = 0; i < f->output_index; i++)
+			dprintk(CVP_WARN, "idx %d client hdl %d, state %d\n",
+				i, f->synx[i],
+				synx_get_status(ssid, f->synx[i]));
+	}
+	mutex_unlock(&q->lock);
+}
+
+int cvp_import_synx(struct msm_cvp_inst *inst, struct cvp_fence_command *fc,
+		u32 *fence)
+{
+	int rc = 0;
+	int i;
+	struct cvp_fence_type *fs;
+	struct synx_import_params params;
+	s32 h_synx;
+	struct synx_session ssid;
+
+	if (fc->signature != 0xFEEDFACE) {
+		dprintk(CVP_ERR, "%s Deprecated synx path\n", __func__);
+		return -EINVAL;
+	}
+
+	fs = (struct cvp_fence_type *)fence;
+	ssid = inst->synx_session_id;
+
+	for (i = 0; i < fc->num_fences; ++i) {
+		h_synx = fs[i].h_synx;
+
+		if (h_synx) {
+			params.h_synx = h_synx;
+			params.secure_key = fs[i].secure_key;
+			params.new_h_synx = &fc->synx[i];
+
+			rc = synx_import(ssid, &params);
+			if (rc) {
+				dprintk(CVP_ERR,
+					"%s: synx_import failed\n",
+					__func__);
+				return rc;
+			}
+		}
+	}
+
+	return 0;
+}
+
+int cvp_release_synx(struct msm_cvp_inst *inst, struct cvp_fence_command *fc)
+{
+	int rc = 0;
+	int i;
+	s32 h_synx;
+	struct synx_session ssid;
+
+	if (fc->signature != 0xFEEDFACE) {
+		dprintk(CVP_ERR, "%s deprecated synx_path\n", __func__);
+		return -EINVAL;
+	}
+
+	ssid = inst->synx_session_id;
+	for (i = 0; i < fc->num_fences; ++i) {
+		h_synx = fc->synx[i];
+		if (h_synx) {
+			rc = synx_release(ssid, h_synx);
+			if (rc)
+				dprintk(CVP_ERR,
+				"%s: synx_release %d failed\n",
+				__func__, i);
+		}
+	}
+	return rc;
+}
+
+static int cvp_cancel_synx_impl(struct msm_cvp_inst *inst,
+			enum cvp_synx_type type,
+			struct cvp_fence_command *fc)
+{
+	int rc = 0;
+	int i;
+	int h_synx;
+	struct synx_session ssid;
+	int start = 0, end = 0;
+	int synx_state = SYNX_STATE_SIGNALED_CANCEL;
+
+	ssid = inst->synx_session_id;
+
+	if (type == CVP_INPUT_SYNX) {
+		start = 0;
+		end = fc->output_index;
+	} else if (type == CVP_OUTPUT_SYNX) {
+		start = fc->output_index;
+		end = fc->num_fences;
+	} else {
+		dprintk(CVP_ERR, "%s Incorrect synx type\n", __func__);
+		return -EINVAL;
+	}
+
+	for (i = start; i < end; ++i) {
+		h_synx = fc->synx[i];
+		if (h_synx) {
+			rc = synx_signal(ssid, h_synx, synx_state);
+			if (rc) {
+				dprintk(CVP_ERR, "%s: synx_signal %d failed\n",
+				__func__, i);
+				synx_state = SYNX_STATE_SIGNALED_ERROR;
+			}
+		}
+	}
+
+	return rc;
+}
+
+int cvp_cancel_synx(struct msm_cvp_inst *inst, enum cvp_synx_type type,
+		struct cvp_fence_command *fc)
+{
+	if (fc->signature != 0xFEEDFACE) {
+		dprintk(CVP_ERR, "%s deprecated synx path\n", __func__);
+		return -EINVAL;
+	}
+
+	return cvp_cancel_synx_impl(inst, type, fc);
+}
+
+static int cvp_wait_synx(struct synx_session ssid, u32 *synx, u32 num_synx,
+		u32 *synx_state)
+{
+	int i = 0, rc = 0;
+	unsigned long timeout_ms = 1000;
+	int h_synx;
+
+	while (i < num_synx) {
+		h_synx = synx[i];
+		if (h_synx) {
+			rc = synx_wait(ssid, h_synx, timeout_ms);
+			if (rc) {
+				*synx_state = synx_get_status(ssid, h_synx);
+				if (*synx_state == SYNX_STATE_SIGNALED_CANCEL) {
+					dprintk(CVP_SYNX,
+					"%s: synx_wait %d cancel %d state %d\n",
+					current->comm, i, rc, *synx_state);
+				} else {
+					dprintk(CVP_ERR,
+					"%s: synx_wait %d failed %d state %d\n",
+					current->comm, i, rc, *synx_state);
+					*synx_state = SYNX_STATE_SIGNALED_ERROR;
+				}
+				return rc;
+			}
+		}
+		++i;
+	}
+	return rc;
+}
+
+static int cvp_signal_synx(struct synx_session ssid, u32 *synx, u32 num_synx,
+		u32 synx_state)
+{
+	int i = 0, rc = 0;
+	int h_synx;
+
+	while (i < num_synx) {
+		h_synx = synx[i];
+		if (h_synx) {
+			rc = synx_signal(ssid, h_synx, synx_state);
+			if (rc) {
+				dprintk(CVP_ERR, "%s: synx_signal %d failed\n",
+				current->comm, i);
+				synx_state = SYNX_STATE_SIGNALED_ERROR;
+			}
+		}
+		++i;
+	}
+	return rc;
+}
+
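+/*
+ * synx[0..output_index) are input fences, waited on before the command
+ * runs; synx[output_index..num_fences) are output fences, signalled with
+ * the resulting state afterwards.
+ */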
+int cvp_synx_ops(struct msm_cvp_inst *inst, enum cvp_synx_type type,
+		struct cvp_fence_command *fc, u32 *synx_state)
+{
+	struct synx_session ssid;
+
+	ssid = inst->synx_session_id;
+
+	if (fc->signature != 0xFEEDFACE) {
+		dprintk(CVP_ERR, "%s deprecated synx, type %d\n", __func__);
+				return -EINVAL;
+	}
+
+	if (type == CVP_INPUT_SYNX) {
+		return cvp_wait_synx(ssid, fc->synx, fc->output_index,
+				synx_state);
+	} else if (type == CVP_OUTPUT_SYNX) {
+		return cvp_signal_synx(ssid, &fc->synx[fc->output_index],
+				(fc->num_fences - fc->output_index),
+				*synx_state);
+	} else {
+		dprintk(CVP_ERR, "%s Incorrect SYNX type\n", __func__);
+		return -EINVAL;
+	}
+}
+
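Taken together, cvp_synx_ops(), cvp_cancel_synx() and cvp_release_synx() form the per-command fence lifecycle. The sketch below shows the expected call order from a fence worker; the worker itself and the firmware submission are not part of this hunk, so the function name and the SYNX_STATE_SIGNALED_SUCCESS initial state are illustrative assumptions.

/*
 * Illustrative sketch only (not part of this commit): how a fence
 * worker is expected to drive the helpers above for one dequeued
 * cvp_fence_command.
 */
static int cvp_fence_worker_one(struct msm_cvp_inst *inst,
		struct cvp_fence_command *fc)
{
	u32 synx_state = SYNX_STATE_SIGNALED_SUCCESS; /* assumed initial */
	int rc;

	/* Wait on the input fences: synx[0 .. output_index) */
	rc = cvp_synx_ops(inst, CVP_INPUT_SYNX, fc, &synx_state);
	if (rc) {
		/* Inputs failed or were cancelled: cancel the outputs */
		cvp_cancel_synx(inst, CVP_OUTPUT_SYNX, fc);
		goto out;
	}

	/* ... submit fc->pkt to firmware and wait for its response ... */

	/* Signal the output fences: synx[output_index .. num_fences) */
	rc = cvp_synx_ops(inst, CVP_OUTPUT_SYNX, fc, &synx_state);
out:
	cvp_release_synx(inst, fc);
	return rc;
}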

+ 55 - 0
msm/eva/msm_cvp_synx.h

@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _MSM_CVP_SYNX_H_
+#define _MSM_CVP_SYNX_H_
+
+#include <linux/types.h>
+#include <media/msm_cvp_private.h>
+#include "cvp_comm_def.h"
+
+
+struct cvp_fence_queue {
+	struct mutex lock;
+	enum queue_state state;
+	enum op_mode mode;
+	struct list_head wait_list;
+	wait_queue_head_t wq;
+	struct list_head sched_list;
+};
+
+struct cvp_fence_type {
+	s32 h_synx;
+	u32 secure_key;
+};
+
+struct cvp_fence_command {
+	struct list_head list;
+	u64 frame_id;
+	enum op_mode mode;
+	u32 signature;
+	u32 num_fences;
+	u32 output_index;
+	u32 type;
+	u32 synx[MAX_HFI_FENCE_SIZE/2];
+	struct cvp_hfi_cmd_session_hdr *pkt;
+};
+
+enum cvp_synx_type {
+	CVP_UINIT_SYNX,
+	CVP_INPUT_SYNX,
+	CVP_OUTPUT_SYNX,
+	CVP_INVALID_SYNX,
+};
+
+int cvp_import_synx(struct msm_cvp_inst *inst, struct cvp_fence_command *fc,
+		u32 *fence);
+int cvp_release_synx(struct msm_cvp_inst *inst, struct cvp_fence_command *fc);
+int cvp_cancel_synx(struct msm_cvp_inst *inst, enum cvp_synx_type type,
+		struct cvp_fence_command *fc);
+int cvp_synx_ops(struct msm_cvp_inst *inst, enum cvp_synx_type type,
+		struct cvp_fence_command *fc, u32 *synx_state);
+void cvp_dump_fence_queue(struct msm_cvp_inst *inst);
+#endif
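The synx[] handle array in cvp_fence_command is partitioned by output_index: entries [0, output_index) are input fences the driver waits on, entries [output_index, num_fences) are output fences it signals. A minimal layout sketch (the values are made up for illustration):

/* Sketch: two input fences, one output fence */
struct cvp_fence_command fc = {
	.signature    = 0xFEEDFACE,	/* current synx path marker */
	.num_fences   = 3,
	.output_index = 2,		/* synx[0..1] in, synx[2] out */
	/* .synx[] is filled by cvp_import_synx() from user handles */
};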

+ 473 - 0
msm/eva/msm_smem.c

@@ -0,0 +1,473 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/dma-direction.h>
+#include <linux/iommu.h>
+#include <linux/msm_dma_iommu_mapping.h>
+#include <linux/ion.h>
+#include <linux/msm_ion.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include "msm_cvp_core.h"
+#include "msm_cvp_debug.h"
+#include "msm_cvp_resources.h"
+
+
+static int msm_dma_get_device_address(struct dma_buf *dbuf, u32 align,
+	dma_addr_t *iova, u32 flags, unsigned long ion_flags,
+	struct msm_cvp_platform_resources *res,
+	struct cvp_dma_mapping_info *mapping_info)
+{
+	int rc = 0;
+	struct dma_buf_attachment *attach;
+	struct sg_table *table = NULL;
+	struct context_bank_info *cb = NULL;
+
+	if (!dbuf || !iova || !mapping_info) {
+		dprintk(CVP_ERR, "Invalid params: %pK, %pK, %pK\n",
+			dbuf, iova, mapping_info);
+		return -EINVAL;
+	}
+
+	if (is_iommu_present(res)) {
+		cb = msm_cvp_smem_get_context_bank((flags & SMEM_SECURE),
+				res, ion_flags);
+		if (!cb) {
+			dprintk(CVP_ERR,
+				"%s: Failed to get context bank device\n",
+				 __func__);
+			rc = -EIO;
+			goto mem_map_failed;
+		}
+
+		/* Prepare a dma buf for dma on the given device */
+		attach = dma_buf_attach(dbuf, cb->dev);
+		if (IS_ERR_OR_NULL(attach)) {
+			rc = PTR_ERR(attach) ?: -ENOMEM;
+			dprintk(CVP_ERR, "Failed to attach dmabuf\n");
+			goto mem_buf_attach_failed;
+		}
+
+		/*
+		 * Get the scatterlist for the given attachment
+		 * Mapping of sg is taken care by map attachment
+		 */
+		attach->dma_map_attrs = DMA_ATTR_DELAYED_UNMAP;
+		/*
+		 * We do not need dma_map function to perform cache operations
+		 * on the whole buffer size and hence pass skip sync flag.
+		 * We do the required cache operations separately for the
+		 * required buffer size
+		 */
+		attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+		if (res->sys_cache_present)
+			attach->dma_map_attrs |=
+				DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
+
+		table = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+		if (IS_ERR_OR_NULL(table)) {
+			rc = PTR_ERR(table) ?: -ENOMEM;
+			dprintk(CVP_ERR, "Failed to map table\n");
+			goto mem_map_table_failed;
+		}
+
+		if (table->sgl) {
+			*iova = table->sgl->dma_address;
+		} else {
+			dprintk(CVP_ERR, "sgl is NULL\n");
+			rc = -ENOMEM;
+			goto mem_map_sg_failed;
+		}
+
+		mapping_info->dev = cb->dev;
+		mapping_info->domain = cb->domain;
+		mapping_info->table = table;
+		mapping_info->attach = attach;
+		mapping_info->buf = dbuf;
+		mapping_info->cb_info = (void *)cb;
+	} else {
+		dprintk(CVP_MEM, "iommu not present, use phys mem addr\n");
+	}
+
+	return 0;
+mem_map_sg_failed:
+	dma_buf_unmap_attachment(attach, table, DMA_BIDIRECTIONAL);
+mem_map_table_failed:
+	dma_buf_detach(dbuf, attach);
+mem_buf_attach_failed:
+mem_map_failed:
+	return rc;
+}
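Stripped of the msm-specific map attrs and the context-bank lookup, this is the standard dma-buf importer pattern. A minimal upstream-only sketch for comparison (dev stands in for the context bank's device):

struct dma_buf_attachment *attach = dma_buf_attach(dbuf, dev);
struct sg_table *sgt;

if (IS_ERR(attach))
	return PTR_ERR(attach);
sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
if (IS_ERR(sgt)) {
	dma_buf_detach(dbuf, attach);
	return PTR_ERR(sgt);
}
/* sgt->sgl->dma_address is the device-visible (IOVA) address */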
+
+static int msm_dma_put_device_address(u32 flags,
+	struct cvp_dma_mapping_info *mapping_info)
+{
+	int rc = 0;
+
+	if (!mapping_info) {
+		dprintk(CVP_WARN, "Invalid mapping_info\n");
+		return -EINVAL;
+	}
+
+	if (!mapping_info->dev || !mapping_info->table ||
+		!mapping_info->buf || !mapping_info->attach ||
+		!mapping_info->cb_info) {
+		dprintk(CVP_WARN, "Invalid params\n");
+		return -EINVAL;
+	}
+
+	dma_buf_unmap_attachment(mapping_info->attach,
+		mapping_info->table, DMA_BIDIRECTIONAL);
+	dma_buf_detach(mapping_info->buf, mapping_info->attach);
+
+	mapping_info->dev = NULL;
+	mapping_info->domain = NULL;
+	mapping_info->table = NULL;
+	mapping_info->attach = NULL;
+	mapping_info->buf = NULL;
+	mapping_info->cb_info = NULL;
+
+	return rc;
+}
+
+struct dma_buf *msm_cvp_smem_get_dma_buf(int fd)
+{
+	struct dma_buf *dma_buf;
+
+	dma_buf = dma_buf_get(fd);
+	if (IS_ERR_OR_NULL(dma_buf)) {
+		dprintk(CVP_ERR, "Failed to get dma_buf for %d, error %ld\n",
+				fd, PTR_ERR(dma_buf));
+		dma_buf = NULL;
+	}
+
+	return dma_buf;
+}
+
+void msm_cvp_smem_put_dma_buf(void *dma_buf)
+{
+	if (!dma_buf) {
+		dprintk(CVP_ERR, "%s: NULL dma_buf\n", __func__);
+		return;
+	}
+
+	dma_buf_put((struct dma_buf *)dma_buf);
+}
+
+int msm_cvp_map_smem(struct msm_cvp_inst *inst,
+			struct msm_cvp_smem *smem,
+			const char *str)
+{
+	int rc = 0;
+	dma_addr_t iova = 0;
+	u32 temp = 0;
+	u32 align = SZ_4K;
+	struct dma_buf *dma_buf;
+	unsigned long ion_flags = 0;
+
+	if (!inst || !smem) {
+		dprintk(CVP_ERR, "%s: Invalid params: %pK %pK\n",
+				__func__, inst, smem);
+		return -EINVAL;
+	}
+
+	dma_buf = smem->dma_buf;
+	rc = dma_buf_get_flags(dma_buf, &ion_flags);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to get dma buf flags: %d\n", rc);
+		goto exit;
+	}
+	if (ion_flags & ION_FLAG_CACHED)
+		smem->flags |= SMEM_CACHED;
+
+	if (ion_flags & ION_FLAG_SECURE)
+		smem->flags |= SMEM_SECURE;
+
+	rc = msm_dma_get_device_address(dma_buf, align, &iova, smem->flags,
+			ion_flags, &(inst->core->resources),
+			&smem->mapping_info);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to get device address: %d\n", rc);
+		goto exit;
+	}
+	temp = (u32)iova;
+	if ((dma_addr_t)temp != iova) {
+		dprintk(CVP_ERR, "iova(%pa) truncated to %#x", &iova, temp);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	smem->size = dma_buf->size;
+	smem->device_addr = (u32)iova;
+
+	print_smem(CVP_MEM, str, inst, smem);
+	return rc;
+exit:
+	smem->device_addr = 0x0;
+	return rc;
+}
+
+int msm_cvp_unmap_smem(struct msm_cvp_inst *inst,
+		struct msm_cvp_smem *smem,
+		const char *str)
+{
+	int rc = 0;
+
+	if (!smem) {
+		dprintk(CVP_ERR, "%s: Invalid params: %pK\n", __func__, smem);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	print_smem(CVP_MEM, str, inst, smem);
+	rc = msm_dma_put_device_address(smem->flags, &smem->mapping_info);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to put device address: %d\n", rc);
+		goto exit;
+	}
+
+	smem->device_addr = 0x0;
+
+exit:
+	return rc;
+}
+
+static int alloc_dma_mem(size_t size, u32 align, u32 flags, int map_kernel,
+	struct msm_cvp_platform_resources *res, struct msm_cvp_smem *mem)
+{
+	dma_addr_t iova = 0;
+	unsigned long heap_mask = 0;
+	int rc = 0;
+	int ion_flags = 0;
+	struct dma_buf *dbuf = NULL;
+
+	if (!res) {
+		dprintk(CVP_ERR, "%s: NULL res\n", __func__);
+		return -EINVAL;
+	}
+
+	align = ALIGN(align, SZ_4K);
+	size = ALIGN(size, SZ_4K);
+
+	if (is_iommu_present(res)) {
+		if (flags & SMEM_ADSP) {
+			dprintk(CVP_MEM, "Allocating from ADSP heap\n");
+			heap_mask = ION_HEAP(ION_ADSP_HEAP_ID);
+		} else {
+			heap_mask = ION_HEAP(ION_SYSTEM_HEAP_ID);
+		}
+	} else {
+		dprintk(CVP_MEM,
+		"allocate shared memory from adsp heap size %zx align %d\n",
+		size, align);
+		heap_mask = ION_HEAP(ION_ADSP_HEAP_ID);
+	}
+
+	if (flags & SMEM_CACHED)
+		ion_flags |= ION_FLAG_CACHED;
+
+	if (flags & SMEM_NON_PIXEL)
+		ion_flags |= ION_FLAG_CP_NON_PIXEL;
+
+	if (flags & SMEM_SECURE) {
+		ion_flags |= ION_FLAG_SECURE;
+		heap_mask = ION_HEAP(ION_SECURE_HEAP_ID);
+	}
+
+	dbuf = ion_alloc(size, heap_mask, ion_flags);
+	if (IS_ERR_OR_NULL(dbuf)) {
+		dprintk(CVP_ERR,
+		"Failed to allocate shared memory = %zx bytes, %lx, %x\n",
+		size, heap_mask, ion_flags);
+		rc = -ENOMEM;
+		goto fail_shared_mem_alloc;
+	}
+
+	mem->flags = flags;
+	mem->ion_flags = ion_flags;
+	mem->size = size;
+	mem->dma_buf = dbuf;
+	mem->kvaddr = NULL;
+
+	rc = msm_dma_get_device_address(dbuf, align, &iova, flags,
+			ion_flags, res, &mem->mapping_info);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to get device address: %d\n",
+			rc);
+		goto fail_device_address;
+	}
+	mem->device_addr = (u32)iova;
+	if ((dma_addr_t)mem->device_addr != iova) {
+		dprintk(CVP_ERR, "iova(%pa) truncated to %#x\n",
+			&iova, mem->device_addr);
+		rc = -EINVAL;
+		goto fail_put_device_address;
+	}
+
+	if (map_kernel) {
+		dma_buf_begin_cpu_access(dbuf, DMA_BIDIRECTIONAL);
+		mem->kvaddr = dma_buf_vmap(dbuf);
+		if (!mem->kvaddr) {
+			dprintk(CVP_ERR,
+				"Failed to map shared mem in kernel\n");
+			rc = -EIO;
+			goto fail_map;
+		}
+	}
+
+	dprintk(CVP_MEM,
+		"%s: dma_buf = %pK, device_addr = %x, size = %d, kvaddr = %pK, ion_flags = %#x, flags = %#x\n",
+		__func__, mem->dma_buf, mem->device_addr, mem->size,
+		mem->kvaddr, mem->ion_flags, mem->flags);
+	return rc;
+
+fail_map:
+	dma_buf_end_cpu_access(dbuf, DMA_BIDIRECTIONAL);
+fail_put_device_address:
+	msm_dma_put_device_address(flags, &mem->mapping_info);
+fail_device_address:
+	dma_buf_put(dbuf);
+fail_shared_mem_alloc:
+	return rc;
+}
+
+static int free_dma_mem(struct msm_cvp_smem *mem)
+{
+	dprintk(CVP_MEM,
+		"%s: dma_buf = %pK, device_addr = %x, size = %d, kvaddr = %pK, ion_flags = %#x\n",
+		__func__, mem->dma_buf, mem->device_addr, mem->size,
+		mem->kvaddr, mem->ion_flags);
+
+	if (mem->device_addr) {
+		msm_dma_put_device_address(mem->flags, &mem->mapping_info);
+		mem->device_addr = 0x0;
+	}
+
+	if (mem->kvaddr) {
+		dma_buf_vunmap(mem->dma_buf, mem->kvaddr);
+		mem->kvaddr = NULL;
+		dma_buf_end_cpu_access(mem->dma_buf, DMA_BIDIRECTIONAL);
+	}
+
+	if (mem->dma_buf) {
+		dma_buf_put(mem->dma_buf);
+		mem->dma_buf = NULL;
+	}
+
+	return 0;
+}
+
+int msm_cvp_smem_alloc(size_t size, u32 align, u32 flags, int map_kernel,
+		void *res, struct msm_cvp_smem *smem)
+{
+	int rc = 0;
+
+	if (!smem || !size) {
+		dprintk(CVP_ERR, "%s: NULL smem or %d size\n",
+			__func__, (u32)size);
+		return -EINVAL;
+	}
+
+	rc = alloc_dma_mem(size, align, flags, map_kernel,
+				(struct msm_cvp_platform_resources *)res,
+				smem);
+
+	return rc;
+}
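Caller-side usage sketch (the size, flag combination and core pointer are illustrative, not taken from this commit):

struct msm_cvp_smem smem = {0};
int rc;

/* Example: 1 MB cached allocation, mapped into the kernel */
rc = msm_cvp_smem_alloc(SZ_1M, SZ_4K, SMEM_CACHED, 1,
		&core->resources, &smem);
if (!rc) {
	/* smem.device_addr holds the 32-bit iova, smem.kvaddr the CPU map */
	msm_cvp_smem_free(&smem);
}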
+
+int msm_cvp_smem_free(struct msm_cvp_smem *smem)
+{
+	int rc = 0;
+
+	if (!smem) {
+		dprintk(CVP_ERR, "NULL smem passed\n");
+		return -EINVAL;
+	}
+	rc = free_dma_mem(smem);
+
+	return rc;
+}
+
+int msm_cvp_smem_cache_operations(struct dma_buf *dbuf,
+	enum smem_cache_ops cache_op, unsigned long offset, unsigned long size)
+{
+	int rc = 0;
+	unsigned long flags = 0;
+
+	if (!dbuf) {
+		dprintk(CVP_ERR, "%s: Invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Return if buffer doesn't support caching */
+	rc = dma_buf_get_flags(dbuf, &flags);
+	if (rc) {
+		dprintk(CVP_ERR, "%s: dma_buf_get_flags failed, err %d\n",
+			__func__, rc);
+		return rc;
+	} else if (!(flags & ION_FLAG_CACHED)) {
+		return rc;
+	}
+
+	switch (cache_op) {
+	case SMEM_CACHE_CLEAN:
+	case SMEM_CACHE_CLEAN_INVALIDATE:
+		rc = dma_buf_begin_cpu_access_partial(dbuf, DMA_BIDIRECTIONAL,
+				offset, size);
+		if (rc)
+			break;
+		rc = dma_buf_end_cpu_access_partial(dbuf, DMA_BIDIRECTIONAL,
+				offset, size);
+		break;
+	case SMEM_CACHE_INVALIDATE:
+		rc = dma_buf_begin_cpu_access_partial(dbuf, DMA_TO_DEVICE,
+				offset, size);
+		if (rc)
+			break;
+		rc = dma_buf_end_cpu_access_partial(dbuf, DMA_FROM_DEVICE,
+				offset, size);
+		break;
+	default:
+		dprintk(CVP_ERR, "%s: cache (%d) operation not supported\n",
+			__func__, cache_op);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
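Usage sketch: clean (flush) before the device consumes CPU-written data, invalidate before the CPU reads device-written data; the offset/size arguments let callers sync only the touched region (the buffer sizes below are hypothetical):

msm_cvp_smem_cache_operations(dbuf, SMEM_CACHE_CLEAN, 0, cmd_size);
/* ... device processes the buffer ... */
msm_cvp_smem_cache_operations(dbuf, SMEM_CACHE_INVALIDATE, 0, out_size);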
+
+struct context_bank_info *msm_cvp_smem_get_context_bank(bool is_secure,
+	struct msm_cvp_platform_resources *res, unsigned long ion_flags)
+{
+	struct context_bank_info *cb = NULL, *match = NULL;
+	char *search_str;
+	char *non_secure_cb = "cvp_hlos";
+	char *secure_nonpixel_cb = "cvp_sec_nonpixel";
+	char *secure_pixel_cb = "cvp_sec_pixel";
+
+	if (ion_flags & ION_FLAG_CP_PIXEL)
+		search_str = secure_pixel_cb;
+	else if (ion_flags & ION_FLAG_CP_NON_PIXEL)
+		search_str = secure_nonpixel_cb;
+	else
+		search_str = non_secure_cb;
+
+	list_for_each_entry(cb, &res->context_banks, list) {
+		if (cb->is_secure == is_secure &&
+			!strcmp(search_str, cb->name)) {
+			match = cb;
+			break;
+		}
+	}
+
+	if (!match)
+		dprintk(CVP_ERR,
+			"%s: cb not found for ion_flags %x, is_secure %d\n",
+			__func__, ion_flags, is_secure);
+
+	return match;
+}
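The three names searched here must match the context banks declared in the platform resources (parsed from devicetree by the res_parse code elsewhere in this commit). A caller-side sketch (core is hypothetical):

/* Secure non-pixel buffer -> the "cvp_sec_nonpixel" bank */
struct context_bank_info *cb =
	msm_cvp_smem_get_context_bank(true, &core->resources,
			ION_FLAG_CP_NON_PIXEL);
if (!cb)
	return -EIO;
/* cb->dev is the struct device to attach dma-bufs against */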