
Add 'qcom/opensource/eva-kernel/' from commit 'c1ff9cd986e7dd66ecf6b385b92ac3de85c76f4e'

git-subtree-dir: qcom/opensource/eva-kernel
git-subtree-mainline: caab746e9f7ab5e05abbc14067bbfb83b7837739
git-subtree-split: c1ff9cd986e7dd66ecf6b385b92ac3de85c76f4e
Change-Id:
repo: https://git.codelinaro.org/clo/la/platform/vendor/opensource/eva-kernel
tag: CV.LA.2.0.r1-04800-lanai.0
David Wronek, 5 months ago
commit af64423e9f
64 changed files with 27671 additions and 0 deletions
  1. +5 -0     qcom/opensource/eva-kernel/Android.bp
  2. +67 -0    qcom/opensource/eva-kernel/Android.mk
  3. +38 -0    qcom/opensource/eva-kernel/BUILD.bazel
  4. +17 -0    qcom/opensource/eva-kernel/Kbuild
  5. +14 -0    qcom/opensource/eva-kernel/Makefile
  6. +5 -0     qcom/opensource/eva-kernel/config/waipioeva.conf
  7. +6 -0     qcom/opensource/eva-kernel/config/waipioevaconf.h
  8. +18 -0    qcom/opensource/eva-kernel/eva_kernel_board.mk
  9. +12 -0    qcom/opensource/eva-kernel/eva_kernel_product.mk
  10. +130 -0  qcom/opensource/eva-kernel/eva_module_build.bzl
  11. +45 -0   qcom/opensource/eva-kernel/eva_modules.bzl
  12. +278 -0  qcom/opensource/eva-kernel/include/uapi/eva/media/msm_eva_private.h
  13. +69 -0   qcom/opensource/eva-kernel/msm/Kbuild
  14. +27 -0   qcom/opensource/eva-kernel/msm/Makefile
  15. +630 -0  qcom/opensource/eva-kernel/msm/eva/cvp.c
  16. +50 -0   qcom/opensource/eva-kernel/msm/eva/cvp_comm_def.h
  17. +53 -0   qcom/opensource/eva-kernel/msm/eva/cvp_core_hfi.c
  18. +302 -0  qcom/opensource/eva-kernel/msm/eva/cvp_core_hfi.h
  19. +314 -0  qcom/opensource/eva-kernel/msm/eva/cvp_dump.c
  20. +124 -0  qcom/opensource/eva-kernel/msm/eva/cvp_dump.h
  21. +150 -0  qcom/opensource/eva-kernel/msm/eva/cvp_fw_load.c
  22. +5805 -0 qcom/opensource/eva-kernel/msm/eva/cvp_hfi.c
  23. +390 -0  qcom/opensource/eva-kernel/msm/eva/cvp_hfi.h
  24. +317 -0  qcom/opensource/eva-kernel/msm/eva/cvp_hfi_api.h
  25. +511 -0  qcom/opensource/eva-kernel/msm/eva/cvp_hfi_helper.h
  26. +311 -0  qcom/opensource/eva-kernel/msm/eva/cvp_hfi_io.h
  27. +343 -0  qcom/opensource/eva-kernel/msm/eva/cvp_power.c
  28. +23 -0   qcom/opensource/eva-kernel/msm/eva/cvp_power.h
  29. +18 -0   qcom/opensource/eva-kernel/msm/eva/cvp_private.h
  30. +663 -0  qcom/opensource/eva-kernel/msm/eva/cvp_smem.c
  31. +86 -0   qcom/opensource/eva-kernel/msm/eva/eva_shared_def.h
  32. +463 -0  qcom/opensource/eva-kernel/msm/eva/hfi_packetization.c
  33. +75 -0   qcom/opensource/eva-kernel/msm/eva/hfi_packetization.h
  34. +748 -0  qcom/opensource/eva-kernel/msm/eva/hfi_response_handler.c
  35. +1708 -0 qcom/opensource/eva-kernel/msm/eva/msm_cvp.c
  36. +49 -0   qcom/opensource/eva-kernel/msm/eva/msm_cvp.h
  37. +2480 -0 qcom/opensource/eva-kernel/msm/eva/msm_cvp_buf.c
  38. +244 -0  qcom/opensource/eva-kernel/msm/eva/msm_cvp_buf.h
  39. +494 -0  qcom/opensource/eva-kernel/msm/eva/msm_cvp_clocks.c
  40. +28 -0   qcom/opensource/eva-kernel/msm/eva/msm_cvp_clocks.h
  41. +1431 -0 qcom/opensource/eva-kernel/msm/eva/msm_cvp_common.c
  42. +36 -0   qcom/opensource/eva-kernel/msm/eva/msm_cvp_common.h
  43. +544 -0  qcom/opensource/eva-kernel/msm/eva/msm_cvp_core.c
  44. +40 -0   qcom/opensource/eva-kernel/msm/eva/msm_cvp_core.h
  45. +630 -0  qcom/opensource/eva-kernel/msm/eva/msm_cvp_debug.c
  46. +205 -0  qcom/opensource/eva-kernel/msm/eva/msm_cvp_debug.h
  47. +2234 -0 qcom/opensource/eva-kernel/msm/eva/msm_cvp_dsp.c
  48. +315 -0  qcom/opensource/eva-kernel/msm/eva/msm_cvp_dsp.h
  49. +375 -0  qcom/opensource/eva-kernel/msm/eva/msm_cvp_events.h
  50. +408 -0  qcom/opensource/eva-kernel/msm/eva/msm_cvp_internal.h
  51. +670 -0  qcom/opensource/eva-kernel/msm/eva/msm_cvp_ioctl.c
  52. +1042 -0 qcom/opensource/eva-kernel/msm/eva/msm_cvp_platform.c
  53. +1265 -0 qcom/opensource/eva-kernel/msm/eva/msm_cvp_res_parse.c
  54. +30 -0   qcom/opensource/eva-kernel/msm/eva/msm_cvp_res_parse.h
  55. +232 -0  qcom/opensource/eva-kernel/msm/eva/msm_cvp_resources.h
  56. +344 -0  qcom/opensource/eva-kernel/msm/eva/msm_cvp_synx.c
  57. +74 -0   qcom/opensource/eva-kernel/msm/eva/msm_cvp_synx.h
  58. +45 -0   qcom/opensource/eva-kernel/msm/eva/vm/cvp_vm.h
  59. +181 -0  qcom/opensource/eva-kernel/msm/eva/vm/cvp_vm_main.c
  60. +341 -0  qcom/opensource/eva-kernel/msm/eva/vm/cvp_vm_msgq.c
  61. +77 -0   qcom/opensource/eva-kernel/msm/eva/vm/cvp_vm_msgq.h
  62. +8 -0    qcom/opensource/eva-kernel/msm/eva/vm/cvp_vm_resource.c
  63. +17 -0   qcom/opensource/eva-kernel/msm/eva/vm/cvp_vm_resource.h
  64. +17 -0   qcom/opensource/eva-kernel/pineapple.bzl

+ 5 - 0
qcom/opensource/eva-kernel/Android.bp

@@ -0,0 +1,5 @@
+cc_library_headers {
+    name: "qti_eva_kernel_headers",
+    export_include_dirs: ["include/uapi/eva/media"],
+    vendor_available: true
+}
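
Note: this Android.bp entry only exports the UAPI include directory to vendor userspace; per the file list above, the single header it publishes is include/uapi/eva/media/msm_eva_private.h, shown later in this commit.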

+ 67 - 0
qcom/opensource/eva-kernel/Android.mk

@@ -0,0 +1,67 @@
+ENABLE_EVA_KERNEL := true
+ifeq ($(TARGET_USES_QMAA), true)
+ifneq ($(TARGET_USES_QMAA_OVERRIDE_CVP), true)
+ENABLE_EVA_KERNEL := false
+endif
+endif
+ifeq ($(call is-board-platform-in-list,volcano),true)
+ENABLE_EVA_KERNEL := false
+endif
+ifeq ($(ENABLE_EVA_KERNEL), true)
+ifneq ($(TARGET_BOARD_PLATFORM), qssi)
+ifeq ($(call is-board-platform-in-list, $(TARGET_BOARD_PLATFORM)),true)
+
+DLKM_DIR   := device/qcom/common/dlkm
+
+LOCAL_PATH := $(call my-dir)
+# For DDK
+LOCAL_MODULE_DDK_BUILD := true
+LOCAL_MODULE_KO_DIRS := msm/msm-eva.ko
+
+include $(CLEAR_VARS)
+# For incremental compilation
+LOCAL_SRC_FILES   := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
+LOCAL_MODULE      := msm-eva.ko
+LOCAL_MODULE_KBUILD_NAME := msm/msm-eva.ko
+LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
+
+LOCAL_ADDITIONAL_DEPENDENCY      := synx-driver.ko
+
+# export to kbuild
+# Setup mmrm dependency
+LOCAL_REQUIRED_MODULES    := mmrm-module-symvers
+LOCAL_ADDITIONAL_DEPENDENCIES := $(call intermediates-dir-for,DLKM,mmrm-module-symvers)/Module.symvers
+KBUILD_REQUIRED_KOS += msm-mmrm.ko
+
+# Setup SynX dependency
+CONFIG_SYNX := y
+#ifdef CONFIG_SYNX
+ifeq ($(CONFIG_SYNX), y)
+$(warning Compiling SynX)
+LOCAL_REQUIRED_MODULES    += synx-driver-symvers
+LOCAL_ADDITIONAL_DEPENDENCIES += $(call intermediates-dir-for,DLKM,synx-driver-symvers)/synx-driver-symvers
+KBUILD_REQUIRED_KOS += synx-driver.ko
+endif
+
+# Setup fastRPC dependency
+CONFIG_FASTRPC := y
+ifeq ($(CONFIG_FASTRPC), y)
+$(warning Compiling FastRPC)
+LOCAL_REQUIRED_MODULES    += dsp-module-symvers
+LOCAL_ADDITIONAL_DEPENDENCIES += $(call intermediates-dir-for,DLKM,dsp-module-symvers)/Module.symvers
+KBUILD_REQUIRED_KOS += frpc-adsprpc.ko
+endif
+
+# print out variables
+$(info KBUILD_OPTIONS = $(KBUILD_OPTIONS))
+$(info intermediates mmrm symvers path = $(call intermediates-dir-for,DLKM,mmrm-module-symvers))
+$(info LOCAL_ADDITIONAL_DEPENDENCY = $(LOCAL_ADDITIONAL_DEPENDENCY))
+$(info LOCAL_ADDITIONAL_DEPENDENCIES = $(LOCAL_ADDITIONAL_DEPENDENCIES))
+$(info LOCAL_REQUIRED_MODULES = $(LOCAL_REQUIRED_MODULES))
+$(info DLKM_DIR = $(DLKM_DIR))
+
+include $(DLKM_DIR)/Build_external_kernelmodule.mk
+
+endif # End of check for board platform
+endif # End of check for target product
+endif # End of enable eva kernel check
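
Note how the KBUILD_REQUIRED_KOS list assembled above (msm-mmrm.ko, synx-driver.ko, frpc-adsprpc.ko) mirrors the MODULE_SOFTDEP("pre: ...") declarations at the bottom of msm/eva/cvp.c later in this commit: the prebuilt modules the DLKM build stages are exactly the ones the driver asks to have loaded first.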

+ 38 - 0
qcom/opensource/eva-kernel/BUILD.bazel

@@ -0,0 +1,38 @@
+load("//build/kernel/kleaf:kernel.bzl", "ddk_headers")
+
+package(
+    default_visibility = [
+      "//visibility:public"],
+)
+
+ddk_headers(
+    name = "eva_drivers_configs",
+    hdrs  =  [
+      "config/waipioevaconf.h"
+    ],
+    includes = ["config"]
+)
+ddk_headers(
+    name = "uapi_headers",
+    hdrs = glob([
+      "include/uapi/eva/media/*.h",
+    ]),
+    includes = ["include/uapi/eva"]
+)
+ddk_headers(
+    name = "msm_headers",
+    hdrs = glob([
+      "msm/eva/*.h",
+      "msm/eva/vm/*.h",
+    ]),
+    includes = ["msm","msm/eva"]
+)
+
+ddk_headers(
+    name = "eva_drivers_headers",
+    hdrs = [":eva_drivers_configs", ":uapi_headers", ":msm_headers"]
+)
+
+
+load(":pineapple.bzl", "define_pineapple")
+define_pineapple()

+ 17 - 0
qcom/opensource/eva-kernel/Kbuild

@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+CONFIG_BUILD_VENDORSI := true
+
+# auto-detect subdirs
+ifneq ($(CONFIG_BUILD_VENDORSI), true)
+ifneq ($(CONFIG_ARCH_QTI_VM), y)
+include $(srctree)/techpack/eva/config/waipioeva.conf
+LINUXINCLUDE    += -include $(srctree)/techpack/eva/config/waipioevaconf.h
+endif
+
+LINUXINCLUDE    += -I$(srctree)/techpack/eva/include \
+                   -I$(srctree)/techpack/eva/include/uapi \
+		   -I$(srctree)/techpack/eva/include/uapi/eva
+endif
+
+obj-y +=msm/

+ 14 - 0
qcom/opensource/eva-kernel/Makefile

@@ -0,0 +1,14 @@
+KBUILD_OPTIONS+= EVA_ROOT=$(KERNEL_SRC)/$(M)
+
+all:
+	$(MAKE) -C $(KERNEL_SRC) M=$(M) modules $(KBUILD_OPTIONS)
+
+modules_install:
+	$(MAKE) M=$(M) -C $(KERNEL_SRC) modules_install
+
+%:
+	$(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS)
+
+clean:
+	rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers
+	rm -rf .tmp_versions

+ 5 - 0
qcom/opensource/eva-kernel/config/waipioeva.conf

@@ -0,0 +1,5 @@
+ifeq ($(CONFIG_QGKI),y)
+export CONFIG_MSM_EVA=y
+else
+export CONFIG_MSM_EVA=m
+endif

+ 6 - 0
qcom/opensource/eva-kernel/config/waipioevaconf.h

@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#define CONFIG_MSM_EVA 1

+ 18 - 0
qcom/opensource/eva-kernel/eva_kernel_board.mk

@@ -0,0 +1,18 @@
+# Build eva kernel driver
+
+ENABLE_EVA_KERNEL := true
+ifeq ($(TARGET_USES_QMAA), true)
+ifneq ($(TARGET_USES_QMAA_OVERRIDE_CVP), true)
+ENABLE_EVA_KERNEL := false
+endif
+endif
+ifeq ($(TARGET_BOARD_PLATFORM),volcano)
+ENABLE_EVA_KERNEL := false
+endif
+ifeq ($(ENABLE_EVA_KERNEL), true)
+ifneq ($(TARGET_BOARD_AUTO),true)
+ifeq ($(call is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true)
+BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm-eva.ko
+endif
+endif
+endif

+ 12 - 0
qcom/opensource/eva-kernel/eva_kernel_product.mk

@@ -0,0 +1,12 @@
+ENABLE_EVA_KERNEL := true
+ifeq ($(TARGET_USES_QMAA), true)
+ifneq ($(TARGET_USES_QMAA_OVERRIDE_CVP), true)
+ENABLE_EVA_KERNEL := false
+endif
+endif
+ifeq ($(TARGET_BOARD_PLATFORM),volcano)
+ENABLE_EVA_KERNEL := false
+endif
+ifeq ($(ENABLE_EVA_KERNEL), true)
+PRODUCT_PACKAGES += msm-eva.ko
+endif

+ 130 - 0
qcom/opensource/eva-kernel/eva_module_build.bzl

@@ -0,0 +1,130 @@
+load(
+    "//build/kernel/kleaf:kernel.bzl",
+    "ddk_module",
+    "ddk_submodule",
+    "kernel_module",
+    "kernel_modules_install",
+)
+load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir")
+
+def _register_module_to_map(module_map, name, path, config_option, srcs, config_srcs, deps, config_deps):
+    processed_config_srcs = {}
+    processed_config_deps = {}
+
+    for config_src_name in config_srcs:
+        config_src = config_srcs[config_src_name]
+
+        if type(config_src) == "list":
+            processed_config_srcs[config_src_name] = {True: config_src}
+        else:
+            processed_config_srcs[config_src_name] = config_src
+
+    for config_deps_name in config_deps:
+        config_dep = config_deps[config_deps_name]
+
+        if type(config_dep) == "list":
+            processed_config_deps[config_deps_name] = {True: config_dep}
+        else:
+            processed_config_deps[config_deps_name] = config_dep
+
+    module = struct(
+        name = name,
+        path = path,
+        srcs = srcs,
+        config_srcs = processed_config_srcs,
+        config_option = config_option,
+        deps = deps,
+        config_deps = processed_config_deps,
+    )
+
+    module_map[name] = module
+
+def _get_config_choices(map, options):
+    choices = []
+
+    for option in map:
+        choices.extend(map[option].get(option in options, []))
+
+    return choices
+
+def _get_kernel_build_options(modules, config_options):
+    all_options = {option: True for option in config_options}
+    all_options = all_options | {module.config_option: True for module in modules if module.config_option}
+
+    return all_options
+
+def _get_kernel_build_module_srcs(module, options, formatter):
+    srcs = module.srcs + _get_config_choices(module.config_srcs, options)
+    module_path = "{}/".format(module.path) if module.path else ""
+    globbed_srcs = native.glob(["{}{}".format(module_path, formatter(src)) for src in srcs])
+
+    return globbed_srcs
+
+def _get_kernel_build_module_deps(module, options, formatter):
+    deps = module.deps + _get_config_choices(module.config_deps, options)
+    deps = [formatter(dep) for dep in deps]
+
+    return deps
+
+def create_module_registry(hdrs = []):
+    module_map = {}
+
+    def register(name, path = None, config_option = None, srcs = [], config_srcs = {}, deps = [], config_deps = {}):
+        _register_module_to_map(module_map, name, path, config_option, srcs, config_srcs, deps, config_deps)
+
+    return struct(
+        register = register,
+        get = module_map.get,
+        hdrs = hdrs,
+        module_map = module_map,
+    )
+
+def define_target_variant_modules(target, variant, registry, modules, config_options = []):
+    kernel_build = "{}_{}".format(target, variant)
+    kernel_build_label = "//msm-kernel:{}".format(kernel_build)
+    modules = [registry.get(module_name) for module_name in modules]
+    options = _get_kernel_build_options(modules, config_options)
+    build_print = lambda message: print("{}: {}".format(kernel_build, message))
+    formatter = lambda s: s.replace("%b", kernel_build).replace("%t", target)
+
+    headers = ["//msm-kernel:all_headers"] + registry.hdrs
+    all_module_rules = []
+
+    for module in modules:
+        rule_name = "{}_{}".format(kernel_build, module.name)
+        module_srcs = _get_kernel_build_module_srcs(module, options, formatter)
+
+        if not module_srcs:
+            continue
+
+        ddk_submodule(
+            name = rule_name,
+            srcs = module_srcs,
+            out = "{}.ko".format(module.name),
+            copts = ["-Wno-format"],
+            deps = headers + _get_kernel_build_module_deps(module, options, formatter),
+            local_defines = options.keys(),
+        )
+
+        all_module_rules.append(rule_name)
+
+    ddk_module(
+        name = "{}_modules".format(kernel_build),
+        kernel_build = kernel_build_label,
+        deps = all_module_rules,
+    )
+
+    copy_to_dist_dir(
+        name = "{}_modules_dist".format(kernel_build),
+        data = [":{}_modules".format(kernel_build)],
+        dist_dir = "out/target/product/{}/dlkm/lib/modules/".format(kernel_build),
+        flat = True,
+        wipe_dist_dir = False,
+        allow_duplicate_filenames = False,
+        mode_overrides = {"**/*": "644"},
+    )
+
+
+def define_consolidate_gki_modules(target, registry, modules, config_options = []):
+    define_target_variant_modules(target, "consolidate", registry, modules, config_options)
+    define_target_variant_modules(target, "gki", registry, modules, config_options)

+ 45 - 0
qcom/opensource/eva-kernel/eva_modules.bzl

@@ -0,0 +1,45 @@
+load(":eva_module_build.bzl", "create_module_registry")
+
+EVA_KERNEL_ROOT = "eva-kernel"
+
+eva_modules = create_module_registry([":eva_drivers_headers"])
+register_eva_module = eva_modules.register
+
+register_eva_module(
+    name = "msm-eva",
+    path = "msm",
+    srcs = [
+        "eva/cvp.c",
+        "eva/cvp_core_hfi.c",
+        "eva/cvp_dump.c",
+        "eva/cvp_fw_load.c",
+        "eva/cvp_hfi.c",
+        "eva/cvp_power.c",
+        "eva/cvp_smem.c",
+        "eva/hfi_packetization.c",
+        "eva/hfi_response_handler.c",
+        "eva/msm_cvp.c",
+        "eva/msm_cvp_buf.c",
+        "eva/msm_cvp_clocks.c",
+        "eva/msm_cvp_common.c",
+        "eva/msm_cvp_core.c",
+        "eva/msm_cvp_debug.c",
+        "eva/msm_cvp_dsp.c",
+        "eva/msm_cvp_ioctl.c",
+        "eva/msm_cvp_platform.c",
+        "eva/msm_cvp_res_parse.c",
+        "eva/msm_cvp_synx.c",
+        "eva/vm/cvp_vm_main.c",
+        "eva/vm/cvp_vm_msgq.c",
+        "eva/vm/cvp_vm_resource.c",
+    ],
+    config_deps = {
+       "TARGET_SYNX_ENABLE": [
+           "//vendor/qcom/opensource/synx-kernel:synx_headers",
+           "//vendor/qcom/opensource/synx-kernel:%b_modules"
+        ],
+        "TARGET_DSP_ENABLE": [
+             "//vendor/qcom/opensource/dsp-kernel:%b_frpc-adsprpc"
+        ],
+    },
+)
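
The %b placeholders in the config_deps entries above are expanded by the formatter lambda in eva_module_build.bzl, which rewrites %b to the "{target}_{variant}" kernel build name and %t to the bare target; for example, a pineapple GKI build would resolve "//vendor/qcom/opensource/dsp-kernel:%b_frpc-adsprpc" to the pineapple_gki_frpc-adsprpc label.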

+ 278 - 0
qcom/opensource/eva-kernel/include/uapi/eva/media/msm_eva_private.h

@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef __MSM_EVA_PRIVATE_H__
+#define __MSM_EVA_PRIVATE_H__
+
+#include <linux/types.h>
+
+/* Commands type */
+#define EVA_KMD_CMD_BASE		0x10000000
+#define EVA_KMD_CMD_START		(EVA_KMD_CMD_BASE + 0x1000)
+
+/*
+ * Userspace clients pass one of the argument types below
+ * in struct eva_kmd_arg (@type field).
+ */
+
+/*
+ * EVA_KMD_GET_SESSION_INFO - this argument type is used to
+ *          get the session information from driver. it passes
+ *          struct eva_kmd_session_info {}
+ */
+#define EVA_KMD_GET_SESSION_INFO	(EVA_KMD_CMD_START + 1)
+
+/*
+ * EVA_KMD_REGISTER_BUFFER - this argument type is used to
+ *          register the buffer with the driver. It passes
+ *          struct eva_kmd_buffer {}
+ */
+#define EVA_KMD_REGISTER_BUFFER		(EVA_KMD_CMD_START + 3)
+
+/*
+ * EVA_KMD_UNREGISTER_BUFFER - this argument type is used to
+ *          unregister the buffer from the driver. It passes
+ *          struct eva_kmd_buffer {}
+ */
+#define EVA_KMD_UNREGISTER_BUFFER	(EVA_KMD_CMD_START + 4)
+
+#define EVA_KMD_UPDATE_POWER	(EVA_KMD_CMD_START + 17)
+
+#define EVA_KMD_SEND_CMD_PKT	(EVA_KMD_CMD_START + 64)
+
+#define EVA_KMD_RECEIVE_MSG_PKT	 (EVA_KMD_CMD_START + 65)
+
+#define EVA_KMD_SET_SYS_PROPERTY	(EVA_KMD_CMD_START + 66)
+
+#define EVA_KMD_GET_SYS_PROPERTY	(EVA_KMD_CMD_START + 67)
+
+#define EVA_KMD_SESSION_CONTROL		(EVA_KMD_CMD_START + 68)
+
+#define EVA_KMD_SEND_FENCE_CMD_PKT	(EVA_KMD_CMD_START + 69)
+
+#define EVA_KMD_FLUSH_ALL	(EVA_KMD_CMD_START + 70)
+
+#define EVA_KMD_FLUSH_FRAME	(EVA_KMD_CMD_START + 71)
+
+/* flags */
+#define EVA_KMD_FLAG_UNSECURE			0x00000000
+#define EVA_KMD_FLAG_SECURE			0x00000001
+
+/* buffer type */
+#define EVA_KMD_BUFTYPE_INPUT			0x00000001
+#define EVA_KMD_BUFTYPE_OUTPUT			0x00000002
+#define EVA_KMD_BUFTYPE_INTERNAL_1		0x00000003
+#define EVA_KMD_BUFTYPE_INTERNAL_2		0x00000004
+
+
+/**
+ * struct eva_kmd_session_info - session information
+ * @session_id:    current session id
+ */
+struct eva_kmd_session_info {
+	__u32 session_id;
+	__u32 reserved[10];
+};
+
+/**
+ * struct eva_kmd_buffer - buffer information to be registered
+ * @index:         index of buffer
+ * @type:          buffer type
+ * @fd:            file descriptor of buffer
+ * @size:          allocated size of buffer
+ * @offset:        offset in fd from where usable data starts
+ * @pixelformat:   fourcc format
+ * @flags:         buffer flags
+ */
+struct eva_kmd_buffer {
+	__u32 index;
+	__u32 type;
+	__u32 fd;
+	__u32 size;
+	__u32 offset;
+	__u32 pixelformat;
+	__u32 flags;
+	__u32 reserved[5];
+};
+
+/**
+ * struct eva_kmd_send_cmd - sending generic HFI command
+ * @cmd_address_fd:   file descriptor of cmd_address
+ * @cmd_size:         allocated size of buffer
+ */
+struct eva_kmd_send_cmd {
+	__u32 cmd_address_fd;
+	__u32 cmd_size;
+	__u32 reserved[10];
+};
+
+/**
+ * struct eva_kmd_client_data - store generic client
+ *                              data
+ * @transactionid:  transaction id
+ * @client_data1:   client data to be used during callback
+ * @client_data2:   client data to be used during callback
+ */
+struct eva_kmd_client_data {
+	__u32 transactionid;
+	__u32 client_data1;
+	__u32 client_data2;
+};
+
+/**
+ * Structures and macros for KMD arg data
+ */
+
+#define	MAX_HFI_PKT_SIZE	490
+
+struct eva_kmd_hfi_packet {
+	__u32 pkt_data[MAX_HFI_PKT_SIZE];
+	void *oob_buf;
+};
+
+#define EVA_KMD_PROP_HFI_VERSION	1
+#define EVA_KMD_PROP_SESSION_TYPE	2
+#define EVA_KMD_PROP_SESSION_KERNELMASK	3
+#define EVA_KMD_PROP_SESSION_PRIORITY	4
+#define EVA_KMD_PROP_SESSION_SECURITY	5
+#define EVA_KMD_PROP_SESSION_DSPMASK	6
+#define EVA_KMD_PROP_SESSION_DUMPOFFSET	7
+#define EVA_KMD_PROP_SESSION_DUMPSIZE	8
+#define EVA_KMD_PROP_SESSION_ERROR	9
+
+#define EVA_KMD_PROP_PWR_FDU	0x10
+#define EVA_KMD_PROP_PWR_ICA	0x11
+#define EVA_KMD_PROP_PWR_OD	0x12
+#define EVA_KMD_PROP_PWR_MPU	0x13
+#define EVA_KMD_PROP_PWR_FW	0x14
+#define EVA_KMD_PROP_PWR_DDR	0x15
+#define EVA_KMD_PROP_PWR_SYSCACHE	0x16
+#define EVA_KMD_PROP_PWR_FDU_OP	0x17
+#define EVA_KMD_PROP_PWR_ICA_OP	0x18
+#define EVA_KMD_PROP_PWR_OD_OP	0x19
+#define EVA_KMD_PROP_PWR_MPU_OP	0x1A
+#define EVA_KMD_PROP_PWR_FW_OP	0x1B
+#define EVA_KMD_PROP_PWR_DDR_OP	0x1C
+#define EVA_KMD_PROP_PWR_SYSCACHE_OP	0x1D
+#define EVA_KMD_PROP_PWR_FPS_FDU	0x1E
+#define EVA_KMD_PROP_PWR_FPS_MPU	0x1F
+#define EVA_KMD_PROP_PWR_FPS_OD	0x20
+#define EVA_KMD_PROP_PWR_FPS_ICA	0x21
+
+#define EVA_KMD_PROP_PWR_VADL 0x22
+#define EVA_KMD_PROP_PWR_VADL_OP 0x23
+#define EVA_KMD_PROP_PWR_FPS_VADL 0x24
+
+#define EVA_KMD_PROP_PWR_TOF 0x25
+#define EVA_KMD_PROP_PWR_TOF_OP 0x26
+#define EVA_KMD_PROP_PWR_FPS_TOF 0x27
+
+#define EVA_KMD_PROP_PWR_RGE 0x28
+#define EVA_KMD_PROP_PWR_RGE_OP 0x29
+#define EVA_KMD_PROP_PWR_FPS_RGE 0x2A
+
+#define EVA_KMD_PROP_PWR_XRA 0x2B
+#define EVA_KMD_PROP_PWR_XRA_OP 0x2C
+#define EVA_KMD_PROP_PWR_FPS_XRA 0x2D
+
+#define EVA_KMD_PROP_PWR_LSR 0x2E
+#define EVA_KMD_PROP_PWR_LSR_OP 0x2F
+#define EVA_KMD_PROP_PWR_FPS_LSR 0x30
+
+
+#define MAX_KMD_PROP_NUM_PER_PACKET		64
+#define MAX_KMD_PROP_TYPE	(EVA_KMD_PROP_PWR_FPS_ICA + 1)
+
+struct eva_kmd_sys_property {
+	__u32 prop_type;
+	__u32 data;
+};
+
+struct eva_kmd_sys_properties {
+	__u32 prop_num;
+	struct eva_kmd_sys_property prop_data[MAX_KMD_PROP_NUM_PER_PACKET];
+};
+
+#define SESSION_CREATE	1
+#define SESSION_DELETE	2
+#define SESSION_START	3
+#define SESSION_STOP	4
+#define SESSION_INFO	5
+
+struct eva_kmd_session_control {
+	__u32 ctrl_type;
+	__u32 ctrl_data[8];
+};
+
+#define MAX_HFI_FENCE_SIZE	64
+#define MAX_HFI_FENCE_OFFSET	MAX_HFI_PKT_SIZE
+struct eva_kmd_hfi_fence_packet {
+	__u32 pkt_data[MAX_HFI_FENCE_OFFSET];
+	__u32 fence_data[MAX_HFI_FENCE_SIZE];
+	__u64 frame_id;
+};
+
+struct eva_kmd_fence {
+	__u32 h_synx;
+};
+
+struct eva_kmd_fence_ctrl {
+	__u32 magic;
+	__u32 reserved;
+	__u64 frame_id;
+	__u32 num_fences;
+	__u32 output_index;
+	struct eva_kmd_fence fences[MAX_HFI_FENCE_SIZE];
+};
+
+#define MAX_FENCE_DATA_SIZE	(MAX_HFI_FENCE_SIZE + 6)
+
+struct eva_kmd_hfi_synx_packet {
+	__u32 pkt_data[MAX_HFI_PKT_SIZE];
+	union {
+		__u32 fence_data[MAX_FENCE_DATA_SIZE];
+		struct eva_kmd_fence_ctrl fc;
+	};
+	struct eva_kmd_oob_buf* oob_buf;
+};
+
+/**
+ * struct eva_kmd_arg
+ *
+ * @type:          command type
+ * @buf_offset:    offset to buffer list in the command
+ * @buf_num:       number of buffers in the command
+ * @session:       session information
+ * @req_power:     power information
+ * @regbuf:        buffer to be registered
+ * @unregbuf:      buffer to be unregistered
+ * @send_cmd:      sending generic HFI command
+ *
+ * @hfi_pkt:       HFI packet created by user library
+ * @sys_properties: System properties read or set by user library
+ * @hfi_fence_pkt: HFI fence packet created by user library
+ */
+struct eva_kmd_arg {
+	__u32 type;
+	__u32 buf_offset;
+	__u32 buf_num;
+	union eva_data_t {
+		struct eva_kmd_session_info session;
+		struct eva_kmd_buffer regbuf;
+		struct eva_kmd_buffer unregbuf;
+		struct eva_kmd_send_cmd send_cmd;
+		struct eva_kmd_hfi_packet hfi_pkt;
+		struct eva_kmd_sys_properties sys_properties;
+		struct eva_kmd_hfi_fence_packet hfi_fence_pkt;
+		struct eva_kmd_hfi_synx_packet hfi_synx_pkt;
+		struct eva_kmd_session_control session_ctrl;
+		__u64 frame_id;
+	} data;
+};
+
+struct eva_kmd_request_power {
+	__u32 deprecated;
+};
+#endif
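
As a rough illustration of the argument types documented above, here is a hypothetical userspace sketch that queries the session id through EVA_KMD_GET_SESSION_INFO. The EVA_IOCTL request code is an assumption (it is not defined in the lines shown in this diff), and the /dev/cvp node name is inferred from DRIVER_NAME in msm/eva/cvp.c below:

/*
 * Hypothetical sketch only: EVA_IOCTL is an assumed ioctl request code,
 * not part of the header shown above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "msm_eva_private.h"	/* struct eva_kmd_arg, EVA_KMD_GET_SESSION_INFO */

static int query_eva_session_id(void)
{
	struct eva_kmd_arg arg = { 0 };
	int fd, rc;

	fd = open("/dev/cvp", O_RDWR);	/* node name from DRIVER_NAME in cvp.c */
	if (fd < 0)
		return -1;

	arg.type = EVA_KMD_GET_SESSION_INFO;
	rc = ioctl(fd, EVA_IOCTL, &arg);	/* EVA_IOCTL: assumed */
	if (!rc)
		printf("session id: %u\n", arg.data.session.session_id);

	close(fd);
	return rc;
}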

+ 69 - 0
qcom/opensource/eva-kernel/msm/Kbuild

@@ -0,0 +1,69 @@
+LINUXINCLUDE    += -I$(EVA_ROOT)/include \
+                   -I$(EVA_ROOT)/include/uapi \
+				   -I$(EVA_ROOT)/include/uapi/eva
+
+#srctree is /kernel_platform/common/
+
+ccflags-y += -I$(EVA_ROOT)/msm/eva/ \
+    -I$(srctree)/drivers/media/platform/msm/synx/
+
+# Add flag to compile the actual mmrm implementation instead of the stub version.
+# To follow up with the mmrm team: do techpack users need to define this long term?
+KBUILD_CPPFLAGS += -DCONFIG_MSM_MMRM
+
+# ported from Android.mk
+$(info within KBUILD file KBUILD_EXTRA_SYMBOLS = $(KBUILD_EXTRA_SYMBOLS))
+
+ifeq ($(CONFIG_ARCH_WAIPIO), y)
+$(info within KBUILD file CONFIG_ARCH_WAIPIO = $(CONFIG_ARCH_WAIPIO))
+# include $(EVA_ROOT)/config/waipio.mk
+KBUILD_CPPFLAGS += -DCONFIG_EVA_WAIPIO=1
+ccflags-y += -DCONFIG_EVA_WAIPIO=1
+endif
+
+ifeq ($(CONFIG_ARCH_KALAMA), y)
+$(info within KBUILD file CONFIG_ARCH_KALAMA = $(CONFIG_ARCH_KALAMA))
+# include $(EVA_ROOT)/config/waipio.mk
+KBUILD_CPPFLAGS += -DCONFIG_EVA_KALAMA=1
+ccflags-y += -DCONFIG_EVA_KALAMA=1
+endif
+
+ifeq ($(CONFIG_ARCH_PINEAPPLE), y)
+$(info within KBUILD file CONFIG_ARCH_PINEAPPLE = $(CONFIG_ARCH_PINEAPPLE))
+KBUILD_CPPFLAGS += -DCONFIG_EVA_PINEAPPLE=1 -DCVP_CONFIG_SYNX_V2=1
+ccflags-y += -DCONFIG_EVA_PINEAPPLE=1 -DCVP_CONFIG_SYNX_V2=1
+ccflags-y += -I$(EVA_ROOT)/../synx-kernel/msm/synx/ \
+    -I$(EVA_ROOT)/../dsp-kernel/include/ \
+    -I$(EVA_ROOT)/../synx-kernel/include/uapi/synx/media/
+endif
+
+
+ifeq ($(CONFIG_EVA_LE), 1)
+ccflags-y += -DCONFIG_EVA_TVM=1
+endif
+
+msm-eva-objs := eva/cvp.o \
+        eva/msm_cvp_ioctl.o \
+        eva/msm_cvp_platform.o \
+        eva/msm_cvp_common.o \
+        eva/msm_cvp_core.o \
+        eva/msm_cvp.o \
+        eva/cvp_smem.o \
+        eva/msm_cvp_debug.o \
+        eva/msm_cvp_res_parse.o \
+        eva/cvp_dump.o \
+        eva/cvp_hfi.o \
+        eva/hfi_response_handler.o \
+        eva/hfi_packetization.o \
+        eva/cvp_core_hfi.o \
+        eva/msm_cvp_clocks.o\
+        eva/msm_cvp_dsp.o \
+        eva/msm_cvp_buf.o \
+        eva/msm_cvp_synx.o \
+	eva/cvp_fw_load.o \
+	eva/cvp_power.o \
+	eva/vm/cvp_vm_main.o \
+	eva/vm/cvp_vm_msgq.o \
+	eva/vm/cvp_vm_resource.o
+obj-m += msm-eva.o
+

+ 27 - 0
qcom/opensource/eva-kernel/msm/Makefile

@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y += -I$(srctree)/techpack/eva/msm/eva/ \
+    -I$(srctree)/drivers/media/platform/msm/synx/
+
+msm-eva-objs := eva/cvp.o \
+                eva/msm_cvp_ioctl.o \
+                eva/msm_cvp_platform.o \
+                eva/msm_cvp_common.o \
+                eva/msm_cvp_core.o \
+                eva/msm_cvp.o \
+                eva/msm_smem.o \
+                eva/msm_cvp_debug.o \
+                eva/msm_cvp_res_parse.o \
+                eva/cvp_dump.o \
+                eva/cvp_hfi.o \
+                eva/hfi_response_handler.o \
+                eva/hfi_packetization.o \
+                eva/cvp_core_hfi.o \
+                eva/msm_cvp_clocks.o\
+                eva/msm_cvp_dsp.o \
+                eva/msm_cvp_buf.o \
+                eva/msm_cvp_synx.o \
+		eva/cvp_fw_load.o \
+		eva/cvp_power.o
+
+obj-$(CONFIG_MSM_EVA) := msm-eva.o
+

+ 630 - 0
qcom/opensource/eva-kernel/msm/eva/cvp.c

@@ -0,0 +1,630 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/io.h>
+#include "msm_cvp_core.h"
+#include "msm_cvp_common.h"
+#include "msm_cvp_debug.h"
+#include "msm_cvp_internal.h"
+#include "msm_cvp_res_parse.h"
+#include "msm_cvp_resources.h"
+#include "msm_cvp_buf.h"
+#include "cvp_hfi_api.h"
+#include "cvp_private.h"
+#include "msm_cvp_clocks.h"
+#include "msm_cvp_dsp.h"
+#include "msm_cvp.h"
+#include "vm/cvp_vm.h"
+
+#define CLASS_NAME              "cvp"
+#define DRIVER_NAME             "cvp"
+
+struct msm_cvp_drv *cvp_driver;
+
+static int cvp_open(struct inode *inode, struct file *filp)
+{
+	struct msm_cvp_inst *inst;
+
+	dprintk(CVP_SESS, "%s\n", __func__);
+
+	inst = msm_cvp_open(MSM_CVP_USER, current);
+	if (!inst) {
+		dprintk(CVP_ERR, "Failed to create cvp instance\n");
+		return -ENOMEM;
+	}
+	filp->private_data = inst;
+	return 0;
+}
+
+static int cvp_close(struct inode *inode, struct file *filp)
+{
+	int rc = 0;
+	struct msm_cvp_inst *inst = filp->private_data;
+
+	rc = msm_cvp_close(inst);
+	filp->private_data = NULL;
+	return rc;
+}
+
+static unsigned int cvp_poll(struct file *filp, struct poll_table_struct *p)
+{
+	int rc = 0;
+	struct msm_cvp_inst *inst = filp->private_data;
+	unsigned long flags = 0;
+
+	poll_wait(filp, &inst->event_handler.wq, p);
+
+	spin_lock_irqsave(&inst->event_handler.lock, flags);
+	if (inst->event_handler.event == CVP_SSR_EVENT)
+		rc |= POLLPRI;
+	if (inst->event_handler.event == CVP_DUMP_EVENT)
+		rc |= POLLIN;
+	inst->event_handler.event = CVP_NO_EVENT;
+	spin_unlock_irqrestore(&inst->event_handler.lock, flags);
+
+	return rc;
+}
+
+static const struct file_operations cvp_fops = {
+	.owner = THIS_MODULE,
+	.open = cvp_open,
+	.release = cvp_close,
+	.unlocked_ioctl = cvp_unblocked_ioctl,
+	.compat_ioctl = cvp_compat_ioctl,
+	.poll = cvp_poll,
+};
+
+static int read_platform_resources(struct msm_cvp_core *core,
+		struct platform_device *pdev)
+{
+	int rc = 0;
+
+	if (!core || !pdev) {
+		dprintk(CVP_ERR, "%s: Invalid params %pK %pK\n",
+			__func__, core, pdev);
+		return -EINVAL;
+	}
+
+	core->hfi_type = CVP_HFI_IRIS;
+	core->resources.pdev = pdev;
+	if (pdev->dev.of_node) {
+		/* Target supports DT, parse from it */
+		rc = cvp_read_platform_resources_from_drv_data(core);
+		rc = cvp_read_platform_resources_from_dt(&core->resources);
+	} else {
+		dprintk(CVP_ERR, "pdev node is NULL\n");
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+static int msm_cvp_initialize_core(struct platform_device *pdev,
+				struct msm_cvp_core *core)
+{
+	int i = 0;
+	int rc = 0;
+
+	if (!core)
+		return -EINVAL;
+	rc = read_platform_resources(core, pdev);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to get platform resources\n");
+		return rc;
+	}
+
+	INIT_LIST_HEAD(&core->instances);
+	mutex_init(&core->lock);
+	mutex_init(&core->clk_lock);
+
+	core->state = CVP_CORE_UNINIT;
+	for (i = SYS_MSG_INDEX(SYS_MSG_START);
+		i <= SYS_MSG_INDEX(SYS_MSG_END); i++) {
+		init_completion(&core->completions[i]);
+	}
+
+	INIT_WORK(&core->ssr_work, msm_cvp_ssr_handler);
+	core->ssr_count = 0;
+
+	return rc;
+}
+
+static ssize_t link_name_show(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct msm_cvp_core *core = dev_get_drvdata(dev);
+
+	if (core)
+		if (dev == core->dev)
+			return snprintf(buf, PAGE_SIZE, "msm_cvp\n");
+		else
+			return 0;
+	else
+		return 0;
+}
+
+static DEVICE_ATTR_RO(link_name);
+
+static ssize_t pwr_collapse_delay_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long val = 0;
+	int rc = 0;
+	struct msm_cvp_core *core = NULL;
+
+	rc = kstrtoul(buf, 0, &val);
+	if (rc)
+		return rc;
+	else if (!val)
+		return -EINVAL;
+
+	core = cvp_driver->cvp_core;
+	if (!core)
+		return -EINVAL;
+	core->resources.msm_cvp_pwr_collapse_delay = val;
+	return count;
+}
+
+static ssize_t pwr_collapse_delay_show(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct msm_cvp_core *core = NULL;
+
+	core = cvp_driver->cvp_core;
+	if (!core)
+		return -EINVAL;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+		core->resources.msm_cvp_pwr_collapse_delay);
+}
+
+static DEVICE_ATTR_RW(pwr_collapse_delay);
+
+static ssize_t thermal_level_show(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", cvp_driver->thermal_level);
+}
+
+static ssize_t thermal_level_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int rc = 0, val = 0;
+
+	rc = kstrtoint(buf, 0, &val);
+	if (rc || val < 0) {
+		dprintk(CVP_WARN,
+			"Invalid thermal level value: %s\n", buf);
+		return -EINVAL;
+	}
+	dprintk(CVP_PWR, "Thermal level old %d new %d\n",
+			cvp_driver->thermal_level, val);
+
+	if (val == cvp_driver->thermal_level)
+		return count;
+	cvp_driver->thermal_level = val;
+
+	msm_cvp_comm_handle_thermal_event();
+	return count;
+}
+
+static DEVICE_ATTR_RW(thermal_level);
+
+static ssize_t sku_version_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%d",
+			cvp_driver->sku_version);
+}
+
+static DEVICE_ATTR_RO(sku_version);
+
+static ssize_t boot_store(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	int rc = 0, val = 0;
+	static int booted;
+
+	rc = kstrtoint(buf, 0, &val);
+	if (rc || val < 0) {
+		dprintk(CVP_WARN,
+			"Invalid boot value: %s\n", buf);
+		return -EINVAL;
+	}
+
+	if (val == 1 && booted == 0) {
+		struct msm_cvp_inst *inst;
+
+		inst = msm_cvp_open(MSM_CVP_BOOT, current);
+		if (!inst) {
+			dprintk(CVP_ERR,
+			"Failed to create cvp instance\n");
+			return -ENOMEM;
+		}
+		rc = msm_cvp_close(inst);
+		if (rc) {
+			dprintk(CVP_ERR,
+			"Failed to close cvp instance\n");
+			return rc;
+		}
+	} else if (val == 2) {
+		struct msm_cvp_inst *inst;
+
+		inst = msm_cvp_open(MSM_CVP_USER, current);
+		if (!inst) {
+			dprintk(CVP_ERR,
+			"Failed to create eva instance\n");
+			return -ENOMEM;
+		}
+		rc = msm_cvp_session_create(inst);
+		if (rc)
+			dprintk(CVP_ERR, "Failed to create eva session\n");
+
+		rc = msm_cvp_close(inst);
+		if (rc) {
+			dprintk(CVP_ERR,
+			"Failed to close eva instance\n");
+			return rc;
+		}
+	}
+	booted = 1;
+	return count;
+}
+
+static DEVICE_ATTR_WO(boot);
+
+static struct attribute *msm_cvp_core_attrs[] = {
+		&dev_attr_pwr_collapse_delay.attr,
+		&dev_attr_thermal_level.attr,
+		&dev_attr_sku_version.attr,
+		&dev_attr_link_name.attr,
+		&dev_attr_boot.attr,
+		NULL
+};
+
+static struct attribute_group msm_cvp_core_attr_group = {
+		.attrs = msm_cvp_core_attrs,
+};
+
+static const struct of_device_id msm_cvp_plat_match[] = {
+	{.compatible = "qcom,msm-cvp"},
+	{.compatible = "qcom,msm-cvp,context-bank"},
+	{.compatible = "qcom,msm-cvp,bus"},
+	{.compatible = "qcom,msm-cvp,mem-cdsp"},
+	{}
+};
+
+static int msm_probe_cvp_device(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct msm_cvp_core *core;
+
+	if (!cvp_driver) {
+		dprintk(CVP_ERR, "Invalid cvp driver\n");
+		return -EINVAL;
+	}
+
+	core = kzalloc(sizeof(*core), GFP_KERNEL);
+	if (!core)
+		return -ENOMEM;
+
+	core->platform_data = cvp_get_drv_data(&pdev->dev);
+	dev_set_drvdata(&pdev->dev, core);
+	rc = msm_cvp_initialize_core(pdev, core);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to init core\n");
+		goto err_core_init;
+	}
+
+	rc = alloc_chrdev_region(&core->dev_num, 0, 1, DRIVER_NAME);
+	if (rc < 0) {
+		dprintk(CVP_ERR, "alloc_chrdev_region failed: %d\n",
+				rc);
+		goto err_alloc_chrdev;
+	}
+
+	core->class = class_create(THIS_MODULE, CLASS_NAME);
+	if (IS_ERR(core->class)) {
+		rc = PTR_ERR(core->class);
+		dprintk(CVP_ERR, "class_create failed: %d\n",
+				rc);
+		goto err_class_create;
+	}
+
+	core->dev = device_create(core->class, NULL,
+		core->dev_num, NULL, DRIVER_NAME);
+	if (IS_ERR(core->dev)) {
+		rc = PTR_ERR(core->dev);
+		dprintk(CVP_ERR, "device_create failed: %d\n",
+				rc);
+		goto err_device_create;
+	}
+	dev_set_drvdata(core->dev, core);
+
+	cdev_init(&core->cdev, &cvp_fops);
+	rc = cdev_add(&core->cdev,
+			MKDEV(MAJOR(core->dev_num), 0), 1);
+	if (rc < 0) {
+		dprintk(CVP_ERR, "cdev_add failed: %d\n",
+				rc);
+		goto error_cdev_add;
+	}
+
+	rc = sysfs_create_group(&core->dev->kobj, &msm_cvp_core_attr_group);
+	if (rc) {
+		dprintk(CVP_ERR,
+				"Failed to create attributes\n");
+		goto err_cores_exceeded;
+	}
+
+	/* VM manager shall be started before HFI init */
+	vm_manager.vm_ops->vm_start(core);
+
+	core->dev_ops = cvp_hfi_initialize(core->hfi_type,
+				&core->resources, &cvp_handle_cmd_response);
+	if (IS_ERR_OR_NULL(core->dev_ops)) {
+		mutex_lock(&cvp_driver->lock);
+		mutex_unlock(&cvp_driver->lock);
+
+		rc = PTR_ERR(core->dev_ops) ?: -EBADHANDLE;
+		if (rc != -EPROBE_DEFER)
+			dprintk(CVP_ERR, "Failed to create HFI device\n");
+		else
+			dprintk(CVP_CORE, "msm_cvp: request probe defer\n");
+		goto err_hfi_initialize;
+	}
+
+	cvp_synx_ftbl_init(core);
+
+	mutex_lock(&cvp_driver->lock);
+	cvp_driver->cvp_core = core;
+	mutex_unlock(&cvp_driver->lock);
+
+	cvp_driver->debugfs_root = msm_cvp_debugfs_init_drv();
+	if (!cvp_driver->debugfs_root)
+		dprintk(CVP_ERR, "Failed to create debugfs for msm_cvp\n");
+
+	core->debugfs_root = msm_cvp_debugfs_init_core(
+		core, cvp_driver->debugfs_root);
+
+	cvp_driver->sku_version = core->resources.sku_version;
+
+	dprintk(CVP_CORE, "populating sub devices\n");
+	/*
+	 * Trigger probe for each sub-device i.e. qcom,msm-cvp,context-bank.
+	 * When msm_cvp_probe is called for each sub-device, parse the
+	 * context-bank details and store it in core->resources.context_banks
+	 * list.
+	 */
+	rc = of_platform_populate(pdev->dev.of_node, msm_cvp_plat_match, NULL,
+			&pdev->dev);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to trigger probe for sub-devices\n");
+		goto err_fail_sub_device_probe;
+	}
+
+	atomic64_set(&core->kernel_trans_id, ARRAY_SIZE(cvp_hfi_defs));
+
+	if (core->resources.dsp_enabled) {
+		rc = cvp_dsp_device_init();
+		if (rc)
+			dprintk(CVP_WARN, "Failed to initialize DSP driver\n");
+	} else {
+		dprintk(CVP_DSP, "DSP interface not enabled\n");
+	}
+
+	return rc;
+
+err_fail_sub_device_probe:
+	cvp_hfi_deinitialize(core->hfi_type, core->dev_ops);
+	debugfs_remove_recursive(cvp_driver->debugfs_root);
+err_hfi_initialize:
+err_cores_exceeded:
+	cdev_del(&core->cdev);
+error_cdev_add:
+	device_destroy(core->class, core->dev_num);
+err_device_create:
+	class_destroy(core->class);
+err_class_create:
+	unregister_chrdev_region(core->dev_num, 1);
+err_alloc_chrdev:
+	sysfs_remove_group(&pdev->dev.kobj, &msm_cvp_core_attr_group);
+err_core_init:
+	dev_set_drvdata(&pdev->dev, NULL);
+	kfree(core);
+	return rc;
+}
+
+static int msm_cvp_probe_mem_cdsp(struct platform_device *pdev)
+{
+	return cvp_read_mem_cdsp_resources_from_dt(pdev);
+}
+
+static int msm_cvp_probe_context_bank(struct platform_device *pdev)
+{
+	return cvp_read_context_bank_resources_from_dt(pdev);
+}
+
+static int msm_cvp_probe_bus(struct platform_device *pdev)
+{
+	return cvp_read_bus_resources_from_dt(pdev);
+}
+
+static int msm_cvp_probe(struct platform_device *pdev)
+{
+	/*
+	 * Sub devices probe will be triggered by of_platform_populate() towards
+	 * the end of the probe function after msm-cvp device probe is
+	 * completed. Return immediately after completing sub-device probe.
+	 */
+	if (of_device_is_compatible(pdev->dev.of_node, "qcom,msm-cvp")) {
+		return msm_probe_cvp_device(pdev);
+	} else if (of_device_is_compatible(pdev->dev.of_node,
+		"qcom,msm-cvp,bus")) {
+		return msm_cvp_probe_bus(pdev);
+	} else if (of_device_is_compatible(pdev->dev.of_node,
+		"qcom,msm-cvp,context-bank")) {
+		return msm_cvp_probe_context_bank(pdev);
+	} else if (of_device_is_compatible(pdev->dev.of_node,
+		"qcom,msm-cvp,mem-cdsp")) {
+		return msm_cvp_probe_mem_cdsp(pdev);
+	}
+
+	/* How did we end up here? */
+	MSM_CVP_ERROR(1);
+	return -EINVAL;
+}
+
+static int msm_cvp_remove(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct msm_cvp_core *core;
+
+	if (!pdev) {
+		dprintk(CVP_ERR, "%s invalid input %pK", __func__, pdev);
+		return -EINVAL;
+	}
+
+	core = dev_get_drvdata(&pdev->dev);
+	if (!core) {
+		dprintk(CVP_ERR, "%s invalid core", __func__);
+		return -EINVAL;
+	}
+
+	cvp_hfi_deinitialize(core->hfi_type, core->dev_ops);
+	msm_cvp_free_platform_resources(&core->resources);
+	sysfs_remove_group(&pdev->dev.kobj, &msm_cvp_core_attr_group);
+	dev_set_drvdata(&pdev->dev, NULL);
+	mutex_destroy(&core->lock);
+	mutex_destroy(&core->clk_lock);
+	kfree(core);
+	return rc;
+}
+
+static int msm_cvp_pm_suspend(struct device *dev)
+{
+	int rc = 0;
+	struct msm_cvp_core *core;
+
+	/*
+	 * Bail out if
+	 * - driver possibly not probed yet
+	 * - not the main device. We don't support power management on
+	 *   subdevices (e.g. context banks)
+	 */
+	if (!dev || !dev->driver ||
+		!of_device_is_compatible(dev->of_node, "qcom,msm-cvp"))
+		return 0;
+
+	core = dev_get_drvdata(dev);
+	if (!core) {
+		dprintk(CVP_ERR, "%s invalid core\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = msm_cvp_suspend();
+	if (rc == -ENOTSUPP)
+		rc = 0;
+	else if (rc)
+		dprintk(CVP_WARN, "Failed to suspend: %d\n", rc);
+
+
+	return rc;
+}
+
+static int msm_cvp_pm_resume(struct device *dev)
+{
+	dprintk(CVP_INFO, "%s\n", __func__);
+	return 0;
+}
+
+static const struct dev_pm_ops msm_cvp_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(msm_cvp_pm_suspend, msm_cvp_pm_resume)
+};
+
+MODULE_DEVICE_TABLE(of, msm_cvp_plat_match);
+
+static struct platform_driver msm_cvp_driver = {
+	.probe = msm_cvp_probe,
+	.remove = msm_cvp_remove,
+	.driver = {
+		.name = "msm_cvp",
+		.of_match_table = msm_cvp_plat_match,
+		.pm = &msm_cvp_pm_ops,
+	},
+};
+
+static int __init msm_cvp_init(void)
+{
+	int rc = 0;
+
+	cvp_driver = kzalloc(sizeof(*cvp_driver), GFP_KERNEL);
+	if (!cvp_driver) {
+		dprintk(CVP_ERR,
+			"Failed to allocate memory for msm_cvp_drv\n");
+		return -ENOMEM;
+	}
+
+	mutex_init(&cvp_driver->lock);
+
+	rc = platform_driver_register(&msm_cvp_driver);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"Failed to register platform driver\n");
+		kfree(cvp_driver);
+		cvp_driver = NULL;
+		return rc;
+	}
+
+	cvp_driver->msg_cache.cache = KMEM_CACHE(cvp_session_msg, 0);
+	cvp_driver->frame_cache.cache = KMEM_CACHE(msm_cvp_frame, 0);
+	cvp_driver->buf_cache.cache = KMEM_CACHE(cvp_internal_buf, 0);
+	cvp_driver->smem_cache.cache = KMEM_CACHE(msm_cvp_smem, 0);
+	mutex_init(&wncc_buf_pool.lock);
+
+	return rc;
+}
+
+static void __exit msm_cvp_exit(void)
+{
+	cvp_dsp_device_exit();
+	kmem_cache_destroy(cvp_driver->msg_cache.cache);
+	kmem_cache_destroy(cvp_driver->frame_cache.cache);
+	kmem_cache_destroy(cvp_driver->buf_cache.cache);
+	kmem_cache_destroy(cvp_driver->smem_cache.cache);
+
+	platform_driver_unregister(&msm_cvp_driver);
+	debugfs_remove_recursive(cvp_driver->debugfs_root);
+	mutex_destroy(&cvp_driver->lock);
+	mutex_destroy(&wncc_buf_pool.lock);
+	kfree(cvp_driver);
+	cvp_driver = NULL;
+}
+
+module_init(msm_cvp_init);
+module_exit(msm_cvp_exit);
+
+MODULE_SOFTDEP("pre: msm-mmrm");
+MODULE_SOFTDEP("pre: synx-driver");
+MODULE_SOFTDEP("pre: frpc-adsprpc");
+MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(DMA_BUF);
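
The cvp_poll() implementation above raises POLLPRI for CVP_SSR_EVENT and POLLIN for CVP_DUMP_EVENT. A minimal userspace sketch of consuming those events, assuming the /dev/cvp node name implied by DRIVER_NAME:

/* Hypothetical sketch; follows the poll(2) contract against /dev/cvp. */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

static int wait_for_eva_event(void)
{
	struct pollfd pfd = { .events = POLLIN | POLLPRI };
	int rc;

	pfd.fd = open("/dev/cvp", O_RDWR);
	if (pfd.fd < 0)
		return -1;

	rc = poll(&pfd, 1, -1);		/* block until an SSR or dump event */
	if (rc > 0) {
		if (pfd.revents & POLLPRI)
			printf("SSR event\n");
		if (pfd.revents & POLLIN)
			printf("dump event\n");
	}

	close(pfd.fd);
	return rc > 0 ? 0 : -1;
}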

+ 50 - 0
qcom/opensource/eva-kernel/msm/eva/cvp_comm_def.h

@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _MSM_COMM_DEF_H_
+#define _MSM_COMM_DEF_H_
+
+#include <linux/types.h>
+#include <linux/gunyah/gh_rm_drv.h>
+
+enum op_mode {
+	OP_NORMAL,
+	OP_DRAINING,
+	OP_FLUSH,
+	OP_INVALID,
+};
+
+enum queue_state {
+	QUEUE_INIT,
+	QUEUE_ACTIVE = 1,
+	QUEUE_START,
+	QUEUE_STOP,
+	QUEUE_INVALID,
+};
+
+#ifdef CONFIG_EVA_TVM
+
+#else	/* LA target starts here */
+
+#ifdef CONFIG_EVA_KALAMA
+#define CVP_SYNX_ENABLED 1
+#define CVP_MMRM_ENABLED 1
+#define CVP_FASTRPC_ENABLED 1
+#endif	/* End of CONFIG_EVA_KALAMA */
+
+#ifdef CONFIG_EVA_PINEAPPLE
+#define CVP_MMRM_ENABLED 1
+#define CVP_SYNX_ENABLED 1
+#define CVP_FASTRPC_ENABLED 1
+#endif	/* End of CONFIG_EVA_PINEAPPLE */
+
+
+#ifdef CONFIG_EVA_WAIPIO
+#define CVP_MINIDUMP_ENABLED 1
+#endif
+
+#endif	/* End CONFIG_EVA_TVM */
+
+#endif

+ 53 - 0
qcom/opensource/eva-kernel/msm/eva/cvp_core_hfi.c

@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include "msm_cvp_debug.h"
+#include "cvp_hfi_api.h"
+#include "cvp_core_hfi.h"
+
+struct cvp_hfi_ops *cvp_hfi_initialize(enum msm_cvp_hfi_type hfi_type,
+		struct msm_cvp_platform_resources *res,
+		hfi_cmd_response_callback callback)
+{
+	struct cvp_hfi_ops *ops_tbl = NULL;
+	int rc = 0;
+
+	ops_tbl = kzalloc(sizeof(struct cvp_hfi_ops), GFP_KERNEL);
+	if (!ops_tbl) {
+		dprintk(CVP_ERR, "%s: failed to allocate ops_tbl\n", __func__);
+		return NULL;
+	}
+
+	rc = cvp_iris_hfi_initialize(ops_tbl, res, callback);
+
+	if (rc) {
+		if (rc != -EPROBE_DEFER)
+			dprintk(CVP_ERR, "%s device init failed rc = %d",
+				__func__, rc);
+		goto err_hfi_init;
+	}
+
+	return ops_tbl;
+
+err_hfi_init:
+	kfree(ops_tbl);
+	return ERR_PTR(rc);
+}
+
+void cvp_hfi_deinitialize(enum msm_cvp_hfi_type hfi_type,
+			struct cvp_hfi_ops *ops_tbl)
+{
+	if (!ops_tbl) {
+		dprintk(CVP_ERR, "%s invalid device %pK", __func__, ops_tbl);
+		return;
+	}
+
+	cvp_iris_hfi_delete_device(ops_tbl->hfi_device_data);
+
+	kfree(ops_tbl);
+}
+
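
Note the asymmetry above: cvp_hfi_initialize() returns NULL on allocation failure but ERR_PTR(rc) on init failure. This is why msm_probe_cvp_device() in cvp.c checks the result with IS_ERR_OR_NULL() and derives an error code via PTR_ERR(core->dev_ops) ?: -EBADHANDLE, which covers both cases.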

+ 302 - 0
qcom/opensource/eva-kernel/msm/eva/cvp_core_hfi.h

@@ -0,0 +1,302 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __H_CVP_CORE_HFI_H__
+#define __H_CVP_CORE_HFI_H__
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/pm_qos.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/soc/qcom/msm_mmrm.h>
+#include "cvp_hfi_api.h"
+#include "cvp_hfi_helper.h"
+#include "cvp_hfi_api.h"
+#include "cvp_hfi.h"
+#include "msm_cvp_resources.h"
+#include "hfi_packetization.h"
+
+#define HFI_MASK_QHDR_TX_TYPE			0xFF000000
+#define HFI_MASK_QHDR_RX_TYPE			0x00FF0000
+#define HFI_MASK_QHDR_PRI_TYPE			0x0000FF00
+#define HFI_MASK_QHDR_Q_ID_TYPE			0x000000FF
+#define HFI_Q_ID_HOST_TO_CTRL_CMD_Q		0x00
+#define HFI_Q_ID_CTRL_TO_HOST_MSG_Q		0x01
+#define HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q	0x02
+#define HFI_MASK_QHDR_STATUS			0x000000FF
+
+#define CVP_IFACEQ_NUMQ					3
+#define CVP_IFACEQ_CMDQ_IDX				0
+#define CVP_IFACEQ_MSGQ_IDX				1
+#define CVP_IFACEQ_DBGQ_IDX				2
+#define CVP_IFACEQ_MAX_BUF_COUNT			50
+#define CVP_IFACE_MAX_PARALLEL_CLNTS		16
+#define CVP_IFACEQ_DFLT_QHDR				0x01010000
+
+#define CVP_MAX_NAME_LENGTH 64
+#define CVP_MAX_PC_SKIP_COUNT 10
+#define CVP_MAX_SUBCACHES 4
+#define CVP_MAX_SUBCACHE_SIZE 52
+
+struct cvp_hfi_queue_table_header {
+	u32 qtbl_version;
+	u32 qtbl_size;
+	u32 qtbl_qhdr0_offset;
+	u32 qtbl_qhdr_size;
+	u32 qtbl_num_q;
+	u32 qtbl_num_active_q;
+	void *device_addr;
+	char name[256];
+};
+
+struct cvp_hfi_queue_header {
+	u32 qhdr_status;
+	u32 qhdr_start_addr;
+	u32 qhdr_type;
+	u32 qhdr_q_size;
+	u32 qhdr_pkt_size;
+	u32 qhdr_pkt_drop_cnt;
+	u32 qhdr_rx_wm;
+	u32 qhdr_tx_wm;
+	u32 qhdr_rx_req;
+	u32 qhdr_tx_req;
+	u32 qhdr_rx_irq_status;
+	u32 qhdr_tx_irq_status;
+	u32 qhdr_read_idx;
+	u32 qhdr_write_idx;
+};
+
+struct cvp_hfi_mem_map_table {
+	u32 mem_map_num_entries;
+	u32 mem_map_table_base_addr;
+};
+
+struct cvp_hfi_mem_map {
+	u32 virtual_addr;
+	u32 physical_addr;
+	u32 size;
+	u32 attr;
+};
+
+#define CVP_IFACEQ_TABLE_SIZE (sizeof(struct cvp_hfi_queue_table_header) \
+	+ sizeof(struct cvp_hfi_queue_header) * CVP_IFACEQ_NUMQ)
+
+#define CVP_IFACEQ_QUEUE_SIZE	(CVP_IFACEQ_MAX_PKT_SIZE *  \
+	CVP_IFACEQ_MAX_BUF_COUNT * CVP_IFACE_MAX_PARALLEL_CLNTS)
+
+#define CVP_IFACEQ_GET_QHDR_START_ADDR(ptr, i)     \
+	(void *)((ptr + sizeof(struct cvp_hfi_queue_table_header)) + \
+		(i * sizeof(struct cvp_hfi_queue_header)))
+
+#define QDSS_SIZE 4096
+#define SFR_SIZE 1048576
+
+#define QUEUE_SIZE (CVP_IFACEQ_TABLE_SIZE + \
+	(CVP_IFACEQ_QUEUE_SIZE * CVP_IFACEQ_NUMQ))
+
+#define ALIGNED_QDSS_SIZE ALIGN(QDSS_SIZE, SZ_4K)
+#define ALIGNED_SFR_SIZE ALIGN(SFR_SIZE, SZ_4K)
+#define ALIGNED_QUEUE_SIZE ALIGN(QUEUE_SIZE, SZ_4K)
+#define SHARED_QSIZE ALIGN(ALIGNED_SFR_SIZE + ALIGNED_QUEUE_SIZE + \
+			ALIGNED_QDSS_SIZE, SZ_1M)
+
+struct cvp_mem_addr {
+	u32 align_device_addr;
+	u8 *align_virtual_addr;
+	u32 mem_size;
+	struct msm_cvp_smem mem_data;
+};
+
+struct cvp_iface_q_info {
+	spinlock_t hfi_lock;
+	void *q_hdr;
+	struct cvp_mem_addr q_array;
+};
+
+/*
+ * These are helper macros to iterate over various lists within
+ * iris_hfi_device->res.  The intention is to cut down on a lot of boiler-plate
+ * code
+ */
+
+/* Read as "for each 'thing' in a set of 'thingies'" */
+#define iris_hfi_for_each_thing(__device, __thing, __thingy) \
+	iris_hfi_for_each_thing_continue(__device, __thing, __thingy, 0)
+
+#define iris_hfi_for_each_thing_reverse(__device, __thing, __thingy) \
+	iris_hfi_for_each_thing_reverse_continue(__device, __thing, __thingy, \
+			(__device)->res->__thingy##_set.count - 1)
+
+/* TODO: the __from parameter technically not required since we can figure it
+ * out with some pointer magic (i.e. __thing - __thing##_tbl[0]).  If this macro
+ * sees extensive use, probably worth cleaning it up but for now omitting it
+ * since it introduces unnecessary complexity.
+ */
+#define iris_hfi_for_each_thing_continue(__device, __thing, __thingy, __from) \
+	for (__thing = &(__device)->res->\
+			__thingy##_set.__thingy##_tbl[__from]; \
+		__thing < &(__device)->res->__thingy##_set.__thingy##_tbl[0] + \
+			((__device)->res->__thingy##_set.count - __from); \
+		++__thing)
+
+#define iris_hfi_for_each_thing_reverse_continue(__device, __thing, __thingy, \
+		__from) \
+	for (__thing = &(__device)->res->\
+			__thingy##_set.__thingy##_tbl[__from]; \
+		__thing >= &(__device)->res->__thingy##_set.__thingy##_tbl[0]; \
+		--__thing)
+
+/* Regular set helpers */
+#define iris_hfi_for_each_regulator(__device, __rinfo) \
+	iris_hfi_for_each_thing(__device, __rinfo, regulator)
+
+#define iris_hfi_for_each_regulator_reverse(__device, __rinfo) \
+	iris_hfi_for_each_thing_reverse(__device, __rinfo, regulator)
+
+#define iris_hfi_for_each_regulator_reverse_continue(__device, __rinfo, \
+		__from) \
+	iris_hfi_for_each_thing_reverse_continue(__device, __rinfo, \
+			regulator, __from)
+
+/* Clock set helpers */
+#define iris_hfi_for_each_clock(__device, __cinfo) \
+	iris_hfi_for_each_thing(__device, __cinfo, clock)
+
+#define iris_hfi_for_each_clock_reverse(__device, __cinfo) \
+	iris_hfi_for_each_thing_reverse(__device, __cinfo, clock)
+
+#define iris_hfi_for_each_clock_reverse_continue(__device, __rinfo, \
+		__from) \
+	iris_hfi_for_each_thing_reverse_continue(__device, __rinfo, \
+			clock, __from)
+
+/* reset set helpers */
+#define iris_hfi_for_each_reset_clock(__device, __resetinfo) \
+	iris_hfi_for_each_thing(__device, __resetinfo, reset)
+
+#define iris_hfi_for_each_reset_clock_reverse(__device, __resetinfo) \
+	iris_hfi_for_each_thing_reverse(__device, __resetinfo, reset)
+
+/* Bus set helpers */
+#define iris_hfi_for_each_bus(__device, __binfo) \
+	iris_hfi_for_each_thing(__device, __binfo, bus)
+#define iris_hfi_for_each_bus_reverse(__device, __binfo) \
+	iris_hfi_for_each_thing_reverse(__device, __binfo, bus)
+
+/* Subcache set helpers */
+#define iris_hfi_for_each_subcache(__device, __sinfo) \
+	iris_hfi_for_each_thing(__device, __sinfo, subcache)
+#define iris_hfi_for_each_subcache_reverse(__device, __sinfo) \
+	iris_hfi_for_each_thing_reverse(__device, __sinfo, subcache)
+
+#define call_iris_op(d, op, args...)			\
+	(((d) && (d)->hal_ops && (d)->hal_ops->op) ? \
+	((d)->hal_ops->op(args)):0)
+
+struct cvp_hal_data {
+	u32 irq;
+	u32 irq_wd;
+	phys_addr_t firmware_base;
+	u8 __iomem *register_base;
+	u8 __iomem *gcc_reg_base;
+	u32 register_size;
+	u32 gcc_reg_size;
+};
+
+struct iris_resources {
+	struct msm_cvp_fw fw;
+};
+
+enum iris_hfi_state {
+	IRIS_STATE_DEINIT = 1,
+	IRIS_STATE_INIT,
+};
+
+enum reset_state {
+	INIT = 1,
+	ASSERT,
+	DEASSERT,
+};
+
+/* Indices of hfi queues in hfi queue arrays (iface_queues & dsp_iface_queues) */
+enum hfi_queue_idx {
+	CMD_Q, /* Command queue */
+	MSG_Q, /* Message queue */
+	DEBUG_Q, /* Debug queue */
+	MAX_Q
+};
+
+struct iris_hfi_device;
+
+struct cvp_hal_ops {
+	void (*interrupt_init)(struct iris_hfi_device *ptr);
+	void (*setup_dsp_uc_memmap)(struct iris_hfi_device *device);
+	void (*clock_config_on_enable)(struct iris_hfi_device *device);
+	void (*power_off)(struct iris_hfi_device *device);
+	void (*noc_error_info)(struct iris_hfi_device *device);
+	int (*reset_control_assert_name)(struct iris_hfi_device *device, const char *name);
+	int (*reset_control_deassert_name)(struct iris_hfi_device *device, const char *name);
+	int (*reset_control_acquire_name)(struct iris_hfi_device *device, const char *name);
+	int (*reset_control_release_name)(struct iris_hfi_device *device, const char *name);
+};
+
+struct iris_hfi_device {
+	struct list_head sess_head;
+	u32 version;
+	u32 intr_status;
+	u32 clk_freq;
+	u32 last_packet_type;
+	u32 error;
+	unsigned long clk_bitrate;
+	unsigned long scaled_rate;
+	struct msm_cvp_gov_data bus_vote;
+	bool power_enabled;
+	bool reg_dumped;
+	struct mutex lock;
+	msm_cvp_callback callback;
+	struct cvp_mem_addr iface_q_table;
+	struct cvp_mem_addr dsp_iface_q_table;
+	struct cvp_mem_addr qdss;
+	struct cvp_mem_addr sfr;
+	struct cvp_mem_addr mem_addr;
+	struct cvp_iface_q_info iface_queues[CVP_IFACEQ_NUMQ];
+	struct cvp_iface_q_info dsp_iface_queues[CVP_IFACEQ_NUMQ];
+	struct cvp_hal_data *cvp_hal_data;
+	struct workqueue_struct *cvp_workq;
+	struct workqueue_struct *iris_pm_workq;
+	int spur_count;
+	int reg_count;
+	struct iris_resources resources;
+	struct msm_cvp_platform_resources *res;
+	struct mmrm_client_desc mmrm_desc;
+	struct mmrm_client *mmrm_cvp;
+	enum iris_hfi_state state;
+	struct cvp_hfi_packetization_ops *pkt_ops;
+	enum hfi_packetization_type packetization_type;
+	struct msm_cvp_cb_info *response_pkt;
+	u8 *raw_packet;
+	struct pm_qos_request qos;
+	unsigned int skip_pc_count;
+	struct msm_cvp_capability *sys_init_capabilities;
+	struct cvp_hal_ops *hal_ops;
+};
+
+irqreturn_t cvp_hfi_isr(int irq, void *dev);
+irqreturn_t iris_hfi_core_work_handler(int irq, void *data);
+irqreturn_t iris_hfi_isr_wd(int irq, void *dev);
+void cvp_iris_hfi_delete_device(void *device);
+
+int cvp_iris_hfi_initialize(struct cvp_hfi_ops *hdev,
+		struct msm_cvp_platform_resources *res,
+		hfi_cmd_response_callback callback);
+
+int load_cvp_fw_impl(struct iris_hfi_device *device);
+int unload_cvp_fw_impl(struct iris_hfi_device *device);
+void cvp_dump_csr(struct iris_hfi_device *dev);
+#endif
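
The iris_hfi_for_each_* helpers above expand to plain for-loops over the counted sets hanging off iris_hfi_device->res. A minimal sketch of a caller, assuming (from the __thingy##_set.__thingy##_tbl expansion) a regulator set shaped as { struct regulator_info *regulator_tbl; u32 count; } and a regulator_info wrapping a struct regulator *:

/*
 * Sketch only: regulator_tbl/count/rinfo->regulator field names are
 * assumptions inferred from the macro expansion, not shown in this diff.
 */
static void sketch_disable_regulators(struct iris_hfi_device *device)
{
	struct regulator_info *rinfo;

	/* walks regulator_tbl from count - 1 down to 0 */
	iris_hfi_for_each_regulator_reverse(device, rinfo)
		regulator_disable(rinfo->regulator);
}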

+ 314 - 0
qcom/opensource/eva-kernel/msm/eva/cvp_dump.c

@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <asm/memory.h>
+#include <linux/coresight-stm.h>
+#include <linux/delay.h>
+#include <linux/devfreq.h>
+#include <linux/hash.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/pm_qos.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/soc/qcom/llcc-qcom.h>
+#include <linux/qcom_scm.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/dma-mapping.h>
+#include <linux/reset.h>
+#include "hfi_packetization.h"
+#include "msm_cvp_debug.h"
+#include "cvp_core_hfi.h"
+#include "cvp_hfi_helper.h"
+#include "cvp_hfi_io.h"
+#include "msm_cvp_dsp.h"
+#include "msm_cvp_clocks.h"
+#include "cvp_dump.h"
+
+#ifdef CVP_MINIDUMP_ENABLED
+/* Declare and init the head node of the linked list for HFI queue va_md dump */
+static LIST_HEAD(head_node_hfi_queue);
+
+/* Declare and init the head node of the linked list for debug struct va_md dump */
+static LIST_HEAD(head_node_dbg_struct);
+
+static int eva_struct_list_notif_handler(struct notifier_block *this,
+                unsigned long event, void *ptr);
+
+static int eva_hfiq_list_notif_handler(struct notifier_block *this,
+		unsigned long event, void *ptr);
+
+static struct notifier_block eva_struct_list_notif_blk = {
+		.notifier_call = eva_struct_list_notif_handler,
+		.priority = INT_MAX-1,
+};
+
+static struct notifier_block eva_hfiq_list_notif_blk = {
+		.notifier_call = eva_hfiq_list_notif_handler,
+		.priority = INT_MAX,
+};
+
+struct list_head *dump_array[CVP_MAX_DUMP] = {
+	[CVP_QUEUE_DUMP] = &head_node_hfi_queue,
+	[CVP_DBG_DUMP] = &head_node_dbg_struct,
+};
+
+int md_eva_dump(const char *name, u64 virt, u64 phys, u64 size)
+{
+	struct md_region md_entry;
+
+	if (msm_minidump_enabled()) {
+		dprintk(CVP_INFO, "Minidump is enabled!\n");
+
+		strlcpy(md_entry.name, name, sizeof(md_entry.name));
+		md_entry.virt_addr = (uintptr_t)virt;
+		md_entry.phys_addr = phys;
+		md_entry.size = size;
+		if (msm_minidump_add_region(&md_entry) < 0) {
+			dprintk(CVP_ERR,
+				"Failed to add \"%s\" data in Minidump\n",
+				name);
+			return 1;
+		} else {
+			dprintk(CVP_INFO,
+				"add region success for \"%s\" with virt addr: 0x%llx, phy addr: 0x%llx, size: %llu",
+				md_entry.name, (u64)md_entry.virt_addr,
+				md_entry.phys_addr, md_entry.size);
+			return 0;
+		}
+	} else {
+		dprintk(CVP_ERR, "Minidump is NOT enabled!\n");
+		return 1;
+	}
+}
+
+void cvp_va_md_register(char *name, void *notf_blk_ptr)
+{
+	int rc = 0;
+	struct notifier_block *notf_blk = (struct notifier_block *)notf_blk_ptr;
+
+	rc = qcom_va_md_register(name, notf_blk);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"\"%s\" : qcom_va_md_register failed rc = %d\n",
+			name, rc);
+	} else {
+		dprintk(CVP_INFO,
+			"\"%s\" : qcom_va_md_register success rc = %d\n",
+			name, rc);
+	}
+}
+
+void cvp_register_va_md_region(void)
+{
+	if (qcom_va_md_enabled()) {
+		cvp_va_md_register("eva_queues", &eva_hfiq_list_notif_blk);
+		cvp_va_md_register("dbg_struct", &eva_struct_list_notif_blk);
+	} else {
+		dprintk(CVP_ERR, "VA_Minidump is NOT enabled!\n");
+	}
+}
+
+void cvp_free_va_md_list(void)
+{
+	struct eva_va_md_queue *cursor, *temp;
+
+	list_for_each_entry_safe(cursor, temp, &head_node_hfi_queue, list) {
+		list_del(&cursor->list);
+		kfree(cursor);
+	}
+
+	list_for_each_entry_safe(cursor, temp, &head_node_dbg_struct, list) {
+		list_del(&cursor->list);
+		kfree(cursor);
+	}
+}
+
+void add_va_node_to_list(enum cvp_dump_type type, void *buff_va, u32 buff_size,
+			const char *region_name, bool copy)
+{
+	struct list_head *head_node;
+	struct eva_va_md_queue *temp_node = NULL;
+
+	if (type >= CVP_MAX_DUMP)
+		return;
+
+	head_node = dump_array[type];
+
+	/* Create the node */
+	temp_node = kzalloc(sizeof(struct eva_va_md_queue), GFP_KERNEL);
+	if (!temp_node) {
+		dprintk(CVP_ERR, "Memory allocation failed for list node\n");
+		return;
+	}
+
+	INIT_LIST_HEAD(&temp_node->list);
+
+	temp_node->va_md_buff = buff_va;
+	temp_node->va_md_buff_size = buff_size;
+	strlcpy(temp_node->region_name, region_name,
+		sizeof(temp_node->region_name));
+	temp_node->copy = copy;
+
+	list_add_tail(&temp_node->list, head_node);
+	dprintk(CVP_INFO,
+			"\"%s\" added to buffer list, vaddr: %px size: 0x%x\n",
+			temp_node->region_name, temp_node->va_md_buff,
+			temp_node->va_md_buff_size);
+}
+
+void add_hfi_queue_to_va_md_list(void *device)
+{
+	struct cvp_iface_q_info *iface_q;
+	struct iris_hfi_device *dev;
+
+	dev = (struct iris_hfi_device *)device;
+
+	iface_q = &dev->iface_queues[CVP_IFACEQ_CMDQ_IDX];
+	add_va_node_to_list(CVP_QUEUE_DUMP,
+				iface_q->q_array.align_virtual_addr,
+				iface_q->q_array.mem_size,
+				"eva_cmdq_cpu", false);
+	iface_q = &dev->iface_queues[CVP_IFACEQ_MSGQ_IDX];
+	add_va_node_to_list(CVP_QUEUE_DUMP,
+				iface_q->q_array.align_virtual_addr,
+				iface_q->q_array.mem_size,
+				"eva_msgq_cpu", false);
+
+	iface_q = &dev->dsp_iface_queues[CVP_IFACEQ_CMDQ_IDX];
+	add_va_node_to_list(CVP_QUEUE_DUMP,
+				iface_q->q_array.align_virtual_addr,
+				iface_q->q_array.mem_size,
+				"eva_cmdq_dsp", false);
+	iface_q = &dev->dsp_iface_queues[CVP_IFACEQ_MSGQ_IDX];
+	add_va_node_to_list(CVP_QUEUE_DUMP,
+				iface_q->q_array.align_virtual_addr,
+				iface_q->q_array.mem_size,
+				"eva_msgq_dsp", false);
+}
+
+void add_queue_header_to_va_md_list(void *device)
+{
+	struct cvp_iface_q_info *iface_q;
+	struct iris_hfi_device *dev;
+	struct cvp_hfi_queue_header *queue;
+
+	dev = (struct iris_hfi_device *)device;
+
+	iface_q = &dev->iface_queues[CVP_IFACEQ_CMDQ_IDX];
+	queue = (struct cvp_hfi_queue_header *)iface_q->q_hdr;
+	add_va_node_to_list(CVP_DBG_DUMP,
+			queue, sizeof(struct cvp_hfi_queue_header),
+			"cvp_hfi_queue_header-cpucmdQ", false);
+
+	iface_q = &dev->iface_queues[CVP_IFACEQ_MSGQ_IDX];
+	queue = (struct cvp_hfi_queue_header *)iface_q->q_hdr;
+	add_va_node_to_list(CVP_DBG_DUMP,
+			queue, sizeof(struct cvp_hfi_queue_header),
+			"cvp_hfi_queue_header-cpumsgQ", false);
+
+	iface_q = &dev->dsp_iface_queues[CVP_IFACEQ_CMDQ_IDX];
+	queue = (struct cvp_hfi_queue_header *)iface_q->q_hdr;
+	add_va_node_to_list(CVP_DBG_DUMP,
+			queue, sizeof(struct cvp_hfi_queue_header),
+			"cvp_hfi_queue_header-dspcmdQ", false);
+
+	iface_q = &dev->dsp_iface_queues[CVP_IFACEQ_MSGQ_IDX];
+	queue = (struct cvp_hfi_queue_header *)iface_q->q_hdr;
+	add_va_node_to_list(CVP_DBG_DUMP,
+			queue, sizeof(struct cvp_hfi_queue_header),
+			"cvp_hfi_queue_header-dspmsgQ", false);
+}
+
+static int eva_hfiq_list_notif_handler(struct notifier_block *this,
+				unsigned long event, void *ptr)
+{
+	struct va_md_entry entry;
+	struct eva_va_md_queue *cursor, *temp;
+	int rc = 0;
+	void *temp_data;
+
+	list_for_each_entry_safe(cursor, temp, &head_node_hfi_queue, list) {
+		entry.vaddr = (unsigned long)cursor->va_md_buff;
+		if (cursor->copy) {
+			dprintk(CVP_INFO, "Copying \"%s\"(%d Bytes)\
+				to intermediate buffer\n",
+				cursor->region_name, cursor->va_md_buff_size);
+			temp_data = kzalloc(cursor->va_md_buff_size,
+							GFP_KERNEL);
+			if (temp_data) {
+				memcpy(temp_data, cursor->va_md_buff,
+						cursor->va_md_buff_size);
+				entry.vaddr = (unsigned long)temp_data;
+			}
+		}
+		entry.size = cursor->va_md_buff_size;
+		strlcpy(entry.owner, cursor->region_name, sizeof(entry.owner));
+		entry.cb = NULL;
+
+		if (msm_cvp_minidump_enable) {
+			rc = qcom_va_md_add_region(&entry);
+			if (rc)
+				dprintk(CVP_ERR,
+					"Add region failed for \"%s\", vaddr: %px size: 0x%x\n",
+					entry.owner, cursor->va_md_buff,
+					entry.size);
+			else
+				dprintk(CVP_INFO,
+					"Add region success for \"%s\", vaddr: %px size: 0x%x\n",
+					entry.owner, cursor->va_md_buff,
+					entry.size);
+		}
+	}
+	return NOTIFY_OK;
+}
+
+static int eva_struct_list_notif_handler(struct notifier_block *this,
+		unsigned long event, void *ptr)
+{
+	struct va_md_entry entry;
+	struct eva_va_md_queue *cursor, *temp;
+	int rc = 0;
+	void *temp_data;
+
+	list_for_each_entry_safe(cursor, temp, &head_node_dbg_struct, list) {
+		entry.vaddr = (unsigned long)cursor->va_md_buff;
+		if (cursor->copy) {
+			dprintk(CVP_INFO, "Copying \"%s\"(%d Bytes) to \
+				intermediate buffer\n", cursor->region_name,
+				cursor->va_md_buff_size);
+			temp_data = kzalloc(cursor->va_md_buff_size,
+							GFP_KERNEL);
+			if (temp_data) {
+				memcpy(temp_data, cursor->va_md_buff,
+						cursor->va_md_buff_size);
+				entry.vaddr = (unsigned long)temp_data;
+			}
+		}
+		entry.size = cursor->va_md_buff_size;
+		strlcpy(entry.owner, cursor->region_name, sizeof(entry.owner));
+		entry.cb = NULL;
+
+		if (msm_cvp_minidump_enable) {
+			rc = qcom_va_md_add_region(&entry);
+			if (rc)
+				dprintk(CVP_ERR,
+					"Add region failed for \"%s\", vaddr: %px size: 0x%x\n",
+					entry.owner, cursor->va_md_buff,
+					entry.size);
+			else
+				dprintk(CVP_INFO,
+					"Add region success for \"%s\", vaddr: %px size: 0x%x\n",
+					entry.owner, cursor->va_md_buff,
+					entry.size);
+		}
+	}
+	return NOTIFY_OK;
+}
+
+#endif

+ 124 - 0
qcom/opensource/eva-kernel/msm/eva/cvp_dump.h

@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __H_CVP_MINIDUMP_H__
+#define __H_CVP_MINIDUMP_H__
+
+#include <linux/notifier.h>
+#include <linux/kernel.h>
+#include "cvp_comm_def.h"
+
+enum cvp_dump_type {
+	CVP_QUEUE_DUMP,
+	CVP_DBG_DUMP,
+	CVP_MAX_DUMP,
+};
+
+#define MAX_REGION_NAME_LEN 32
+#define EVAFW_IMAGE_SIZE (7 * 1024 * 1024)
+
+#ifdef CVP_MINIDUMP_ENABLED
+#include <soc/qcom/minidump.h>
+
+/*
+ * Wrapper for static minidump registration.
+ *
+ * @name: Dump will be collected with this name
+ * @virt: Virtual address of the buffer which needs to be dumped
+ * @phys: Physical address of the buffer which needs to be dumped
+ * @size: Size of the buffer which needs to be dumped
+ */
+int md_eva_dump(const char *name, u64 virt, u64 phys, u64 size);
+
+/*
+ * Function to add a dump region to a queue.
+ *
+ * @type: Type of the list node which needs to be updated
+ * @buff_va: Virtual address of the buffer which needs to be dumped
+ * @buff_size: Size of the buffer which needs to be dumped
+ * @region_name: Dump will be collected with this name
+ * @copy: Flag to indicate if the buffer data needs to be copied
+ *		to the intermediate buffer allocated by kzalloc.
+ */
+void add_va_node_to_list(enum cvp_dump_type type, void *buff_va,
+			u32 buff_size, const char *region_name, bool copy);
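+
+/*
+ * Usage sketch (mirrors the registration calls in cvp_dump.c; "vaddr"
+ * and "size" stand for a caller-provided buffer):
+ *	add_va_node_to_list(CVP_QUEUE_DUMP, vaddr, size, "eva_cmdq_cpu", false);
+ */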
+
+/*
+ * Registers a subsystem with the minidump driver.
+ *
+ * @name: Subsystem name which will get registered
+ * @notf_blk_ptr: Notifier block pointer.
+ *		notifier_call mentioned in this block will be triggered by
+ *		the minidump driver in case of a crash
+ */
+void cvp_va_md_register(char *name, void *notf_blk_ptr);
+
+/* One function where we will register all the regions */
+void cvp_register_va_md_region(void);
+
+/*
+ * Frees the memory allocated for the different va_md lists.
+ * Do not forget to add code for any new list in this function.
+ */
+void cvp_free_va_md_list(void);
+
+/* Adds the HFI queues (both CPU and DSP) to the global HFI list head */
+void add_hfi_queue_to_va_md_list(void *device);
+
+/*
+ * Adds the queue header structures (both CPU and DSP)
+ * to the global struct list head.
+ */
+void add_queue_header_to_va_md_list(void *device);
+
+/*
+ * Node structure for the VA_MD linked list.
+ *
+ * @list: Linux kernel list implementation
+ * @va_md_buff: Virtual address of the buffer which needs to be dumped
+ * @va_md_buff_size: Size of the buffer which needs to be dumped
+ * @region_name: Dump will be collected with this name
+ * @copy: Flag to indicate if the buffer data needs to be copied
+ *		to the intermediate buffer allocated by kzalloc.
+ */
+struct eva_va_md_queue {
+	struct list_head list;
+	void *va_md_buff;
+	u32 va_md_buff_size;
+	char region_name[MAX_REGION_NAME_LEN];
+	bool copy;
+};
+#else
+static inline int md_eva_dump(const char *name, u64 virt, u64 phys, u64 size)
+{
+	return 0;
+}
+
+static inline void add_va_node_to_list(enum cvp_dump_type type, void *buff_va,
+			u32 buff_size, const char *region_name, bool copy)
+{
+}
+
+static inline void cvp_va_md_register(char *name, void *notf_blk_ptr)
+{
+}
+
+static inline void cvp_register_va_md_region(void)
+{
+}
+
+static inline void cvp_free_va_md_list(void)
+{
+}
+
+static inline void add_hfi_queue_to_va_md_list(void *device)
+{
+}
+
+static inline void add_queue_header_to_va_md_list(void *device)
+{
+}
+
+#endif	/* End of CVP_MINIDUMP_ENABLED */
+#endif

+ 150 - 0
qcom/opensource/eva-kernel/msm/eva/cvp_fw_load.c

@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/of.h>
+#include <linux/pm_qos.h>
+#include <linux/platform_device.h>
+#include <linux/qcom_scm.h>
+#include "msm_cvp_debug.h"
+#include "cvp_comm_def.h"
+#include "cvp_core_hfi.h"
+#include "cvp_hfi.h"
+#include <linux/of_address.h>
+#include <linux/firmware.h>
+#include <linux/soc/qcom/mdt_loader.h>
+#include "cvp_dump.h"
+
+#define MAX_FIRMWARE_NAME_SIZE 128
+
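+/*
+ * Load the EVA firmware into its reserved memory region: read "pas-id"
+ * and "memory-region" from DT, request "<fw_name>.mbn", stage it with
+ * qcom_mdt_load() and authenticate/boot it via TrustZone. Returns the
+ * PAS id on success or a negative errno on failure.
+ */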
+static int __load_fw_to_memory(struct platform_device *pdev,
+		const char *fw_name)
+{
+	int rc = 0;
+	const struct firmware *firmware = NULL;
+	char firmware_name[MAX_FIRMWARE_NAME_SIZE] = {0};
+	struct device_node *node = NULL;
+	struct resource res = {0};
+	phys_addr_t phys = 0;
+	size_t res_size = 0;
+	ssize_t fw_size = 0;
+	void *virt = NULL;
+	int pas_id = 0;
+
+	if (!fw_name || !(*fw_name) || !pdev) {
+		dprintk(CVP_ERR, "%s: Invalid inputs\n", __func__);
+		return -EINVAL;
+	}
+	if (strlen(fw_name) >= MAX_FIRMWARE_NAME_SIZE - 4) {
+		dprintk(CVP_ERR, "%s: Invalid fw name\n", __func__);
+		return -EINVAL;
+	}
+	scnprintf(firmware_name, ARRAY_SIZE(firmware_name), "%s.mbn", fw_name);
+
+	rc = of_property_read_u32(pdev->dev.of_node, "pas-id", &pas_id);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"%s: error %d while reading DT for \"pas-id\"\n",
+				__func__, rc);
+		goto exit;
+	}
+
+	node = of_parse_phandle(pdev->dev.of_node, "memory-region", 0);
+	if (!node) {
+		dprintk(CVP_ERR,
+			"%s: DT error getting \"memory-region\" property\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	rc = of_address_to_resource(node, 0, &res);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"%s: error %d getting \"memory-region\" resource\n",
+				__func__, rc);
+		goto exit;
+	}
+	phys = res.start;
+	res_size = (size_t)resource_size(&res);
+
+	rc = request_firmware(&firmware, firmware_name, &pdev->dev);
+	if (rc) {
+		dprintk(CVP_ERR, "%s: error %d requesting \"%s\"\n",
+				__func__, rc, firmware_name);
+		goto exit;
+	}
+
+	fw_size = qcom_mdt_get_size(firmware);
+	if (fw_size < 0 || res_size < (size_t)fw_size) {
+		rc = -EINVAL;
+		dprintk(CVP_ERR,
+			"%s: Corrupted fw image. Alloc size: %zu, fw size: %zd",
+				__func__, res_size, fw_size);
+		goto exit;
+	}
+
+	virt = memremap(phys, res_size, MEMREMAP_WC);
+	if (!virt) {
+		rc = -ENOMEM;
+		dprintk(CVP_ERR, "%s: unable to remap firmware memory\n",
+				__func__);
+		goto exit;
+	}
+
+	rc = qcom_mdt_load(&pdev->dev, firmware, firmware_name,
+			pas_id, virt, phys, res_size, NULL);
+	if (rc) {
+		dprintk(CVP_ERR, "%s: error %d loading \"%s\"\n",
+				__func__, rc, firmware_name);
+		goto exit;
+	}
+	rc = qcom_scm_pas_auth_and_reset(pas_id);
+	if (rc) {
+		dprintk(CVP_ERR, "%s: error %d authenticating \"%s\"\n",
+				__func__, rc, firmware_name);
+		goto exit;
+	}
+	rc = md_eva_dump("evafwdata", (uintptr_t)virt, phys, EVAFW_IMAGE_SIZE);
+	if (rc) {
+		dprintk(CVP_ERR, "%s: error %d in dumping \"%s\"\n",
+				__func__, rc, firmware_name);
+	}
+
+	memunmap(virt);
+	release_firmware(firmware);
+	dprintk(CVP_CORE, "%s: firmware \"%s\" loaded successfully\n",
+			__func__, firmware_name);
+	return pas_id;
+
+exit:
+	if (virt)
+		memunmap(virt);
+	if (firmware)
+		release_firmware(firmware);
+	return rc;
+}
+
+int load_cvp_fw_impl(struct iris_hfi_device *device)
+{
+	int rc = 0;
+
+	if (!device->resources.fw.cookie) {
+		device->resources.fw.cookie =
+			__load_fw_to_memory(device->res->pdev,
+			device->res->fw_name);
+		if (device->resources.fw.cookie <= 0) {
+			dprintk(CVP_ERR, "Failed to download firmware\n");
+			device->resources.fw.cookie = 0;
+			rc = -ENOMEM;
+		}
+	}
+	return rc;
+}
+
+int unload_cvp_fw_impl(struct iris_hfi_device *device)
+{
+	qcom_scm_pas_shutdown(device->resources.fw.cookie);
+	device->resources.fw.cookie = 0;
+	return 0;
+}

+ 5805 - 0
qcom/opensource/eva-kernel/msm/eva/cvp_hfi.c

@@ -0,0 +1,5805 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <asm/memory.h>
+#include <linux/coresight-stm.h>
+#include <linux/delay.h>
+#include <linux/devfreq.h>
+#include <linux/hash.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/pm_qos.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/soc/qcom/llcc-qcom.h>
+#include <linux/qcom_scm.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/dma-mapping.h>
+#include <linux/reset.h>
+#include <linux/pm_wakeup.h>
+#include "hfi_packetization.h"
+#include "msm_cvp_debug.h"
+#include "cvp_core_hfi.h"
+#include "cvp_hfi_helper.h"
+#include "cvp_hfi_io.h"
+#include "msm_cvp_dsp.h"
+#include "msm_cvp_clocks.h"
+#include "vm/cvp_vm.h"
+#include "cvp_dump.h"
+
+/* Added for debug support */
+#include <linux/clk/qcom.h>
+#include "msm_cvp_common.h"
+
+#define REG_ADDR_OFFSET_BITMASK	0x000FFFFF
+#define QDSS_IOVA_START 0x80001000
+#define MIN_PAYLOAD_SIZE 3
+
+struct cvp_tzbsp_memprot {
+	u32 cp_start;
+	u32 cp_size;
+	u32 cp_nonpixel_start;
+	u32 cp_nonpixel_size;
+};
+
+#define TZBSP_CVP_PAS_ID    26
+
+/* Poll interval in uS */
+#define POLL_INTERVAL_US 50
+
+enum tzbsp_subsys_state {
+	TZ_SUBSYS_STATE_SUSPEND = 0,
+	TZ_SUBSYS_STATE_RESUME = 1,
+	TZ_SUBSYS_STATE_RESTORE_THRESHOLD = 2,
+};
+
+const struct msm_cvp_gov_data CVP_DEFAULT_BUS_VOTE = {
+	.data = NULL,
+	.data_count = 0,
+};
+
+const int cvp_max_packets = 32;
+
+static void iris_hfi_pm_handler(struct work_struct *work);
+static DECLARE_DELAYED_WORK(iris_hfi_pm_work, iris_hfi_pm_handler);
+static inline int __resume(struct iris_hfi_device *device);
+static inline int __suspend(struct iris_hfi_device *device);
+static int __disable_regulator(struct iris_hfi_device *device,
+		const char *name);
+static int __enable_regulator(struct iris_hfi_device *device,
+		const char *name);
+static void __flush_debug_queue(struct iris_hfi_device *device, u8 *packet);
+static int __initialize_packetization(struct iris_hfi_device *device);
+static struct cvp_hal_session *__get_session(struct iris_hfi_device *device,
+		u32 session_id);
+static bool __is_session_valid(struct iris_hfi_device *device,
+		struct cvp_hal_session *session, const char *func);
+static int __iface_cmdq_write(struct iris_hfi_device *device,
+					void *pkt);
+static int __load_fw(struct iris_hfi_device *device);
+static int __power_on_init(struct iris_hfi_device *device);
+static void __unload_fw(struct iris_hfi_device *device);
+static int __tzbsp_set_cvp_state(enum tzbsp_subsys_state state);
+static int __enable_subcaches(struct iris_hfi_device *device);
+static int __set_subcaches(struct iris_hfi_device *device);
+static int __release_subcaches(struct iris_hfi_device *device);
+static int __disable_subcaches(struct iris_hfi_device *device);
+static int __power_collapse(struct iris_hfi_device *device, bool force);
+static int iris_hfi_noc_error_info(void *dev);
+
+static void interrupt_init_iris2(struct iris_hfi_device *device);
+static void setup_dsp_uc_memmap_vpu5(struct iris_hfi_device *device);
+static void clock_config_on_enable_vpu5(struct iris_hfi_device *device);
+static void power_off_iris2(struct iris_hfi_device *device);
+
+static int __set_ubwc_config(struct iris_hfi_device *device);
+static void __noc_error_info_iris2(struct iris_hfi_device *device);
+static int __enable_hw_power_collapse(struct iris_hfi_device *device);
+static int __disable_hw_power_collapse(struct iris_hfi_device *device);
+
+static int __power_off_controller(struct iris_hfi_device *device);
+static int __hwfence_regs_map(struct iris_hfi_device *device);
+static int __hwfence_regs_unmap(struct iris_hfi_device *device);
+
+static int __reset_control_assert_name(struct iris_hfi_device *device, const char *name);
+static int __reset_control_deassert_name(struct iris_hfi_device *device, const char *name);
+static int __reset_control_acquire(struct iris_hfi_device *device, const char *name);
+static int __reset_control_release(struct iris_hfi_device *device, const char *name);
+static bool __is_ctl_power_on(struct iris_hfi_device *device);
+
+
+static void __print_sidebandmanager_regs(struct iris_hfi_device *device);
+static void dump_noc_reg(struct iris_hfi_device *device);
+
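+/* Iris2-specific HAL callbacks, dispatched through call_iris_op() */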
+static struct cvp_hal_ops hal_ops = {
+	.interrupt_init = interrupt_init_iris2,
+	.setup_dsp_uc_memmap = setup_dsp_uc_memmap_vpu5,
+	.clock_config_on_enable = clock_config_on_enable_vpu5,
+	.power_off = power_off_iris2,
+	.noc_error_info = __noc_error_info_iris2,
+	.reset_control_assert_name = __reset_control_assert_name,
+	.reset_control_deassert_name = __reset_control_deassert_name,
+	.reset_control_acquire_name = __reset_control_acquire,
+	.reset_control_release_name = __reset_control_release,
+};
+
+/*
+ * Utility function to enforce some of our assumptions.  Spam calls to this
+ * in hotspots in code to double check some of the assumptions that we hold.
+ */
+static inline void __strict_check(struct iris_hfi_device *device)
+{
+	msm_cvp_res_handle_fatal_hw_error(device->res,
+		!mutex_is_locked(&device->lock));
+}
+
+static inline void __set_state(struct iris_hfi_device *device,
+		enum iris_hfi_state state)
+{
+	device->state = state;
+}
+
+static inline bool __core_in_valid_state(struct iris_hfi_device *device)
+{
+	return device->state != IRIS_STATE_DEINIT;
+}
+
+static inline bool is_sys_cache_present(struct iris_hfi_device *device)
+{
+	return device->res->sys_cache_present;
+}
+
+static int cvp_synx_recover(void)
+{
+#ifdef CVP_SYNX_ENABLED
+	return synx_recover(SYNX_CLIENT_EVA_CTX0);
+#else
+	return 0;
+#endif	/* End of CVP_SYNX_ENABLED */
+}
+
+#define ROW_SIZE 32
+
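+/* Read the AArch64 virtual counter (cntvct_el0) as a raw timestamp */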
+unsigned long long get_aon_time(void)
+{
+	unsigned long long val;
+
+	asm volatile("mrs %0, cntvct_el0" : "=r" (val));
+
+	return val;
+}
+
+int get_hfi_version(void)
+{
+	struct msm_cvp_core *core;
+	struct iris_hfi_device *hfi;
+
+	core = cvp_driver->cvp_core;
+	hfi = (struct iris_hfi_device *)core->dev_ops->hfi_device_data;
+
+	return hfi->version;
+}
+
+unsigned int get_msg_size(struct cvp_hfi_msg_session_hdr *hdr)
+{
+	struct msm_cvp_core *core;
+	struct iris_hfi_device *device;
+	u32 minor_ver;
+
+	core = cvp_driver->cvp_core;
+	if (core)
+		device = core->dev_ops->hfi_device_data;
+	else
+		return 0;
+
+	if (!device) {
+		dprintk(CVP_ERR, "%s: NULL device\n", __func__);
+		return 0;
+	}
+
+	minor_ver = (device->version & HFI_VERSION_MINOR_MASK) >>
+				HFI_VERSION_MINOR_SHIFT;
+
+	if (minor_ver < 2)
+		return sizeof(struct cvp_hfi_msg_session_hdr);
+
+	if (hdr->packet_type == HFI_MSG_SESSION_CVP_FD)
+		return sizeof(struct cvp_hfi_msg_session_hdr_ext);
+	else
+		return sizeof(struct cvp_hfi_msg_session_hdr);
+
+}
+
+unsigned int get_msg_session_id(void *msg)
+{
+	struct cvp_hfi_msg_session_hdr *hdr =
+		(struct cvp_hfi_msg_session_hdr *)msg;
+
+	return hdr->session_id;
+}
+
+unsigned int get_msg_errorcode(void *msg)
+{
+	struct cvp_hfi_msg_session_hdr *hdr =
+		(struct cvp_hfi_msg_session_hdr *)msg;
+
+	return hdr->error_type;
+}
+
+int get_msg_opconfigs(void *msg, unsigned int *session_id,
+		unsigned int *error_type, unsigned int *config_id)
+{
+	struct cvp_hfi_msg_session_op_cfg_packet *cfg =
+		(struct cvp_hfi_msg_session_op_cfg_packet *)msg;
+
+	*session_id = cfg->session_id;
+	*error_type = cfg->error_type;
+	*config_id = cfg->op_conf_id;
+	return 0;
+}
+
+static void __dump_packet(u8 *packet, enum cvp_msg_prio log_level)
+{
+	u32 c = 0, packet_size = *(u32 *)packet;
+	/*
+	 * row must contain enough for 0xdeadbaad * 8 to be converted into
+	 * "de ad ba ad " * 8 + '\0'
+	 */
+	char row[3 * ROW_SIZE];
+
+	for (c = 0; c * ROW_SIZE < packet_size; ++c) {
+		int bytes_to_read = ((c + 1) * ROW_SIZE > packet_size) ?
+			packet_size % ROW_SIZE : ROW_SIZE;
+		hex_dump_to_buffer(packet + c * ROW_SIZE, bytes_to_read,
+				ROW_SIZE, 4, row, sizeof(row), false);
+		dprintk(log_level, "%s\n", row);
+	}
+}
+
+static int __dsp_suspend(struct iris_hfi_device *device, bool force)
+{
+	int rc;
+
+	if (msm_cvp_dsp_disable)
+		return 0;
+
+	dprintk(CVP_DSP, "%s: suspend dsp\n", __func__);
+	rc = cvp_dsp_suspend(force);
+	if (rc) {
+		if (rc != -EBUSY)
+			dprintk(CVP_ERR,
+				"%s: dsp suspend failed with error %d\n",
+				__func__, rc);
+		return rc;
+	}
+
+	dprintk(CVP_DSP, "%s: dsp suspended\n", __func__);
+	return 0;
+}
+
+static int __dsp_resume(struct iris_hfi_device *device)
+{
+	int rc;
+
+	if (msm_cvp_dsp_disable)
+		return 0;
+
+	dprintk(CVP_DSP, "%s: resume dsp\n", __func__);
+	rc = cvp_dsp_resume();
+	if (rc) {
+		dprintk(CVP_ERR,
+			"%s: dsp resume failed with error %d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	dprintk(CVP_DSP, "%s: dsp resumed\n", __func__);
+	return rc;
+}
+
+static int __dsp_shutdown(struct iris_hfi_device *device)
+{
+	int rc;
+
+	if (msm_cvp_dsp_disable)
+		return 0;
+
+	dprintk(CVP_DSP, "%s: shutdown dsp\n", __func__);
+	rc = cvp_dsp_shutdown();
+	if (rc) {
+		dprintk(CVP_ERR,
+			"%s: dsp shutdown failed with error %d\n",
+			__func__, rc);
+		WARN_ON(1);
+	}
+
+	dprintk(CVP_DSP, "%s: dsp shutdown successful\n", __func__);
+	return rc;
+}
+
+static int __acquire_regulator(struct regulator_info *rinfo,
+				struct iris_hfi_device *device)
+{
+	int rc = 0;
+
+	if (rinfo->has_hw_power_collapse) {
+		/* Acquire XO_RESET to avoid race condition with video */
+		rc = call_iris_op(device, reset_control_acquire_name, device, "cvp_xo_reset");
+		if (rc) {
+			dprintk(CVP_ERR,
+				"XO_RESET could not be acquired: skip acquiring the regulator %s from FW\n",
+				rinfo->name);
+			return -EINVAL;
+		}
+
+		rc = regulator_set_mode(rinfo->regulator,
+				REGULATOR_MODE_NORMAL);
+		if (rc) {
+			/*
+			 * This is somewhat fatal, but nothing we can do
+			 * about it. We can't disable the regulator w/o
+			 * getting it back under s/w control
+			 */
+			dprintk(CVP_WARN,
+				"Failed to acquire regulator control: %s\n",
+					rinfo->name);
+		} else {
+			dprintk(CVP_PWR,
+				"Acquire regulator control from HW: %s\n",
+				rinfo->name);
+		}
+		/* Release XO_RESET after regulator is enabled. */
+		call_iris_op(device, reset_control_release_name, device, "cvp_xo_reset");
+	}
+
+	if (!regulator_is_enabled(rinfo->regulator)) {
+		dprintk(CVP_WARN, "Regulator is not enabled %s\n",
+			rinfo->name);
+		msm_cvp_res_handle_fatal_hw_error(device->res, true);
+	}
+
+	return rc;
+}
+
+static int __hand_off_regulator(struct iris_hfi_device *device, struct regulator_info *rinfo)
+{
+	int rc = 0;
+
+	if (rinfo->has_hw_power_collapse) {
+		/* Acquire XO_RESET to avoid race condition with video */
+		rc = call_iris_op(device, reset_control_acquire_name, device, "cvp_xo_reset");
+		if (rc) {
+			dprintk(CVP_ERR,
+				"XO_RESET could not be acquired: skip hand off the regulator %s to FW\n",
+				rinfo->name);
+			return -EINVAL;
+		}
+
+		rc = regulator_set_mode(rinfo->regulator,
+				REGULATOR_MODE_FAST);
+
+		/* Release XO_RESET after regulator is enabled. */
+		call_iris_op(device, reset_control_release_name, device, "cvp_xo_reset");
+
+		if (rc) {
+			dprintk(CVP_WARN,
+				"Failed to hand off regulator control: %s\n",
+					rinfo->name);
+		} else {
+			dprintk(CVP_PWR,
+					"Hand off regulator control to HW: %s\n",
+					rinfo->name);
+		}
+	}
+
+	return rc;
+}
+
+static int __hand_off_regulators(struct iris_hfi_device *device)
+{
+	struct regulator_info *rinfo;
+	int rc = 0, c = 0;
+
+	iris_hfi_for_each_regulator(device, rinfo) {
+		rc = __hand_off_regulator(device, rinfo);
+		/*
+		 * If one regulator hand off failed, driver should take
+		 * the control for other regulators back.
+		 */
+		if (rc)
+			goto err_reg_handoff_failed;
+		c++;
+	}
+
+	return rc;
+err_reg_handoff_failed:
+	iris_hfi_for_each_regulator_reverse_continue(device, rinfo, c)
+		__acquire_regulator(rinfo, device);
+
+	return rc;
+}
+
+static int __take_back_regulators(struct iris_hfi_device *device)
+{
+	struct regulator_info *rinfo;
+	int rc = 0;
+
+	iris_hfi_for_each_regulator(device, rinfo) {
+		rc = __acquire_regulator(rinfo, device);
+		/*
+		 * if one regulator hand off failed, driver should take
+		 * the control for other regulators back.
+		 */
+		if (rc)
+			return rc;
+	}
+
+	return rc;
+}
+
+
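+/*
+ * Copy one HFI packet into a shared circular queue. Indices are in
+ * 32-bit words; a write that would wrap past the end of the ring is
+ * split into two memcpy calls. Returns -ENOTEMPTY if the queue lacks
+ * space, after requesting a tx interrupt from the firmware.
+ */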
+static int __write_queue(struct cvp_iface_q_info *qinfo, u8 *packet,
+		bool *rx_req_is_set)
+{
+	struct cvp_hfi_queue_header *queue;
+	struct cvp_hfi_cmd_session_hdr *cmd_pkt;
+	u32 packet_size_in_words, new_write_idx;
+	u32 empty_space, read_idx, write_idx;
+	u32 *write_ptr;
+
+	if (!qinfo || !packet) {
+		dprintk(CVP_ERR, "Invalid Params\n");
+		return -EINVAL;
+	} else if (!qinfo->q_array.align_virtual_addr) {
+		dprintk(CVP_WARN, "Queues have already been freed\n");
+		return -EINVAL;
+	}
+
+	queue = (struct cvp_hfi_queue_header *) qinfo->q_hdr;
+	if (!queue) {
+		dprintk(CVP_ERR, "queue not present\n");
+		return -ENOENT;
+	}
+
+	cmd_pkt = (struct cvp_hfi_cmd_session_hdr *)packet;
+
+	if (cmd_pkt->size >= sizeof(struct cvp_hfi_cmd_session_hdr))
+		dprintk(CVP_CMD, "%s: pkt_type %08x sess_id %08x trans_id %u ktid %llu\n",
+			__func__, cmd_pkt->packet_type,
+			cmd_pkt->session_id,
+			cmd_pkt->client_data.transaction_id,
+			cmd_pkt->client_data.kdata & (FENCE_BIT - 1));
+	else if (cmd_pkt->size >= 12)
+		dprintk(CVP_CMD, "%s: pkt_type %08x sess_id %08x\n", __func__,
+			cmd_pkt->packet_type, cmd_pkt->session_id);
+
+	if (msm_cvp_debug & CVP_PKT) {
+		dprintk(CVP_PKT, "%s: %pK\n", __func__, qinfo);
+		__dump_packet(packet, CVP_PKT);
+	}
+
+	packet_size_in_words = (*(u32 *)packet) >> 2;
+	if (!packet_size_in_words || packet_size_in_words >
+		qinfo->q_array.mem_size>>2) {
+		dprintk(CVP_ERR, "Invalid packet size\n");
+		return -ENODATA;
+	}
+
+	spin_lock(&qinfo->hfi_lock);
+	read_idx = queue->qhdr_read_idx;
+	write_idx = queue->qhdr_write_idx;
+
+	empty_space = (write_idx >= read_idx) ?
+		((qinfo->q_array.mem_size>>2) - (write_idx - read_idx)) :
+		(read_idx - write_idx);
+	if (empty_space <= packet_size_in_words) {
+		queue->qhdr_tx_req =  1;
+		spin_unlock(&qinfo->hfi_lock);
+		dprintk(CVP_ERR, "Insufficient size (%d) to write (%d)\n",
+					  empty_space, packet_size_in_words);
+		return -ENOTEMPTY;
+	}
+
+	queue->qhdr_tx_req =  0;
+
+	new_write_idx = write_idx + packet_size_in_words;
+	write_ptr = (u32 *)((qinfo->q_array.align_virtual_addr) +
+		(write_idx << 2));
+	if (write_ptr < (u32 *)qinfo->q_array.align_virtual_addr ||
+		write_ptr > (u32 *)(qinfo->q_array.align_virtual_addr +
+		qinfo->q_array.mem_size)) {
+		spin_unlock(&qinfo->hfi_lock);
+		dprintk(CVP_ERR, "Invalid write index\n");
+		return -ENODATA;
+	}
+
+	if (new_write_idx < (qinfo->q_array.mem_size >> 2)) {
+		memcpy(write_ptr, packet, packet_size_in_words << 2);
+	} else {
+		new_write_idx -= qinfo->q_array.mem_size >> 2;
+		memcpy(write_ptr, packet, (packet_size_in_words -
+			new_write_idx) << 2);
+		memcpy((void *)qinfo->q_array.align_virtual_addr,
+			packet + ((packet_size_in_words - new_write_idx) << 2),
+			new_write_idx  << 2);
+	}
+
+	/*
+	 * Memory barrier to make sure packet is written before updating the
+	 * write index
+	 */
+	mb();
+	queue->qhdr_write_idx = new_write_idx;
+	if (rx_req_is_set)
+		*rx_req_is_set = queue->qhdr_rx_req == 1;
+	/*
+	 * Memory barrier to make sure write index is updated before an
+	 * interrupt is raised.
+	 */
+	mb();
+	spin_unlock(&qinfo->hfi_lock);
+	return 0;
+}
+
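+/*
+ * Read one packet from a shared circular queue into "packet".
+ * Malformed or oversized packets are dropped by advancing the read
+ * index to the write index. *pb_tx_req_is_set tells the caller
+ * whether the firmware requested an interrupt once space is freed.
+ */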
+static int __read_queue(struct cvp_iface_q_info *qinfo, u8 *packet,
+		u32 *pb_tx_req_is_set)
+{
+	struct cvp_hfi_queue_header *queue;
+	struct cvp_hfi_msg_session_hdr *msg_pkt;
+	u32 packet_size_in_words, new_read_idx;
+	u32 *read_ptr;
+	u32 receive_request = 0;
+	u32 read_idx, write_idx;
+	int rc = 0;
+
+	if (!qinfo || !packet || !pb_tx_req_is_set) {
+		dprintk(CVP_ERR, "Invalid Params\n");
+		return -EINVAL;
+	} else if (!qinfo->q_array.align_virtual_addr) {
+		dprintk(CVP_WARN, "Queues have already been freed\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Memory barrier to make sure data is valid before
+	 * reading it
+	 */
+	mb();
+	queue = (struct cvp_hfi_queue_header *) qinfo->q_hdr;
+
+	if (!queue) {
+		dprintk(CVP_ERR, "Queue memory is not allocated\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * Do not set the receive request for the debug queue; if set,
+	 * Iris generates an interrupt for debug messages even
+	 * when there is no response message available.
+	 * In general the debug queue will not become full, as it
+	 * is emptied out on every interrupt from Iris.
+	 * Iris will generate an interrupt anyway if it is full.
+	 */
+	spin_lock(&qinfo->hfi_lock);
+	if (queue->qhdr_type & HFI_Q_ID_CTRL_TO_HOST_MSG_Q)
+		receive_request = 1;
+
+	read_idx = queue->qhdr_read_idx;
+	write_idx = queue->qhdr_write_idx;
+
+	if (read_idx == write_idx) {
+		queue->qhdr_rx_req = receive_request;
+		/*
+		 * mb() to ensure qhdr is updated in main memory
+		 * so that iris reads the updated header values
+		 */
+		mb();
+		*pb_tx_req_is_set = 0;
+		if (write_idx != queue->qhdr_write_idx) {
+			queue->qhdr_rx_req = 0;
+		} else {
+			spin_unlock(&qinfo->hfi_lock);
+			dprintk(CVP_HFI,
+				"%s queue is empty, rx_req = %u, tx_req = %u, read_idx = %u\n",
+				receive_request ? "message" : "debug",
+				queue->qhdr_rx_req, queue->qhdr_tx_req,
+				queue->qhdr_read_idx);
+			return -ENODATA;
+		}
+	}
+
+	read_ptr = (u32 *)((qinfo->q_array.align_virtual_addr) +
+				(read_idx << 2));
+	if (read_ptr < (u32 *)qinfo->q_array.align_virtual_addr ||
+		read_ptr > (u32 *)(qinfo->q_array.align_virtual_addr +
+		qinfo->q_array.mem_size - sizeof(*read_ptr))) {
+		spin_unlock(&qinfo->hfi_lock);
+		dprintk(CVP_ERR, "Invalid read index\n");
+		return -ENODATA;
+	}
+
+	packet_size_in_words = (*read_ptr) >> 2;
+	if (!packet_size_in_words) {
+		spin_unlock(&qinfo->hfi_lock);
+		dprintk(CVP_ERR, "Zero packet size\n");
+		return -ENODATA;
+	}
+
+	new_read_idx = read_idx + packet_size_in_words;
+	if (((packet_size_in_words << 2) <= CVP_IFACEQ_VAR_HUGE_PKT_SIZE)
+			&& read_idx <= (qinfo->q_array.mem_size >> 2)) {
+		if (new_read_idx < (qinfo->q_array.mem_size >> 2)) {
+			memcpy(packet, read_ptr,
+					packet_size_in_words << 2);
+		} else {
+			new_read_idx -= (qinfo->q_array.mem_size >> 2);
+			memcpy(packet, read_ptr,
+			(packet_size_in_words - new_read_idx) << 2);
+			memcpy(packet + ((packet_size_in_words -
+					new_read_idx) << 2),
+					(u8 *)qinfo->q_array.align_virtual_addr,
+					new_read_idx << 2);
+		}
+	} else {
+		dprintk(CVP_WARN,
+			"BAD packet received, read_idx: %#x, pkt_size: %d\n",
+			read_idx, packet_size_in_words << 2);
+		dprintk(CVP_WARN, "Dropping this packet\n");
+		new_read_idx = write_idx;
+		rc = -ENODATA;
+	}
+
+	if (new_read_idx != queue->qhdr_write_idx)
+		queue->qhdr_rx_req = 0;
+	else
+		queue->qhdr_rx_req = receive_request;
+	queue->qhdr_read_idx = new_read_idx;
+	/*
+	 * mb() to ensure qhdr is updated in main memory
+	 * so that iris reads the updated header values
+	 */
+	mb();
+
+	*pb_tx_req_is_set = (queue->qhdr_tx_req == 1) ? 1 : 0;
+
+	spin_unlock(&qinfo->hfi_lock);
+
+	if (!(queue->qhdr_type & HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q)) {
+		msg_pkt = (struct cvp_hfi_msg_session_hdr *)packet;
+		dprintk(CVP_CMD, "%s:  "
+			"pkt_type %08x sess_id %08x trans_id %u ktid %llu\n",
+			__func__, msg_pkt->packet_type,
+			msg_pkt->session_id,
+			msg_pkt->client_data.transaction_id,
+			msg_pkt->client_data.kdata & (FENCE_BIT - 1));
+	}
+
+	if ((msm_cvp_debug & CVP_PKT) &&
+		!(queue->qhdr_type & HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q)) {
+		dprintk(CVP_PKT, "%s: %pK\n", __func__, qinfo);
+		__dump_packet(packet, CVP_PKT);
+	}
+
+	return rc;
+}
+
+static int __smem_alloc(struct iris_hfi_device *dev, struct cvp_mem_addr *mem,
+			u32 size, u32 align, u32 flags)
+{
+	struct msm_cvp_smem *alloc = &mem->mem_data;
+	int rc = 0;
+
+	if (!dev || !mem || !size) {
+		dprintk(CVP_ERR, "Invalid Params\n");
+		return -EINVAL;
+	}
+
+	dprintk(CVP_INFO, "start to alloc size: %d, flags: %d\n", size, flags);
+	alloc->flags = flags;
+	rc = msm_cvp_smem_alloc(size, align, 1, (void *)dev->res, alloc);
+	if (rc) {
+		dprintk(CVP_ERR, "Alloc failed\n");
+		rc = -ENOMEM;
+		goto fail_smem_alloc;
+	}
+
+	dprintk(CVP_MEM, "%s: ptr = %pK, size = %d\n", __func__,
+			alloc->kvaddr, size);
+
+	mem->mem_size = alloc->size;
+	mem->align_virtual_addr = alloc->kvaddr;
+	mem->align_device_addr = alloc->device_addr;
+	alloc->pkt_type = 0;
+	alloc->buf_idx = 0;
+
+	return rc;
+fail_smem_alloc:
+	return rc;
+}
+
+static void __smem_free(struct iris_hfi_device *dev, struct msm_cvp_smem *mem)
+{
+	if (!dev || !mem) {
+		dprintk(CVP_ERR, "invalid param %pK %pK\n", dev, mem);
+		return;
+	}
+
+	msm_cvp_smem_free(mem);
+}
+
+static void __write_register(struct iris_hfi_device *device,
+		u32 reg, u32 value)
+{
+	u32 hwiosymaddr = reg;
+	u8 *base_addr;
+
+	if (!device) {
+		dprintk(CVP_ERR, "Invalid params: %pK\n", device);
+		return;
+	}
+
+	__strict_check(device);
+
+	if (!device->power_enabled) {
+		dprintk(CVP_WARN,
+			"HFI Write register failed : Power is OFF\n");
+		msm_cvp_res_handle_fatal_hw_error(device->res, true);
+		return;
+	}
+
+	base_addr = device->cvp_hal_data->register_base;
+	dprintk(CVP_REG, "Base addr: %pK, written to: %#x, Value: %#x...\n",
+		base_addr, hwiosymaddr, value);
+	base_addr += hwiosymaddr;
+	writel_relaxed(value, base_addr);
+
+	/*
+	 * Memory barrier to make sure value is written into the register.
+	 */
+	wmb();
+}
+
+static int __read_gcc_register(struct iris_hfi_device *device, u32 reg)
+{
+	int rc = 0;
+	u8 *base_addr;
+
+	if (!device) {
+		dprintk(CVP_ERR, "Invalid params: %pK\n", device);
+		return -EINVAL;
+	}
+
+	__strict_check(device);
+
+	if (!device->power_enabled) {
+		dprintk(CVP_WARN,
+			"%s HFI Read register failed : Power is OFF\n",
+			__func__);
+		msm_cvp_res_handle_fatal_hw_error(device->res, true);
+		return -EINVAL;
+	}
+
+	base_addr = device->cvp_hal_data->gcc_reg_base;
+
+	rc = readl_relaxed(base_addr + reg);
+	/*
+	 * Memory barrier to make sure value is read correctly from the
+	 * register.
+	 */
+	rmb();
+	dprintk(CVP_REG,
+		"GCC Base addr: %pK, read from: %#x, value: %#x...\n",
+		base_addr, reg, rc);
+
+	return rc;
+}
+
+
+static int __read_register(struct iris_hfi_device *device, u32 reg)
+{
+	int rc = 0;
+	u8 *base_addr;
+
+	if (!device) {
+		dprintk(CVP_ERR, "Invalid params: %pK\n", device);
+		return -EINVAL;
+	}
+
+	__strict_check(device);
+
+	if (!device->power_enabled) {
+		dprintk(CVP_WARN,
+			"HFI Read register failed : Power is OFF\n");
+		msm_cvp_res_handle_fatal_hw_error(device->res, true);
+		return -EINVAL;
+	}
+
+	base_addr = device->cvp_hal_data->register_base;
+
+	rc = readl_relaxed(base_addr + reg);
+	/*
+	 * Memory barrier to make sure value is read correctly from the
+	 * register.
+	 */
+	rmb();
+	dprintk(CVP_REG, "Base addr: %pK, read from: %#x, value: %#x...\n",
+		base_addr, reg, rc);
+
+	return rc;
+}
+
+static bool __is_ctl_power_on(struct iris_hfi_device *device)
+{
+	u32 reg;
+
+	reg = __read_register(device, CVP_CC_MVS1C_GDSCR);
+	if (!(reg & 0x80000000))
+		return false;
+
+	reg = __read_register(device, CVP_CC_MVS1C_CBCR);
+	if (reg & 0x80000000)
+		return false;
+
+	return true;
+}
+
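+/*
+ * Program the platform reg_set table, the NoC QoS/priority LUTs and
+ * the UBWC enable bits; cvp_xo_reset is held across the NoC writes.
+ */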
+static int __set_registers(struct iris_hfi_device *device)
+{
+	struct msm_cvp_core *core;
+	struct msm_cvp_platform_data *pdata;
+	struct reg_set *reg_set;
+	int i;
+
+	if (!device->res) {
+		dprintk(CVP_ERR,
+			"device resources null, cannot set registers\n");
+		return -EINVAL;
+	}
+
+	core = cvp_driver->cvp_core;
+	pdata = core->platform_data;
+
+	reg_set = &device->res->reg_set;
+	for (i = 0; i < reg_set->count; i++) {
+		__write_register(device, reg_set->reg_tbl[i].reg,
+				reg_set->reg_tbl[i].value);
+		dprintk(CVP_REG, "write_reg offset=%x, val=%x\n",
+					reg_set->reg_tbl[i].reg,
+					reg_set->reg_tbl[i].value);
+	}
+
+	i = call_iris_op(device, reset_control_acquire_name, device, "cvp_xo_reset");
+	if (i) {
+		dprintk(CVP_WARN, "%s Fail acquire xo_reset\n", __func__);
+		return -EINVAL;
+	}
+
+	__write_register(device, CVP_CPU_CS_AXI4_QOS,
+				pdata->noc_qos->axi_qos);
+	__write_register(device,
+			CVP_NOC_RGE_PRIORITYLUT_LOW +
+			device->res->qos_noc_rge_niu_offset,
+			pdata->noc_qos->prioritylut_low);
+	__write_register(device,
+			CVP_NOC_RGE_PRIORITYLUT_HIGH +
+			device->res->qos_noc_rge_niu_offset,
+			pdata->noc_qos->prioritylut_high);
+	__write_register(device,
+			CVP_NOC_RGE_URGENCY_LOW +
+			device->res->qos_noc_rge_niu_offset,
+			pdata->noc_qos->urgency_low);
+	__write_register(device,
+			CVP_NOC_RGE_DANGERLUT_LOW +
+			device->res->qos_noc_rge_niu_offset,
+			pdata->noc_qos->dangerlut_low);
+	__write_register(device,
+			CVP_NOC_RGE_SAFELUT_LOW +
+			device->res->qos_noc_rge_niu_offset,
+			pdata->noc_qos->safelut_low);
+	__write_register(device,
+			CVP_NOC_GCE_PRIORITYLUT_LOW +
+			device->res->qos_noc_gce_vadl_tof_niu_offset,
+			pdata->noc_qos->prioritylut_low);
+	__write_register(device,
+			CVP_NOC_GCE_PRIORITYLUT_HIGH +
+			device->res->qos_noc_gce_vadl_tof_niu_offset,
+			pdata->noc_qos->prioritylut_high);
+	__write_register(device,
+			CVP_NOC_GCE_URGENCY_LOW +
+			device->res->qos_noc_gce_vadl_tof_niu_offset,
+			pdata->noc_qos->urgency_low);
+	__write_register(device,
+			CVP_NOC_GCE_DANGERLUT_LOW +
+			device->res->qos_noc_gce_vadl_tof_niu_offset,
+			pdata->noc_qos->dangerlut_low);
+	__write_register(device,
+			CVP_NOC_GCE_SAFELUT_LOW +
+			device->res->qos_noc_gce_vadl_tof_niu_offset,
+			pdata->noc_qos->safelut_low);
+
+	__write_register(device,
+			CVP_NOC_CDM_PRIORITYLUT_LOW +
+			device->res->qos_noc_cdm_niu_offset,
+			pdata->noc_qos->prioritylut_low);
+	__write_register(device,
+			CVP_NOC_CDM_PRIORITYLUT_HIGH +
+			device->res->qos_noc_cdm_niu_offset,
+			pdata->noc_qos->prioritylut_high);
+	__write_register(device,
+			CVP_NOC_CDM_URGENCY_LOW +
+			device->res->qos_noc_cdm_niu_offset,
+			pdata->noc_qos->urgency_low_ro);
+	__write_register(device,
+			CVP_NOC_CDM_DANGERLUT_LOW +
+			device->res->qos_noc_cdm_niu_offset,
+			pdata->noc_qos->dangerlut_low);
+	__write_register(device,
+			CVP_NOC_CDM_SAFELUT_LOW +
+			device->res->qos_noc_cdm_niu_offset,
+			pdata->noc_qos->safelut_low);
+
+	/* Below registers write moved from FW to SW to enable UBWC */
+	__write_register(device,
+			CVP_NOC_RGE_NIU_DECCTL_LOW +
+			device->res->qos_noc_rge_niu_offset,
+			0x1);
+	__write_register(device,
+			CVP_NOC_RGE_NIU_ENCCTL_LOW +
+			device->res->qos_noc_rge_niu_offset,
+			0x1);
+	__write_register(device,
+			CVP_NOC_GCE_VADL_TOF_NIU_DECCTL_LOW +
+			device->res->qos_noc_gce_vadl_tof_niu_offset,
+			0x1);
+	__write_register(device,
+			CVP_NOC_GCE_VADL_TOF_NIU_ENCCTL_LOW +
+			device->res->qos_noc_gce_vadl_tof_niu_offset,
+			0x1);
+
+	__write_register(device,
+			CVP_NOC_CORE_ERR_MAINCTL_LOW_OFFS +
+			device->res->noc_core_err_offset,
+			0x3);
+	__write_register(device,
+			CVP_NOC_MAIN_SIDEBANDMANAGER_FAULTINEN0_LOW +
+			device->res->noc_main_sidebandmanager_offset,
+			0x1);
+
+	call_iris_op(device, reset_control_release_name, device, "cvp_xo_reset");
+	return 0;
+}
+
+/*
+ * The existence of this function is a hack for 8996 (or certain Iris versions)
+ * to overcome a hardware bug.  Whenever the GDSCs momentarily power collapse
+ * (after calling __hand_off_regulators()), the values of the threshold
+ * registers (typically programmed by TZ) are incorrectly reset.  As a result,
+ * we reprogram these registers at certain agreed-upon points.
+ */
+static void __set_threshold_registers(struct iris_hfi_device *device)
+{
+	u32 version = __read_register(device, CVP_WRAPPER_HW_VERSION);
+
+	version &= ~GENMASK(15, 0);
+	if (version != (0x3 << 28 | 0x43 << 16))
+		return;
+
+	if (__tzbsp_set_cvp_state(TZ_SUBSYS_STATE_RESTORE_THRESHOLD))
+		dprintk(CVP_ERR, "Failed to restore threshold values\n");
+}
+
+static int __unvote_buses(struct iris_hfi_device *device)
+{
+	int rc = 0;
+	struct bus_info *bus = NULL;
+
+	kfree(device->bus_vote.data);
+	device->bus_vote.data = NULL;
+	device->bus_vote.data_count = 0;
+
+	iris_hfi_for_each_bus(device, bus) {
+		rc = cvp_set_bw(bus, 0);
+		if (rc) {
+			dprintk(CVP_ERR,
+			"%s: Failed unvoting bus\n", __func__);
+			goto err_unknown_device;
+		}
+	}
+
+err_unknown_device:
+	return rc;
+}
+
+static int __vote_buses(struct iris_hfi_device *device,
+		struct cvp_bus_vote_data *data, int num_data)
+{
+	int rc = 0;
+	struct bus_info *bus = NULL;
+	struct cvp_bus_vote_data *new_data = NULL;
+
+	if (!num_data) {
+		dprintk(CVP_PWR, "No vote data available\n");
+		goto no_data_count;
+	} else if (!data) {
+		dprintk(CVP_ERR, "Invalid voting data\n");
+		return -EINVAL;
+	}
+
+	new_data = kmemdup(data, num_data * sizeof(*new_data), GFP_KERNEL);
+	if (!new_data) {
+		dprintk(CVP_ERR, "Can't alloc memory to cache bus votes\n");
+		rc = -ENOMEM;
+		goto err_no_mem;
+	}
+
+no_data_count:
+	kfree(device->bus_vote.data);
+	device->bus_vote.data = new_data;
+	device->bus_vote.data_count = num_data;
+
+	iris_hfi_for_each_bus(device, bus) {
+		if (bus) {
+			rc = cvp_set_bw(bus, bus->range[1]);
+			if (rc)
+				dprintk(CVP_ERR,
+				"Failed voting bus %s to ab %u\n",
+				bus->name, bus->range[1]*1000);
+		}
+	}
+
+err_no_mem:
+	return rc;
+}
+
+static int iris_hfi_vote_buses(void *dev, struct bus_info *bus, unsigned long bw)
+{
+	int rc = 0;
+	struct iris_hfi_device *device = dev;
+
+	if (!device)
+		return -EINVAL;
+
+	mutex_lock(&device->lock);
+	rc = cvp_set_bw(bus, bw);
+	mutex_unlock(&device->lock);
+
+	return rc;
+}
+
+static int __core_set_resource(struct iris_hfi_device *device,
+		struct cvp_resource_hdr *resource_hdr, void *resource_value)
+{
+	struct cvp_hfi_cmd_sys_set_resource_packet *pkt;
+	u8 packet[CVP_IFACEQ_VAR_SMALL_PKT_SIZE];
+	int rc = 0;
+
+	if (!device || !resource_hdr || !resource_value) {
+		dprintk(CVP_ERR, "set_res: Invalid Params\n");
+		return -EINVAL;
+	}
+
+	pkt = (struct cvp_hfi_cmd_sys_set_resource_packet *) packet;
+
+	rc = call_hfi_pkt_op(device, sys_set_resource,
+			pkt, resource_hdr, resource_value);
+	if (rc) {
+		dprintk(CVP_ERR, "set_res: failed to create packet\n");
+		goto err_create_pkt;
+	}
+
+	rc = __iface_cmdq_write(device, pkt);
+	if (rc)
+		rc = -ENOTEMPTY;
+
+err_create_pkt:
+	return rc;
+}
+
+static int __core_release_resource(struct iris_hfi_device *device,
+		struct cvp_resource_hdr *resource_hdr)
+{
+	struct cvp_hfi_cmd_sys_release_resource_packet *pkt;
+	u8 packet[CVP_IFACEQ_VAR_SMALL_PKT_SIZE];
+	int rc = 0;
+
+	if (!device || !resource_hdr) {
+		dprintk(CVP_ERR, "release_res: Invalid Params\n");
+		return -EINVAL;
+	}
+
+	pkt = (struct cvp_hfi_cmd_sys_release_resource_packet *) packet;
+
+	rc = call_hfi_pkt_op(device, sys_release_resource,
+			pkt, resource_hdr);
+
+	if (rc) {
+		dprintk(CVP_ERR, "release_res: failed to create packet\n");
+		goto err_create_pkt;
+	}
+
+	rc = __iface_cmdq_write(device, pkt);
+	if (rc)
+		rc = -ENOTEMPTY;
+
+err_create_pkt:
+	return rc;
+}
+
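+/* Ask TrustZone to move the CVP subsystem into the requested state */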
+static int __tzbsp_set_cvp_state(enum tzbsp_subsys_state state)
+{
+	int rc = 0;
+
+	rc = qcom_scm_set_remote_state(state, TZBSP_CVP_PAS_ID);
+	dprintk(CVP_CORE, "Set state %d, resp %d\n", state, rc);
+
+	if (rc) {
+		dprintk(CVP_ERR, "Failed qcom_scm_set_remote_state %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+/*
+ * Based on fal10_veto, X2RPMh, core_pwr_on and PWAitMode value, infer
+ * value of xtss_sw_reset. xtss_sw_reset is a TZ register bit. Driver
+ * cannot access it directly.
+ *
+ * In __boot_firmware() function, the caller of this function. It checks
+ * "core_pwr_on" == false, basically core powered off. So this function
+ * doesn't check core_pwr_on. Assume core_pwr_on = false.
+ *
+ * fal10_veto = VPU_CPU_CS_X2RPMh[2] |
+ *		( ~VPU_CPU_CS_X2RPMh[1] & core_pwr_on ) |
+ *		( ~VPU_CPU_CS_X2RPMh[0] & ~( xtss_sw_reset | PWaitMode ) ) ;
+ */
+static inline void check_tensilica_in_reset(struct iris_hfi_device *device)
+{
+	u32 X2RPMh, fal10_veto, wait_mode;
+
+	X2RPMh =  __read_register(device, CVP_CPU_CS_X2RPMh);
+	X2RPMh = X2RPMh & 0x7;
+
+	/* wait_mode = 1: Tensilica is in WFI mode (PWaitMode = true) */
+	wait_mode = __read_register(device, CVP_WRAPPER_CPU_STATUS);
+	wait_mode = wait_mode & 0x1;
+
+	fal10_veto = __read_register(device, CVP_CPU_CS_X2RPMh_STATUS);
+	fal10_veto = fal10_veto & 0x1;
+
+	dprintk(CVP_WARN, "tensilica reset check %#x %#x %#x\n",
+		X2RPMh, wait_mode, fal10_veto);
+}
+
+static const char boot_states[0x40][32] = {
+	"NOT INIT",
+	"RST_START",
+	"INIT_MEMCTL",
+	"INTENABLE_RST",
+	"LITBASE_RST",
+	"PREFETCH_EN",
+	"MPU_INIT",
+	"CTRL_INIT_READ",
+	"MEMCTL_L1_FIX",
+	"RESTORE_EXTRA_NW",
+	"CORE_RESTORE",
+	"COLD_BOOT",
+	"DISABLE_CACHE",
+	"BEFORE_MPU_C",
+	"RET_MPU_C",
+	"IN_MPU_C",
+	"IN_MPU_DEFAULT",
+	"IN_MPU_SYNX",
+	"UCR_SIZE_FAIL",
+	"UCR_ADDR_FAIL",
+	"UCR1_SIZE_FAIL",
+	"UCR1_ADDR_FAIL",
+	"UCR_OVERLAPPED_UCR1",
+	"UCR1_OVERLAPPED_UCR",
+	"UCR_EQ_UCR1",
+	"MPU_CHECK_DONE",
+	"BEFORE_INT_LOCK",
+	"AFTER_INT_LOCK",
+	"BEFORE_INT_UNLOCK",
+	"AFTER_INT_UNLOCK",
+	"CALL_START",
+	"MAIN_ENTRY",
+	"VENUS_INIT_ENTRY",
+	"VSYS_INIT_ENTRY",
+	"BEFORE_XOS_CLK",
+	"AFTER_XOS_CLK",
+	"LOG_MUTEX_INIT",
+	"CREATE_FRAMEWORK_ENTRY",
+	"DTG_INIT",
+	"IDLE_TASK_INIT",
+	"VENUS_CORE_INIT",
+	"HW_CORES_INIT",
+	"RST_THREAD_INIT",
+	"HOST_THREAD_INIT",
+	"ALL_THREADS_INIT",
+	"TASK_MEMPOOL",
+	"SESSION_MUTEX",
+	"SIGNALS_INIT",
+	"RST_SIGNAL_INIT",
+	"INTR_EN_HOST",
+	"INTR_REG_HOST",
+	"INTR_EN_DSP",
+	"INTR_REG_DSP",
+	"X2HSOFTINTEN",
+	"H2XSOFTINTEN",
+	"CPU2DSPINTEN",
+	"DSP2CPUINT_SWRESET",
+	"THREADS_START",
+	"RST_THREAD_START",
+	"HST_THREAD_START",
+	"HST_THREAD_ENTRY"
+};
+
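+/*
+ * Boot sequence: hand regulator control to hardware, wait for the core
+ * GDSC to power down (low-power mode only), write CVP_CTRL_INIT and
+ * poll CVP_CTRL_STATUS until the firmware reports init done, then
+ * enable the host-to-Tensilica interrupt.
+ */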
+static inline int __boot_firmware(struct iris_hfi_device *device)
+{
+	int rc = 0, loop = 10;
+	u32 ctrl_init_val = 0, ctrl_status = 0, count = 0, max_tries = 5000;
+	u32 reg_gdsc;
+
+	/*
+	 * Hand off control of regulators to h/w _after_ enabling clocks.
+	 * Note that the GDSC will turn off when switching from normal
+	 * (s/w triggered) to fast (HW triggered) unless the h/w vote is
+	 * present. Since Iris isn't up yet, the GDSC will be off briefly.
+	 */
+	if (__enable_hw_power_collapse(device))
+		dprintk(CVP_ERR, "Failed to enable inter-frame PC\n");
+
+	if (!msm_cvp_fw_low_power_mode)
+		goto skip_core_power_check;
+
+	while (loop) {
+		reg_gdsc = __read_register(device, CVP_CC_MVS1_GDSCR);
+		if (reg_gdsc & 0x80000000) {
+			usleep_range(100, 200);
+			loop--;
+		} else {
+			break;
+		}
+	}
+
+	if (!loop)
+		dprintk(CVP_ERR, "Failed to power off CORE during resume\n");
+
+skip_core_power_check:
+	ctrl_init_val = BIT(0);
+	/* RUMI: CVP_CTRL_INIT in MPTest has bit 0 and 3 set */
+	__write_register(device, CVP_CTRL_INIT, ctrl_init_val);
+	while (!(ctrl_status & CVP_CTRL_INIT_STATUS__M) && count < max_tries) {
+		ctrl_status = __read_register(device, CVP_CTRL_STATUS);
+		if ((ctrl_status & CVP_CTRL_ERROR_STATUS__M) == 0x4) {
+			dprintk(CVP_ERR, "invalid setting for UC_REGION\n");
+			rc = -ENODATA;
+			break;
+		}
+
+		/* Reduce to 50, 100 on silicon */
+		usleep_range(50, 100);
+		count++;
+	}
+
+	if (!(ctrl_status & CVP_CTRL_INIT_STATUS__M)) {
+		ctrl_init_val = __read_register(device, CVP_CTRL_INIT);
+		dprintk(CVP_ERR,
+			"Failed to boot FW status: %x %x %s\n",
+			ctrl_status, ctrl_init_val,
+			boot_states[(ctrl_status >> 9) & 0x3f]);
+		check_tensilica_in_reset(device);
+		rc = -ENODEV;
+	}
+
+	/* Enable interrupt before sending commands to tensilica */
+	__write_register(device, CVP_CPU_CS_H2XSOFTINTEN, 0x1);
+	__write_register(device, CVP_CPU_CS_X2RPMh, 0x0);
+
+	return rc;
+}
+
+static int iris_hfi_resume(void *dev)
+{
+	int rc = 0;
+	struct iris_hfi_device *device = (struct iris_hfi_device *) dev;
+
+	if (!device) {
+		dprintk(CVP_ERR, "%s invalid device\n", __func__);
+		return -EINVAL;
+	}
+
+	dprintk(CVP_CORE, "Resuming Iris\n");
+
+	mutex_lock(&device->lock);
+	rc = __resume(device);
+	mutex_unlock(&device->lock);
+
+	return rc;
+}
+
+static int iris_hfi_suspend(void *dev)
+{
+	int rc = 0;
+	struct iris_hfi_device *device = (struct iris_hfi_device *) dev;
+
+	if (!device) {
+		dprintk(CVP_ERR, "%s invalid device\n", __func__);
+		return -EINVAL;
+	} else if (!device->res->sw_power_collapsible) {
+		return -ENOTSUPP;
+	}
+
+	dprintk(CVP_CORE, "Suspending Iris\n");
+	mutex_lock(&device->lock);
+	rc = __power_collapse(device, true);
+	if (rc) {
+		dprintk(CVP_WARN, "%s: Iris is busy\n", __func__);
+		rc = -EBUSY;
+	}
+	mutex_unlock(&device->lock);
+
+	/* Cancel pending delayed works if any */
+	if (!rc)
+		cancel_delayed_work(&iris_hfi_pm_work);
+
+	return rc;
+}
+
+void cvp_dump_csr(struct iris_hfi_device *dev)
+{
+	u32 reg;
+
+	if (!dev)
+		return;
+	if (!dev->power_enabled || dev->reg_dumped)
+		return;
+	reg = __read_register(dev, CVP_WRAPPER_CPU_STATUS);
+	dprintk(CVP_ERR, "CVP_WRAPPER_CPU_STATUS: %x\n", reg);
+	reg = __read_register(dev, CVP_CPU_CS_SCIACMDARG0);
+	dprintk(CVP_ERR, "CVP_CPU_CS_SCIACMDARG0: %x\n", reg);
+	//reg = __read_register(dev, CVP_WRAPPER_INTR_STATUS);
+	//dprintk(CVP_ERR, "CVP_WRAPPER_INTR_STATUS: %x\n", reg);
+	//reg = __read_register(dev, CVP_CPU_CS_H2ASOFTINT);
+	//dprintk(CVP_ERR, "CVP_CPU_CS_H2ASOFTINT: %x\n", reg);
+	reg = __read_register(dev, CVP_CPU_CS_A2HSOFTINT);
+	dprintk(CVP_ERR, "CVP_CPU_CS_A2HSOFTINT: %x\n", reg);
+	reg = __read_register(dev, CVP_CC_MVS1C_GDSCR);
+	dprintk(CVP_ERR, "CVP_CC_MVS1C_GDSCR: %x\n", reg);
+	reg = __read_register(dev, CVP_CC_MVS1C_CBCR);
+	dprintk(CVP_ERR, "CVP_CC_MVS1C_CBCR: %x\n", reg);
+	reg = __read_register(dev, CVP_WRAPPER_CPU_CLOCK_CONFIG);
+	dprintk(CVP_ERR, "CVP_WRAPPER_CPU_CLOCK_CONFIG: %x\n", reg);
+	reg = __read_register(dev, CVP_WRAPPER_CORE_CLOCK_CONFIG);
+	dprintk(CVP_ERR, "CVP_WRAPPER_CORE_CLOCK_CONFIG: %x\n", reg);
+
+	dump_noc_reg(dev);
+
+	dev->reg_dumped = true;
+}
+
+static int iris_hfi_flush_debug_queue(void *dev)
+{
+	int rc = 0;
+	struct iris_hfi_device *device = (struct iris_hfi_device *) dev;
+
+	if (!device) {
+		dprintk(CVP_ERR, "%s invalid device\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&device->lock);
+
+	if (!device->power_enabled) {
+		dprintk(CVP_WARN, "%s: iris power off\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+	cvp_dump_csr(device);
+	__flush_debug_queue(device, NULL);
+exit:
+	mutex_unlock(&device->lock);
+	return rc;
+}
+
+static int iris_hfi_scale_clocks(void *dev, u32 freq)
+{
+	int rc = 0;
+	struct iris_hfi_device *device = dev;
+
+	if (!device) {
+		dprintk(CVP_ERR, "Invalid args: %pK\n", device);
+		return -EINVAL;
+	}
+
+	mutex_lock(&device->lock);
+
+	if (__resume(device)) {
+		dprintk(CVP_ERR, "Resume from power collapse failed\n");
+		rc = -ENODEV;
+		goto exit;
+	}
+
+	rc = msm_cvp_set_clocks_impl(device, freq);
+exit:
+	mutex_unlock(&device->lock);
+
+	return rc;
+}
+
+/* Writes into cmdq without raising an interrupt */
+static int __iface_cmdq_write_relaxed(struct iris_hfi_device *device,
+		void *pkt, bool *requires_interrupt)
+{
+	struct cvp_iface_q_info *q_info;
+	struct cvp_hal_cmd_pkt_hdr *cmd_packet;
+	int result = -E2BIG;
+
+	if (!device || !pkt) {
+		dprintk(CVP_ERR, "Invalid Params\n");
+		return -EINVAL;
+	}
+
+	__strict_check(device);
+
+	if (!__core_in_valid_state(device)) {
+		dprintk(CVP_ERR, "%s - fw not in init state\n", __func__);
+		result = -EINVAL;
+		goto err_q_null;
+	}
+
+	cmd_packet = (struct cvp_hal_cmd_pkt_hdr *)pkt;
+	device->last_packet_type = cmd_packet->packet_type;
+
+	q_info = &device->iface_queues[CVP_IFACEQ_CMDQ_IDX];
+	if (!q_info) {
+		dprintk(CVP_ERR, "cannot write to shared Q's\n");
+		goto err_q_null;
+	}
+
+	if (!q_info->q_array.align_virtual_addr) {
+		dprintk(CVP_ERR, "cannot write to shared CMD Q's\n");
+		result = -ENODATA;
+		goto err_q_null;
+	}
+
+	if (__resume(device)) {
+		dprintk(CVP_ERR, "%s: Power on failed\n", __func__);
+		goto err_q_write;
+	}
+
+	if (!__write_queue(q_info, (u8 *)pkt, requires_interrupt)) {
+		if (device->res->sw_power_collapsible) {
+			cancel_delayed_work(&iris_hfi_pm_work);
+			if (!queue_delayed_work(device->iris_pm_workq,
+				&iris_hfi_pm_work,
+				msecs_to_jiffies(
+				device->res->msm_cvp_pwr_collapse_delay))) {
+				dprintk(CVP_PWR,
+				"PM work already scheduled\n");
+			}
+		}
+
+		result = 0;
+	} else {
+		dprintk(CVP_ERR, "__iface_cmdq_write: queue full\n");
+	}
+
+err_q_write:
+err_q_null:
+	return result;
+}
+
+static int __iface_cmdq_write(struct iris_hfi_device *device, void *pkt)
+{
+	bool needs_interrupt = false;
+	int rc = __iface_cmdq_write_relaxed(device, pkt, &needs_interrupt);
+	int i = 0;
+
+	if (!rc && needs_interrupt) {
+		/* Consumer of cmdq prefers that we raise an interrupt */
+		rc = 0;
+		if (!__is_ctl_power_on(device))
+			dprintk(CVP_ERR, "%s power off, don't access reg\n", __func__);
+		i = call_iris_op(device, reset_control_acquire_name, device, "cvp_xo_reset");
+		if (i) {
+			dprintk(CVP_WARN, "%s Fail acquire xo_reset at %d\n", __func__, __LINE__);
+			return -EINVAL;
+		}
+		__write_register(device, CVP_CPU_CS_H2ASOFTINT, 1);
+		call_iris_op(device, reset_control_release_name, device, "cvp_xo_reset");
+	}
+
+	return rc;
+}
+
+static int __iface_msgq_read(struct iris_hfi_device *device, void *pkt)
+{
+	u32 tx_req_is_set = 0;
+	int rc = 0;
+	struct cvp_iface_q_info *q_info;
+	int i = 0;
+
+	if (!pkt) {
+		dprintk(CVP_ERR, "Invalid Params\n");
+		return -EINVAL;
+	}
+
+	__strict_check(device);
+
+	if (!__core_in_valid_state(device)) {
+		dprintk(CVP_WARN, "%s - fw not in init state\n", __func__);
+		rc = -EINVAL;
+		goto read_error_null;
+	}
+
+	q_info = &device->iface_queues[CVP_IFACEQ_MSGQ_IDX];
+	if (q_info->q_array.align_virtual_addr == NULL) {
+		dprintk(CVP_ERR, "cannot read from shared MSG Q's\n");
+		rc = -ENODATA;
+		goto read_error_null;
+	}
+
+	if (!__read_queue(q_info, (u8 *)pkt, &tx_req_is_set)) {
+		if (tx_req_is_set) {
+			if (!__is_ctl_power_on(device))
+				dprintk(CVP_ERR, "%s power off, don't access reg\n", __func__);
+			i = call_iris_op(device, reset_control_acquire_name, device,
+					"cvp_xo_reset");
+			if (i) {
+				dprintk(CVP_WARN, "%s Fail acquire xo_reset at %d\n",
+						__func__, __LINE__);
+				return -EINVAL;
+			}
+			__write_register(device, CVP_CPU_CS_H2ASOFTINT, 1);
+			call_iris_op(device, reset_control_release_name, device, "cvp_xo_reset");
+		}
+		rc = 0;
+	} else
+		rc = -ENODATA;
+
+read_error_null:
+	return rc;
+}
+
+static int __iface_dbgq_read(struct iris_hfi_device *device, void *pkt)
+{
+	u32 tx_req_is_set = 0;
+	int rc = 0;
+	struct cvp_iface_q_info *q_info;
+	int i = 0;
+
+	if (!pkt) {
+		dprintk(CVP_ERR, "Invalid Params\n");
+		return -EINVAL;
+	}
+
+	__strict_check(device);
+
+	q_info = &device->iface_queues[CVP_IFACEQ_DBGQ_IDX];
+	if (q_info->q_array.align_virtual_addr == NULL) {
+		dprintk(CVP_ERR, "cannot read from shared DBG Q's\n");
+		rc = -ENODATA;
+		goto dbg_error_null;
+	}
+
+	if (!__read_queue(q_info, (u8 *)pkt, &tx_req_is_set)) {
+		if (tx_req_is_set) {
+			if (!__is_ctl_power_on(device))
+				dprintk(CVP_ERR, "%s power off, don't access reg\n", __func__);
+			i = call_iris_op(device, reset_control_acquire_name, device,
+					"cvp_xo_reset");
+			if (i) {
+				dprintk(CVP_WARN, "%s Fail acquire xo_reset at %d\n",
+						__func__, __LINE__);
+				return -EINVAL;
+			}
+			__write_register(device, CVP_CPU_CS_H2ASOFTINT, 1);
+			call_iris_op(device, reset_control_release_name, device, "cvp_xo_reset");
+		}
+		rc = 0;
+	} else
+		rc = -ENODATA;
+
+dbg_error_null:
+	return rc;
+}
+
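+/*
+ * Reset a shared queue header to HFI defaults: queue active, read/write
+ * indices at zero, watermarks of one packet, and rx_req set so the
+ * reader is signalled on new packets. qhdr_q_size is in 32-bit words.
+ */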
+static void __set_queue_hdr_defaults(struct cvp_hfi_queue_header *q_hdr)
+{
+	q_hdr->qhdr_status = 0x1;
+	q_hdr->qhdr_type = CVP_IFACEQ_DFLT_QHDR;
+	q_hdr->qhdr_q_size = CVP_IFACEQ_QUEUE_SIZE / 4;
+	q_hdr->qhdr_pkt_size = 0;
+	q_hdr->qhdr_rx_wm = 0x1;
+	q_hdr->qhdr_tx_wm = 0x1;
+	q_hdr->qhdr_rx_req = 0x1;
+	q_hdr->qhdr_tx_req = 0x0;
+	q_hdr->qhdr_rx_irq_status = 0x0;
+	q_hdr->qhdr_tx_irq_status = 0x0;
+	q_hdr->qhdr_read_idx = 0x0;
+	q_hdr->qhdr_write_idx = 0x0;
+}
+
+/*
+ * Unused, kept for reference.
+ */
+/*
+static void __interface_dsp_queues_release(struct iris_hfi_device *device)
+{
+	int i;
+	struct msm_cvp_smem *mem_data = &device->dsp_iface_q_table.mem_data;
+	struct context_bank_info *cb = mem_data->mapping_info.cb_info;
+
+	if (!device->dsp_iface_q_table.align_virtual_addr) {
+		dprintk(CVP_ERR, "%s: already released\n", __func__);
+		return;
+	}
+
+	dma_unmap_single_attrs(cb->dev, mem_data->device_addr,
+		mem_data->size, DMA_BIDIRECTIONAL, 0);
+	dma_free_coherent(device->res->mem_cdsp.dev, mem_data->size,
+		mem_data->kvaddr, mem_data->dma_handle);
+
+	for (i = 0; i < CVP_IFACEQ_NUMQ; i++) {
+		device->dsp_iface_queues[i].q_hdr = NULL;
+		device->dsp_iface_queues[i].q_array.align_virtual_addr = NULL;
+		device->dsp_iface_queues[i].q_array.align_device_addr = 0;
+	}
+	device->dsp_iface_q_table.align_virtual_addr = NULL;
+	device->dsp_iface_q_table.align_device_addr = 0;
+}
+*/
+
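+/*
+ * DSP queues are allocated once from CDSP device memory and mapped into
+ * the DSP context bank; on subsequent calls the existing allocation is
+ * simply zeroed and its queue headers re-initialized.
+ */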
+static int __interface_dsp_queues_init(struct iris_hfi_device *dev)
+{
+	int rc = 0;
+	u32 i;
+	struct cvp_iface_q_info *iface_q;
+	int offset = 0;
+	phys_addr_t fw_bias = 0;
+	size_t q_size;
+	struct msm_cvp_smem *mem_data;
+	void *kvaddr;
+	dma_addr_t dma_handle;
+	dma_addr_t iova;
+	struct context_bank_info *cb;
+
+	q_size = ALIGN(QUEUE_SIZE, SZ_1M);
+	mem_data = &dev->dsp_iface_q_table.mem_data;
+
+	if (mem_data->kvaddr) {
+		memset((void *)mem_data->kvaddr, 0, q_size);
+		cvp_dsp_init_hfi_queue_hdr(dev);
+		return 0;
+	}
+	/* Allocate dsp queues from CDSP device memory */
+	kvaddr = dma_alloc_coherent(dev->res->mem_cdsp.dev, q_size,
+				&dma_handle, GFP_KERNEL);
+	if (IS_ERR_OR_NULL(kvaddr)) {
+		dprintk(CVP_ERR, "%s: failed dma allocation\n", __func__);
+		goto fail_dma_alloc;
+	}
+	cb = msm_cvp_smem_get_context_bank(dev->res, SMEM_CDSP);
+	if (!cb) {
+		dprintk(CVP_ERR,
+			"%s: failed to get DSP context bank\n", __func__);
+		goto fail_dma_map;
+	}
+	iova = dma_map_single_attrs(cb->dev, phys_to_virt(dma_handle),
+				q_size, DMA_BIDIRECTIONAL, 0);
+	if (dma_mapping_error(cb->dev, iova)) {
+		dprintk(CVP_ERR, "%s: failed dma mapping\n", __func__);
+		goto fail_dma_map;
+	}
+	dprintk(CVP_DSP,
+		"%s: kvaddr %pK dma_handle %#llx iova %#llx size %zd\n",
+		__func__, kvaddr, dma_handle, iova, q_size);
+
+	memset(mem_data, 0, sizeof(struct msm_cvp_smem));
+	mem_data->kvaddr = kvaddr;
+	mem_data->device_addr = iova;
+	mem_data->dma_handle = dma_handle;
+	mem_data->size = q_size;
+	mem_data->mapping_info.cb_info = cb;
+
+	if (!is_iommu_present(dev->res))
+		fw_bias = dev->cvp_hal_data->firmware_base;
+
+	dev->dsp_iface_q_table.align_virtual_addr = kvaddr;
+	dev->dsp_iface_q_table.align_device_addr = iova - fw_bias;
+	dev->dsp_iface_q_table.mem_size = CVP_IFACEQ_TABLE_SIZE;
+	offset = dev->dsp_iface_q_table.mem_size;
+
+	for (i = 0; i < CVP_IFACEQ_NUMQ; i++) {
+		iface_q = &dev->dsp_iface_queues[i];
+		iface_q->q_array.align_device_addr = iova + offset - fw_bias;
+		iface_q->q_array.align_virtual_addr = kvaddr + offset;
+		iface_q->q_array.mem_size = CVP_IFACEQ_QUEUE_SIZE;
+		offset += iface_q->q_array.mem_size;
+		spin_lock_init(&iface_q->hfi_lock);
+	}
+
+	cvp_dsp_init_hfi_queue_hdr(dev);
+
+	return rc;
+
+fail_dma_map:
+	dma_free_coherent(dev->res->mem_cdsp.dev, q_size, kvaddr, dma_handle);
+fail_dma_alloc:
+	return -ENOMEM;
+}
+
+static void __interface_queues_release(struct iris_hfi_device *device)
+{
+#ifdef CONFIG_EVA_TVM
+	int i;
+	struct cvp_hfi_mem_map_table *qdss;
+	struct cvp_hfi_mem_map *mem_map;
+	int num_entries = device->res->qdss_addr_set.count;
+	unsigned long mem_map_table_base_addr;
+	struct context_bank_info *cb;
+
+	if (device->qdss.align_virtual_addr) {
+		qdss = (struct cvp_hfi_mem_map_table *)
+			device->qdss.align_virtual_addr;
+		qdss->mem_map_num_entries = num_entries;
+		mem_map_table_base_addr =
+			device->qdss.align_device_addr +
+			sizeof(struct cvp_hfi_mem_map_table);
+		qdss->mem_map_table_base_addr =
+			(u32)mem_map_table_base_addr;
+		if ((unsigned long)qdss->mem_map_table_base_addr !=
+			mem_map_table_base_addr) {
+			dprintk(CVP_ERR,
+				"Invalid mem_map_table_base_addr %#lx",
+				mem_map_table_base_addr);
+		}
+
+		mem_map = (struct cvp_hfi_mem_map *)(qdss + 1);
+		cb = msm_cvp_smem_get_context_bank(device->res, 0);
+
+		for (i = 0; cb && i < num_entries; i++) {
+			iommu_unmap(cb->domain,
+						mem_map[i].virtual_addr,
+						mem_map[i].size);
+		}
+
+		__smem_free(device, &device->qdss.mem_data);
+	}
+
+	__smem_free(device, &device->iface_q_table.mem_data);
+	__smem_free(device, &device->sfr.mem_data);
+
+	for (i = 0; i < CVP_IFACEQ_NUMQ; i++) {
+		device->iface_queues[i].q_hdr = NULL;
+		device->iface_queues[i].q_array.align_virtual_addr = NULL;
+		device->iface_queues[i].q_array.align_device_addr = 0;
+	}
+
+	device->iface_q_table.align_virtual_addr = NULL;
+	device->iface_q_table.align_device_addr = 0;
+
+	device->qdss.align_virtual_addr = NULL;
+	device->qdss.align_device_addr = 0;
+
+	device->sfr.align_virtual_addr = NULL;
+	device->sfr.align_device_addr = 0;
+
+	device->mem_addr.align_virtual_addr = NULL;
+	device->mem_addr.align_device_addr = 0;
+#endif
+}
+
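+/*
+ * Map each QDSS address range into a fixed IOVA window starting at
+ * QDSS_IOVA_START and record the mapping in mem_map[]; if any mapping
+ * fails, everything mapped so far is unmapped again.
+ */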
+static int __get_qdss_iommu_virtual_addr(struct iris_hfi_device *dev,
+		struct cvp_hfi_mem_map *mem_map,
+		struct iommu_domain *domain)
+{
+	int i;
+	int rc = 0;
+	dma_addr_t iova = QDSS_IOVA_START;
+	int num_entries = dev->res->qdss_addr_set.count;
+	struct addr_range *qdss_addr_tbl = dev->res->qdss_addr_set.addr_tbl;
+
+	if (!num_entries)
+		return -ENODATA;
+
+	for (i = 0; i < num_entries; i++) {
+		if (domain) {
+			rc = iommu_map(domain, iova,
+					qdss_addr_tbl[i].start,
+					qdss_addr_tbl[i].size,
+					IOMMU_READ | IOMMU_WRITE);
+
+			if (rc) {
+				dprintk(CVP_ERR,
+						"IOMMU QDSS mapping failed for addr %#x\n",
+						qdss_addr_tbl[i].start);
+				rc = -ENOMEM;
+				break;
+			}
+		} else {
+			iova =  qdss_addr_tbl[i].start;
+		}
+
+		mem_map[i].virtual_addr = (u32)iova;
+		mem_map[i].physical_addr = qdss_addr_tbl[i].start;
+		mem_map[i].size = qdss_addr_tbl[i].size;
+		mem_map[i].attr = 0x0;
+
+		iova += mem_map[i].size;
+	}
+
+	if (i < num_entries) {
+		dprintk(CVP_ERR,
+			"QDSS mapping failed, Freeing other entries %d\n", i);
+
+		for (--i; domain && i >= 0; i--) {
+			iommu_unmap(domain,
+				mem_map[i].virtual_addr,
+				mem_map[i].size);
+		}
+	}
+
+	return rc;
+}
+
+static void __setup_ucregion_memory_map(struct iris_hfi_device *device)
+{
+	__write_register(device, CVP_UC_REGION_ADDR,
+			(u32)device->iface_q_table.align_device_addr);
+	__write_register(device, CVP_UC_REGION_SIZE, SHARED_QSIZE);
+	__write_register(device, CVP_QTBL_ADDR,
+			(u32)device->iface_q_table.align_device_addr);
+	__write_register(device, CVP_QTBL_INFO, 0x01);
+	if (device->sfr.align_device_addr)
+		__write_register(device, CVP_SFR_ADDR,
+				(u32)device->sfr.align_device_addr);
+	if (device->qdss.align_device_addr)
+		__write_register(device, CVP_MMAP_ADDR,
+				(u32)device->qdss.align_device_addr);
+	call_iris_op(device, setup_dsp_uc_memmap, device);
+}
+
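+/*
+ * Lay out the three host interface queues (CMD, MSG, DBG) contiguously
+ * after the queue table header, then fill in the table header and the
+ * per-queue headers.
+ */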
+static void __hfi_queue_init(struct iris_hfi_device *dev)
+{
+	int i, offset = 0;
+	struct cvp_hfi_queue_table_header *q_tbl_hdr;
+	struct cvp_iface_q_info *iface_q;
+	struct cvp_hfi_queue_header *q_hdr;
+
+	if (!dev)
+		return;
+
+	offset += dev->iface_q_table.mem_size;
+
+	for (i = 0; i < CVP_IFACEQ_NUMQ; i++) {
+		iface_q = &dev->iface_queues[i];
+		iface_q->q_array.align_device_addr =
+			dev->iface_q_table.align_device_addr + offset;
+		iface_q->q_array.align_virtual_addr =
+			dev->iface_q_table.align_virtual_addr + offset;
+		iface_q->q_array.mem_size = CVP_IFACEQ_QUEUE_SIZE;
+		offset += iface_q->q_array.mem_size;
+		iface_q->q_hdr = CVP_IFACEQ_GET_QHDR_START_ADDR(
+				dev->iface_q_table.align_virtual_addr, i);
+		__set_queue_hdr_defaults(iface_q->q_hdr);
+		spin_lock_init(&iface_q->hfi_lock);
+	}
+
+	q_tbl_hdr = (struct cvp_hfi_queue_table_header *)
+			dev->iface_q_table.align_virtual_addr;
+	q_tbl_hdr->qtbl_version = 0;
+	q_tbl_hdr->device_addr = (void *)dev;
+	strlcpy(q_tbl_hdr->name, "msm_cvp", sizeof(q_tbl_hdr->name));
+	q_tbl_hdr->qtbl_size = CVP_IFACEQ_TABLE_SIZE;
+	q_tbl_hdr->qtbl_qhdr0_offset =
+				sizeof(struct cvp_hfi_queue_table_header);
+	q_tbl_hdr->qtbl_qhdr_size = sizeof(struct cvp_hfi_queue_header);
+	q_tbl_hdr->qtbl_num_q = CVP_IFACEQ_NUMQ;
+	q_tbl_hdr->qtbl_num_active_q = CVP_IFACEQ_NUMQ;
+
+	iface_q = &dev->iface_queues[CVP_IFACEQ_CMDQ_IDX];
+	q_hdr = iface_q->q_hdr;
+	q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
+	q_hdr->qhdr_type |= HFI_Q_ID_HOST_TO_CTRL_CMD_Q;
+
+	iface_q = &dev->iface_queues[CVP_IFACEQ_MSGQ_IDX];
+	q_hdr = iface_q->q_hdr;
+	q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
+	q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_MSG_Q;
+
+	iface_q = &dev->iface_queues[CVP_IFACEQ_DBGQ_IDX];
+	q_hdr = iface_q->q_hdr;
+	q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
+	q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q;
+	/*
+	 * Set receive request to zero on debug queue as there is no
+	 * need of interrupt from cvp hardware for debug messages
+	 */
+	q_hdr->qhdr_rx_req = 0;
+}
+
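+/*
+ * The SFR buffer carries the firmware's failure-reason string on
+ * SYS_ERROR; only its size needs to be published here.
+ */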
+static void __sfr_init(struct iris_hfi_device *dev)
+{
+	struct cvp_hfi_sfr_struct *vsfr;
+
+	if (!dev)
+		return;
+
+	vsfr = (struct cvp_hfi_sfr_struct *) dev->sfr.align_virtual_addr;
+	if (vsfr)
+		vsfr->bufSize = ALIGNED_SFR_SIZE;
+}
+
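+/*
+ * Carve the shared region into the interface queue table, the SFR
+ * buffer and (optionally) the QDSS memory map, reusing any prior
+ * allocations across re-init, then initialize the DSP queues and
+ * program the uncached-region registers.
+ */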
+static int __interface_queues_init(struct iris_hfi_device *dev)
+{
+	int rc = 0;
+	struct cvp_hfi_mem_map_table *qdss;
+	struct cvp_hfi_mem_map *mem_map;
+	struct cvp_mem_addr *mem_addr;
+	int num_entries = dev->res->qdss_addr_set.count;
+	phys_addr_t fw_bias = 0;
+	size_t q_size;
+	unsigned long mem_map_table_base_addr;
+	struct context_bank_info *cb;
+
+	q_size = SHARED_QSIZE - ALIGNED_SFR_SIZE - ALIGNED_QDSS_SIZE;
+	mem_addr = &dev->mem_addr;
+	if (!is_iommu_present(dev->res))
+		fw_bias = dev->cvp_hal_data->firmware_base;
+
+	if (dev->iface_q_table.align_virtual_addr) {
+		memset((void *)dev->iface_q_table.align_virtual_addr,
+				0, q_size);
+		goto hfi_queue_init;
+	}
+	rc = __smem_alloc(dev, mem_addr, q_size, 1, SMEM_UNCACHED);
+	if (rc) {
+		dprintk(CVP_ERR, "iface_q_table_alloc_fail\n");
+		goto fail_alloc_queue;
+	}
+
+	dev->iface_q_table.align_virtual_addr = mem_addr->align_virtual_addr;
+	dev->iface_q_table.align_device_addr = mem_addr->align_device_addr -
+					fw_bias;
+	dev->iface_q_table.mem_size = CVP_IFACEQ_TABLE_SIZE;
+	dev->iface_q_table.mem_data = mem_addr->mem_data;
+
+hfi_queue_init:
+	__hfi_queue_init(dev);
+
+	if (dev->sfr.align_virtual_addr) {
+		memset((void *)dev->sfr.align_virtual_addr,
+				0, ALIGNED_SFR_SIZE);
+		goto sfr_init;
+	}
+	rc = __smem_alloc(dev, mem_addr, ALIGNED_SFR_SIZE, 1, SMEM_UNCACHED);
+	if (rc) {
+		dprintk(CVP_WARN, "sfr_alloc_fail: SFR will not work\n");
+		dev->sfr.align_device_addr = 0;
+	} else {
+		dev->sfr.align_device_addr = mem_addr->align_device_addr -
+					fw_bias;
+		dev->sfr.align_virtual_addr = mem_addr->align_virtual_addr;
+		dev->sfr.mem_size = ALIGNED_SFR_SIZE;
+		dev->sfr.mem_data = mem_addr->mem_data;
+	}
+sfr_init:
+	__sfr_init(dev);
+
+	if (dev->qdss.align_virtual_addr)
+		goto dsp_hfi_queue_init;
+
+	if ((msm_cvp_fw_debug_mode & HFI_DEBUG_MODE_QDSS) && num_entries) {
+		rc = __smem_alloc(dev, mem_addr, ALIGNED_QDSS_SIZE, 1,
+				SMEM_UNCACHED);
+		if (rc) {
+			dprintk(CVP_WARN,
+				"qdss_alloc_fail: QDSS message logging will not work\n");
+			dev->qdss.align_device_addr = 0;
+		} else {
+			dev->qdss.align_device_addr =
+				mem_addr->align_device_addr - fw_bias;
+			dev->qdss.align_virtual_addr =
+				mem_addr->align_virtual_addr;
+			dev->qdss.mem_size = ALIGNED_QDSS_SIZE;
+			dev->qdss.mem_data = mem_addr->mem_data;
+		}
+	}
+
+	if (dev->qdss.align_virtual_addr) {
+		qdss =
+		(struct cvp_hfi_mem_map_table *)dev->qdss.align_virtual_addr;
+		qdss->mem_map_num_entries = num_entries;
+		mem_map_table_base_addr = dev->qdss.align_device_addr +
+			sizeof(struct cvp_hfi_mem_map_table);
+		qdss->mem_map_table_base_addr = mem_map_table_base_addr;
+
+		mem_map = (struct cvp_hfi_mem_map *)(qdss + 1);
+		cb = msm_cvp_smem_get_context_bank(dev->res, 0);
+		if (!cb) {
+			dprintk(CVP_ERR,
+				"%s: failed to get context bank\n", __func__);
+			return -EINVAL;
+		}
+
+		rc = __get_qdss_iommu_virtual_addr(dev, mem_map, cb->domain);
+		if (rc) {
+			dprintk(CVP_ERR,
+				"IOMMU mapping failed, Freeing qdss memdata\n");
+			__smem_free(dev, &dev->qdss.mem_data);
+			dev->qdss.align_virtual_addr = NULL;
+			dev->qdss.align_device_addr = 0;
+		}
+	}
+
+dsp_hfi_queue_init:
+	rc = __interface_dsp_queues_init(dev);
+	if (rc) {
+		dprintk(CVP_ERR, "dsp_queues_init failed\n");
+		goto fail_alloc_queue;
+	}
+
+	__setup_ucregion_memory_map(dev);
+	return 0;
+fail_alloc_queue:
+	return -ENOMEM;
+}
+
+static int __sys_set_debug(struct iris_hfi_device *device, u32 debug)
+{
+	u8 packet[CVP_IFACEQ_VAR_SMALL_PKT_SIZE];
+	int rc = 0;
+	struct cvp_hfi_cmd_sys_set_property_packet *pkt =
+		(struct cvp_hfi_cmd_sys_set_property_packet *) &packet;
+
+	rc = call_hfi_pkt_op(device, sys_debug_config, pkt, debug);
+	if (rc) {
+		dprintk(CVP_WARN,
+			"Debug mode setting to FW failed\n");
+		return -ENOTEMPTY;
+	}
+
+	if (__iface_cmdq_write(device, pkt))
+		return -ENOTEMPTY;
+	return 0;
+}
+
+static int __sys_set_idle_indicator(struct iris_hfi_device *device,
+	bool enable)
+{
+	u8 packet[CVP_IFACEQ_VAR_SMALL_PKT_SIZE];
+	int rc = 0;
+	struct cvp_hfi_cmd_sys_set_property_packet *pkt =
+		(struct cvp_hfi_cmd_sys_set_property_packet *) &packet;
+
+	rc = call_hfi_pkt_op(device, sys_set_idle_indicator, pkt, enable);
+	if (__iface_cmdq_write(device, pkt))
+		return -ENOTEMPTY;
+	return 0;
+}
+
+static int __sys_set_coverage(struct iris_hfi_device *device, u32 mode)
+{
+	u8 packet[CVP_IFACEQ_VAR_SMALL_PKT_SIZE];
+	int rc = 0;
+	struct cvp_hfi_cmd_sys_set_property_packet *pkt =
+		(struct cvp_hfi_cmd_sys_set_property_packet *) &packet;
+
+	rc = call_hfi_pkt_op(device, sys_coverage_config,
+			pkt, mode);
+	if (rc) {
+		dprintk(CVP_WARN,
+			"Coverage mode setting to FW failed\n");
+		return -ENOTEMPTY;
+	}
+
+	if (__iface_cmdq_write(device, pkt)) {
+		dprintk(CVP_WARN, "Failed to send coverage pkt to f/w\n");
+		return -ENOTEMPTY;
+	}
+
+	return 0;
+}
+
+static int __sys_set_power_control(struct iris_hfi_device *device,
+	bool enable)
+{
+	struct regulator_info *rinfo;
+	bool supported = false;
+	u8 packet[CVP_IFACEQ_VAR_SMALL_PKT_SIZE];
+	struct cvp_hfi_cmd_sys_set_property_packet *pkt =
+		(struct cvp_hfi_cmd_sys_set_property_packet *) &packet;
+
+	iris_hfi_for_each_regulator(device, rinfo) {
+		if (rinfo->has_hw_power_collapse) {
+			supported = true;
+			break;
+		}
+	}
+
+	if (!supported)
+		return 0;
+
+	call_hfi_pkt_op(device, sys_power_control, pkt, enable);
+	if (__iface_cmdq_write(device, pkt))
+		return -ENOTEMPTY;
+	return 0;
+}
+
+static void cvp_pm_qos_update(struct iris_hfi_device *device, bool vote_on)
+{
+	u32 latency, off_vote_cnt;
+	int i, err = 0;
+
+	spin_lock(&device->res->pm_qos.lock);
+	off_vote_cnt = device->res->pm_qos.off_vote_cnt;
+	spin_unlock(&device->res->pm_qos.lock);
+
+	if (vote_on && off_vote_cnt)
+		return;
+
+	latency = vote_on ? device->res->pm_qos.latency_us :
+			PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
+
+	if (device->res->pm_qos.latency_us && device->res->pm_qos.pm_qos_hdls)
+		for (i = 0; i < device->res->pm_qos.silver_count; i++) {
+			if (!cpu_possible(device->res->pm_qos.silver_cores[i]))
+				continue;
+			err = dev_pm_qos_update_request(
+				&device->res->pm_qos.pm_qos_hdls[i],
+				latency);
+			if (err < 0) {
+				if (vote_on) {
+					dprintk(CVP_WARN,
+						"pm qos on failed %d\n", err);
+				} else {
+					dprintk(CVP_WARN,
+						"pm qos off failed %d\n", err);
+				}
+			}
+		}
+}
+
+static int iris_pm_qos_update(void *device)
+{
+	struct iris_hfi_device *dev;
+
+	if (!device) {
+		dprintk(CVP_ERR, "%s Invalid device\n", __func__);
+		return -ENODEV;
+	}
+
+	dev = device;
+
+	mutex_lock(&dev->lock);
+	cvp_pm_qos_update(dev, true);
+	mutex_unlock(&dev->lock);
+
+	return 0;
+}
+
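+/*
+ * Map the hardware-fence related register regions (ipclite, hwmutex,
+ * AON, timer) into the CVP SMMU at fixed IOVAs taken from the resource
+ * table; regions with a zero physical address are skipped.
+ */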
+static int __hwfence_regs_map(struct iris_hfi_device *device)
+{
+	int rc = 0;
+	struct context_bank_info *cb;
+
+	cb = msm_cvp_smem_get_context_bank(device->res, 0);
+	if (!cb) {
+		dprintk(CVP_ERR, "%s: fail to get cb\n", __func__);
+		return -EINVAL;
+	}
+
+	if (device->res->reg_mappings.ipclite_phyaddr != 0) {
+		rc = iommu_map(cb->domain,
+			device->res->reg_mappings.ipclite_iova,
+			device->res->reg_mappings.ipclite_phyaddr,
+			device->res->reg_mappings.ipclite_size,
+			IOMMU_READ | IOMMU_WRITE);
+		if (rc) {
+			dprintk(CVP_ERR, "map ipclite fail %d %#x %#x %#x\n",
+				rc, device->res->reg_mappings.ipclite_iova,
+				device->res->reg_mappings.ipclite_phyaddr,
+				device->res->reg_mappings.ipclite_size);
+			return rc;
+		}
+	}
+	if (device->res->reg_mappings.hwmutex_phyaddr != 0) {
+		rc = iommu_map(cb->domain,
+			device->res->reg_mappings.hwmutex_iova,
+			device->res->reg_mappings.hwmutex_phyaddr,
+			device->res->reg_mappings.hwmutex_size,
+			IOMMU_MMIO | IOMMU_READ | IOMMU_WRITE);
+		if (rc) {
+			dprintk(CVP_ERR, "map hwmutex fail %d %#x %#x %#x\n",
+				rc, device->res->reg_mappings.hwmutex_iova,
+				device->res->reg_mappings.hwmutex_phyaddr,
+				device->res->reg_mappings.hwmutex_size);
+			return rc;
+		}
+	}
+	if (device->res->reg_mappings.aon_phyaddr != 0) {
+		rc = iommu_map(cb->domain,
+			device->res->reg_mappings.aon_iova,
+			device->res->reg_mappings.aon_phyaddr,
+			device->res->reg_mappings.aon_size,
+			IOMMU_MMIO | IOMMU_READ | IOMMU_WRITE);
+		if (rc) {
+			dprintk(CVP_ERR, "map aon fail %d %#x %#x %#x\n",
+				rc, device->res->reg_mappings.aon_iova,
+				device->res->reg_mappings.aon_phyaddr,
+				device->res->reg_mappings.aon_size);
+			return rc;
+		}
+	}
+	if (device->res->reg_mappings.timer_phyaddr != 0) {
+		rc = iommu_map(cb->domain,
+			device->res->reg_mappings.timer_iova,
+			device->res->reg_mappings.timer_phyaddr,
+			device->res->reg_mappings.timer_size,
+			IOMMU_MMIO | IOMMU_READ | IOMMU_WRITE);
+		if (rc) {
+			dprintk(CVP_ERR, "map timer fail %d %#x %#x %#x\n",
+				rc, device->res->reg_mappings.timer_iova,
+				device->res->reg_mappings.timer_phyaddr,
+				device->res->reg_mappings.timer_size);
+			return rc;
+		}
+	}
+	return rc;
+}
+
+static int __hwfence_regs_unmap(struct iris_hfi_device *device)
+{
+	int rc = 0;
+	struct context_bank_info *cb;
+
+	cb = msm_cvp_smem_get_context_bank(device->res, 0);
+	if (!cb) {
+		dprintk(CVP_ERR, "%s: fail to get cb\n", __func__);
+		return -EINVAL;
+	}
+
+	if (device->res->reg_mappings.ipclite_iova != 0) {
+		iommu_unmap(cb->domain,
+			device->res->reg_mappings.ipclite_iova,
+			device->res->reg_mappings.ipclite_size);
+	}
+	if (device->res->reg_mappings.hwmutex_iova != 0) {
+		iommu_unmap(cb->domain,
+			device->res->reg_mappings.hwmutex_iova,
+			device->res->reg_mappings.hwmutex_size);
+	}
+	if (device->res->reg_mappings.aon_iova != 0) {
+		iommu_unmap(cb->domain,
+			device->res->reg_mappings.aon_iova,
+			device->res->reg_mappings.aon_size);
+	}
+	if (device->res->reg_mappings.timer_iova != 0) {
+		iommu_unmap(cb->domain,
+			device->res->reg_mappings.timer_iova,
+			device->res->reg_mappings.timer_size);
+	}
+	return rc;
+}
+
+static int iris_hfi_core_init(void *device)
+{
+	int rc = 0;
+	u32 ipcc_iova;
+	struct cvp_hfi_cmd_sys_init_packet pkt;
+	struct cvp_hfi_cmd_sys_get_property_packet version_pkt;
+	struct iris_hfi_device *dev;
+
+	if (!device) {
+		dprintk(CVP_ERR, "Invalid device\n");
+		return -ENODEV;
+	}
+
+	dev = device;
+
+	dprintk(CVP_CORE, "Core initializing\n");
+
+	pm_stay_awake(dev->res->pdev->dev.parent);
+	mutex_lock(&dev->lock);
+
+	dev->bus_vote.data =
+		kzalloc(sizeof(struct cvp_bus_vote_data), GFP_KERNEL);
+	if (!dev->bus_vote.data) {
+		dprintk(CVP_ERR, "Failed to allocate bus vote data\n");
+		rc = -ENOMEM;
+		goto err_no_mem;
+	}
+
+	dev->bus_vote.data_count = 1;
+	dev->bus_vote.data->power_mode = CVP_POWER_TURBO;
+
+	__hwfence_regs_map(dev);
+
+	rc = __power_on_init(dev);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to power on init EVA\n");
+		goto err_load_fw;
+	}
+
+	rc = cvp_synx_recover();
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to recover synx\n");
+		goto err_core_init;
+	}
+
+	/* mmrm registration */
+	if (msm_cvp_mmrm_enabled) {
+		rc = msm_cvp_mmrm_register(device);
+		if (rc) {
+			dprintk(CVP_ERR, "Failed to register mmrm client\n");
+			goto err_core_init;
+		}
+	}
+	__set_state(dev, IRIS_STATE_INIT);
+	dev->reg_dumped = false;
+
+	dprintk(CVP_CORE, "Dev_Virt: %pa, Reg_Virt: %pK\n",
+		&dev->cvp_hal_data->firmware_base,
+		dev->cvp_hal_data->register_base);
+
+	rc = __interface_queues_init(dev);
+	if (rc) {
+		dprintk(CVP_ERR, "failed to init queues\n");
+		rc = -ENOMEM;
+		goto err_core_init;
+	}
+	cvp_register_va_md_region();
+
+	// Add node for dev struct
+	add_va_node_to_list(CVP_QUEUE_DUMP, dev,
+			sizeof(struct iris_hfi_device),
+			"iris_hfi_device-dev", false);
+	add_queue_header_to_va_md_list((void*)dev);
+	add_hfi_queue_to_va_md_list((void*)dev);
+
+	rc = msm_cvp_map_ipcc_regs(&ipcc_iova);
+	if (!rc) {
+		dprintk(CVP_CORE, "IPCC iova 0x%x\n", ipcc_iova);
+		__write_register(dev, CVP_MMAP_ADDR, ipcc_iova);
+	}
+
+	rc = __load_fw(dev);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to load Iris FW\n");
+		goto err_core_init;
+	}
+
+	rc = __boot_firmware(dev);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to start core\n");
+		rc = -ENODEV;
+		goto err_core_init;
+	}
+
+	dev->version = __read_register(dev, CVP_VERSION_INFO);
+
+	rc =  call_hfi_pkt_op(dev, sys_init, &pkt, 0);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to create sys init pkt\n");
+		goto err_core_init;
+	}
+
+	if (__iface_cmdq_write(dev, &pkt)) {
+		rc = -ENOTEMPTY;
+		goto err_core_init;
+	}
+
+	rc = call_hfi_pkt_op(dev, sys_image_version, &version_pkt);
+	if (rc || __iface_cmdq_write(dev, &version_pkt))
+		dprintk(CVP_WARN, "Failed to send image version pkt to f/w\n");
+
+	__sys_set_debug(device, msm_cvp_fw_debug);
+
+	__enable_subcaches(device);
+	__set_subcaches(device);
+
+	__set_ubwc_config(device);
+	__sys_set_idle_indicator(device, true);
+
+	if (dev->res->pm_qos.latency_us) {
+		int err = 0;
+		u32 i, cpu;
+
+		dev->res->pm_qos.pm_qos_hdls = kcalloc(
+				dev->res->pm_qos.silver_count,
+				sizeof(struct dev_pm_qos_request),
+				GFP_KERNEL);
+
+		if (!dev->res->pm_qos.pm_qos_hdls) {
+			dprintk(CVP_WARN, "Failed to allocate pm_qos_hdls\n");
+			goto pm_qos_bail;
+		}
+
+		for (i = 0; i < dev->res->pm_qos.silver_count; i++) {
+			cpu = dev->res->pm_qos.silver_cores[i];
+			if (!cpu_possible(cpu))
+				continue;
+			err = dev_pm_qos_add_request(
+				get_cpu_device(cpu),
+				&dev->res->pm_qos.pm_qos_hdls[i],
+				DEV_PM_QOS_RESUME_LATENCY,
+				dev->res->pm_qos.latency_us);
+			if (err < 0)
+				dprintk(CVP_WARN,
+					"%s pm_qos_add_req %d failed\n",
+					__func__, i);
+		}
+	}
+
+pm_qos_bail:
+	mutex_unlock(&dev->lock);
+
+	cvp_dsp_send_hfi_queue();
+
+	pm_relax(dev->res->pdev->dev.parent);
+	dprintk(CVP_CORE, "Core inited successfully\n");
+
+	return 0;
+
+err_core_init:
+	__set_state(dev, IRIS_STATE_DEINIT);
+	__unload_fw(dev);
+	if (dev->mmrm_cvp)
+	{
+		msm_cvp_mmrm_deregister(dev);
+	}
+err_load_fw:
+	__hwfence_regs_unmap(dev);
+err_no_mem:
+	dprintk(CVP_ERR, "Core init failed\n");
+	mutex_unlock(&dev->lock);
+	pm_relax(dev->res->pdev->dev.parent);
+	return rc;
+}
+
+static int iris_hfi_core_release(void *dev)
+{
+	int rc = 0, i;
+	struct iris_hfi_device *device = dev;
+	struct cvp_hal_session *session, *next;
+	struct dev_pm_qos_request *qos_hdl;
+	u32 ipcc_iova;
+
+	if (!device) {
+		dprintk(CVP_ERR, "invalid device\n");
+		return -ENODEV;
+	}
+
+	mutex_lock(&device->lock);
+	dprintk(CVP_WARN, "Core releasing\n");
+	if (device->res->pm_qos.latency_us &&
+		device->res->pm_qos.pm_qos_hdls) {
+		for (i = 0; i < device->res->pm_qos.silver_count; i++) {
+			if (!cpu_possible(device->res->pm_qos.silver_cores[i]))
+				continue;
+			qos_hdl = &device->res->pm_qos.pm_qos_hdls[i];
+			if ((qos_hdl != NULL) && dev_pm_qos_request_active(qos_hdl))
+				dev_pm_qos_remove_request(qos_hdl);
+		}
+		kfree(device->res->pm_qos.pm_qos_hdls);
+		device->res->pm_qos.pm_qos_hdls = NULL;
+	}
+
+	__resume(device);
+	__set_state(device, IRIS_STATE_DEINIT);
+	rc = __tzbsp_set_cvp_state(TZ_SUBSYS_STATE_SUSPEND);
+	if (rc)
+		dprintk(CVP_WARN, "Failed to suspend cvp FW: %d\n", rc);
+
+	__dsp_shutdown(device);
+
+	__disable_subcaches(device);
+	ipcc_iova = __read_register(device, CVP_MMAP_ADDR);
+	msm_cvp_unmap_ipcc_regs(ipcc_iova);
+	__unload_fw(device);
+	__hwfence_regs_unmap(device);
+
+	if (msm_cvp_mmrm_enabled) {
+		rc = msm_cvp_mmrm_deregister(device);
+		if (rc) {
+			dprintk(CVP_ERR,
+				"%s: Failed msm_cvp_mmrm_deregister:%d\n",
+				__func__, rc);
+		}
+	}
+
+	/* unlink all sessions from device */
+	list_for_each_entry_safe(session, next, &device->sess_head, list) {
+		list_del(&session->list);
+		session->device = NULL;
+	}
+
+	dprintk(CVP_CORE, "Core released successfully\n");
+	mutex_unlock(&device->lock);
+
+	return rc;
+}
+
+static void __core_clear_interrupt(struct iris_hfi_device *device)
+{
+	u32 intr_status = 0, mask = 0;
+	int i = 0;
+
+	if (!device) {
+		dprintk(CVP_ERR, "%s: NULL device\n", __func__);
+		return;
+	}
+
+	i = call_iris_op(device, reset_control_acquire_name, device, "cvp_xo_reset");
+	if (i) {
+		dprintk(CVP_WARN, "%s Fail acquire xo_reset at %d\n", __func__, __LINE__);
+		return;
+	}
+	intr_status = __read_register(device, CVP_WRAPPER_INTR_STATUS);
+	call_iris_op(device, reset_control_release_name, device, "cvp_xo_reset");
+
+	mask = (CVP_WRAPPER_INTR_MASK_A2HCPU_BMSK | CVP_FATAL_INTR_BMSK);
+
+	if (intr_status & mask) {
+		device->intr_status |= intr_status;
+		device->reg_count++;
+		dprintk(CVP_CORE,
+			"INTERRUPT for device: %pK: times: %d status: %d\n",
+			device, device->reg_count, intr_status);
+	} else {
+		device->spur_count++;
+	}
+
+	__write_register(device, CVP_CPU_CS_A2HSOFTINTCLR, 1);
+}
+
+static int iris_hfi_core_trigger_ssr(void *device,
+		enum hal_ssr_trigger_type type)
+{
+	struct cvp_hfi_cmd_sys_test_ssr_packet pkt;
+	int rc = 0;
+	struct iris_hfi_device *dev;
+
+	cvp_free_va_md_list();
+	if (!device) {
+		dprintk(CVP_ERR, "invalid device\n");
+		return -ENODEV;
+	}
+
+	dev = device;
+	if (mutex_trylock(&dev->lock)) {
+		rc = call_hfi_pkt_op(dev, ssr_cmd, type, &pkt);
+		if (rc) {
+			dprintk(CVP_ERR, "%s: failed to create packet\n",
+					__func__);
+			goto err_create_pkt;
+		}
+
+		if (__iface_cmdq_write(dev, &pkt))
+			rc = -ENOTEMPTY;
+	} else {
+		return -EAGAIN;
+	}
+
+err_create_pkt:
+	mutex_unlock(&dev->lock);
+	return rc;
+}
+
+static void __set_default_sys_properties(struct iris_hfi_device *device)
+{
+	if (__sys_set_debug(device, msm_cvp_fw_debug))
+		dprintk(CVP_WARN, "Setting fw_debug msg ON failed\n");
+	if (__sys_set_power_control(device, msm_cvp_fw_low_power_mode))
+		dprintk(CVP_WARN, "Setting h/w power collapse ON failed\n");
+}
+
+static void __session_clean(struct cvp_hal_session *session)
+{
+	struct cvp_hal_session *temp, *next;
+	struct iris_hfi_device *device;
+
+	if (!session || !session->device) {
+		dprintk(CVP_WARN, "%s: invalid params\n", __func__);
+		return;
+	}
+	device = session->device;
+	dprintk(CVP_SESS, "deleting the session: %pK\n", session);
+	/*
+	 * session might have been removed from the device list in
+	 * core_release, so check and remove if it is in the list
+	 */
+	list_for_each_entry_safe(temp, next, &device->sess_head, list) {
+		if (session == temp) {
+			list_del(&session->list);
+			break;
+		}
+	}
+	/* Poison the session handle with zeros */
+	*session = (struct cvp_hal_session){ {0} };
+	kfree(session);
+}
+
+static int iris_hfi_session_clean(void *session)
+{
+	struct cvp_hal_session *sess_close;
+	struct iris_hfi_device *device;
+
+	if (!session || session == (void *)0xdeadbeef) {
+		dprintk(CVP_ERR, "Invalid Params %s\n", __func__);
+		return -EINVAL;
+	}
+
+	sess_close = session;
+	device = sess_close->device;
+
+	if (!device) {
+		dprintk(CVP_ERR, "Invalid device handle %s\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&device->lock);
+
+	__session_clean(sess_close);
+
+	mutex_unlock(&device->lock);
+	return 0;
+}
+
+static int iris_debug_hook(void *device)
+{
+	struct iris_hfi_device *dev = device;
+	u32 val;
+
+	if (!device) {
+		dprintk(CVP_ERR, "%s Invalid device\n", __func__);
+		return -ENODEV;
+	}
+	//__write_register(dev, CVP_WRAPPER_CORE_CLOCK_CONFIG, 0x11);
+	//__write_register(dev, CVP_WRAPPER_TZ_CPU_CLOCK_CONFIG, 0x1);
+	dprintk(CVP_ERR, "Halt Tensilica, core and AXI\n");
+	return 0;
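+
+	/* The register dumps below are unreachable by default; kept for
+	 * manual debugging of secure-enable and CP configuration state.
+	 */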
+
+	/******* FDU & MPU *****/
+#define CVP0_CVP_SS_FDU_SECURE_ENABLE 0x90
+#define CVP0_CVP_SS_MPU_SECURE_ENABLE 0x94
+#define CVP0_CVP_SS_ARP_THREAD_0_SECURE_ENABLE 0xA0
+#define CVP0_CVP_SS_ARP_THREAD_1_SECURE_ENABLE 0xA4
+#define CVP0_CVP_SS_ARP_THREAD_2_SECURE_ENABLE 0xA8
+#define CVP0_CVP_SS_ARP_THREAD_3_SECURE_ENABLE 0xAC
+	val = __read_register(dev, CVP0_CVP_SS_FDU_SECURE_ENABLE);
+	dprintk(CVP_ERR, "FDU_SECURE_ENABLE %#x\n", val);
+
+	val = __read_register(dev, CVP0_CVP_SS_MPU_SECURE_ENABLE);
+	dprintk(CVP_ERR, "MPU_SECURE_ENABLE %#x\n", val);
+
+	val = __read_register(dev, CVP0_CVP_SS_ARP_THREAD_0_SECURE_ENABLE);
+	dprintk(CVP_ERR, "ARP_THREAD_0_SECURE_ENABLE %#x\n", val);
+
+	val = __read_register(dev, CVP0_CVP_SS_ARP_THREAD_1_SECURE_ENABLE);
+	dprintk(CVP_ERR, "ARP_THREAD_1_SECURE_ENABLE %#x\n", val);
+
+	val = __read_register(dev, CVP0_CVP_SS_ARP_THREAD_2_SECURE_ENABLE);
+	dprintk(CVP_ERR, "ARP_THREAD_2_SECURE_ENABLE %#x\n", val);
+
+	val = __read_register(dev, CVP0_CVP_SS_ARP_THREAD_3_SECURE_ENABLE);
+	dprintk(CVP_ERR, "ARP_THREAD_3_SECURE_ENABLE %#x\n", val);
+
+	if (true)
+		return 0;
+	/***** GCE *******
+	 * Bit 0 of below register is CDM secure enable for GCE
+	 * CDM buffer will be in CB4 if set
+	 */
+#define CVP_GCE_GCE_SS_CP_CTL	0x51100
+	 /* STATUS bit0 && CFG bit 4 of below register set,
+	  * expect pixel buffers in CB3,
+	  * otherwise in CB0
+	  * CFG bit 9:8 b01 -> LMC input in CB3
+	  * CFG bit 9:8 b10 -> LMC input in CB4
+	  */
+#define CVP_GCE0_CP_STATUS	0x51080
+#define CVP_GCE0_BIU_RD_INPUT_IF_SECURITY_CFG	0x52020
+
+	val = __read_register(dev, CVP_GCE_GCE_SS_CP_CTL);
+	dprintk(CVP_ERR, "CVP_GCE_GCE_SS_CP_CTL %#x\n", val);
+	val = __read_register(dev, CVP_GCE0_CP_STATUS);
+	dprintk(CVP_ERR, "CVP_GCE0_CP_STATUS %#x\n", val);
+	val = __read_register(dev, CVP_GCE0_BIU_RD_INPUT_IF_SECURITY_CFG);
+	dprintk(CVP_ERR, "CVP_GCE0_BIU_RD_INPUT_IF_SECURITY_CFG %#x\n", val);
+	  /***** RGE *****
+	   * Bit 0 of below register is CDM secure enable for RGE
+	   * CDM buffer will be in CB4 if set
+	   */
+#define CVP_RGE0_TOPRGE_CP_CTL	0x31010
+	   /* CFG bit 4 && IN bit 0:
+	    * if both are set, expect CB3 or CB4 depending on IN 6:4 field
+	    * either is clear, expect CB0
+	    */
+#define CVP_RGE0_BUS_RD_INPUT_IF_SECURITY_CFG  0x32020
+#define CVP_RGE0_TOPSPARE_IN	0x311F4
+
+	val = __read_register(dev, CVP_RGE0_TOPRGE_CP_CTL);
+	dprintk(CVP_ERR, "CVP_RGE0_TOPRGE_CP_CTL %#x\n", val);
+	val = __read_register(dev, CVP_RGE0_BUS_RD_INPUT_IF_SECURITY_CFG);
+	dprintk(CVP_ERR, "CVP_RGE0_BUS_RD_INPUT_IF_SECURITY_CFG %#x\n", val);
+	val = __read_register(dev, CVP_RGE0_TOPSPARE_IN);
+	dprintk(CVP_ERR, "CVP_RGE0_TOPSPARE_IN %#x\n", val);
+	/****** VADL ******
+	 * Bit 0 of below register is CDM secure enable for VADL
+	 * CDM buffer will be in CB4 if set
+	 */
+#define CVP_VADL0_VADL_SS_CP_CTL	0x21010
+	/* Below registers are used the same way as RGE */
+#define CVP_VADL0_BUS_RD_INPUT_IF_SECURITY_CFG	0x22020
+#define CVP_VADL0_SPARE_IN	0x211F4
+
+	val = __read_register(dev, CVP_VADL0_VADL_SS_CP_CTL);
+	dprintk(CVP_ERR, "CVP_VADL0_VADL_SS_CP_CTL %#x\n", val);
+	val = __read_register(dev, CVP_VADL0_BUS_RD_INPUT_IF_SECURITY_CFG);
+	dprintk(CVP_ERR, "CVP_VADL0_BUS_RD_INPUT_IF_SECURITY_CFG %#x\n", val);
+	val = __read_register(dev, CVP_VADL0_SPARE_IN);
+	dprintk(CVP_ERR, "CVP_VADL0_SPARE_IN %#x\n", val);
+	/****** ITOF *****
+	 * Below registers are used the same way as RGE
+	 */
+#define CVP_ITOF0_TOF_SS_CP_CTL 0x41010
+#define CVP_ITOF0_BUS_RD_INPUT_IF_SECURITY_CFG	0x42020
+#define CVP_ITOF0_TOF_SS_SPARE_IN 0x411F4
+
+	val = __read_register(dev, CVP_ITOF0_TOF_SS_CP_CTL);
+	dprintk(CVP_ERR, "CVP_ITOF0_TOF_SS_CP_CTL %#x\n", val);
+	val = __read_register(dev, CVP_ITOF0_BUS_RD_INPUT_IF_SECURITY_CFG);
+	dprintk(CVP_ERR, "CVP_ITOF0_BUS_RD_INPUT_IF_SECURITY_CFG %#x\n", val);
+	val = __read_register(dev, CVP_ITOF0_TOF_SS_SPARE_IN);
+	dprintk(CVP_ERR, "CVP_ITOF0_TOF_SS_SPARE_IN %#x\n", val);
+
+	return 0;
+}
+
+static int iris_hfi_session_init(void *device, void *session_id,
+		void **new_session)
+{
+	struct cvp_hfi_cmd_sys_session_init_packet pkt;
+	struct iris_hfi_device *dev;
+	struct cvp_hal_session *s;
+
+	if (!device || !new_session) {
+		dprintk(CVP_ERR, "%s - invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	dev = device;
+	mutex_lock(&dev->lock);
+
+	s = kzalloc(sizeof(*s), GFP_KERNEL);
+	if (!s) {
+		dprintk(CVP_ERR, "new session fail: Out of memory\n");
+		goto err_session_init_fail;
+	}
+
+	s->session_id = session_id;
+	s->device = dev;
+	dprintk(CVP_SESS,
+		"%s: inst %pK, session %pK\n", __func__, session_id, s);
+
+	list_add_tail(&s->list, &dev->sess_head);
+
+	__set_default_sys_properties(device);
+
+	if (call_hfi_pkt_op(dev, session_init, &pkt, s)) {
+		dprintk(CVP_ERR, "session_init: failed to create packet\n");
+		goto err_session_init_fail;
+	}
+
+	*new_session = s;
+	if (__iface_cmdq_write(dev, &pkt))
+		goto err_session_init_fail;
+
+	mutex_unlock(&dev->lock);
+	return 0;
+
+err_session_init_fail:
+	if (s)
+		__session_clean(s);
+	*new_session = NULL;
+	mutex_unlock(&dev->lock);
+	return -EINVAL;
+}
+
+static int __send_session_cmd(struct cvp_hal_session *session, int pkt_type)
+{
+	struct cvp_hal_session_cmd_pkt pkt;
+	int rc = 0;
+	struct iris_hfi_device *device = session->device;
+
+	if (!__is_session_valid(device, session, __func__))
+		return -ECONNRESET;
+
+	rc = call_hfi_pkt_op(device, session_cmd,
+			&pkt, pkt_type, session);
+	if (rc == -EPERM)
+		return 0;
+
+	if (rc) {
+		dprintk(CVP_ERR, "send session cmd: create pkt failed\n");
+		goto err_create_pkt;
+	}
+
+	if (__iface_cmdq_write(session->device, &pkt))
+		rc = -ENOTEMPTY;
+
+err_create_pkt:
+	return rc;
+}
+
+static int iris_hfi_session_end(void *session)
+{
+	struct cvp_hal_session *sess;
+	struct iris_hfi_device *device;
+	int rc = 0;
+
+	if (!session) {
+		dprintk(CVP_ERR, "Invalid Params %s\n", __func__);
+		return -EINVAL;
+	}
+
+	sess = session;
+	device = sess->device;
+	if (!device) {
+		dprintk(CVP_ERR, "Invalid session %s\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&device->lock);
+
+	if (msm_cvp_fw_coverage) {
+		if (__sys_set_coverage(sess->device, msm_cvp_fw_coverage))
+			dprintk(CVP_WARN, "Fw_coverage msg ON failed\n");
+	}
+
+	rc = __send_session_cmd(session, HFI_CMD_SYS_SESSION_END);
+
+	mutex_unlock(&device->lock);
+
+	return rc;
+}
+
+static int iris_hfi_session_abort(void *sess)
+{
+	struct cvp_hal_session *session = sess;
+	struct iris_hfi_device *device;
+	int rc = 0;
+
+	if (!session || !session->device) {
+		dprintk(CVP_ERR, "Invalid Params %s\n", __func__);
+		return -EINVAL;
+	}
+
+	device = session->device;
+
+	mutex_lock(&device->lock);
+
+	rc = __send_session_cmd(session, HFI_CMD_SYS_SESSION_ABORT);
+
+	mutex_unlock(&device->lock);
+
+	return rc;
+}
+
+static int iris_hfi_session_set_buffers(void *sess, u32 iova, u32 size)
+{
+	struct cvp_hfi_cmd_session_set_buffers_packet pkt;
+	int rc = 0;
+	struct cvp_hal_session *session = sess;
+	struct iris_hfi_device *device;
+
+	if (!session || !session->device || !iova || !size) {
+		dprintk(CVP_ERR, "Invalid Params\n");
+		return -EINVAL;
+	}
+
+	device = session->device;
+	mutex_lock(&device->lock);
+
+	if (!__is_session_valid(device, session, __func__)) {
+		rc = -ECONNRESET;
+		goto err_create_pkt;
+	}
+
+	rc = call_hfi_pkt_op(device, session_set_buffers,
+			&pkt, session, iova, size);
+	if (rc) {
+		dprintk(CVP_ERR, "set buffers: failed to create packet\n");
+		goto err_create_pkt;
+	}
+
+	if (__iface_cmdq_write(session->device, &pkt))
+		rc = -ENOTEMPTY;
+
+err_create_pkt:
+	mutex_unlock(&device->lock);
+	return rc;
+}
+
+static int iris_hfi_session_release_buffers(void *sess)
+{
+	struct cvp_session_release_buffers_packet pkt;
+	int rc = 0;
+	struct cvp_hal_session *session = sess;
+	struct iris_hfi_device *device;
+
+	if (!session || session == (void *)0xdeadbeef || !session->device) {
+		dprintk(CVP_ERR, "Invalid Params\n");
+		return -EINVAL;
+	}
+
+	device = session->device;
+	mutex_lock(&device->lock);
+
+	if (!__is_session_valid(device, session, __func__)) {
+		rc = -ECONNRESET;
+		goto err_create_pkt;
+	}
+
+	rc = call_hfi_pkt_op(device, session_release_buffers, &pkt, session);
+	if (rc) {
+		dprintk(CVP_ERR, "release buffers: failed to create packet\n");
+		goto err_create_pkt;
+	}
+
+	if (__iface_cmdq_write(session->device, &pkt))
+		rc = -ENOTEMPTY;
+
+err_create_pkt:
+	mutex_unlock(&device->lock);
+	return rc;
+}
+
+static int iris_hfi_session_send(void *sess,
+		struct eva_kmd_hfi_packet *in_pkt)
+{
+	int rc = 0;
+	struct eva_kmd_hfi_packet pkt;
+	struct cvp_hal_session *session = sess;
+	struct iris_hfi_device *device;
+
+	if (!session || !session->device) {
+		dprintk(CVP_ERR, "invalid session");
+		return -ENODEV;
+	}
+
+	device = session->device;
+	mutex_lock(&device->lock);
+
+	if (!__is_session_valid(device, session, __func__)) {
+		rc = -ECONNRESET;
+		goto err_send_pkt;
+	}
+	rc = call_hfi_pkt_op(device, session_send,
+			&pkt, session, in_pkt);
+	if (rc) {
+		dprintk(CVP_ERR,
+				"failed to create pkt\n");
+		goto err_send_pkt;
+	}
+
+	if (__iface_cmdq_write(session->device, &pkt))
+		rc = -ENOTEMPTY;
+
+err_send_pkt:
+	mutex_unlock(&device->lock);
+	return rc;
+}
+
+static int iris_hfi_session_flush(void *sess)
+{
+	struct cvp_hal_session *session = sess;
+	struct iris_hfi_device *device;
+	int rc = 0;
+
+	if (!session || !session->device) {
+		dprintk(CVP_ERR, "Invalid Params %s\n", __func__);
+		return -EINVAL;
+	}
+
+	device = session->device;
+
+	mutex_lock(&device->lock);
+
+	rc = __send_session_cmd(session, HFI_CMD_SESSION_CVP_FLUSH);
+
+	mutex_unlock(&device->lock);
+
+	return rc;
+}
+
+static int iris_hfi_session_start(void *sess)
+{
+	struct cvp_hal_session *session = sess;
+	struct iris_hfi_device *device;
+	int rc = 0;
+
+	if (!session || !session->device) {
+		dprintk(CVP_ERR, "Invalid Params %s\n", __func__);
+		return -EINVAL;
+	}
+
+	device = session->device;
+
+	mutex_lock(&device->lock);
+
+	rc = __send_session_cmd(session, HFI_CMD_SESSION_EVA_START);
+
+	mutex_unlock(&device->lock);
+
+	return rc;
+}
+
+static int iris_hfi_session_stop(void *sess)
+{
+	struct cvp_hal_session *session = sess;
+	struct iris_hfi_device *device;
+	int rc = 0;
+
+	if (!session || !session->device) {
+		dprintk(CVP_ERR, "Invalid Params %s\n", __func__);
+		return -EINVAL;
+	}
+
+	device = session->device;
+
+	mutex_lock(&device->lock);
+
+	rc = __send_session_cmd(session, HFI_CMD_SESSION_EVA_STOP);
+
+	mutex_unlock(&device->lock);
+
+	return rc;
+}
+
+static void __process_fatal_error(
+		struct iris_hfi_device *device)
+{
+	struct msm_cvp_cb_cmd_done cmd_done = {0};
+
+	device->callback(HAL_SYS_ERROR, &cmd_done);
+}
+
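+/* Ask the firmware to prepare for power collapse via SYS_PC_PREP */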
+static int __prepare_pc(struct iris_hfi_device *device)
+{
+	int rc = 0;
+	struct cvp_hfi_cmd_sys_pc_prep_packet pkt;
+
+	rc = call_hfi_pkt_op(device, sys_pc_prep, &pkt);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to create sys pc prep pkt\n");
+		goto err_pc_prep;
+	}
+
+	if (__iface_cmdq_write(device, &pkt))
+		rc = -ENOTEMPTY;
+	if (rc)
+		dprintk(CVP_ERR, "Failed to prepare iris for power off\n");
+err_pc_prep:
+	return rc;
+}
+
+static void iris_hfi_pm_handler(struct work_struct *work)
+{
+	int rc = 0;
+	struct msm_cvp_core *core;
+	struct iris_hfi_device *device;
+
+	core = cvp_driver->cvp_core;
+	if (core)
+		device = core->dev_ops->hfi_device_data;
+	else
+		return;
+
+	if (!device) {
+		dprintk(CVP_ERR, "%s: NULL device\n", __func__);
+		return;
+	}
+
+	dprintk(CVP_PWR, "Entering %s\n", __func__);
+	/*
+	 * It is ok to check this variable outside the lock since
+	 * it is being updated in this context only
+	 */
+	if (device->skip_pc_count >= CVP_MAX_PC_SKIP_COUNT) {
+		dprintk(CVP_WARN, "Failed to PC for %d times\n",
+				device->skip_pc_count);
+		device->skip_pc_count = 0;
+		__process_fatal_error(device);
+		return;
+	}
+
+	mutex_lock(&device->lock);
+	if (gfa_cv.state == DSP_SUSPEND)
+		rc = __power_collapse(device, true);
+	else
+		rc = __power_collapse(device, false);
+	mutex_unlock(&device->lock);
+	switch (rc) {
+	case 0:
+		device->skip_pc_count = 0;
+		/* Cancel pending delayed works if any */
+		cancel_delayed_work(&iris_hfi_pm_work);
+		dprintk(CVP_PWR, "%s: power collapse successful!\n",
+			__func__);
+		break;
+	case -EBUSY:
+		device->skip_pc_count = 0;
+		dprintk(CVP_PWR, "%s: retry PC as cvp is busy\n", __func__);
+		queue_delayed_work(device->iris_pm_workq,
+			&iris_hfi_pm_work, msecs_to_jiffies(
+			device->res->msm_cvp_pwr_collapse_delay));
+		break;
+	case -EAGAIN:
+		device->skip_pc_count++;
+		dprintk(CVP_WARN, "%s: retry power collapse (count %d)\n",
+			__func__, device->skip_pc_count);
+		queue_delayed_work(device->iris_pm_workq,
+			&iris_hfi_pm_work, msecs_to_jiffies(
+			device->res->msm_cvp_pwr_collapse_delay));
+		break;
+	default:
+		dprintk(CVP_ERR, "%s: power collapse failed\n", __func__);
+		break;
+	}
+}
+
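+/*
+ * Power-collapse handshake: suspend the DSP, verify the ARM core is in
+ * WFI and idle, send PC_PREP, then poll (up to max_tries) for both WFI
+ * and CVP_CTRL_STATUS_PC_READY before calling __suspend().
+ */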
+static int __power_collapse(struct iris_hfi_device *device, bool force)
+{
+	int rc = 0;
+	u32 wfi_status = 0, idle_status = 0, pc_ready = 0;
+	int count = 0;
+	const int max_tries = 150;
+
+	if (!device) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+	if (!device->power_enabled) {
+		dprintk(CVP_PWR, "%s: Power already disabled\n",
+				__func__);
+		goto exit;
+	}
+
+	rc = __core_in_valid_state(device);
+	if (!rc) {
+		dprintk(CVP_WARN,
+				"Core is in bad state, Skipping power collapse\n");
+		return -EINVAL;
+	}
+
+	rc = __dsp_suspend(device, force);
+	if (rc == -EBUSY)
+		goto exit;
+	else if (rc)
+		goto skip_power_off;
+
+	__flush_debug_queue(device, device->raw_packet);
+
+	pc_ready = __read_register(device, CVP_CTRL_STATUS) &
+		CVP_CTRL_STATUS_PC_READY;
+	if (!pc_ready) {
+		wfi_status = __read_register(device,
+				CVP_WRAPPER_CPU_STATUS);
+		idle_status = __read_register(device,
+				CVP_CTRL_STATUS);
+		if (!(wfi_status & BIT(0))) {
+			dprintk(CVP_WARN,
+				"Skipping PC as wfi_status (%#x) bit not set\n",
+				wfi_status);
+			goto skip_power_off;
+		}
+		if (!(idle_status & BIT(30))) {
+			dprintk(CVP_WARN,
+				"Skipping PC as idle_status (%#x) bit not set\n",
+				idle_status);
+			goto skip_power_off;
+		}
+
+		rc = __prepare_pc(device);
+		if (rc) {
+			dprintk(CVP_WARN, "Failed PC %d\n", rc);
+			goto skip_power_off;
+		}
+
+		while (count < max_tries) {
+			wfi_status = __read_register(device,
+					CVP_WRAPPER_CPU_STATUS);
+			pc_ready = __read_register(device,
+					CVP_CTRL_STATUS);
+			if ((wfi_status & BIT(0)) && (pc_ready &
+				CVP_CTRL_STATUS_PC_READY))
+				break;
+			usleep_range(150, 250);
+			count++;
+		}
+
+		if (count == max_tries) {
+			dprintk(CVP_ERR,
+				"Skip PC. Core is not ready (%#x, %#x)\n",
+				wfi_status, pc_ready);
+			goto skip_power_off;
+		}
+	} else {
+		wfi_status = __read_register(device, CVP_WRAPPER_CPU_STATUS);
+		if (!(wfi_status & BIT(0))) {
+			dprintk(CVP_WARN,
+				"Skip PC as wfi_status (%#x) bit not set\n",
+				wfi_status);
+			goto skip_power_off;
+		}
+	}
+
+	rc = __suspend(device);
+	if (rc)
+		dprintk(CVP_ERR, "Failed __suspend\n");
+
+exit:
+	return rc;
+
+skip_power_off:
+	dprintk(CVP_PWR, "Skip PC(%#x, %#x, %#x)\n",
+		wfi_status, idle_status, pc_ready);
+	__flush_debug_queue(device, device->raw_packet);
+	return -EAGAIN;
+}
+
+static void __process_sys_error(struct iris_hfi_device *device)
+{
+	struct cvp_hfi_sfr_struct *vsfr = NULL;
+
+	vsfr = (struct cvp_hfi_sfr_struct *)device->sfr.align_virtual_addr;
+	if (vsfr) {
+		void *p = memchr(vsfr->rg_data, '\0', vsfr->bufSize);
+		/*
+		 * SFR isn't guaranteed to be NULL terminated
+		 * since SYS_ERROR indicates that Iris is in the
+		 * process of crashing.
+		 */
+		if (p == NULL)
+			vsfr->rg_data[vsfr->bufSize - 1] = '\0';
+
+		dprintk(CVP_ERR, "SFR Message from FW: %s\n",
+				vsfr->rg_data);
+	}
+}
+
+static void __flush_debug_queue(struct iris_hfi_device *device, u8 *packet)
+{
+	bool local_packet = false;
+	enum cvp_msg_prio log_level = CVP_FW;
+
+	if (!device) {
+		dprintk(CVP_ERR, "%s: Invalid params\n", __func__);
+		return;
+	}
+
+	if (!packet) {
+		packet = kzalloc(CVP_IFACEQ_VAR_HUGE_PKT_SIZE, GFP_KERNEL);
+		if (!packet) {
+			dprintk(CVP_ERR, "In %s() Fail to allocate mem\n",
+				__func__);
+			return;
+		}
+
+		local_packet = true;
+
+		/*
+		 * The local packet is used when something FATAL occurred.
+		 * It is good to print these logs by default.
+		 */
+
+		log_level = CVP_ERR;
+	}
+
+#define SKIP_INVALID_PKT(pkt_size, payload_size, pkt_hdr_size) ({ \
+		if (pkt_size < pkt_hdr_size || \
+			payload_size < MIN_PAYLOAD_SIZE || \
+			payload_size > \
+			(pkt_size - pkt_hdr_size + sizeof(u8))) { \
+			dprintk(CVP_ERR, \
+				"%s: invalid msg size - %d\n", \
+				__func__, pkt->msg_size); \
+			continue; \
+		} \
+	})
+
+	while (!__iface_dbgq_read(device, packet)) {
+		struct cvp_hfi_packet_header *pkt =
+			(struct cvp_hfi_packet_header *) packet;
+
+		if (pkt->size < sizeof(struct cvp_hfi_packet_header)) {
+			dprintk(CVP_ERR, "%s: Invalid pkt size\n",
+				__func__);
+			continue;
+		}
+
+		if (pkt->packet_type == HFI_MSG_SYS_DEBUG) {
+			struct cvp_hfi_msg_sys_debug_packet *pkt =
+				(struct cvp_hfi_msg_sys_debug_packet *) packet;
+
+			SKIP_INVALID_PKT(pkt->size,
+				pkt->msg_size, sizeof(*pkt));
+
+			/*
+			 * All fw messages start with a newline character. This
+			 * causes dprintk to print this message in two lines
+			 * in the kernel log. Ignoring the first character
+			 * from the message fixes this to print it in a single
+			 * line.
+			 */
+			pkt->rg_msg_data[pkt->msg_size-1] = '\0';
+			dprintk(log_level, "%s", &pkt->rg_msg_data[1]);
+		}
+	}
+#undef SKIP_INVALID_PKT
+
+	if (local_packet)
+		kfree(packet);
+}
+
+static bool __is_session_valid(struct iris_hfi_device *device,
+		struct cvp_hal_session *session, const char *func)
+{
+	struct cvp_hal_session *temp = NULL;
+
+	if (!device || !session)
+		goto invalid;
+
+	list_for_each_entry(temp, &device->sess_head, list)
+		if (session == temp)
+			return true;
+
+invalid:
+	dprintk(CVP_WARN, "%s: device %pK, invalid session %pK\n",
+			func, device, session);
+	return false;
+}
+
+static struct cvp_hal_session *__get_session(struct iris_hfi_device *device,
+		u32 session_id)
+{
+	struct cvp_hal_session *temp = NULL;
+
+	list_for_each_entry(temp, &device->sess_head, list) {
+		if (session_id == hash32_ptr(temp))
+			return temp;
+	}
+
+	return NULL;
+}
+
+#define _INVALID_MSG_ "Unrecognized MSG (%#x) session (%pK), discarding\n"
+#define _INVALID_STATE_ "Ignore responses from %d to %d invalid state\n"
+#define _DEVFREQ_FAIL_ "Failed to add devfreq device bus %s governor %s: %d\n"
+
+static void process_system_msg(struct msm_cvp_cb_info *info,
+		struct iris_hfi_device *device,
+		void *raw_packet)
+{
+	struct cvp_hal_sys_init_done sys_init_done = {0};
+
+	switch (info->response_type) {
+	case HAL_SYS_ERROR:
+		__process_sys_error(device);
+		break;
+	case HAL_SYS_RELEASE_RESOURCE_DONE:
+		dprintk(CVP_CORE, "Received SYS_RELEASE_RESOURCE\n");
+		break;
+	case HAL_SYS_INIT_DONE:
+		dprintk(CVP_CORE, "Received SYS_INIT_DONE\n");
+		sys_init_done.capabilities =
+			device->sys_init_capabilities;
+		cvp_hfi_process_sys_init_done_prop_read(
+			(struct cvp_hfi_msg_sys_init_done_packet *)
+				raw_packet, &sys_init_done);
+		info->response.cmd.data.sys_init_done = sys_init_done;
+		break;
+	default:
+		break;
+	}
+}
+
+
+static void **get_session_id(struct msm_cvp_cb_info *info)
+{
+	void **session_id = NULL;
+
+	/* For session-related packets, validate session */
+	switch (info->response_type) {
+	case HAL_SESSION_INIT_DONE:
+	case HAL_SESSION_END_DONE:
+	case HAL_SESSION_ABORT_DONE:
+	case HAL_SESSION_START_DONE:
+	case HAL_SESSION_STOP_DONE:
+	case HAL_SESSION_FLUSH_DONE:
+	case HAL_SESSION_SET_BUFFER_DONE:
+	case HAL_SESSION_SUSPEND_DONE:
+	case HAL_SESSION_RESUME_DONE:
+	case HAL_SESSION_SET_PROP_DONE:
+	case HAL_SESSION_GET_PROP_DONE:
+	case HAL_SESSION_RELEASE_BUFFER_DONE:
+	case HAL_SESSION_REGISTER_BUFFER_DONE:
+	case HAL_SESSION_UNREGISTER_BUFFER_DONE:
+	case HAL_SESSION_PROPERTY_INFO:
+	case HAL_SESSION_EVENT_CHANGE:
+	case HAL_SESSION_DUMP_NOTIFY:
+	case HAL_SESSION_ERROR:
+		session_id = &info->response.cmd.session_id;
+		break;
+	case HAL_RESPONSE_UNUSED:
+	default:
+		session_id = NULL;
+		break;
+	}
+	return session_id;
+}
+
+static void print_msg_hdr(void *hdr)
+{
+	struct cvp_hfi_msg_session_hdr *new_hdr =
+			(struct cvp_hfi_msg_session_hdr *)hdr;
+	dprintk(CVP_HFI, "HFI MSG received: %x %x %x %x %x %x %x %#llx\n",
+			new_hdr->size, new_hdr->packet_type,
+			new_hdr->session_id,
+			new_hdr->client_data.transaction_id,
+			new_hdr->client_data.data1,
+			new_hdr->client_data.data2,
+			new_hdr->error_type,
+			new_hdr->client_data.kdata);
+}
+
+static int __response_handler(struct iris_hfi_device *device)
+{
+	struct msm_cvp_cb_info *packets;
+	int packet_count = 0;
+	u8 *raw_packet = NULL;
+	bool requeue_pm_work = true;
+
+	if (!device || device->state != IRIS_STATE_INIT)
+		return 0;
+
+	packets = device->response_pkt;
+
+	raw_packet = device->raw_packet;
+
+	if (!raw_packet || !packets) {
+		dprintk(CVP_ERR,
+			"%s: Invalid args : Res pkt = %pK, Raw pkt = %pK\n",
+			__func__, packets, raw_packet);
+		return 0;
+	}
+
+	if (device->intr_status & CVP_FATAL_INTR_BMSK) {
+		if (device->intr_status & CVP_WRAPPER_INTR_MASK_CPU_NOC_BMSK)
+			dprintk(CVP_ERR, "Received Xtensa NOC error\n");
+
+		if (device->intr_status & CVP_WRAPPER_INTR_MASK_CORE_NOC_BMSK)
+			dprintk(CVP_ERR, "Received CVP core NOC error\n");
+	}
+
+	/* Bleed the msg queue dry of packets */
+	while (!__iface_msgq_read(device, raw_packet)) {
+		void **session_id = NULL;
+		struct msm_cvp_cb_info *info = &packets[packet_count++];
+		struct cvp_hfi_msg_session_hdr *hdr =
+			(struct cvp_hfi_msg_session_hdr *)raw_packet;
+		int rc = 0;
+
+		print_msg_hdr(hdr);
+		rc = cvp_hfi_process_msg_packet(0, raw_packet, info);
+		if (rc) {
+			dprintk(CVP_WARN,
+				"Corrupt/unknown packet found, discarding\n");
+			--packet_count;
+			continue;
+		} else if (info->response_type == HAL_NO_RESP) {
+			--packet_count;
+			continue;
+		}
+
+		/* Process the packet types that we're interested in */
+		process_system_msg(info, device, raw_packet);
+
+		session_id = get_session_id(info);
+		/*
+		 * hfi_process_msg_packet provides a session_id that's a hashed
+		 * value of struct cvp_hal_session, we need to coerce the hashed
+		 * value back to a pointer that we can use. Ideally,
+		 * hfi_process_msg_packet should take care of this, but it
+		 * doesn't have the required information for it.
+		 */
+		if (session_id) {
+			struct cvp_hal_session *session = NULL;
+
+			if (upper_32_bits((uintptr_t)*session_id) != 0) {
+				dprintk(CVP_ERR,
+					"Upper 32-bits != 0 for sess_id=%pK\n",
+					*session_id);
+			}
+			session = __get_session(device,
+					(u32)(uintptr_t)*session_id);
+			if (!session) {
+				dprintk(CVP_ERR, _INVALID_MSG_,
+						info->response_type,
+						*session_id);
+				--packet_count;
+				continue;
+			}
+
+			*session_id = session->session_id;
+		}
+
+		if (packet_count >= cvp_max_packets) {
+			dprintk(CVP_WARN,
+				"Too many packets in message queue!\n");
+			break;
+		}
+
+		/* do not read packets after sys error packet */
+		if (info->response_type == HAL_SYS_ERROR)
+			break;
+	}
+
+	if (requeue_pm_work && device->res->sw_power_collapsible) {
+		cancel_delayed_work(&iris_hfi_pm_work);
+		if (!queue_delayed_work(device->iris_pm_workq,
+			&iris_hfi_pm_work,
+			msecs_to_jiffies(
+				device->res->msm_cvp_pwr_collapse_delay))) {
+			dprintk(CVP_ERR, "PM work already scheduled\n");
+		}
+	}
+
+	__flush_debug_queue(device, raw_packet);
+	return packet_count;
+}
+
+irqreturn_t iris_hfi_core_work_handler(int irq, void *data)
+{
+	struct msm_cvp_core *core;
+	struct iris_hfi_device *device;
+	int num_responses = 0, i = 0;
+	u32 intr_status;
+	static bool warning_on = true;
+
+	core = cvp_driver->cvp_core;
+	if (core)
+		device = core->dev_ops->hfi_device_data;
+	else
+		return IRQ_HANDLED;
+
+	mutex_lock(&device->lock);
+	if (!__core_in_valid_state(device)) {
+		if (warning_on) {
+			dprintk(CVP_WARN, "%s Core not in init state\n",
+				__func__);
+			warning_on = false;
+		}
+		goto err_no_work;
+	}
+
+	warning_on = true;
+
+	if (!device->callback) {
+		dprintk(CVP_ERR, "No interrupt callback function: %pK\n",
+				device);
+		goto err_no_work;
+	}
+
+	if (__resume(device)) {
+		dprintk(CVP_ERR, "%s: Power enable failed\n", __func__);
+		goto err_no_work;
+	}
+
+	__core_clear_interrupt(device);
+	num_responses = __response_handler(device);
+	dprintk(CVP_HFI, "%s: cvp_driver_debug num_responses = %d\n",
+		__func__, num_responses);
+
+err_no_work:
+
+	/* Keep the interrupt status before releasing device lock */
+	intr_status = device->intr_status;
+	mutex_unlock(&device->lock);
+
+	/*
+	 * Issue the callbacks outside of the locked context to preserve
+	 * re-entrancy.
+	 */
+
+	for (i = 0; !IS_ERR_OR_NULL(device->response_pkt) &&
+		i < num_responses; ++i) {
+		struct msm_cvp_cb_info *r = &device->response_pkt[i];
+		void *rsp = (void *)&r->response;
+
+		if (!__core_in_valid_state(device)) {
+			dprintk(CVP_ERR,
+				_INVALID_STATE_, (i + 1), num_responses);
+			break;
+		}
+		dprintk(CVP_HFI, "Processing response %d of %d, type %d\n",
+			(i + 1), num_responses, r->response_type);
+		/* callback = void cvp_handle_cmd_response() */
+		device->callback(r->response_type, rsp);
+	}
+
+	/* We need re-enable the irq which was disabled in ISR handler */
+	if (!(intr_status & CVP_WRAPPER_INTR_STATUS_A2HWD_BMSK))
+		enable_irq(device->cvp_hal_data->irq);
+
+	return IRQ_HANDLED;
+}
+
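+/*
+ * Hard IRQ handler: mask the line and defer to the threaded handler,
+ * which re-enables it once the responses have been drained.
+ */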
+irqreturn_t cvp_hfi_isr(int irq, void *dev)
+{
+	disable_irq_nosync(irq);
+	return IRQ_WAKE_THREAD;
+}
+
+static void iris_hfi_wd_work_handler(struct work_struct *work)
+{
+	struct msm_cvp_core *core;
+	struct iris_hfi_device *device;
+	struct msm_cvp_cb_cmd_done response = {0};
+	enum hal_command_response cmd = HAL_SYS_WATCHDOG_TIMEOUT;
+
+	core = cvp_driver->cvp_core;
+	if (core)
+		device = core->dev_ops->hfi_device_data;
+	else
+		return;
+	if (msm_cvp_hw_wd_recovery) {
+		dprintk(CVP_ERR, "Cleaning up as HW WD recovery is enabled %d\n",
+				msm_cvp_hw_wd_recovery);
+		__print_sidebandmanager_regs(device);
+		response.device_id = 0;
+		handle_sys_error(cmd, (void *) &response);
+		enable_irq(device->cvp_hal_data->irq_wd);
+	}
+	else {
+		dprintk(CVP_ERR, "Crashing the device as HW WD recovery is disable %d\n",
+				msm_cvp_hw_wd_recovery);
+		BUG_ON(1);
+	}
+}
+
+static DECLARE_WORK(iris_hfi_wd_work, iris_hfi_wd_work_handler);
+
+irqreturn_t iris_hfi_isr_wd(int irq, void *dev)
+{
+	struct iris_hfi_device *device = dev;
+
+	dprintk(CVP_ERR, "Got HW WDOG IRQ at %llu!\n", get_aon_time());
+	disable_irq_nosync(irq);
+	queue_work(device->cvp_workq, &iris_hfi_wd_work);
+	return IRQ_HANDLED;
+}
+
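+/*
+ * Initialize one entry of the reset table. CVP_ON_USE resets are taken as
+ * exclusive/released handles and only acquired around their use, while
+ * CVP_ON_INIT resets are held for the lifetime of the device.
+ */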
+static int __init_reset_clk(struct msm_cvp_platform_resources *res,
+			int reset_index)
+{
+	int rc = 0;
+	struct reset_control *rst;
+	struct reset_info *rst_info;
+	struct reset_set *rst_set = &res->reset_set;
+
+	if (!rst_set->reset_tbl)
+		return 0;
+
+	rst_info = &rst_set->reset_tbl[reset_index];
+	rst = rst_info->rst;
+	dprintk(CVP_PWR, "reset_clk: name %s rst %pK required_stage=%d\n",
+		rst_set->reset_tbl[reset_index].name, rst, rst_info->required_stage);
+
+	if (rst)
+		goto skip_reset_init;
+
+	if (rst_info->required_stage == CVP_ON_USE) {
+		rst = reset_control_get_exclusive_released(&res->pdev->dev,
+			rst_set->reset_tbl[reset_index].name);
+		if (IS_ERR(rst)) {
+			rc = PTR_ERR(rst);
+			dprintk(CVP_ERR, "reset get exclusive fail %d\n", rc);
+			return rc;
+		}
+		dprintk(CVP_PWR, "reset_clk: name %s get exclusive rst %llx\n",
+				rst_set->reset_tbl[reset_index].name, rst);
+	} else if (rst_info->required_stage == CVP_ON_INIT) {
+		rst = devm_reset_control_get(&res->pdev->dev,
+				rst_set->reset_tbl[reset_index].name);
+		if (IS_ERR(rst)) {
+			rc = PTR_ERR(rst);
+			dprintk(CVP_ERR, "reset get fail %d\n", rc);
+			return rc;
+		}
+		dprintk(CVP_PWR, "reset_clk: name %s get rst %llx\n",
+				rst_set->reset_tbl[reset_index].name, rst);
+	} else {
+		dprintk(CVP_ERR, "Invalid reset stage\n");
+		return -EINVAL;
+	}
+
+	rst_set->reset_tbl[reset_index].rst = rst;
+	rst_info->state = RESET_INIT;
+
+	return 0;
+
+skip_reset_init:
+	return rc;
+}
+
+static int __reset_control_assert_name(struct iris_hfi_device *device,
+	const char *name)
+{
+	struct reset_info *rcinfo = NULL;
+	int rc = 0;
+	bool found = false;
+
+	iris_hfi_for_each_reset_clock(device, rcinfo) {
+		if (strcmp(rcinfo->name, name))
+			continue;
+
+		found = true;
+		rc = reset_control_assert(rcinfo->rst);
+		if (rc)
+			dprintk(CVP_ERR,
+				"%s: failed to assert reset control (%s), rc = %d\n",
+				__func__, rcinfo->name, rc);
+		else
+			dprintk(CVP_PWR, "%s: assert reset control (%s)\n",
+				__func__, rcinfo->name);
+		break;
+	}
+	if (!found) {
+		dprintk(CVP_PWR, "%s: reset control (%s) not found\n",
+			__func__, name);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static int __reset_control_deassert_name(struct iris_hfi_device *device,
+	const char *name)
+{
+	struct reset_info *rcinfo = NULL;
+	int rc = 0;
+	bool found = false;
+
+	iris_hfi_for_each_reset_clock(device, rcinfo) {
+		if (strcmp(rcinfo->name, name))
+			continue;
+		found = true;
+		rc = reset_control_deassert(rcinfo->rst);
+		if (rc)
+			dprintk(CVP_ERR,
+				"%s: deassert reset control for (%s) failed, rc %d\n",
+				__func__, rcinfo->name, rc);
+		else
+			dprintk(CVP_PWR, "%s: deassert reset control (%s)\n",
+				__func__, rcinfo->name);
+		break;
+	}
+	if (!found) {
+		dprintk(CVP_PWR, "%s: reset control (%s) not found\n",
+			__func__, name);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static int __reset_control_acquire(struct iris_hfi_device *device,
+	const char *name)
+{
+	struct reset_info *rcinfo = NULL;
+	int rc = 0;
+	bool found = false;
+	int max_retries = 10000;
+
+	iris_hfi_for_each_reset_clock(device, rcinfo) {
+		if (strcmp(rcinfo->name, name))
+			continue;
+		found = true;
+		if (rcinfo->state == RESET_ACQUIRED)
+			return rc;
+acquire_again:
+		rc = reset_control_acquire(rcinfo->rst);
+		if (rc) {
+			if (rc == -EBUSY) {
+				usleep_range(100, 150);
+				max_retries--;
+				if (max_retries) {
+					goto acquire_again;
+				} else {
+					dprintk(CVP_ERR,
+						"%s acquire %s -EBUSY\n",
+							__func__, rcinfo->name);
+					BUG_ON(1);
+				}
+			} else {
+				dprintk(CVP_ERR,
+					"%s: acquire failed (%s) rc %d\n",
+					__func__, rcinfo->name, rc);
+				rc = -EINVAL;
+			}
+		} else {
+			dprintk(CVP_PWR, "%s: reset acquire succeed (%s)\n",
+				__func__, rcinfo->name);
+			rcinfo->state = RESET_ACQUIRED;
+		}
+		break;
+	}
+	if (!found) {
+		dprintk(CVP_PWR, "%s: reset control (%s) not found\n",
+			__func__, name);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static int __reset_control_release(struct iris_hfi_device *device,
+	const char *name)
+{
+	struct reset_info *rcinfo = NULL;
+	int rc = 0;
+	bool found = false;
+
+	iris_hfi_for_each_reset_clock(device, rcinfo) {
+		if (strcmp(rcinfo->name, name))
+			continue;
+		found = true;
+		if (rcinfo->state != RESET_ACQUIRED) {
+			dprintk(CVP_WARN, "Double releasing reset clk?\n");
+			return -EINVAL;
+		}
+		reset_control_release(rcinfo->rst);
+		dprintk(CVP_PWR, "%s: reset release succeed (%s)\n",
+			__func__, rcinfo->name);
+		rcinfo->state = RESET_RELEASED;
+		break;
+	}
+	if (!found) {
+		dprintk(CVP_PWR, "%s: reset control (%s) not found\n",
+			__func__, name);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static void __deinit_bus(struct iris_hfi_device *device)
+{
+	struct bus_info *bus = NULL;
+
+	if (!device)
+		return;
+
+	kfree(device->bus_vote.data);
+	device->bus_vote = CVP_DEFAULT_BUS_VOTE;
+
+	iris_hfi_for_each_bus_reverse(device, bus) {
+		dev_set_drvdata(bus->dev, NULL);
+		icc_put(bus->client);
+		bus->client = NULL;
+	}
+}
+
+static int __init_bus(struct iris_hfi_device *device)
+{
+	struct bus_info *bus = NULL;
+	int rc = 0;
+
+	if (!device)
+		return -EINVAL;
+
+	iris_hfi_for_each_bus(device, bus) {
+		/*
+		 * This is stupid, but there's no other easy way to get hold
+		 * of struct bus_info in iris_hfi_devfreq_*()
+		 */
+		WARN(dev_get_drvdata(bus->dev), "%s's drvdata already set\n",
+				dev_name(bus->dev));
+		dev_set_drvdata(bus->dev, device);
+		bus->client = icc_get(&device->res->pdev->dev,
+				bus->master, bus->slave);
+		if (IS_ERR_OR_NULL(bus->client)) {
+			rc = PTR_ERR(bus->client) ?: -EBADHANDLE;
+			dprintk(CVP_ERR, "Failed to register bus %s: %d\n",
+					bus->name, rc);
+			bus->client = NULL;
+			goto err_add_dev;
+		}
+	}
+
+	return 0;
+
+err_add_dev:
+	__deinit_bus(device);
+	return rc;
+}
+
+static void __deinit_regulators(struct iris_hfi_device *device)
+{
+	struct regulator_info *rinfo = NULL;
+
+	iris_hfi_for_each_regulator_reverse(device, rinfo) {
+		if (rinfo->regulator) {
+			regulator_put(rinfo->regulator);
+			rinfo->regulator = NULL;
+		}
+	}
+}
+
+static int __init_regulators(struct iris_hfi_device *device)
+{
+	int rc = 0;
+	struct regulator_info *rinfo = NULL;
+
+	iris_hfi_for_each_regulator(device, rinfo) {
+		rinfo->regulator = regulator_get(&device->res->pdev->dev,
+				rinfo->name);
+		if (IS_ERR_OR_NULL(rinfo->regulator)) {
+			rc = PTR_ERR(rinfo->regulator) ?: -EBADHANDLE;
+			dprintk(CVP_ERR, "Failed to get regulator: %s\n",
+					rinfo->name);
+			rinfo->regulator = NULL;
+			goto err_reg_get;
+		}
+	}
+
+	return 0;
+
+err_reg_get:
+	__deinit_regulators(device);
+	return rc;
+}
+
+static void __deinit_subcaches(struct iris_hfi_device *device)
+{
+	struct subcache_info *sinfo = NULL;
+
+	if (!device) {
+		dprintk(CVP_ERR, "deinit_subcaches: invalid device %pK\n",
+			device);
+		goto exit;
+	}
+
+	if (!is_sys_cache_present(device))
+		goto exit;
+
+	iris_hfi_for_each_subcache_reverse(device, sinfo) {
+		if (sinfo->subcache) {
+			dprintk(CVP_CORE, "deinit_subcaches: %s\n",
+				sinfo->name);
+			llcc_slice_putd(sinfo->subcache);
+			sinfo->subcache = NULL;
+		}
+	}
+
+exit:
+	return;
+}
+
+static int __init_subcaches(struct iris_hfi_device *device)
+{
+	int rc = 0;
+	struct subcache_info *sinfo = NULL;
+
+	if (!device) {
+		dprintk(CVP_ERR, "init_subcaches: invalid device %pK\n",
+			device);
+		return -EINVAL;
+	}
+
+	if (!is_sys_cache_present(device))
+		return 0;
+
+	iris_hfi_for_each_subcache(device, sinfo) {
+		if (!strcmp("cvp", sinfo->name)) {
+			sinfo->subcache = llcc_slice_getd(LLCC_CVP);
+		} else if (!strcmp("cvpfw", sinfo->name)) {
+			sinfo->subcache = llcc_slice_getd(LLCC_CVPFW);
+		} else {
+			dprintk(CVP_ERR, "Invalid subcache name %s\n",
+					sinfo->name);
+		}
+		if (IS_ERR_OR_NULL(sinfo->subcache)) {
+			rc = PTR_ERR(sinfo->subcache) ?
+				PTR_ERR(sinfo->subcache) : -EBADHANDLE;
+			dprintk(CVP_ERR,
+				 "init_subcaches: invalid subcache: %s rc %d\n",
+				sinfo->name, rc);
+			sinfo->subcache = NULL;
+			goto err_subcache_get;
+		}
+		dprintk(CVP_CORE, "init_subcaches: %s\n",
+			sinfo->name);
+	}
+
+	return 0;
+
+err_subcache_get:
+	__deinit_subcaches(device);
+	return rc;
+}
+
+static int __init_resources(struct iris_hfi_device *device,
+				struct msm_cvp_platform_resources *res)
+{
+	int i, rc = 0;
+
+	rc = __init_regulators(device);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to get all regulators\n");
+		return -ENODEV;
+	}
+
+	rc = msm_cvp_init_clocks(device);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to init clocks\n");
+		rc = -ENODEV;
+		goto err_init_clocks;
+	}
+
+	for (i = 0; i < device->res->reset_set.count; i++) {
+		rc = __init_reset_clk(res, i);
+		if (rc) {
+			dprintk(CVP_ERR, "Failed to init reset clocks\n");
+			rc = -ENODEV;
+			goto err_init_reset_clk;
+		}
+	}
+
+	rc = __init_bus(device);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to init bus: %d\n", rc);
+		goto err_init_bus;
+	}
+
+	rc = __init_subcaches(device);
+	if (rc)
+		dprintk(CVP_WARN, "Failed to init subcaches: %d\n", rc);
+
+	device->sys_init_capabilities =
+		kzalloc(sizeof(struct msm_cvp_capability)
+		* CVP_MAX_SESSIONS, GFP_KERNEL);
+
+	return rc;
+
+err_init_reset_clk:
+err_init_bus:
+	msm_cvp_deinit_clocks(device);
+err_init_clocks:
+	__deinit_regulators(device);
+	return rc;
+}
+
+static void __deinit_resources(struct iris_hfi_device *device)
+{
+	__deinit_subcaches(device);
+	__deinit_bus(device);
+	msm_cvp_deinit_clocks(device);
+	__deinit_regulators(device);
+	kfree(device->sys_init_capabilities);
+	device->sys_init_capabilities = NULL;
+}
+
+static int __disable_regulator_impl(struct regulator_info *rinfo,
+				struct iris_hfi_device *device)
+{
+	int rc = 0;
+
+	dprintk(CVP_PWR, "Disabling regulator %s\n", rinfo->name);
+
+	/*
+	 * This call is needed. The driver must acquire control back
+	 * from HW in order to disable the regulator. Otherwise the
+	 * behavior is undefined.
+	 */
+
+	rc = __acquire_regulator(rinfo, device);
+	if (rc) {
+		/*
+		 * This is somewhat fatal, but nothing we can do
+		 * about it. We can't disable the regulator w/o
+		 * getting it back under s/w control
+		 */
+		dprintk(CVP_WARN,
+			"Failed to acquire control on %s\n",
+			rinfo->name);
+
+		goto disable_regulator_failed;
+	}
+
+	/* Acquire XO_RESET to avoid a race condition with video */
+	rc = call_iris_op(device, reset_control_acquire_name, device, "cvp_xo_reset");
+	if (rc) {
+		dprintk(CVP_ERR,
+			"XO_RESET could not be acquired: skip disabling the regulator %s\n",
+			rinfo->name);
+		return -EINVAL;
+	}
+
+	rc = regulator_disable(rinfo->regulator);
+
+	/* Release XO_RESET after the regulator is disabled. */
+	call_iris_op(device, reset_control_release_name, device, "cvp_xo_reset");
+
+	if (rc) {
+		dprintk(CVP_WARN,
+			"Failed to disable %s: %d\n",
+			rinfo->name, rc);
+		goto disable_regulator_failed;
+	}
+
+	return 0;
+disable_regulator_failed:
+
+	/* Bring attention to this issue */
+	msm_cvp_res_handle_fatal_hw_error(device->res, true);
+	return rc;
+}
+
+static int __disable_hw_power_collapse(struct iris_hfi_device *device)
+{
+	int rc = 0;
+
+	if (!msm_cvp_fw_low_power_mode) {
+		dprintk(CVP_PWR, "HW power collapse not enabled, nothing to disable\n");
+		return 0;
+	}
+
+	rc = __take_back_regulators(device);
+	if (rc)
+		dprintk(CVP_WARN,
+			"%s : Failed to disable HW power collapse %d\n",
+				__func__, rc);
+	return rc;
+}
+
+static int __enable_hw_power_collapse(struct iris_hfi_device *device)
+{
+	int rc = 0;
+
+	if (!msm_cvp_fw_low_power_mode) {
+		dprintk(CVP_PWR, "Not enabling hardware power collapse\n");
+		return 0;
+	}
+
+	rc = __hand_off_regulators(device);
+	if (rc)
+		dprintk(CVP_WARN,
+			"%s : Failed to enable HW power collapse %d\n",
+				__func__, rc);
+	return rc;
+}
+
+static int __enable_regulator(struct iris_hfi_device *device,
+		const char *name)
+{
+	int rc = 0;
+	struct regulator_info *rinfo;
+
+	iris_hfi_for_each_regulator(device, rinfo) {
+		if (strcmp(rinfo->name, name))
+			continue;
+
+		/* Acquire XO_RESET to avoid a race condition with video */
+		rc = call_iris_op(device, reset_control_acquire_name, device, "cvp_xo_reset");
+		if (rc) {
+			dprintk(CVP_ERR,
+				"XO_RESET could not be acquired: skip enabling the regulator %s\n",
+				rinfo->name);
+			return -EINVAL;
+		}
+
+		rc = regulator_enable(rinfo->regulator);
+
+		/* Release XO_RESET after the regulator is enabled. */
+		call_iris_op(device, reset_control_release_name, device, "cvp_xo_reset");
+
+		if (rc) {
+			dprintk(CVP_ERR, "Failed to enable %s: %d\n",
+					rinfo->name, rc);
+			return rc;
+		}
+
+		if (!regulator_is_enabled(rinfo->regulator)) {
+			dprintk(CVP_ERR,"%s: regulator %s not enabled\n",
+					__func__, rinfo->name);
+			regulator_disable(rinfo->regulator);
+			return -EINVAL;
+		}
+
+		dprintk(CVP_PWR, "Enabled regulator %s\n", rinfo->name);
+		return 0;
+	}
+
+	dprintk(CVP_ERR, "regulator %s not found\n", name);
+	return -EINVAL;
+}
+
+static int __disable_regulator(struct iris_hfi_device *device,
+		const char *name)
+{
+	struct regulator_info *rinfo;
+
+	iris_hfi_for_each_regulator_reverse(device, rinfo) {
+		if (strcmp(rinfo->name, name))
+			continue;
+
+		__disable_regulator_impl(rinfo, device);
+		dprintk(CVP_PWR, "%s Disabled regulator %s\n", __func__, name);
+		return 0;
+	}
+
+	dprintk(CVP_ERR, "%s regulator %s not found\n", __func__, name);
+	return -EINVAL;
+}
+
+static int __enable_subcaches(struct iris_hfi_device *device)
+{
+	int rc = 0;
+	u32 c = 0;
+	struct subcache_info *sinfo;
+
+	if (msm_cvp_syscache_disable || !is_sys_cache_present(device))
+		return 0;
+
+	/* Activate subcaches */
+	iris_hfi_for_each_subcache(device, sinfo) {
+		rc = llcc_slice_activate(sinfo->subcache);
+		if (rc) {
+			dprintk(CVP_WARN, "Failed to activate %s: %d\n",
+				sinfo->name, rc);
+			msm_cvp_res_handle_fatal_hw_error(device->res, true);
+			goto err_activate_fail;
+		}
+		sinfo->isactive = true;
+		dprintk(CVP_CORE, "Activated subcache %s\n", sinfo->name);
+		c++;
+	}
+
+	dprintk(CVP_CORE, "Activated %d Subcaches to CVP\n", c);
+
+	return 0;
+
+err_activate_fail:
+	__release_subcaches(device);
+	__disable_subcaches(device);
+	return 0;
+}
+
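+/*
+ * Report the activated LLCC slices to firmware as a SYSCACHE resource so
+ * the core can start using them; entries are marked "isset" on success.
+ */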
+static int __set_subcaches(struct iris_hfi_device *device)
+{
+	int rc = 0;
+	u32 c = 0;
+	struct subcache_info *sinfo;
+	u32 resource[CVP_MAX_SUBCACHE_SIZE];
+	struct cvp_hfi_resource_syscache_info_type *sc_res_info;
+	struct cvp_hfi_resource_subcache_type *sc_res;
+	struct cvp_resource_hdr rhdr;
+
+	if (device->res->sys_cache_res_set || msm_cvp_syscache_disable) {
+		dprintk(CVP_CORE, "Subcaches already set or disabled\n");
+		return 0;
+	}
+
+	memset((void *)resource, 0x0, (sizeof(u32) * CVP_MAX_SUBCACHE_SIZE));
+
+	sc_res_info = (struct cvp_hfi_resource_syscache_info_type *)resource;
+	sc_res = &(sc_res_info->rg_subcache_entries[0]);
+
+	iris_hfi_for_each_subcache(device, sinfo) {
+		if (sinfo->isactive) {
+			sc_res[c].size = sinfo->subcache->slice_size;
+			sc_res[c].sc_id = sinfo->subcache->slice_id;
+			c++;
+		}
+	}
+
+	/* Set resource to CVP for activated subcaches */
+	if (c) {
+		dprintk(CVP_CORE, "Setting %d Subcaches\n", c);
+
+		rhdr.resource_handle = sc_res_info; /* cookie */
+		rhdr.resource_id = CVP_RESOURCE_SYSCACHE;
+
+		sc_res_info->num_entries = c;
+
+		rc = __core_set_resource(device, &rhdr, (void *)sc_res_info);
+		if (rc) {
+			dprintk(CVP_WARN, "Failed to set subcaches %d\n", rc);
+			goto err_fail_set_subcaches;
+		}
+
+		iris_hfi_for_each_subcache(device, sinfo) {
+			if (sinfo->isactive)
+				sinfo->isset = true;
+		}
+
+		dprintk(CVP_CORE, "Set Subcaches done to CVP\n");
+		device->res->sys_cache_res_set = true;
+	}
+
+	return 0;
+
+err_fail_set_subcaches:
+	__disable_subcaches(device);
+
+	return 0;
+}
+
+static int __release_subcaches(struct iris_hfi_device *device)
+{
+	struct subcache_info *sinfo;
+	int rc = 0;
+	u32 c = 0;
+	u32 resource[CVP_MAX_SUBCACHE_SIZE];
+	struct cvp_hfi_resource_syscache_info_type *sc_res_info;
+	struct cvp_hfi_resource_subcache_type *sc_res;
+	struct cvp_resource_hdr rhdr;
+
+	if (msm_cvp_syscache_disable || !is_sys_cache_present(device))
+		return 0;
+
+	memset((void *)resource, 0x0, (sizeof(u32) * CVP_MAX_SUBCACHE_SIZE));
+
+	sc_res_info = (struct cvp_hfi_resource_syscache_info_type *)resource;
+	sc_res = &(sc_res_info->rg_subcache_entries[0]);
+
+	/* Release resource command to Iris */
+	iris_hfi_for_each_subcache_reverse(device, sinfo) {
+		if (sinfo->isset) {
+			/* Update the entry */
+			sc_res[c].size = sinfo->subcache->slice_size;
+			sc_res[c].sc_id = sinfo->subcache->slice_id;
+			c++;
+			sinfo->isset = false;
+		}
+	}
+
+	if (c > 0) {
+		dprintk(CVP_CORE, "Releasing %d subcaches\n", c);
+		rhdr.resource_handle = sc_res_info; /* cookie */
+		rhdr.resource_id = CVP_RESOURCE_SYSCACHE;
+
+		rc = __core_release_resource(device, &rhdr);
+		if (rc)
+			dprintk(CVP_WARN,
+				"Failed to release %d subcaches\n", c);
+	}
+
+	device->res->sys_cache_res_set = false;
+
+	return 0;
+}
+
+static int __disable_subcaches(struct iris_hfi_device *device)
+{
+	struct subcache_info *sinfo;
+	int rc = 0;
+
+	if (msm_cvp_syscache_disable || !is_sys_cache_present(device))
+		return 0;
+
+	/* De-activate subcaches */
+	iris_hfi_for_each_subcache_reverse(device, sinfo) {
+		if (sinfo->isactive) {
+			dprintk(CVP_CORE, "De-activate subcache %s\n",
+				sinfo->name);
+			rc = llcc_slice_deactivate(sinfo->subcache);
+			if (rc) {
+				dprintk(CVP_WARN,
+					"Failed to de-activate %s: %d\n",
+					sinfo->name, rc);
+			}
+			sinfo->isactive = false;
+		}
+	}
+
+	return 0;
+}
+
+static void interrupt_init_iris2(struct iris_hfi_device *device)
+{
+	u32 mask_val = 0;
+
+	/* All interrupts are disabled initially; 0x1F6 is the reset value */
+	mask_val = __read_register(device, CVP_WRAPPER_INTR_MASK);
+
+	/* Write 0 to unmask CPU and WD interrupts */
+	mask_val &= ~(CVP_FATAL_INTR_BMSK | CVP_WRAPPER_INTR_MASK_A2HCPU_BMSK);
+	__write_register(device, CVP_WRAPPER_INTR_MASK, mask_val);
+	dprintk(CVP_REG, "Init irq: reg: %x, mask value %x\n",
+		CVP_WRAPPER_INTR_MASK, mask_val);
+
+	mask_val = 0;
+	mask_val = __read_register(device, CVP_SS_IRQ_MASK);
+	mask_val &= ~(CVP_SS_INTR_BMASK);
+	__write_register(device, CVP_SS_IRQ_MASK, mask_val);
+	dprintk(CVP_REG, "Init irq_wd: reg: %x, mask value %x\n",
+			CVP_SS_IRQ_MASK, mask_val);
+}
+
+static void setup_dsp_uc_memmap_vpu5(struct iris_hfi_device *device)
+{
+	/* initialize DSP QTBL & UCREGION with CPU queues */
+	__write_register(device, HFI_DSP_QTBL_ADDR,
+		(u32)device->dsp_iface_q_table.align_device_addr);
+	__write_register(device, HFI_DSP_UC_REGION_ADDR,
+		(u32)device->dsp_iface_q_table.align_device_addr);
+	__write_register(device, HFI_DSP_UC_REGION_SIZE,
+		device->dsp_iface_q_table.mem_data.size);
+}
+
+static void clock_config_on_enable_vpu5(struct iris_hfi_device *device)
+{
+	__write_register(device, CVP_WRAPPER_CPU_CLOCK_CONFIG, 0);
+}
+
+static int __set_ubwc_config(struct iris_hfi_device *device)
+{
+	u8 packet[CVP_IFACEQ_VAR_SMALL_PKT_SIZE];
+	int rc = 0;
+
+	struct cvp_hfi_cmd_sys_set_property_packet *pkt =
+		(struct cvp_hfi_cmd_sys_set_property_packet *) &packet;
+
+	if (!device->res->ubwc_config)
+		return 0;
+
+	rc = call_hfi_pkt_op(device, sys_ubwc_config, pkt,
+		device->res->ubwc_config);
+	if (rc) {
+		dprintk(CVP_WARN,
+			"ubwc config setting to FW failed\n");
+		rc = -ENOTEMPTY;
+		goto fail_to_set_ubwc_config;
+	}
+
+	if (__iface_cmdq_write(device, pkt)) {
+		rc = -ENOTEMPTY;
+		goto fail_to_set_ubwc_config;
+	}
+
+fail_to_set_ubwc_config:
+	return rc;
+}
+
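+/*
+ * Controller power-on sequence: enable the "cvp" regulator and sleep
+ * clock, pulse the AXI and core resets, then enable the gcc_video_axi1
+ * and cvp_clk branch clocks.
+ */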
+static int __power_on_controller(struct iris_hfi_device *device)
+{
+	int rc = 0;
+
+	rc = __enable_regulator(device, "cvp");
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to enable ctrler: %d\n", rc);
+		return rc;
+	}
+
+	rc = msm_cvp_prepare_enable_clk(device, "sleep_clk");
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to enable sleep clk: %d\n", rc);
+		goto fail_reset_clks;
+	}
+
+	rc = call_iris_op(device, reset_control_assert_name, device, "cvp_axi_reset");
+	if (rc)
+		dprintk(CVP_ERR, "%s: assert cvp_axi_reset failed\n", __func__);
+
+	rc = call_iris_op(device, reset_control_assert_name, device, "cvp_core_reset");
+	if (rc)
+		dprintk(CVP_ERR, "%s: assert cvp_core_reset failed\n", __func__);
+	/* hold the resets asserted briefly before deasserting */
+	usleep_range(300, 400);
+
+	rc = call_iris_op(device, reset_control_deassert_name, device, "cvp_axi_reset");
+	if (rc)
+		dprintk(CVP_ERR, "%s: de-assert cvp_axi_reset failed\n", __func__);
+	rc = call_iris_op(device, reset_control_deassert_name, device, "cvp_core_reset");
+	if (rc)
+		dprintk(CVP_ERR, "%s: de-assert cvp_core_reset failed\n", __func__);
+
+	rc = msm_cvp_prepare_enable_clk(device, "gcc_video_axi1");
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to enable axi1 clk: %d\n", rc);
+		goto fail_reset_clks;
+	}
+
+	rc = msm_cvp_prepare_enable_clk(device, "cvp_clk");
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to enable cvp_clk: %d\n", rc);
+		goto fail_enable_clk;
+	}
+
+	dprintk(CVP_PWR, "EVA controller powered on\n");
+	return 0;
+
+fail_enable_clk:
+	msm_cvp_disable_unprepare_clk(device, "gcc_video_axi1");
+fail_reset_clks:
+	__disable_regulator(device, "cvp");
+	return rc;
+}
+
+static int __power_on_core(struct iris_hfi_device *device)
+{
+	int rc = 0;
+
+	rc = __enable_regulator(device, "cvp-core");
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to enable core: %d\n", rc);
+		return rc;
+	}
+
+	rc = msm_cvp_prepare_enable_clk(device, "video_cc_mvs1_clk_src");
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to enable video_cc_mvs1_clk_src:%d\n",
+			rc);
+		__disable_regulator(device, "cvp-core");
+		return rc;
+	}
+
+	rc = msm_cvp_prepare_enable_clk(device, "core_clk");
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to enable core_clk: %d\n", rc);
+		__disable_regulator(device, "cvp-core");
+		return rc;
+	}
+
+/*#ifdef CONFIG_EVA_PINEAPPLE
+	__write_register(device, CVP_AON_WRAPPER_CVP_NOC_ARCG_CONTROL, 0);
+	__write_register(device, CVP_NOC_RCGCONTROLLER_HYSTERESIS_LOW, 0x2f);
+	__write_register(device, CVP_NOC_RCG_VNOC_NOC_CLK_FORCECLOCKON_LOW, 1);
+	__write_register(device, CVP_NOC_RCGCONTROLLER_MAINCTL_LOW, 1);
+	usleep_range(50, 100);
+	__write_register(device, CVP_NOC_RCG_VNOC_NOC_CLK_FORCECLOCKON_LOW, 0);
+#endif*/
+	dprintk(CVP_PWR, "EVA core powered on\n");
+	return 0;
+}
+
+static int __iris_power_on(struct iris_hfi_device *device)
+{
+	int rc = 0;
+	u32 reg_gdsc, reg_cbcr, spare_val;
+
+	if (device->power_enabled)
+		return 0;
+
+	/* Vote for all hardware resources */
+	rc = __vote_buses(device, device->bus_vote.data,
+			device->bus_vote.data_count);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to vote buses, err: %d\n", rc);
+		goto fail_vote_buses;
+	}
+
+	rc = __power_on_controller(device);
+	if (rc)
+		goto fail_enable_controller;
+
+	rc = __power_on_core(device);
+	if (rc)
+		goto fail_enable_core;
+
+	rc = msm_cvp_scale_clocks(device);
+	if (rc) {
+		dprintk(CVP_WARN,
+			"Failed to scale clocks, perf may regress\n");
+		rc = 0;
+	} else {
+		dprintk(CVP_PWR, "Done with scaling\n");
+	}
+
+	/* Do not access registers before this point! */
+	device->power_enabled = true;
+
+	/* Thomas input to debug CPU NoC hang */
+	__write_register(device, CVP_NOC_SBM_FAULTINEN0_LOW, 0x1);
+	__write_register(device, CVP_NOC_ERR_MAINCTL_LOW_OFFS, 0x3);
+
+	/*
+	 * Re-program all of the registers that get reset as a result of
+	 * regulator_disable() and _enable()
+	 * calling below function requires CORE powered on
+	 */
+	rc = __set_registers(device);
+	if (rc)
+		goto fail_enable_core;
+
+	dprintk(CVP_CORE, "Done with register set\n");
+
+	reg_gdsc = __read_register(device, CVP_CC_MVS1_GDSCR);
+	reg_cbcr = __read_register(device, CVP_CC_MVS1_CBCR);
+	if (!(reg_gdsc & 0x80000000) || (reg_cbcr & 0x80000000)) {
+		rc = -EINVAL;
+		dprintk(CVP_ERR, "CORE power on failed gdsc %x cbcr %x\n",
+			reg_gdsc, reg_cbcr);
+		goto fail_enable_core;
+	}
+
+	reg_gdsc = __read_register(device, CVP_CC_MVS1C_GDSCR);
+	reg_cbcr = __read_register(device, CVP_CC_MVS1C_CBCR);
+	if (!(reg_gdsc & 0x80000000) || (reg_cbcr & 0x80000000)) {
+		rc = -EINVAL;
+		dprintk(CVP_ERR, "CTRL power on failed gdsc %x cbcr %x\n",
+			reg_gdsc, reg_cbcr);
+		goto fail_enable_core;
+	}
+
+	spare_val = __read_register(device, CVP_AON_WRAPPER_SPARE);
+	if ((spare_val & 0x2) != 0) {
+		usleep_range(2000, 3000);
+		spare_val = __read_register(device, CVP_AON_WRAPPER_SPARE);
+		if ((spare_val & 0x2) != 0) {
+			dprintk(CVP_ERR, "WRAPPER_SPARE non-zero %#x\n", spare_val);
+			rc = -EINVAL;
+			goto fail_enable_core;
+		}
+	}
+
+	call_iris_op(device, interrupt_init, device);
+	dprintk(CVP_CORE, "Done with interrupt enabling\n");
+	device->intr_status = 0;
+	enable_irq(device->cvp_hal_data->irq);
+	__write_register(device,
+		CVP_WRAPPER_DEBUG_BRIDGE_LPI_CONTROL, 0x7);
+	pr_info_ratelimited(CVP_DBG_TAG "cvp (eva) powered on\n", "pwr");
+	return 0;
+
+fail_enable_core:
+	__power_off_controller(device);
+fail_enable_controller:
+	__unvote_buses(device);
+fail_vote_buses:
+	device->power_enabled = false;
+	return rc;
+}
+
+static inline int __suspend(struct iris_hfi_device *device)
+{
+	int rc = 0;
+
+	if (!device) {
+		dprintk(CVP_ERR, "Invalid params: %pK\n", device);
+		return -EINVAL;
+	} else if (!device->power_enabled) {
+		dprintk(CVP_PWR, "Power already disabled\n");
+		return 0;
+	}
+
+	dprintk(CVP_PWR, "Entering suspend\n");
+
+	rc = __tzbsp_set_cvp_state(TZ_SUBSYS_STATE_SUSPEND);
+	if (rc) {
+		dprintk(CVP_WARN, "Failed to suspend cvp core %d\n", rc);
+		goto err_tzbsp_suspend;
+	}
+
+	__disable_subcaches(device);
+
+	call_iris_op(device, power_off, device);
+
+	if (device->res->pm_qos.latency_us && device->res->pm_qos.pm_qos_hdls)
+		cvp_pm_qos_update(device, false);
+
+	return rc;
+
+err_tzbsp_suspend:
+	return rc;
+}
+
+static void __print_sidebandmanager_regs(struct iris_hfi_device *device)
+{
+	u32 sbm_ln0_low, axi_cbcr, val;
+	u32 main_sbm_ln0_low = 0xdeadbeef, main_sbm_ln0_high = 0xdeadbeef;
+	u32 main_sbm_ln1_high = 0xdeadbeef, cpu_cs_x2rpmh;
+	int rc;
+
+	sbm_ln0_low =
+		__read_register(device, CVP_NOC_SBM_SENSELN0_LOW);
+
+	cpu_cs_x2rpmh = __read_register(device, CVP_CPU_CS_X2RPMh);
+
+	__write_register(device, CVP_CPU_CS_X2RPMh,
+			(cpu_cs_x2rpmh | CVP_CPU_CS_X2RPMh_SWOVERRIDE_BMSK));
+	usleep_range(500, 1000);
+	cpu_cs_x2rpmh = __read_register(device, CVP_CPU_CS_X2RPMh);
+	if (!(cpu_cs_x2rpmh & CVP_CPU_CS_X2RPMh_SWOVERRIDE_BMSK)) {
+		dprintk(CVP_WARN,
+			"failed set CVP_CPU_CS_X2RPMH mask %x\n",
+			cpu_cs_x2rpmh);
+		goto exit;
+	}
+
+	axi_cbcr = __read_gcc_register(device, CVP_GCC_VIDEO_AXI1_CBCR);
+	if (axi_cbcr & 0x80000000) {
+		dprintk(CVP_WARN, "failed to turn on AXI clock %x\n",
+			axi_cbcr);
+		goto exit;
+	}
+
+	/* Added by Thomas to debug CPU NoC hang */
+	val = __read_register(device, CVP_NOC_ERR_ERRVLD_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRVLD_LOW %#x\n", val);
+
+	val = __read_register(device, CVP_NOC_SBM_FAULTINSTATUS0_LOW);
+	dprintk(CVP_ERR, "CVP_NOC_SBM_FAULTINSTATUS0_LOW %#x\n", val);
+
+	val = __read_register(device, CVP_NOC_ERR_ERRLOG0_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRLOG0_LOW %#x\n", val);
+
+	val = __read_register(device, CVP_NOC_ERR_ERRLOG0_HIGH_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRLOG0_HIGH %#x\n", val);
+
+	val = __read_register(device, CVP_NOC_ERR_ERRLOG1_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRLOG1_LOW %#x\n", val);
+
+	val = __read_register(device, CVP_NOC_ERR_ERRLOG1_HIGH_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRLOG1_HIGH %#x\n", val);
+
+	val = __read_register(device, CVP_NOC_ERR_ERRLOG2_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRLOG2_LOW %#x\n", val);
+
+	val = __read_register(device, CVP_NOC_ERR_ERRLOG2_HIGH_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRLOG2_HIGH %#x\n", val);
+
+	val = __read_register(device, CVP_NOC_ERR_ERRLOG3_LOW_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRLOG3_LOW %#x\n", val);
+
+	val = __read_register(device, CVP_NOC_ERR_ERRLOG3_HIGH_OFFS);
+	dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRLOG3_HIGH %#x\n", val);
+
+	/* end of addition */
+
+	rc = call_iris_op(device, reset_control_acquire_name, device, "cvp_xo_reset");
+	if (rc) {
+		dprintk(CVP_WARN, "%s Fail acquire xo_reset\n", __func__);
+		goto exit;
+	}
+	main_sbm_ln0_low = __read_register(device,
+						CVP_NOC_MAIN_SIDEBANDMANAGER_SENSELN0_LOW +
+						device->res->noc_main_sidebandmanager_offset);
+	main_sbm_ln0_high = __read_register(device,
+						CVP_NOC_MAIN_SIDEBANDMANAGER_SENSELN0_HIGH +
+						device->res->noc_main_sidebandmanager_offset);
+	main_sbm_ln1_high = __read_register(device,
+						CVP_NOC_MAIN_SIDEBANDMANAGER_SENSELN1_HIGH +
+						device->res->noc_main_sidebandmanager_offset);
+	call_iris_op(device, reset_control_release_name, device, "cvp_xo_reset");
+
+exit:
+	cpu_cs_x2rpmh = cpu_cs_x2rpmh & (~CVP_CPU_CS_X2RPMh_SWOVERRIDE_BMSK);
+	__write_register(device, CVP_CPU_CS_X2RPMh, cpu_cs_x2rpmh);
+	dprintk(CVP_WARN, "Sidebandmanager regs %x %x %x %x %x\n",
+		sbm_ln0_low, main_sbm_ln0_low,
+		main_sbm_ln0_high, main_sbm_ln1_high,
+		cpu_cs_x2rpmh);
+}
+
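+/*
+ * Request low-power state for the CPU NoC and poll the Q-channel
+ * handshake (QACCEPT/QDENY/QACTIVE) in the LPI status register,
+ * re-issuing the request until it is accepted or max_count is reached.
+ */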
+static void __enter_cpu_noc_lpi(struct iris_hfi_device *device)
+{
+	u32 lpi_status, count = 0, max_count = 2000;
+
+	/* New addition to put CPU/Tensilica to low power */
+	count = 0;
+	__write_register(device, CVP_WRAPPER_CPU_NOC_LPI_CONTROL, 0x1);
+	while (count < max_count) {
+		lpi_status = __read_register(device, CVP_WRAPPER_CPU_NOC_LPI_STATUS);
+		if ((lpi_status & BIT(1)) || ((lpi_status & BIT(2)) && (!(lpi_status & BIT(0))))) {
+			/*
+			 * If QDENY == true, or
+			 * If QACTIVE == true && QACCEPT == false
+			 * Try again
+			 */
+			__write_register(device, CVP_WRAPPER_CPU_NOC_LPI_CONTROL, 0x0);
+			usleep_range(10, 20);
+			__write_register(device, CVP_WRAPPER_CPU_NOC_LPI_CONTROL, 0x1);
+			usleep_range(1000, 1200);
+			count++;
+		} else {
+			break;
+		}
+	}
+
+	dprintk(CVP_PWR,
+		"%s, CPU Noc: lpi_status %x (count %d)\n", __func__, lpi_status, count);
+	if (count == max_count) {
+		u32 pc_ready, wfi_status;
+
+		wfi_status = __read_register(device, CVP_WRAPPER_CPU_STATUS);
+		pc_ready = __read_register(device, CVP_CTRL_STATUS);
+
+		dprintk(CVP_WARN,
+			"%s, CPU NOC not in qaccept status %x %x %x\n",
+			__func__, lpi_status, wfi_status, pc_ready);
+
+		__print_sidebandmanager_regs(device);
+	}
+}
+
+static int __power_off_controller(struct iris_hfi_device *device)
+{
+	u32 lpi_status, count = 0, max_count = 1000;
+	u32 spare_val, spare_status;
+	int rc;
+
+	/* HPG 6.2.2 Step 1  */
+	__write_register(device, CVP_CPU_CS_X2RPMh, 0x3);
+
+	/* HPG 6.2.2 Step 2, noc to low power */
+	__enter_cpu_noc_lpi(device);
+
+	/* HPG 6.2.2 Step 3, debug bridge to low power BYPASSED */
+
+	/* HPG 6.2.2 Step 4, debug bridge to lpi release */
+	__write_register(device,
+		CVP_WRAPPER_DEBUG_BRIDGE_LPI_CONTROL, 0x0);
+	lpi_status = 0x1;
+	count = 0;
+	while (lpi_status && count < max_count) {
+		lpi_status = __read_register(device,
+				 CVP_WRAPPER_DEBUG_BRIDGE_LPI_STATUS);
+		usleep_range(50, 100);
+		count++;
+	}
+	dprintk(CVP_PWR,
+		"DBLP Release: lpi_status %d(count %d)\n",
+		lpi_status, count);
+	if (count == max_count) {
+		dprintk(CVP_WARN,
+			"DBLP Release: lpi_status %x\n", lpi_status);
+	}
+
+	/* PDXFIFO reset: addition for Kailua / Lanai */
+
+	__write_register(device, CVP_WRAPPER_AXI_CLOCK_CONFIG, 0x3);
+	__write_register(device, CVP_WRAPPER_QNS4PDXFIFO_RESET, 0x1);
+	__write_register(device, CVP_WRAPPER_QNS4PDXFIFO_RESET, 0x0);
+	__write_register(device, CVP_WRAPPER_AXI_CLOCK_CONFIG, 0x0);
+
+	/* HPG 6.2.2 Step 5 */
+	msm_cvp_disable_unprepare_clk(device, "cvp_clk");
+
+	rc = call_iris_op(device, reset_control_assert_name, device, "cvp_axi_reset");
+	if (rc)
+		dprintk(CVP_ERR, "%s: assert cvp_axi_reset failed\n", __func__);
+
+	rc = call_iris_op(device, reset_control_assert_name, device, "cvp_core_reset");
+	if (rc)
+		dprintk(CVP_ERR, "%s: assert cvp_core_reset failed\n", __func__);
+	/* hold the resets asserted briefly before deasserting */
+	usleep_range(1000, 1050);
+
+	rc = call_iris_op(device, reset_control_deassert_name, device, "cvp_axi_reset");
+	if (rc)
+		dprintk(CVP_ERR, "%s: de-assert cvp_axi_reset failed\n", __func__);
+
+	rc = call_iris_op(device, reset_control_deassert_name, device, "cvp_core_reset");
+	if (rc)
+		dprintk(CVP_ERR, "%s: de-assert cvp_core_reset failed\n", __func__);
+
+	/* disable EVA NoC clock */
+	__write_register(device, CVP_AON_WRAPPER_CVP_NOC_CORE_CLK_CONTROL, 0x1);
+
+	/* enable EVA NoC reset */
+	__write_register(device, CVP_AON_WRAPPER_CVP_NOC_CORE_SW_RESET, 0x1);
+
+	rc = call_iris_op(device, reset_control_acquire_name, device, "cvp_xo_reset");
+	if (rc) {
+		dprintk(CVP_ERR, "FATAL ERROR, HPG step 17 to 20 will be bypassed\n");
+		goto skip_xo_reset;
+	}
+	spare_status = 0x1;
+	while (spare_status != 0x0) {
+		spare_val = __read_register(device, CVP_AON_WRAPPER_SPARE);
+		spare_status = spare_val & 0x2;
+		usleep_range(50, 100);
+	}
+	__write_register(device, CVP_AON_WRAPPER_SPARE, 0x1);
+	rc = call_iris_op(device, reset_control_assert_name, device, "cvp_xo_reset");
+	if (rc)
+		dprintk(CVP_ERR, "%s: assert cvp_xo_reset failed\n", __func__);
+
+	/* de-assert EVA_NoC reset */
+	__write_register(device, CVP_AON_WRAPPER_CVP_NOC_CORE_SW_RESET, 0x0);
+
+	/* de-assert EVA video_cc XO reset and enable video_cc XO clock after 80us */
+	usleep_range(80, 100);
+	rc = call_iris_op(device, reset_control_deassert_name, device, "cvp_xo_reset");
+	if (rc)
+		dprintk(CVP_ERR, "%s: de-assert cvp_xo_reset failed\n", __func__);
+
+	/* clear XO mask bit - this step was missing in previous sequence */
+	__write_register(device, CVP_AON_WRAPPER_SPARE, 0x0);
+
+	call_iris_op(device, reset_control_release_name, device, "cvp_xo_reset");
+
+skip_xo_reset:
+	/* enable EVA NoC clock */
+	__write_register(device, CVP_AON_WRAPPER_CVP_NOC_CORE_CLK_CONTROL, 0x0);
+
+	/* De-assert EVA_CTL Force Sleep Retention */
+
+	usleep_range(400, 500);
+
+	/* HPG 6.2.2 Step 6 */
+	__disable_regulator(device, "cvp");
+
+	/* HPG 6.2.2 Step 7 */
+	rc = msm_cvp_disable_unprepare_clk(device, "gcc_video_axi1");
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to enable axi1 clk: %d\n", rc);
+	}
+
+	rc = msm_cvp_disable_unprepare_clk(device, "sleep_clk");
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to disable sleep clk: %d\n", rc);
+	}
+
+	return 0;
+}
+
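+/*
+ * Core power-off sequence. If firmware has already collapsed the core
+ * (GDSC off) or hardware-controlled PC is disabled, only the regulator
+ * and clocks are dropped; otherwise wait for core idle and the core NoC
+ * LPI handshake before collapsing the core.
+ */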
+static int __power_off_core(struct iris_hfi_device *device)
+{
+	u32 reg_status = 0, lpi_status, config, value = 0, count = 0;
+	u32 warn_flag = 0, max_count = 10;
+
+	value = __read_register(device, CVP_CC_MVS1_GDSCR);
+	if (!(value & 0x80000000)) {
+		/*
+		 * Core has been powered off by f/w.
+		 * Check NOC reset registers to ensure
+		 * NO outstanding NoC transactions
+		 */
+		value = __read_register(device, CVP_NOC_RESET_ACK);
+		if (value) {
+			dprintk(CVP_WARN,
+				"Core off with NOC RESET ACK non-zero %x\n",
+				value);
+			__print_sidebandmanager_regs(device);
+		}
+		__disable_regulator(device, "cvp-core");
+		msm_cvp_disable_unprepare_clk(device, "core_clk");
+		msm_cvp_disable_unprepare_clk(device, "video_cc_mvs1_clk_src");
+		return 0;
+	} else if (!(value & 0x2)) {
+		/*
+		 * HW_CONTROL PC disabled, then core is powered on for
+		 * CVP NoC access
+		 */
+		__disable_regulator(device, "cvp-core");
+		msm_cvp_disable_unprepare_clk(device, "core_clk");
+		msm_cvp_disable_unprepare_clk(device, "video_cc_mvs1_clk_src");
+		return 0;
+	}
+
+	dprintk(CVP_PWR, "Driver controls Core power off now\n");
+	/*
+	 * check to make sure core clock branch enabled else
+	 * we cannot read core idle register
+	 */
+	config = __read_register(device, CVP_WRAPPER_CORE_CLOCK_CONFIG);
+	if (config) {
+		dprintk(CVP_PWR,
+		"core clock config not enabled, enable it to access core\n");
+		__write_register(device, CVP_WRAPPER_CORE_CLOCK_CONFIG, 0);
+	}
+
+	/*
+	 * add MNoC idle check before collapsing MVS1 per HPG update
+	 * poll for NoC DMA idle -> HPG 6.2.1
+	 */
+	do {
+		value = __read_register(device, CVP_SS_IDLE_STATUS);
+		if (value & 0x400000)
+			break;
+		else
+			usleep_range(1000, 2000);
+		count++;
+	} while (count < max_count);
+
+	if (count == max_count) {
+		dprintk(CVP_WARN, "Core fail to go idle %x\n", value);
+		warn_flag = 1;
+	}
+
+	count = 0;
+	max_count = 1000;
+	__write_register(device, CVP_AON_WRAPPER_CVP_NOC_LPI_CONTROL, 0x1);
+	while (!reg_status && count < max_count) {
+		lpi_status =
+			 __read_register(device,
+				CVP_AON_WRAPPER_CVP_NOC_LPI_STATUS);
+		reg_status = lpi_status & BIT(0);
+		/* Wait for Core noc lpi status to be set */
+		usleep_range(50, 100);
+		count++;
+	}
+	dprintk(CVP_PWR,
+		"Core Noc: lpi_status %x noc_status %x (count %d)\n",
+		lpi_status, reg_status, count);
+	if (count == max_count) {
+		u32 pc_ready, wfi_status;
+
+		wfi_status = __read_register(device, CVP_WRAPPER_CPU_STATUS);
+		pc_ready = __read_register(device, CVP_CTRL_STATUS);
+
+		dprintk(CVP_WARN,
+			"Core NOC not in qaccept status %x %x %x %x\n",
+			reg_status, lpi_status, wfi_status, pc_ready);
+
+		__print_sidebandmanager_regs(device);
+	}
+
+	__write_register(device, CVP_AON_WRAPPER_CVP_NOC_LPI_CONTROL, 0x0);
+
+	if (warn_flag)
+		__print_sidebandmanager_regs(device);
+
+	/* Reset both sides of 2 ahb2ahb_bridges (TZ and non-TZ) */
+	__write_register(device, CVP_AHB_BRIDGE_SYNC_RESET, 0x3);
+	__write_register(device, CVP_AHB_BRIDGE_SYNC_RESET, 0x2);
+	__write_register(device, CVP_AHB_BRIDGE_SYNC_RESET, 0x0);
+
+	__write_register(device, CVP_WRAPPER_CORE_CLOCK_CONFIG, config);
+
+	__disable_hw_power_collapse(device);
+	usleep_range(100, 200);
+	__disable_regulator(device, "cvp-core");
+	msm_cvp_disable_unprepare_clk(device, "core_clk");
+	msm_cvp_disable_unprepare_clk(device, "video_cc_mvs1_clk_src");
+	return 0;
+}
+
+static void power_off_iris2(struct iris_hfi_device *device)
+{
+	if (!device->power_enabled || !device->res->sw_power_collapsible)
+		return;
+
+	if (!(device->intr_status & CVP_WRAPPER_INTR_STATUS_A2HWD_BMSK))
+		disable_irq_nosync(device->cvp_hal_data->irq);
+	device->intr_status = 0;
+
+	__power_off_core(device);
+
+	__power_off_controller(device);
+
+	if (__unvote_buses(device))
+		dprintk(CVP_WARN, "Failed to unvote for buses\n");
+
+	/* Do not access registers after this point! */
+	device->power_enabled = false;
+	pr_info(CVP_DBG_TAG "cvp (eva) power collapsed\n", "pwr");
+}
+
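+/*
+ * Resume from power collapse: power the hardware back on, restore the
+ * UC region mapping, move the TZ state to RESUME, wait for firmware
+ * boot, then reapply threshold registers, PM QoS, debug level and
+ * subcache settings before resuming the DSP.
+ */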
+static inline int __resume(struct iris_hfi_device *device)
+{
+	int rc = 0;
+	struct msm_cvp_core *core;
+
+	if (!device) {
+		dprintk(CVP_ERR, "Invalid params: %pK\n", device);
+		return -EINVAL;
+	} else if (device->power_enabled) {
+		goto exit;
+	} else if (!__core_in_valid_state(device)) {
+		dprintk(CVP_PWR, "iris_hfi_device in deinit state.");
+		return -EINVAL;
+	}
+
+	core = cvp_driver->cvp_core;
+
+	dprintk(CVP_PWR, "Resuming from power collapse\n");
+	rc = __iris_power_on(device);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to power on cvp\n");
+		goto err_iris_power_on;
+	}
+
+	__setup_ucregion_memory_map(device);
+
+	/* RUMI: set CVP_CTRL_INIT register to disable synx in FW */
+
+	/* Reboot the firmware */
+	rc = __tzbsp_set_cvp_state(TZ_SUBSYS_STATE_RESUME);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to resume cvp core %d\n", rc);
+		goto err_set_cvp_state;
+	}
+
+	/* Wait for boot completion */
+	rc = __boot_firmware(device);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to reset cvp core\n");
+		goto err_reset_core;
+	}
+
+	/*
+	 * Workaround for an H/W bug: these registers must be reprogrammed
+	 * once the firmware is out of reset
+	 */
+	__set_threshold_registers(device);
+
+	if (device->res->pm_qos.latency_us && device->res->pm_qos.pm_qos_hdls)
+		cvp_pm_qos_update(device, true);
+
+	__sys_set_debug(device, msm_cvp_fw_debug);
+
+	__enable_subcaches(device);
+	__set_subcaches(device);
+
+	__dsp_resume(device);
+
+	dprintk(CVP_PWR, "Resumed from power collapse\n");
+exit:
+	/* Don't reset skip_pc_count for SYS_PC_PREP cmd */
+	if (device->last_packet_type != HFI_CMD_SYS_PC_PREP)
+		device->skip_pc_count = 0;
+	return rc;
+err_reset_core:
+	__tzbsp_set_cvp_state(TZ_SUBSYS_STATE_SUSPEND);
+err_set_cvp_state:
+	call_iris_op(device, power_off, device);
+err_iris_power_on:
+	dprintk(CVP_ERR, "Failed to resume from power collapse\n");
+	return rc;
+}
+
+static int __power_on_init(struct iris_hfi_device *device)
+{
+	int rc = 0;
+
+	/* Initialize resources */
+	rc = __init_resources(device, device->res);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to init resources: %d\n", rc);
+		return rc;
+	}
+
+	rc = __initialize_packetization(device);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to initialize packetization\n");
+		goto fail_iris_init;
+	}
+
+	rc = __iris_power_on(device);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to power on iris in in load_fw\n");
+		goto fail_iris_init;
+	}
+
+	return rc;
+fail_iris_init:
+	__deinit_resources(device);
+	return rc;
+}
+
+static int __load_fw(struct iris_hfi_device *device)
+{
+	int rc = 0;
+
+	if ((!device->res->use_non_secure_pil && !device->res->firmware_base)
+		|| device->res->use_non_secure_pil) {
+		rc = load_cvp_fw_impl(device);
+		if (rc)
+			goto fail_load_fw;
+	}
+	return rc;
+
+fail_load_fw:
+	call_iris_op(device, power_off, device);
+	return rc;
+}
+
+static void __unload_fw(struct iris_hfi_device *device)
+{
+	if (!device->resources.fw.cookie)
+		return;
+
+	cancel_delayed_work(&iris_hfi_pm_work);
+	if (device->state != IRIS_STATE_DEINIT)
+		flush_workqueue(device->iris_pm_workq);
+
+	/* New addition to put CPU/Tensilica to low power */
+	__enter_cpu_noc_lpi(device);
+
+	unload_cvp_fw_impl(device);
+	__interface_queues_release(device);
+	call_iris_op(device, power_off, device);
+	__deinit_resources(device);
+
+	dprintk(CVP_WARN, "Firmware unloaded\n");
+}
+
+static int iris_hfi_get_fw_info(void *dev, struct cvp_hal_fw_info *fw_info)
+{
+	int i = 0;
+	struct iris_hfi_device *device = dev;
+
+	if (!device || !fw_info) {
+		dprintk(CVP_ERR,
+			"%s Invalid parameter: device = %pK fw_info = %pK\n",
+			__func__, device, fw_info);
+		return -EINVAL;
+	}
+
+	mutex_lock(&device->lock);
+
+	while (cvp_driver->fw_version[i++] != 'V' && i < CVP_VERSION_LENGTH)
+		;
+
+	if (i == CVP_VERSION_LENGTH - 1) {
+		dprintk(CVP_WARN, "Iris version string is not proper\n");
+		fw_info->version[0] = '\0';
+		goto fail_version_string;
+	}
+
+	memcpy(&fw_info->version[0], &cvp_driver->fw_version[0],
+			CVP_VERSION_LENGTH);
+	fw_info->version[CVP_VERSION_LENGTH - 1] = '\0';
+
+fail_version_string:
+	dprintk(CVP_CORE, "F/W version retrieved : %s\n", fw_info->version);
+	fw_info->base_addr = device->cvp_hal_data->firmware_base;
+	fw_info->register_base = device->res->register_base;
+	fw_info->register_size = device->cvp_hal_data->register_size;
+	fw_info->irq = device->cvp_hal_data->irq;
+
+	mutex_unlock(&device->lock);
+	return 0;
+}
+
+static int iris_hfi_get_core_capabilities(void *dev)
+{
+	dprintk(CVP_CORE, "%s not supported yet!\n", __func__);
+	return 0;
+}
+
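+/* Sub-client (MID) names decoded from NoC ERRLOG3 by __print_reg_details() */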
+static const char * const mid_names[16] = {
+	"CVP_FW",
+	"ARP_DATA",
+	"CVP_MPU_PIXEL",
+	"CVP_MPU_NON_PIXEL",
+	"CVP_FDU_PIXEL",
+	"CVP_FDU_NON_PIXEL",
+	"CVP_GCE_PIXEL",
+	"CVP_GCE_NON_PIXEL",
+	"CVP_TOF_PIXEL",
+	"CVP_TOF_NON_PIXEL",
+	"CVP_VADL_PIXEL",
+	"CVP_VADL_NON_PIXEL",
+	"CVP_RGE_NON_PIXEL",
+	"CVP_CDM",
+	"Invalid",
+	"Invalid"
+};
+
+static void __print_reg_details(u32 val)
+{
+	u32 mid, sid;
+
+	mid = (val >> 5) & 0xF;
+	sid = (val >> 2) & 0x7;
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRLOG3_LOW:     %#x\n", val);
+	dprintk(CVP_ERR, "Sub-client:%s, SID: %d\n", mid_names[mid], sid);
+}
+
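+/* Print one NoC error register and optionally capture it into the SSR log */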
+static void __err_log(bool logging, u32 *data, const char *name, u32 val)
+{
+	if (logging)
+		*data = val;
+
+	dprintk(CVP_ERR, "%s: %#x\n", name, val);
+}
+
+static void __noc_error_info_iris2(struct iris_hfi_device *device)
+{
+	struct msm_cvp_core *core;
+	struct cvp_noc_log *noc_log;
+	u32 val = 0, regi, regii, regiii;
+	bool log_required = false;
+	int rc;
+
+	core = cvp_driver->cvp_core;
+
+	if (!core->ssr_count && core->resources.max_ssr_allowed > 1)
+		log_required = true;
+
+	noc_log = &core->log.noc_log;
+
+	if (noc_log->used) {
+		dprintk(CVP_WARN, "Data already in NoC log, skip logging\n");
+		return;
+	}
+	noc_log->used = 1;
+
+	__disable_hw_power_collapse(device);
+
+	val = __read_register(device, CVP_CC_MVS1_GDSCR);
+	regi = __read_register(device, CVP_AON_WRAPPER_CVP_NOC_CORE_CLK_CONTROL);
+	regii = __read_register(device, CVP_CC_MVS1_CBCR);
+	regiii = __read_register(device, CVP_WRAPPER_CORE_CLOCK_CONFIG);
+	dprintk(CVP_ERR, "noc reg check: %#x %#x %#x %#x\n",
+		val, regi, regii, regiii);
+
+	val = __read_register(device, CVP_NOC_ERR_SWID_LOW_OFFS);
+	__err_log(log_required, &noc_log->err_ctrl_swid_low,
+			"CVP_NOC_ERL_MAIN_SWID_LOW", val);
+	val = __read_register(device, CVP_NOC_ERR_SWID_HIGH_OFFS);
+	__err_log(log_required, &noc_log->err_ctrl_swid_high,
+			"CVP_NOC_ERL_MAIN_SWID_HIGH", val);
+	val = __read_register(device, CVP_NOC_ERR_MAINCTL_LOW_OFFS);
+	__err_log(log_required, &noc_log->err_ctrl_mainctl_low,
+			"CVP_NOC_ERL_MAIN_MAINCTL_LOW", val);
+	val = __read_register(device, CVP_NOC_ERR_ERRVLD_LOW_OFFS);
+	__err_log(log_required, &noc_log->err_ctrl_errvld_low,
+			"CVP_NOC_ERL_MAIN_ERRVLD_LOW", val);
+	val = __read_register(device, CVP_NOC_ERR_ERRCLR_LOW_OFFS);
+	__err_log(log_required, &noc_log->err_ctrl_errclr_low,
+			"CVP_NOC_ERL_MAIN_ERRCLR_LOW", val);
+	val = __read_register(device, CVP_NOC_ERR_ERRLOG0_LOW_OFFS);
+	__err_log(log_required, &noc_log->err_ctrl_errlog0_low,
+			 "CVP_NOC_ERL_MAIN_ERRLOG0_LOW", val);
+	val = __read_register(device, CVP_NOC_ERR_ERRLOG0_HIGH_OFFS);
+	__err_log(log_required, &noc_log->err_ctrl_errlog0_high,
+			"CVP_NOC_ERL_MAIN_ERRLOG0_HIGH", val);
+	val = __read_register(device, CVP_NOC_ERR_ERRLOG1_LOW_OFFS);
+	__err_log(log_required, &noc_log->err_ctrl_errlog1_low,
+			"CVP_NOC_ERL_MAIN_ERRLOG1_LOW", val);
+	val = __read_register(device, CVP_NOC_ERR_ERRLOG1_HIGH_OFFS);
+	__err_log(log_required, &noc_log->err_ctrl_errlog1_high,
+			"CVP_NOC_ERL_MAIN_ERRLOG1_HIGH", val);
+	val = __read_register(device, CVP_NOC_ERR_ERRLOG2_LOW_OFFS);
+	__err_log(log_required, &noc_log->err_ctrl_errlog2_low,
+			"CVP_NOC_ERL_MAIN_ERRLOG2_LOW", val);
+	val = __read_register(device, CVP_NOC_ERR_ERRLOG2_HIGH_OFFS);
+	__err_log(log_required, &noc_log->err_ctrl_errlog2_high,
+			"CVP_NOC_ERL_MAIN_ERRLOG2_HIGH", val);
+	val = __read_register(device, CVP_NOC_ERR_ERRLOG3_LOW_OFFS);
+	__err_log(log_required, &noc_log->err_ctrl_errlog3_low,
+			"CVP_NOC_ERL_MAIN_ERRLOG3_LOW", val);
+	val = __read_register(device, CVP_NOC_ERR_ERRLOG3_HIGH_OFFS);
+	__err_log(log_required, &noc_log->err_ctrl_errlog3_high,
+			"CVP_NOC_ERL_MAIN_ERRLOG3_HIGH", val);
+
+	rc = call_iris_op(device, reset_control_acquire_name, device, "cvp_xo_reset");
+	if (rc) {
+		dprintk(CVP_WARN, "%s Fail acquire xo_reset\n", __func__);
+		return;
+	}
+
+	val = __read_register(device,
+			CVP_NOC_CORE_ERR_SWID_LOW_OFFS + device->res->noc_core_err_offset);
+	__err_log(log_required, &noc_log->err_core_swid_low,
+			"CVP_NOC__CORE_ERL_MAIN_SWID_LOW", val);
+	val = __read_register(device,
+			CVP_NOC_CORE_ERR_SWID_HIGH_OFFS + device->res->noc_core_err_offset);
+	__err_log(log_required, &noc_log->err_core_swid_high,
+			"CVP_NOC_CORE_ERL_MAIN_SWID_HIGH", val);
+	val = __read_register(device,
+			CVP_NOC_CORE_ERR_MAINCTL_LOW_OFFS + device->res->noc_core_err_offset);
+	__err_log(log_required, &noc_log->err_core_mainctl_low,
+			"CVP_NOC_CORE_ERL_MAIN_MAINCTL_LOW", val);
+	val = __read_register(device,
+			CVP_NOC_CORE_ERR_ERRVLD_LOW_OFFS + device->res->noc_core_err_offset);
+	__err_log(log_required, &noc_log->err_core_errvld_low,
+			"CVP_NOC_CORE_ERL_MAIN_ERRVLD_LOW", val);
+	val = __read_register(device,
+			CVP_NOC_CORE_ERR_ERRCLR_LOW_OFFS + device->res->noc_core_err_offset);
+	__err_log(log_required, &noc_log->err_core_errclr_low,
+			"CVP_NOC_CORE_ERL_MAIN_ERRCLR_LOW", val);
+	val = __read_register(device,
+			CVP_NOC_CORE_ERR_ERRLOG0_LOW_OFFS + device->res->noc_core_err_offset);
+	__err_log(log_required, &noc_log->err_core_errlog0_low,
+			"CVP_NOC_CORE_ERL_MAIN_ERRLOG0_LOW", val);
+	val = __read_register(device,
+			CVP_NOC_CORE_ERR_ERRLOG0_HIGH_OFFS + device->res->noc_core_err_offset);
+	__err_log(log_required, &noc_log->err_core_errlog0_high,
+			"CVP_NOC_CORE_ERL_MAIN_ERRLOG0_HIGH", val);
+	val = __read_register(device,
+			CVP_NOC_CORE_ERR_ERRLOG1_LOW_OFFS + device->res->noc_core_err_offset);
+	__err_log(log_required, &noc_log->err_core_errlog1_low,
+			"CVP_NOC_CORE_ERL_MAIN_ERRLOG1_LOW", val);
+	val = __read_register(device,
+			CVP_NOC_CORE_ERR_ERRLOG1_HIGH_OFFS + device->res->noc_core_err_offset);
+	__err_log(log_required, &noc_log->err_core_errlog1_high,
+			"CVP_NOC_CORE_ERL_MAIN_ERRLOG1_HIGH", val);
+	val = __read_register(device,
+			CVP_NOC_CORE_ERR_ERRLOG2_LOW_OFFS + device->res->noc_core_err_offset);
+	__err_log(log_required, &noc_log->err_core_errlog2_low,
+			"CVP_NOC_CORE_ERL_MAIN_ERRLOG2_LOW", val);
+	val = __read_register(device,
+			CVP_NOC_CORE_ERR_ERRLOG2_HIGH_OFFS + device->res->noc_core_err_offset);
+	__err_log(log_required, &noc_log->err_core_errlog2_high,
+			"CVP_NOC_CORE_ERL_MAIN_ERRLOG2_HIGH", val);
+	val = __read_register(device,
+			CVP_NOC_CORE_ERR_ERRLOG3_LOW_OFFS + device->res->noc_core_err_offset);
+	__err_log(log_required, &noc_log->err_core_errlog3_low,
+			"CORE ERRLOG3_LOW, below details", val);
+	__print_reg_details(val);
+	val = __read_register(device,
+			CVP_NOC_CORE_ERR_ERRLOG3_HIGH_OFFS + device->res->noc_core_err_offset);
+	__err_log(log_required, &noc_log->err_core_errlog3_high,
+			"CVP_NOC_CORE_ERL_MAIN_ERRLOG3_HIGH", val);
+	__write_register(device,
+			CVP_NOC_CORE_ERR_ERRCLR_LOW_OFFS + device->res->noc_core_err_offset, 0x1);
+	call_iris_op(device, reset_control_release_name, device, "cvp_xo_reset");
+#define CVP_SS_CLK_HALT 0x8
+#define CVP_SS_CLK_EN 0xC
+#define CVP_SS_ARP_TEST_BUS_CONTROL 0x700
+#define CVP_SS_ARP_TEST_BUS_REGISTER 0x704
+#define CVP_DMA_TEST_BUS_CONTROL 0x66A0
+#define CVP_DMA_TEST_BUS_REGISTER 0x66A4
+#define CVP_VPU_WRAPPER_CORE_CONFIG 0xB0088
+	__write_register(device, CVP_SS_CLK_HALT, 0);
+	__write_register(device, CVP_SS_CLK_EN, 0x3f);
+	__write_register(device, CVP_VPU_WRAPPER_CORE_CONFIG, 0);
+}
+
+static int iris_hfi_noc_error_info(void *dev)
+{
+	struct iris_hfi_device *device;
+
+	if (!dev) {
+		dprintk(CVP_ERR, "%s: null device\n", __func__);
+		return -EINVAL;
+	}
+	device = dev;
+
+	mutex_lock(&device->lock);
+	dprintk(CVP_ERR, "%s: non error information\n", __func__);
+
+	call_iris_op(device, noc_error_info, device);
+
+	mutex_unlock(&device->lock);
+
+	return 0;
+}
+
+static int __initialize_packetization(struct iris_hfi_device *device)
+{
+	int rc = 0;
+
+	if (!device || !device->res) {
+		dprintk(CVP_ERR, "%s - invalid param\n", __func__);
+		return -EINVAL;
+	}
+
+	device->packetization_type = HFI_PACKETIZATION_4XX;
+
+	device->pkt_ops = cvp_hfi_get_pkt_ops_handle(
+		device->packetization_type);
+	if (!device->pkt_ops) {
+		rc = -EINVAL;
+		dprintk(CVP_ERR, "Failed to get pkt_ops handle\n");
+	}
+
+	return rc;
+}
+
+void __init_cvp_ops(struct iris_hfi_device *device)
+{
+	device->hal_ops = &hal_ops;
+}
+
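+/*
+ * Allocate and set up a single iris_hfi_device: response and raw packet
+ * buffers, register/IRQ init through the VM manager, and the CVP and PM
+ * workqueues. Returns NULL on any failure.
+ */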
+static struct iris_hfi_device *__add_device(struct msm_cvp_platform_resources *res,
+			hfi_cmd_response_callback callback)
+{
+	struct iris_hfi_device *hdevice = NULL;
+	int rc = 0;
+
+	if (!res || !callback) {
+		dprintk(CVP_ERR, "Invalid Parameters\n");
+		return NULL;
+	}
+
+	hdevice = kzalloc(sizeof(*hdevice), GFP_KERNEL);
+	if (!hdevice) {
+		dprintk(CVP_ERR, "failed to allocate new device\n");
+		goto exit;
+	}
+
+	hdevice->response_pkt = kmalloc_array(cvp_max_packets,
+				sizeof(*hdevice->response_pkt), GFP_KERNEL);
+	if (!hdevice->response_pkt) {
+		dprintk(CVP_ERR, "failed to allocate response_pkt\n");
+		goto err_cleanup;
+	}
+
+	hdevice->raw_packet =
+		kzalloc(CVP_IFACEQ_VAR_HUGE_PKT_SIZE, GFP_KERNEL);
+	if (!hdevice->raw_packet) {
+		dprintk(CVP_ERR, "failed to allocate raw packet\n");
+		goto err_cleanup;
+	}
+
+	rc = vm_manager.vm_ops->vm_init_reg_and_irq(hdevice, res);
+	if (rc)
+		goto err_cleanup;
+
+	hdevice->res = res;
+	hdevice->callback = callback;
+
+	__init_cvp_ops(hdevice);
+
+	hdevice->cvp_workq = create_singlethread_workqueue(
+		"msm_cvp_workerq_iris");
+	if (!hdevice->cvp_workq) {
+		dprintk(CVP_ERR, ": create cvp workq failed\n");
+		goto err_cleanup;
+	}
+
+	hdevice->iris_pm_workq = create_singlethread_workqueue(
+			"pm_workerq_iris");
+	if (!hdevice->iris_pm_workq) {
+		dprintk(CVP_ERR, ": create pm workq failed\n");
+		goto err_cleanup;
+	}
+
+	mutex_init(&hdevice->lock);
+	INIT_LIST_HEAD(&hdevice->sess_head);
+
+	return hdevice;
+
+err_cleanup:
+	if (hdevice->iris_pm_workq)
+		destroy_workqueue(hdevice->iris_pm_workq);
+	if (hdevice->cvp_workq)
+		destroy_workqueue(hdevice->cvp_workq);
+	kfree(hdevice->response_pkt);
+	kfree(hdevice->raw_packet);
+	kfree(hdevice);
+exit:
+	return NULL;
+}
+
+static struct iris_hfi_device *__get_device(struct msm_cvp_platform_resources *res,
+				hfi_cmd_response_callback callback)
+{
+	if (!res || !callback) {
+		dprintk(CVP_ERR, "Invalid params: %pK %pK\n", res, callback);
+		return NULL;
+	}
+
+	return __add_device(res, callback);
+}
+
+void cvp_iris_hfi_delete_device(void *device)
+{
+	struct msm_cvp_core *core;
+	struct iris_hfi_device *dev = NULL;
+
+	if (!device)
+		return;
+
+	core = cvp_driver->cvp_core;
+	if (core)
+		dev = core->dev_ops->hfi_device_data;
+
+	if (!dev)
+		return;
+
+	mutex_destroy(&dev->lock);
+	destroy_workqueue(dev->cvp_workq);
+	destroy_workqueue(dev->iris_pm_workq);
+	free_irq(dev->cvp_hal_data->irq, dev);
+	iounmap(dev->cvp_hal_data->register_base);
+	iounmap(dev->cvp_hal_data->gcc_reg_base);
+	kfree(dev->cvp_hal_data);
+	kfree(dev->response_pkt);
+	kfree(dev->raw_packet);
+	kfree(dev);
+}
+
+static int iris_hfi_validate_session(void *sess, const char *func)
+{
+	struct cvp_hal_session *session = sess;
+	int rc = 0;
+	struct iris_hfi_device *device;
+
+	if (!session || !session->device) {
+		dprintk(CVP_ERR, " %s Invalid Params %pK\n", __func__, session);
+		return -EINVAL;
+	}
+
+	device = session->device;
+	mutex_lock(&device->lock);
+	if (!__is_session_valid(device, session, func))
+		rc = -ECONNRESET;
+
+	mutex_unlock(&device->lock);
+	return rc;
+}
+
+static void iris_init_hfi_callbacks(struct cvp_hfi_ops *ops_tbl)
+{
+	ops_tbl->core_init = iris_hfi_core_init;
+	ops_tbl->core_release = iris_hfi_core_release;
+	ops_tbl->core_trigger_ssr = iris_hfi_core_trigger_ssr;
+	ops_tbl->session_init = iris_hfi_session_init;
+	ops_tbl->session_end = iris_hfi_session_end;
+	ops_tbl->session_start = iris_hfi_session_start;
+	ops_tbl->session_stop = iris_hfi_session_stop;
+	ops_tbl->session_abort = iris_hfi_session_abort;
+	ops_tbl->session_clean = iris_hfi_session_clean;
+	ops_tbl->session_set_buffers = iris_hfi_session_set_buffers;
+	ops_tbl->session_release_buffers = iris_hfi_session_release_buffers;
+	ops_tbl->session_send = iris_hfi_session_send;
+	ops_tbl->session_flush = iris_hfi_session_flush;
+	ops_tbl->scale_clocks = iris_hfi_scale_clocks;
+	ops_tbl->vote_bus = iris_hfi_vote_buses;
+	ops_tbl->get_fw_info = iris_hfi_get_fw_info;
+	ops_tbl->get_core_capabilities = iris_hfi_get_core_capabilities;
+	ops_tbl->suspend = iris_hfi_suspend;
+	ops_tbl->resume = iris_hfi_resume;
+	ops_tbl->flush_debug_queue = iris_hfi_flush_debug_queue;
+	ops_tbl->noc_error_info = iris_hfi_noc_error_info;
+	ops_tbl->validate_session = iris_hfi_validate_session;
+	ops_tbl->pm_qos_update = iris_pm_qos_update;
+	ops_tbl->debug_hook = iris_debug_hook;
+}
+
+int cvp_iris_hfi_initialize(struct cvp_hfi_ops *ops_tbl,
+		struct msm_cvp_platform_resources *res,
+		hfi_cmd_response_callback callback)
+{
+	int rc = 0;
+
+	if (!ops_tbl || !res || !callback) {
+		dprintk(CVP_ERR, "Invalid params: %pK %pK %pK\n",
+			ops_tbl, res, callback);
+		rc = -EINVAL;
+		goto err_iris_hfi_init;
+	}
+
+	ops_tbl->hfi_device_data = __get_device(res, callback);
+
+	if (IS_ERR_OR_NULL(ops_tbl->hfi_device_data)) {
+		rc = PTR_ERR(ops_tbl->hfi_device_data) ?: -EINVAL;
+		goto err_iris_hfi_init;
+	}
+
+	iris_init_hfi_callbacks(ops_tbl);
+
+err_iris_hfi_init:
+	return rc;
+}
+
+static void dump_noc_reg(struct iris_hfi_device *device)
+{
+	u32 val = 0, config;
+	int i;
+	struct regulator_info *rinfo;
+	int rc = 0;
+
+	if (msm_cvp_fw_low_power_mode) {
+		iris_hfi_for_each_regulator(device, rinfo) {
+			if (strcmp(rinfo->name, "cvp-core"))
+				continue;
+			rc = __acquire_regulator(rinfo, device);
+			if (rc)
+				dprintk(CVP_WARN,
+						"%s, Failed to acquire regulator control: %s\n",
+						__func__, rinfo->name);
+		}
+	}
+	val = __read_register(device, CVP_CC_MVS1_GDSCR);
+	dprintk(CVP_ERR, "%s, CVP_CC_MVS1_GDSCR: 0x%x", __func__, val);
+	config = __read_register(device, CVP_WRAPPER_CORE_CLOCK_CONFIG);
+	dprintk(CVP_ERR, "%s, CVP_WRAPPER_CORE_CLOCK_CONFIG: 0x%x", __func__, config);
+	if (config) {
+		dprintk(CVP_PWR,
+				"core clock config not enabled, enable it to access core\n");
+		__write_register(device, CVP_WRAPPER_CORE_CLOCK_CONFIG, 0);
+	}
+	rc = call_iris_op(device, reset_control_acquire_name, device, "cvp_xo_reset");
+	if (rc) {
+		dprintk(CVP_WARN, "%s Fail acquire xo_reset\n", __func__);
+		return;
+	}
+
+	val = __read_register(device, CVP_NOC_RGE_NIU_DECCTL_LOW
+			+ device->res->qos_noc_rge_niu_offset);
+	dprintk(CVP_ERR, "CVP_NOC_RGE_NIU_DECCTL_LOW: 0x%x\n", val);
+	val = __read_register(device, CVP_NOC_RGE_NIU_ENCCTL_LOW
+			+ device->res->qos_noc_rge_niu_offset);
+	dprintk(CVP_ERR, "CVP_NOC_RGE_NIU_ENCCTL_LOW: 0x%x\n", val);
+	val = __read_register(device, CVP_NOC_GCE_VADL_TOF_NIU_DECCTL_LOW
+			+ device->res->qos_noc_gce_vadl_tof_niu_offset);
+	dprintk(CVP_ERR, "CVP_NOC_GCE_VADL_TOF_NIU_DECCTL_LOW: 0x%x\n", val);
+	val = __read_register(device, CVP_NOC_GCE_VADL_TOF_NIU_ENCCTL_LOW
+			+ device->res->qos_noc_gce_vadl_tof_niu_offset);
+	dprintk(CVP_ERR, "CVP_NOC_GCE_VADL_TOF_NIU_ENCCTL_LOW: 0x%x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_MAINCTL_LOW_OFFS
+			+ device->res->noc_core_err_offset);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERR_MAINCTL_LOW_OFFS: 0x%x\n", val);
+	val = __read_register(device, CVP_NOC_MAIN_SIDEBANDMANAGER_FAULTINEN0_LOW
+			+ device->res->noc_main_sidebandmanager_offset);
+	dprintk(CVP_ERR, "CVP_NOC_MAIN_SIDEBANDMANAGER_FAULTINEN0_LOW: 0x%x\n", val);
+
+	dprintk(CVP_ERR, "Dumping Core NoC registers\n");
+	val = __read_register(device, CVP_NOC_CORE_ERR_SWID_LOW_OFFS
+			+ device->res->noc_core_err_offset);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_SWID_LOW: 0x%x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_SWID_HIGH_OFFS
+			+ device->res->noc_core_err_offset);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_SWID_HIGH: 0x%x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_MAINCTL_LOW_OFFS
+			+ device->res->noc_core_err_offset);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_MAINCTL_LOW: 0x%x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_ERRVLD_LOW_OFFS
+			+ device->res->noc_core_err_offset);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRVLD_LOW: 0x%x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_ERRCLR_LOW_OFFS
+			+ device->res->noc_core_err_offset);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRCLR_LOW: 0x%x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_ERRLOG0_LOW_OFFS
+			+ device->res->noc_core_err_offset);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRLOG0_LOW: 0x%x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_ERRLOG0_HIGH_OFFS
+			+ device->res->noc_core_err_offset);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRLOG0_HIGH: 0x%x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_ERRLOG1_LOW_OFFS
+			+ device->res->noc_core_err_offset);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRLOG1_LOW: 0x%x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_ERRLOG1_HIGH_OFFS
+			+ device->res->noc_core_err_offset);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRLOG1_HIGH: 0x%x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_ERRLOG2_LOW_OFFS
+			+ device->res->noc_core_err_offset);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRLOG2_LOW: 0x%x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_ERRLOG2_HIGH_OFFS
+			+ device->res->noc_core_err_offset);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRLOG2_HIGH: 0x%x\n", val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_ERRLOG3_LOW_OFFS
+			+ device->res->noc_core_err_offset);
+	dprintk(CVP_ERR, "CORE ERRLOG3_LOW: 0x%x, details below\n", val);
+	__print_reg_details(val);
+	val = __read_register(device, CVP_NOC_CORE_ERR_ERRLOG3_HIGH_OFFS
+			+ device->res->noc_core_err_offset);
+	dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRLOG3_HIGH: 0x%x\n", val);
+	__write_register(device, CVP_NOC_CORE_ERR_ERRCLR_LOW_OFFS
+			+ device->res->noc_core_err_offset, 0x1);
+
+	call_iris_op(device, reset_control_release_name, device, "cvp_xo_reset");
+	if (msm_cvp_fw_low_power_mode) {
+		iris_hfi_for_each_regulator(device, rinfo) {
+			if (strcmp(rinfo->name, "cvp-core"))
+				continue;
+			rc = __hand_off_regulator(device, rinfo);
+			if (rc)
+				dprintk(CVP_WARN,
+						"%s, Failed to hand off regulator control: %s\n",
+						__func__, rinfo->name);
+		}
+	}
+	__write_register(device, CVP_WRAPPER_CORE_CLOCK_CONFIG, config);
+}

+ 390 - 0
qcom/opensource/eva-kernel/msm/eva/cvp_hfi.h

@@ -0,0 +1,390 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __H_CVP_HFI_H__
+#define __H_CVP_HFI_H__
+
+#include "cvp_hfi_helper.h"
+#include "cvp_hfi_api.h"
+#include "cvp_comm_def.h"
+
+#define HFI_CMD_SESSION_CVP_START	\
+	(HFI_DOMAIN_BASE_CVP + HFI_ARCH_COMMON_OFFSET +	\
+	HFI_CMD_START_OFFSET + 0x1000)
+
+#define  HFI_CMD_SESSION_CVP_SET_BUFFERS\
+	(HFI_CMD_SESSION_CVP_START + 0x001)
+#define  HFI_CMD_SESSION_CVP_RELEASE_BUFFERS\
+	(HFI_CMD_SESSION_CVP_START + 0x002)
+
+#define  HFI_CMD_SESSION_CVP_DS\
+	(HFI_CMD_SESSION_CVP_START + 0x003)
+#define  HFI_CMD_SESSION_CVP_HCD_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x004)
+#define  HFI_CMD_SESSION_CVP_HCD_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x005)
+#define  HFI_CMD_SESSION_CVP_CV_HOG_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x006)
+#define  HFI_CMD_SESSION_CVP_CV_HOG_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x007)
+#define  HFI_CMD_SESSION_CVP_SVM\
+	(HFI_CMD_SESSION_CVP_START + 0x008)
+#define  HFI_CMD_SESSION_CVP_NCC_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x009)
+#define  HFI_CMD_SESSION_CVP_NCC_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x00A)
+#define  HFI_CMD_SESSION_CVP_DFS_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x00B)
+#define  HFI_CMD_SESSION_CVP_DFS_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x00C)
+#define  HFI_CMD_SESSION_CVP_FTEXT\
+	(HFI_CMD_SESSION_CVP_START + 0x00F)
+
+/* ==========CHAINED OPERATIONS===================*/
+#define  HFI_CMD_SESSION_CVP_CV_HOG_SVM_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x010)
+#define  HFI_CMD_SESSION_CVP_CV_HOG_SVM_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x011)
+#define  HFI_CMD_SESSION_CVP_CV_HOG_SVM_HCD_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x012)
+#define  HFI_CMD_SESSION_CVP_CV_HOG_SVM_HCD_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x013)
+#define  HFI_CMD_SESSION_CVP_OPTICAL_FLOW\
+	(HFI_CMD_SESSION_CVP_START + 0x014)
+
+/* ===========USECASE OPERATIONS===============*/
+#define  HFI_CMD_SESSION_CVP_DC_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x030)
+#define  HFI_CMD_SESSION_CVP_DC_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x031)
+#define  HFI_CMD_SESSION_CVP_DCM_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x034)
+#define  HFI_CMD_SESSION_CVP_DCM_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x035)
+
+#define  HFI_CMD_SESSION_CVP_DME_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x039)
+#define  HFI_CMD_SESSION_CVP_DME_BASIC_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x03B)
+#define  HFI_CMD_SESSION_CVP_DME_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x03A)
+
+#define  HFI_CMD_SESSION_EVA_DME_ONLY_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x040)
+#define  HFI_CMD_SESSION_EVA_DME_ONLY_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x041)
+
+#define  HFI_CMD_SESSION_CVP_CV_TME_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x047)
+#define  HFI_CMD_SESSION_CVP_CV_TME_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x048)
+#define  HFI_CMD_SESSION_CVP_CV_OD_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x049)
+#define  HFI_CMD_SESSION_CVP_CV_OD_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x04A)
+#define  HFI_CMD_SESSION_CVP_CV_ODT_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x04B)
+#define  HFI_CMD_SESSION_CVP_CV_ODT_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x04C)
+
+#define  HFI_CMD_SESSION_CVP_SET_PERSIST_BUFFERS\
+	(HFI_CMD_SESSION_CVP_START + 0x04D)
+#define HFI_CMD_SESSION_CVP_PYS_HCD_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x050)
+#define HFI_CMD_SESSION_CVP_PYS_HCD_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x051)
+#define HFI_CMD_SESSION_CVP_SET_MODEL_BUFFERS\
+	(HFI_CMD_SESSION_CVP_START + 0x052)
+#define HFI_CMD_SESSION_CVP_FD_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x053)
+#define HFI_CMD_SESSION_CVP_FD_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x054)
+#define HFI_CMD_SESSION_CVP_RELEASE_PERSIST_BUFFERS\
+	(HFI_CMD_SESSION_CVP_START + 0x055)
+#define  HFI_CMD_SESSION_CVP_RELEASE_MODEL_BUFFERS\
+	(HFI_CMD_SESSION_CVP_START + 0x056)
+#define  HFI_CMD_SESSION_CVP_SGM_DFS_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x057)
+#define  HFI_CMD_SESSION_CVP_SGM_DFS_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x058)
+#define  HFI_CMD_SESSION_CVP_SGM_OF_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x059)
+#define  HFI_CMD_SESSION_CVP_SGM_OF_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x05A)
+#define  HFI_CMD_SESSION_CVP_GCE_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x05B)
+#define  HFI_CMD_SESSION_CVP_GCE_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x05C)
+#define  HFI_CMD_SESSION_CVP_WARP_NCC_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x05D)
+#define  HFI_CMD_SESSION_CVP_WARP_NCC_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x05E)
+#define  HFI_CMD_SESSION_CVP_DMM_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x05F)
+#define  HFI_CMD_SESSION_CVP_DMM_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x060)
+#define HFI_CMD_SESSION_CVP_FLUSH\
+	(HFI_CMD_SESSION_CVP_START + 0x061)
+#define  HFI_CMD_SESSION_CVP_WARP_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x062)
+#define  HFI_CMD_SESSION_CVP_WARP_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x063)
+#define  HFI_CMD_SESSION_CVP_DMM_PARAMS\
+	(HFI_CMD_SESSION_CVP_START + 0x064)
+#define  HFI_CMD_SESSION_CVP_WARP_DS_PARAMS\
+	(HFI_CMD_SESSION_CVP_START + 0x065)
+#define  HFI_CMD_SESSION_CVP_XRA_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x066)
+#define  HFI_CMD_SESSION_CVP_XRA_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x067)
+#define  HFI_CMD_SESSION_CVP_XRA_BLOB_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x069)
+#define  HFI_CMD_SESSION_CVP_XRA_BLOB_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x06A)
+#define  HFI_CMD_SESSION_CVP_XRA_PATCH_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x06B)
+#define  HFI_CMD_SESSION_CVP_XRA_PATCH_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x06C)
+#define  HFI_CMD_SESSION_CVP_XRA_MATCH_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x06D)
+#define  HFI_CMD_SESSION_CVP_XRA_MATCH_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x06E)
+
+
+#define HFI_CMD_SESSION_CVP_SET_SNAPSHOT_BUFFERS\
+	(HFI_CMD_SESSION_CVP_START + 0x070)
+#define HFI_CMD_SESSION_CVP_RELEASE_SNAPSHOT_BUFFERS\
+	(HFI_CMD_SESSION_CVP_START + 0x071)
+#define HFI_CMD_SESSION_CVP_SNAPSHOT_WRITE_DONE\
+	(HFI_CMD_SESSION_CVP_START + 0x072)
+#define HFI_CMD_SESSION_CVP_SET_SNAPSHOT_MODE\
+	(HFI_CMD_SESSION_CVP_START + 0x073)
+#define  HFI_CMD_SESSION_EVA_ITOF_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x078)
+#define  HFI_CMD_SESSION_EVA_ITOF_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x079)
+#define  HFI_CMD_SESSION_EVA_DLFD_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x07C)
+#define  HFI_CMD_SESSION_EVA_DLFD_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x07D)
+#define  HFI_CMD_SESSION_CVP_RGE_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x07E)
+#define  HFI_CMD_SESSION_CVP_RGE_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x07F)
+#define  HFI_CMD_SESSION_EVA_DLFL_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x080)
+#define  HFI_CMD_SESSION_EVA_DLFL_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x081)
+#define  HFI_CMD_SESSION_CVP_SYNX\
+	(HFI_CMD_SESSION_CVP_START + 0x086)
+#define  HFI_CMD_SESSION_EVA_START\
+	(HFI_CMD_SESSION_CVP_START + 0x088)
+#define  HFI_CMD_SESSION_EVA_STOP\
+	(HFI_CMD_SESSION_CVP_START + 0x089)
+#define  HFI_CMD_SESSION_CVP_ICA_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x100)
+#define  HFI_CMD_SESSION_CVP_ICA_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x101)
+#define  HFI_CMD_SESSION_CVP_DS_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x02F)
+
+
+#define HFI_MSG_SESSION_CVP_START	\
+	(HFI_DOMAIN_BASE_CVP + HFI_ARCH_COMMON_OFFSET +	\
+	HFI_MSG_START_OFFSET + 0x1000)
+
+#define HFI_MSG_SESSION_CVP_SET_BUFFERS\
+	(HFI_MSG_SESSION_CVP_START + 0x001)
+#define HFI_MSG_SESSION_CVP_RELEASE_BUFFERS \
+	(HFI_MSG_SESSION_CVP_START + 0x002)
+#define HFI_MSG_SESSION_CVP_DS\
+	(HFI_MSG_SESSION_CVP_START + 0x003)
+#define HFI_MSG_SESSION_CVP_HCD\
+	(HFI_MSG_SESSION_CVP_START + 0x004)
+#define HFI_MSG_SESSION_CVP_CV_HOG\
+	(HFI_MSG_SESSION_CVP_START + 0x005)
+#define HFI_MSG_SESSION_CVP_SVM\
+	(HFI_MSG_SESSION_CVP_START + 0x006)
+#define HFI_MSG_SESSION_CVP_NCC\
+	(HFI_MSG_SESSION_CVP_START + 0x007)
+#define HFI_MSG_SESSION_CVP_DFS\
+	(HFI_MSG_SESSION_CVP_START + 0x008)
+#define HFI_MSG_SESSION_CVP_TME\
+	(HFI_MSG_SESSION_CVP_START + 0x009)
+#define HFI_MSG_SESSION_CVP_FTEXT\
+	(HFI_MSG_SESSION_CVP_START + 0x00A)
+
+#define HFI_MSG_SESSION_CVP_ICA\
+	(HFI_MSG_SESSION_CVP_START + 0x014)
+
+#define HFI_MSG_SESSION_CVP_DME\
+	(HFI_MSG_SESSION_CVP_START + 0x023)
+#define  HFI_MSG_SESSION_EVA_DME_ONLY\
+	(HFI_MSG_SESSION_CVP_START + 0x050)
+#define HFI_MSG_SESSION_CVP_OPERATION_CONFIG (HFI_MSG_SESSION_CVP_START + 0x030)
+
+#define HFI_MSG_SESSION_CVP_SET_PERSIST_BUFFERS\
+	(HFI_MSG_SESSION_CVP_START + 0x034)
+#define HFI_MSG_SESSION_CVP_SET_MODEL_BUFFERS\
+	(HFI_MSG_SESSION_CVP_START + 0x036)
+#define HFI_MSG_SESSION_CVP_FD\
+	(HFI_MSG_SESSION_CVP_START + 0x037)
+#define HFI_MSG_SESSION_CVP_RELEASE_PERSIST_BUFFERS\
+	(HFI_MSG_SESSION_CVP_START + 0x038)
+#define  HFI_MSG_SESSION_CVP_RELEASE_MODEL_BUFFERS\
+	(HFI_MSG_SESSION_CVP_START + 0x039)
+#define  HFI_MSG_SESSION_CVP_SGM_OF\
+	(HFI_MSG_SESSION_CVP_START + 0x03A)
+#define  HFI_MSG_SESSION_CVP_GCE\
+	(HFI_MSG_SESSION_CVP_START + 0x03B)
+#define  HFI_MSG_SESSION_CVP_WARP_NCC\
+	(HFI_MSG_SESSION_CVP_START + 0x03C)
+#define  HFI_MSG_SESSION_CVP_DMM\
+	(HFI_MSG_SESSION_CVP_START + 0x03D)
+#define  HFI_MSG_SESSION_CVP_SGM_DFS\
+	(HFI_MSG_SESSION_CVP_START + 0x03E)
+#define  HFI_MSG_SESSION_CVP_WARP\
+	(HFI_MSG_SESSION_CVP_START + 0x03F)
+#define  HFI_MSG_SESSION_CVP_DMM_PARAMS\
+	(HFI_MSG_SESSION_CVP_START + 0x040)
+#define  HFI_MSG_SESSION_CVP_WARP_DS_PARAMS\
+	(HFI_MSG_SESSION_CVP_START + 0x041)
+#define  HFI_MSG_SESSION_CVP_SET_SNAPSHOT_BUFFERS\
+	(HFI_MSG_SESSION_CVP_START + 0x045)
+#define  HFI_MSG_SESSION_CVP_RELEASE_SNAPSHOT_BUFFERS\
+	(HFI_MSG_SESSION_CVP_START + 0x046)
+#define  HFI_MSG_EVENT_NOTIFY_SNAPSHOT_READY\
+	(HFI_MSG_SESSION_CVP_START + 0x047)
+
+#define HFI_MSG_SESSION_CVP_FLUSH\
+	(HFI_MSG_SESSION_CVP_START + 0x004A)
+#define HFI_MSG_SESSION_EVA_START\
+	(HFI_MSG_SESSION_CVP_START + 0x0058)
+#define HFI_MSG_SESSION_EVA_STOP\
+	(HFI_MSG_SESSION_CVP_START + 0x0059)
+
+#define CVP_IFACEQ_MAX_PKT_SIZE       1024
+#define CVP_IFACEQ_MED_PKT_SIZE       768
+#define CVP_IFACEQ_MIN_PKT_SIZE       8
+#define CVP_IFACEQ_VAR_SMALL_PKT_SIZE 100
+#define CVP_IFACEQ_VAR_LARGE_PKT_SIZE 512
+#define CVP_IFACEQ_VAR_HUGE_PKT_SIZE  (1024*12)
+
+/* HFI packet info needed for sanity check */
+#define HFI_DFS_CONFIG_CMD_SIZE	38
+#define HFI_DFS_FRAME_CMD_SIZE	16
+
+#define HFI_DMM_CONFIG_CMD_SIZE	194
+#define HFI_DMM_BASIC_CONFIG_CMD_SIZE	51
+#define HFI_DMM_FRAME_CMD_SIZE	28
+
+#define HFI_PERSIST_CMD_SIZE	11
+
+#define HFI_DS_CONFIG_CMD_SIZE 11
+#define HFI_DS_CMD_SIZE	50
+
+#define HFI_OF_CONFIG_CMD_SIZE 34
+#define HFI_OF_FRAME_CMD_SIZE 24
+
+#define HFI_ODT_CONFIG_CMD_SIZE 23
+#define HFI_ODT_FRAME_CMD_SIZE 33
+
+#define HFI_OD_CONFIG_CMD_SIZE 24
+#define HFI_OD_FRAME_CMD_SIZE 12
+
+#define HFI_NCC_CONFIG_CMD_SIZE 47
+#define HFI_NCC_FRAME_CMD_SIZE 22
+
+#define HFI_ICA_CONFIG_CMD_SIZE 127
+#define HFI_ICA_FRAME_CMD_SIZE 14
+
+#define HFI_HCD_CONFIG_CMD_SIZE 46
+#define HFI_HCD_FRAME_CMD_SIZE 18
+
+#define HFI_DCM_CONFIG_CMD_SIZE 20
+#define HFI_DCM_FRAME_CMD_SIZE 19
+
+#define HFI_PYS_HCD_CONFIG_CMD_SIZE 461
+#define HFI_PYS_HCD_FRAME_CMD_SIZE 66
+
+#define HFI_FD_CONFIG_CMD_SIZE 28
+#define HFI_FD_FRAME_CMD_SIZE  10
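+
+/*
+ * Sketch of the sanity check these sizes support (hypothetical check; it
+ * assumes the *_CMD_SIZE values count 32-bit words, which is an assumption
+ * not confirmed by this header):
+ *
+ *	if (pkt->size != HFI_DFS_FRAME_CMD_SIZE * sizeof(u32))
+ *		return -EINVAL;
+ */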
+
+
+struct cvp_hfi_cmd_session_flush_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 flush_type;
+};
+
+struct cvp_hfi_cmd_session_get_property_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 num_properties;
+	u32 rg_property_data[1];
+};
+
+struct cvp_hfi_msg_sys_session_abort_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+};
+
+struct cvp_hfi_msg_sys_property_info_packet {
+	u32 size;
+	u32 packet_type;
+	u32 num_properties;
+	u32 rg_property_data[128];
+};
+
+enum session_flags {
+	SESSION_PAUSE = BIT(1),
+};
+
+struct cvp_hal_session {
+	struct list_head list;
+	void *session_id;
+	u32 flags;
+	void *device;
+};
+
+enum buf_map_type {
+	MAP_PERSIST = 1,
+	UNMAP_PERSIST = 2,
+	MAP_FRAME = 3,
+	MAP_INVALID,
+};
+
+static inline enum buf_map_type cvp_find_map_type(int pkt_type)
+{
+	if (pkt_type == HFI_CMD_SESSION_CVP_SET_PERSIST_BUFFERS ||
+			pkt_type == HFI_CMD_SESSION_CVP_SET_MODEL_BUFFERS ||
+			pkt_type == HFI_CMD_SESSION_CVP_DMM_PARAMS ||
+			pkt_type == HFI_CMD_SESSION_CVP_SET_SNAPSHOT_BUFFERS ||
+			pkt_type == HFI_CMD_SESSION_CVP_WARP_DS_PARAMS ||
+			pkt_type == HFI_CMD_SESSION_EVA_DLFL_CONFIG)
+		return MAP_PERSIST;
+	else if (pkt_type == HFI_CMD_SESSION_CVP_RELEASE_PERSIST_BUFFERS ||
+			pkt_type ==
+				HFI_CMD_SESSION_CVP_RELEASE_SNAPSHOT_BUFFERS)
+		return UNMAP_PERSIST;
+	else
+		return MAP_FRAME;
+}
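+
+/*
+ * Illustrative use of cvp_find_map_type() (a sketch; the helper called in
+ * the branch is hypothetical):
+ *
+ *	if (cvp_find_map_type(pkt->packet_type) == MAP_PERSIST)
+ *		rc = map_persist_buffers(inst, pkt);
+ */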
+
+static inline bool is_params_pkt(int pkt_type)
+{
+	if (pkt_type == HFI_CMD_SESSION_CVP_DMM_PARAMS ||
+		pkt_type == HFI_CMD_SESSION_CVP_WARP_DS_PARAMS)
+		return true;
+
+	return false;
+}
+
+#endif

+ 317 - 0
qcom/opensource/eva-kernel/msm/eva/cvp_hfi_api.h

@@ -0,0 +1,317 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __CVP_HFI_API_H__
+#define __CVP_HFI_API_H__
+
+#include <linux/log2.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/hash.h>
+#include "msm_cvp_core.h"
+#include "msm_cvp_resources.h"
+#include "cvp_hfi_helper.h"
+
+#define CONTAINS(__a, __sz, __t) (\
+	(__t >= __a) && \
+	(__t < __a + __sz) \
+)
+
+#define OVERLAPS(__t, __tsz, __a, __asz) (\
+	(__t <= __a) && \
+	(__t + __tsz >= __a + __asz) \
+)
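+
+/*
+ * Semantics note (worked example, not driver code): CONTAINS() tests that a
+ * single address __t lies in [__a, __a + __sz), while OVERLAPS() is true
+ * only when [__t, __t + __tsz] fully covers [__a, __a + __asz]; e.g.
+ * OVERLAPS(0x1000, 0x100, 0x1010, 0x20) is true, whereas two ranges that
+ * merely intersect evaluate to false.
+ */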
+
+#define CVP_VERSION_LENGTH 128
+
+/* Up to 32 concurrent CVP sessions */
+#define CVP_MAX_SESSIONS	32
+
+#define HFI_VERSION_MAJOR_MASK 0xFF000000
+#define HFI_VERSION_MAJOR_SHFIT 24
+#define HFI_VERSION_MINOR_MASK 0x00FFFFE0
+#define HFI_VERSION_MINOR_SHIFT 5
+#define HFI_VERSION_BRANCH_MASK 0x0000001F
+#define HFI_VERSION_BRANCH_SHIFT 0
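+
+/*
+ * Illustrative decode of the packed HFI version word (a sketch; the
+ * existing "SHFIT" spelling of the major-shift macro is kept as-is):
+ *
+ *	u32 major  = (ver & HFI_VERSION_MAJOR_MASK) >> HFI_VERSION_MAJOR_SHFIT;
+ *	u32 minor  = (ver & HFI_VERSION_MINOR_MASK) >> HFI_VERSION_MINOR_SHIFT;
+ *	u32 branch = (ver & HFI_VERSION_BRANCH_MASK) >> HFI_VERSION_BRANCH_SHIFT;
+ */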
+
+enum cvp_status {
+	CVP_ERR_NONE = 0x0,
+	CVP_ERR_FAIL = 0x80000000,
+	CVP_ERR_ALLOC_FAIL,
+	CVP_ERR_ILLEGAL_OP,
+	CVP_ERR_BAD_PARAM,
+	CVP_ERR_BAD_HANDLE,
+	CVP_ERR_NOT_SUPPORTED,
+	CVP_ERR_BAD_STATE,
+	CVP_ERR_MAX_CLIENTS,
+	CVP_ERR_IFRAME_EXPECTED,
+	CVP_ERR_HW_FATAL,
+	CVP_ERR_BITSTREAM_ERR,
+	CVP_ERR_INDEX_NOMORE,
+	CVP_ERR_SEQHDR_PARSE_FAIL,
+	CVP_ERR_INSUFFICIENT_BUFFER,
+	CVP_ERR_BAD_POWER_STATE,
+	CVP_ERR_NO_VALID_SESSION,
+	CVP_ERR_TIMEOUT,
+	CVP_ERR_CMDQFULL,
+	CVP_ERR_START_CODE_NOT_FOUND,
+	CVP_ERR_NOC_ERROR,
+	CVP_ERR_CLIENT_PRESENT = 0x90000001,
+	CVP_ERR_CLIENT_FATAL,
+	CVP_ERR_CMD_QUEUE_FULL,
+	CVP_ERR_UNUSED = 0x10000000
+};
+
+enum hal_property {
+	HAL_UNUSED_PROPERTY = 0xFFFFFFFF,
+};
+
+enum hal_ssr_trigger_type {
+	SSR_ERR_FATAL = 1,
+	SSR_SW_DIV_BY_ZERO,
+	SSR_HW_WDOG_IRQ,
+	SSR_SESSION_ABORT,
+};
+
+enum hal_intra_refresh_mode {
+	HAL_INTRA_REFRESH_NONE,
+	HAL_INTRA_REFRESH_CYCLIC,
+	HAL_INTRA_REFRESH_RANDOM,
+	HAL_UNUSED_INTRA = 0x10000000,
+};
+
+enum cvp_resource_id {
+	CVP_RESOURCE_NONE,
+	CVP_RESOURCE_SYSCACHE,
+	CVP_UNUSED_RESOURCE = 0x10000000,
+};
+
+struct cvp_resource_hdr {
+	enum cvp_resource_id resource_id;
+	void *resource_handle;
+};
+
+struct cvp_hal_fw_info {
+	char version[CVP_VERSION_LENGTH];
+	phys_addr_t base_addr;
+	int register_base;
+	int register_size;
+	int irq;
+};
+
+enum hal_event_type {
+	HAL_EVENT_SEQ_CHANGED_SUFFICIENT_RESOURCES,
+	HAL_EVENT_SEQ_CHANGED_INSUFFICIENT_RESOURCES,
+	HAL_EVENT_RELEASE_BUFFER_REFERENCE,
+	HAL_UNUSED_SEQCHG = 0x10000000,
+};
+
+/* HAL Response */
+#define IS_HAL_SYS_CMD(cmd) ((cmd) >= HAL_SYS_INIT_DONE && \
+		(cmd) <= HAL_SYS_ERROR)
+#define IS_HAL_SESSION_CMD(cmd) ((cmd) >= HAL_SESSION_EVENT_CHANGE && \
+		(cmd) <= HAL_SESSION_ERROR)
+enum hal_command_response {
+	HAL_NO_RESP,
+	HAL_SYS_INIT_DONE,
+	HAL_SYS_SET_RESOURCE_DONE,
+	HAL_SYS_RELEASE_RESOURCE_DONE,
+	HAL_SYS_PING_ACK_DONE,
+	HAL_SYS_PC_PREP_DONE,
+	HAL_SYS_IDLE,
+	HAL_SYS_DEBUG,
+	HAL_SYS_WATCHDOG_TIMEOUT,
+	HAL_SYS_ERROR,
+	/* SESSION COMMANDS_DONE */
+	HAL_SESSION_EVENT_CHANGE,
+	HAL_SESSION_INIT_DONE,
+	HAL_SESSION_END_DONE,
+	HAL_SESSION_SET_BUFFER_DONE,
+	HAL_SESSION_ABORT_DONE,
+	HAL_SESSION_START_DONE,
+	HAL_SESSION_STOP_DONE,
+	HAL_SESSION_CVP_OPERATION_CONFIG,
+	HAL_SESSION_FLUSH_DONE,
+	HAL_SESSION_SUSPEND_DONE,
+	HAL_SESSION_RESUME_DONE,
+	HAL_SESSION_SET_PROP_DONE,
+	HAL_SESSION_GET_PROP_DONE,
+	HAL_SESSION_RELEASE_BUFFER_DONE,
+	HAL_SESSION_REGISTER_BUFFER_DONE,
+	HAL_SESSION_UNREGISTER_BUFFER_DONE,
+	HAL_SESSION_RELEASE_RESOURCE_DONE,
+	HAL_SESSION_PROPERTY_INFO,
+	HAL_SESSION_DUMP_NOTIFY,
+	HAL_SESSION_ERROR,
+	HAL_RESPONSE_UNUSED = 0x10000000,
+};
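+
+/*
+ * Worked example for the range checks above (illustrative):
+ * IS_HAL_SESSION_CMD(HAL_SESSION_END_DONE) is true, since the value falls
+ * between HAL_SESSION_EVENT_CHANGE and HAL_SESSION_ERROR, while
+ * IS_HAL_SESSION_CMD(HAL_SYS_IDLE) is false.
+ */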
+
+struct msm_cvp_capability {
+	u32 reserved[183];
+};
+
+struct cvp_hal_sys_init_done {
+	u32 dec_codec_supported;
+	u32 enc_codec_supported;
+	u32 codec_count;
+	struct msm_cvp_capability *capabilities;
+	u32 max_sessions_supported;
+};
+
+struct cvp_hal_session_init_done {
+	struct msm_cvp_capability capability;
+};
+
+struct msm_cvp_cb_cmd_done {
+	u32 device_id;
+	void *session_id;
+	enum cvp_status status;
+	u32 size;
+	union {
+		struct cvp_hfi_msg_session_hdr msg_hdr;
+		struct cvp_resource_hdr resource_hdr;
+		struct cvp_hal_sys_init_done sys_init_done;
+		struct cvp_hal_session_init_done session_init_done;
+		u32 buffer_addr;
+	} data;
+};
+
+struct msm_cvp_cb_data_done {
+	u32 device_id;
+	void *session_id;
+	enum cvp_status status;
+	u32 size;
+	u32 client_data;
+};
+
+struct msm_cvp_cb_info {
+	enum hal_command_response response_type;
+	union {
+		struct msm_cvp_cb_cmd_done cmd;
+		struct msm_cvp_cb_data_done data;
+	} response;
+};
+
+enum msm_cvp_hfi_type {
+	CVP_HFI_IRIS,
+};
+
+enum msm_cvp_thermal_level {
+	CVP_THERMAL_NORMAL = 0,
+	CVP_THERMAL_LOW,
+	CVP_THERMAL_HIGH,
+	CVP_THERMAL_CRITICAL
+};
+
+struct msm_cvp_gov_data {
+	struct cvp_bus_vote_data *data;
+	u32 data_count;
+};
+
+enum msm_cvp_power_mode {
+	CVP_POWER_NORMAL = 0,
+	CVP_POWER_LOW,
+	CVP_POWER_TURBO
+};
+
+struct cvp_bus_vote_data {
+	u32 domain;
+	u32 ddr_bw;
+	u32 sys_cache_bw;
+	enum msm_cvp_power_mode power_mode;
+	bool use_sys_cache;
+};
+
+struct cvp_hal_cmd_sys_get_property_packet {
+	u32 size;
+	u32 packet_type;
+	u32 num_properties;
+	u32 rg_property_data[1];
+};
+
+#define call_hfi_op(q, op, args...)			\
+	(((q) && (q)->op) ? ((q)->op(args)) : 0)
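+
+/*
+ * Example dispatch (a sketch): the macro guards against a NULL ops table or
+ * a missing handler, returning 0 in either case, e.g.
+ *
+ *	rc = call_hfi_op(ops_tbl, suspend, ops_tbl->hfi_device_data);
+ */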
+
+#define PKT_NAME_LEN	24
+#define MAX_PKT_IDX	0x200
+
+struct msm_cvp_hfi_defs {
+	unsigned int size;
+	unsigned int type;
+	bool is_config_pkt;
+	bool checksum_enabled;
+	enum hal_command_response resp;
+	char name[PKT_NAME_LEN];
+	bool force_kernel_fence;
+};
+
+struct cvp_hfi_ops {
+	void *hfi_device_data;
+	/*Add function pointers for all the hfi functions below*/
+	int (*core_init)(void *device);
+	int (*core_release)(void *device);
+	int (*core_trigger_ssr)(void *device, enum hal_ssr_trigger_type);
+	int (*session_init)(void *device, void *session_id, void **new_session);
+	int (*session_end)(void *session);
+	int (*session_start)(void *session);
+	int (*session_stop)(void *session);
+	int (*session_abort)(void *session);
+	int (*session_set_buffers)(void *sess, u32 iova, u32 size);
+	int (*session_release_buffers)(void *sess);
+	int (*session_send)(void *sess, struct eva_kmd_hfi_packet *in_pkt);
+	int (*session_flush)(void *sess);
+	int (*scale_clocks)(void *dev, u32 freq);
+	int (*vote_bus)(void *dev, struct bus_info *bus, unsigned long bw);
+	int (*get_fw_info)(void *dev, struct cvp_hal_fw_info *fw_info);
+	int (*session_clean)(void *sess);
+	int (*get_core_capabilities)(void *dev);
+	int (*suspend)(void *dev);
+	int (*resume)(void *dev);
+	int (*flush_debug_queue)(void *dev);
+	int (*noc_error_info)(void *dev);
+	int (*validate_session)(void *sess, const char *func);
+	int (*pm_qos_update)(void *device);
+	int (*debug_hook)(void *device);
+};
+
+typedef void (*hfi_cmd_response_callback) (enum hal_command_response cmd,
+			void *data);
+typedef void (*msm_cvp_callback) (enum hal_command_response response,
+			void *callback);
+struct msm_cvp_fw {
+	int cookie;
+};
+
+int cvp_hfi_process_msg_packet(u32 device_id,
+	void *msg_hdr, struct msm_cvp_cb_info *info);
+
+enum cvp_status cvp_hfi_process_sys_init_done_prop_read(
+	struct cvp_hfi_msg_sys_init_done_packet *pkt,
+	struct cvp_hal_sys_init_done *sys_init_done);
+
+enum cvp_status hfi_process_session_init_done_prop_read(
+	struct cvp_hfi_msg_sys_session_init_done_packet *pkt,
+	struct cvp_hal_session_init_done *session_init_done);
+
+struct cvp_hfi_ops *cvp_hfi_initialize(enum msm_cvp_hfi_type hfi_type,
+		struct msm_cvp_platform_resources *res,
+		hfi_cmd_response_callback callback);
+void cvp_hfi_deinitialize(enum msm_cvp_hfi_type hfi_type,
+			struct cvp_hfi_ops *hdev);
+
+int get_pkt_index(struct cvp_hal_session_cmd_pkt *hdr);
+int get_pkt_fenceoverride(struct cvp_hal_session_cmd_pkt *hdr);
+int get_pkt_index_from_type(u32 pkt_type);
+int get_hfi_version(void);
+unsigned int get_msg_size(struct cvp_hfi_msg_session_hdr *hdr);
+unsigned int get_msg_session_id(void *msg);
+unsigned int get_msg_errorcode(void *msg);
+int get_msg_opconfigs(void *msg, unsigned int *session_id,
+		unsigned int *error_type, unsigned int *config_id);
+extern const struct msm_cvp_hfi_defs cvp_hfi_defs[MAX_PKT_IDX];
+void print_hfi_queue_info(struct cvp_hfi_ops *hdev);
+#endif /*__CVP_HFI_API_H__ */

+ 511 - 0
qcom/opensource/eva-kernel/msm/eva/cvp_hfi_helper.h

@@ -0,0 +1,511 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ */
+
+#include "cvp_comm_def.h"
+#ifndef __H_CVP_HFI_HELPER_H__
+#define __H_CVP_HFI_HELPER_H__
+
+#define HFI_COMMON_BASE				(0)
+#define HFI_DOMAIN_BASE_COMMON		(HFI_COMMON_BASE + 0)
+#define HFI_DOMAIN_BASE_CVP			(HFI_COMMON_BASE + 0x04000000)
+
+#define HFI_ARCH_COMMON_OFFSET		(0)
+
+#define  HFI_CMD_START_OFFSET		(0x00010000)
+#define  HFI_MSG_START_OFFSET		(0x00020000)
+
+#define  HFI_ERR_NONE                                   (HFI_COMMON_BASE)        /**< Status: No error */
+#define  HFI_ERR_SYS_FATAL                              (HFI_COMMON_BASE + 0x1)  /**< Fatal system error */
+#define  HFI_ERR_SYS_INVALID_PARAMETER                  (HFI_COMMON_BASE + 0x2)  /**< Invalid system parameter encountered */
+#define  HFI_ERR_SYS_VERSION_MISMATCH                   (HFI_COMMON_BASE + 0x3)  /**< Interface version mismatch */
+#define  HFI_ERR_SYS_INSUFFICIENT_RESOURCES             (HFI_COMMON_BASE + 0x4)  /**< Insufficient system resources */
+#define  HFI_ERR_SYS_MAX_SESSIONS_REACHED               (HFI_COMMON_BASE + 0x5)  /**< Maximum number of sessions reached */
+#define  HFI_ERR_SYS_SESSION_IN_USE                     (HFI_COMMON_BASE + 0x7)  /**< Session ID specified is in use */
+#define  HFI_ERR_SYS_SESSION_ID_OUT_OF_RANGE            (HFI_COMMON_BASE + 0x8)  /**< ID is out of range */
+#define  HFI_ERR_SYS_UNSUPPORTED_TRIGCMD                (HFI_COMMON_BASE + 0xA)  /**< Unsupported TRIGCMD command*/
+#define  HFI_ERR_SYS_UNSUPPORTED_RESOURCES              (HFI_COMMON_BASE + 0xB)  /**< Unsupported resource*/
+#define  HFI_ERR_SYS_UNSUPPORT_CMD                      (HFI_COMMON_BASE + 0xC)  /**< Command is not supported*/
+#define  HFI_ERR_SYS_CMDSIZE                            (HFI_COMMON_BASE + 0xD)  /**< command size err*/
+#define  HFI_ERR_SYS_UNSUPPORT_PROPERTY                 (HFI_COMMON_BASE + 0xE)  /**< Unsupported property*/
+#define  HFI_ERR_SYS_INIT_EXPECTED                      (HFI_COMMON_BASE + 0xF)  /**< Upon FW start, first command must be SYS_INIT*/
+#define  HFI_ERR_SYS_INIT_IGNORED                       (HFI_COMMON_BASE + 0x10) /**< After FW started, SYS_INIT will be ignored*/
+#define  HFI_ERR_SYS_MAX_DME_SESSIONS_REACHED           (HFI_COMMON_BASE + 0x11) /**< Maximum DME sessions Reached */
+#define  HFI_ERR_SYS_MAX_FD_SESSIONS_REACHED            (HFI_COMMON_BASE + 0x12) /**< Maximum FD sessions Reached */
+#define  HFI_ERR_SYS_MAX_ODT_SESSIONS_REACHED           (HFI_COMMON_BASE + 0x13) /**< Maximum ODT sessions Reached*/
+#define  HFI_ERR_SYS_MAX_CV_SESSIONS_REACHED            (HFI_COMMON_BASE + 0x14) /**< Maximum CV sessions Reached*/
+#define  HFI_ERR_SYS_INVALID_SESSION_TYPE               (HFI_COMMON_BASE + 0x15) /**< Invalid session TYPE. */
+#define  HFI_ERR_SYS_NOC_ERROR							(HFI_COMMON_BASE + 0x16) /**< NOC Error encountered */
+
+/*
+ * Level 2 Comment: "Session Level Error types"
+ * Common HFI_ERROR_SESSION_X values to be used as session-level errors and
+ * warnings for events and messages.
+ */
+#define  HFI_ERR_SESSION_FATAL                          (HFI_COMMON_BASE + 0x1001)  /**< Fatal session error */
+#define  HFI_ERR_SESSION_INVALID_PARAMETER              (HFI_COMMON_BASE + 0x1002)  /**< Invalid session parameter */
+#define  HFI_ERR_SESSION_BAD_POINTER                    (HFI_COMMON_BASE + 0x1003)  /**< Bad pointer encountered */
+#define  HFI_ERR_SESSION_INVALID_SESSION_ID             (HFI_COMMON_BASE + 0x1004)  /**< Invalid session ID. eventData2 specifies the session ID. */
+#define  HFI_ERR_SESSION_INVALID_STREAM_ID              (HFI_COMMON_BASE + 0x1005)  /**< Invalid stream ID. eventData2 specifies the stream ID. */
+#define  HFI_ERR_SESSION_INCORRECT_STATE_OPERATION      (HFI_COMMON_BASE + 0x1006)  /**< Incorrect state for specified operation */
+#define  HFI_ERR_SESSION_UNSUPPORTED_PROPERTY           (HFI_COMMON_BASE + 0x1007)  /**< Unsupported property. eventData2 specifies the property index. */
+#define  HFI_ERR_SESSION_UNSUPPORTED_SETTING            (HFI_COMMON_BASE + 0x1008)  /**< Unsupported property setting. eventData2 specifies the property index. */
+#define  HFI_ERR_SESSION_INSUFFICIENT_RESOURCES         (HFI_COMMON_BASE + 0x1009)  /**< Insufficient resources for session */
+#define  HFI_ERR_SESSION_STREAM_CORRUPT_OUTPUT_STALLED  (HFI_COMMON_BASE + 0x100A)  /**< Stream is found to be corrupt; processing is stalled */
+#define  HFI_ERR_SESSION_STREAM_CORRUPT                 (HFI_COMMON_BASE + 0x100B)  /**< Stream is found to be corrupt; processing is recoverable */
+#define  HFI_ERR_SESSION_RESERVED                       (HFI_COMMON_BASE + 0x100C)  /**< Reserved  */
+#define  HFI_ERR_SESSION_UNSUPPORTED_STREAM             (HFI_COMMON_BASE + 0x100D)  /**< Unsupported stream */
+#define  HFI_ERR_SESSION_CMDSIZE                        (HFI_COMMON_BASE + 0x100E)  /**< Command packet size err*/
+#define  HFI_ERR_SESSION_UNSUPPORT_CMD                  (HFI_COMMON_BASE + 0x100F)  /**< Command is not supported*/
+#define  HFI_ERR_SESSION_UNSUPPORT_BUFFERTYPE           (HFI_COMMON_BASE + 0x1010)  /**< BufferType is not supported*/
+#define  HFI_ERR_SESSION_BUFFERCOUNT_TOOSMALL           (HFI_COMMON_BASE + 0x1011)  /**< Buffer Count is less than default*/
+#define  HFI_ERR_SESSION_INVALID_SCALE_FACTOR           (HFI_COMMON_BASE + 0x1012)  /**< Downscaling not possible */
+#define  HFI_ERR_SESSION_UPSCALE_NOT_SUPPORTED          (HFI_COMMON_BASE + 0x1013)  /**< Upscaling not possible */
+#define  HFI_ERR_SESSION_CANNOT_KEEP_ASPECT_RATIO       (HFI_COMMON_BASE + 0x1014)  /**< Cannot maintain aspect ratio */
+#define  HFI_ERR_SESSION_ADDRESS_NOT_ALIGNED            (HFI_COMMON_BASE + 0x1016)  /**< Address is not aligned */
+#define  HFI_ERR_SESSION_BUFFERSIZE_TOOSMALL            (HFI_COMMON_BASE + 0x1017)  /**< Buffer Count is less than default*/
+#define  HFI_ERR_SESSION_ABORTED                        (HFI_COMMON_BASE + 0x1018)  /**< error caused by session abort*/
+#define  HFI_ERR_SESSION_BUFFER_ALREADY_SET             (HFI_COMMON_BASE + 0x1019)  /**< Cannot set buffer multiple times without releasing in between. */
+#define  HFI_ERR_SESSION_BUFFER_ALREADY_RELEASED        (HFI_COMMON_BASE + 0x101A)  /**< Cannot release buffer multiple times without setting in between. */
+#define  HFI_ERR_SESSION_END_BUFFER_NOT_RELEASED        (HFI_COMMON_BASE + 0x101B)  /**< Session was ended without properly releasing all buffers */
+#define  HFI_ERR_SESSION_FLUSHED                        (HFI_COMMON_BASE + 0x101C)  /**< Cannot set buffer multiple times without releasing in between. */
+#define  HFI_ERR_SESSION_KERNEL_MAX_STREAMS_REACHED     (HFI_COMMON_BASE + 0x101D) /*Maximum Streams per Kernel reached in a session*/
+#define  HFI_ERR_SESSION_MAX_STREAMS_REACHED            (HFI_COMMON_BASE + 0x101E) /*Maximum Streams Reached in a session*/
+#define  HFI_ERR_SESSION_HW_HANG_DETECTED               (HFI_COMMON_BASE + 0x101F) /*HW hang was detected in one of the HW blocks for a frame*/
+
+#define HFI_EVENT_SYS_ERROR				(HFI_COMMON_BASE + 0x1)
+#define HFI_EVENT_SESSION_ERROR			(HFI_COMMON_BASE + 0x2)
+
+#define  HFI_TME_PROFILE_DEFAULT	0x00000001
+#define  HFI_TME_PROFILE_FRC		0x00000002
+#define  HFI_TME_PROFILE_ASW		0x00000004
+#define  HFI_TME_PROFILE_DFS_BOKEH	0x00000008
+
+#define HFI_TME_LEVEL_INTEGER		0x00000001
+
+#define HFI_BUFFER_INPUT				(HFI_COMMON_BASE + 0x1)
+#define HFI_BUFFER_OUTPUT				(HFI_COMMON_BASE + 0x2)
+#define HFI_BUFFER_OUTPUT2				(HFI_COMMON_BASE + 0x3)
+#define HFI_BUFFER_INTERNAL_PERSIST		(HFI_COMMON_BASE + 0x4)
+#define HFI_BUFFER_INTERNAL_PERSIST_1		(HFI_COMMON_BASE + 0x5)
+#define HFI_BUFFER_COMMON_INTERNAL_SCRATCH	(HFI_COMMON_BASE + 0x6)
+#define HFI_BUFFER_COMMON_INTERNAL_SCRATCH_1	(HFI_COMMON_BASE + 0x7)
+#define HFI_BUFFER_COMMON_INTERNAL_SCRATCH_2	(HFI_COMMON_BASE + 0x8)
+#define HFI_BUFFER_COMMON_INTERNAL_RECON	(HFI_COMMON_BASE + 0x9)
+#define HFI_BUFFER_EXTRADATA_OUTPUT		(HFI_COMMON_BASE + 0xA)
+#define HFI_BUFFER_EXTRADATA_OUTPUT2		(HFI_COMMON_BASE + 0xB)
+#define HFI_BUFFER_EXTRADATA_INPUT		(HFI_COMMON_BASE + 0xC)
+
+
+#define HFI_PROPERTY_SYS_COMMON_START		\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x0000)
+#define HFI_PROPERTY_SYS_DEBUG_CONFIG		\
+	(HFI_PROPERTY_SYS_COMMON_START + 0x001)
+#define HFI_PROPERTY_SYS_RESOURCE_OCMEM_REQUIREMENT_INFO	\
+	(HFI_PROPERTY_SYS_COMMON_START + 0x002)
+#define HFI_PROPERTY_SYS_CONFIG_VCODEC_CLKFREQ				\
+	(HFI_PROPERTY_SYS_COMMON_START + 0x003)
+#define HFI_PROPERTY_SYS_IDLE_INDICATOR         \
+	(HFI_PROPERTY_SYS_COMMON_START + 0x004)
+#define  HFI_PROPERTY_SYS_CODEC_POWER_PLANE_CTRL     \
+	(HFI_PROPERTY_SYS_COMMON_START + 0x005)
+#define  HFI_PROPERTY_SYS_IMAGE_VERSION    \
+	(HFI_PROPERTY_SYS_COMMON_START + 0x006)
+#define  HFI_PROPERTY_SYS_CONFIG_COVERAGE    \
+	(HFI_PROPERTY_SYS_COMMON_START + 0x007)
+#define  HFI_PROPERTY_SYS_UBWC_CONFIG    \
+	(HFI_PROPERTY_SYS_COMMON_START + 0x008)
+
+#define HFI_DEBUG_MSG_LOW					0x00000001
+#define HFI_DEBUG_MSG_MEDIUM					0x00000002
+#define HFI_DEBUG_MSG_HIGH					0x00000004
+#define HFI_DEBUG_MSG_ERROR					0x00000008
+#define HFI_DEBUG_MSG_FATAL					0x00000010
+#define HFI_DEBUG_MSG_PERF					0x00000020
+
+#define HFI_DEBUG_MODE_QUEUE					0x00000001
+#define HFI_DEBUG_MODE_QDSS					0x00000002
+
+struct cvp_hfi_debug_config {
+	u32 debug_config;
+	u32 debug_mode;
+};
+
+struct cvp_hfi_enable {
+	u32 enable;
+};
+
+#define HFI_RESOURCE_SYSCACHE 0x00000002
+
+struct cvp_hfi_resource_subcache_type {
+	u32 size;
+	u32 sc_id;
+};
+
+struct cvp_hfi_resource_syscache_info_type {
+	u32 num_entries;
+	struct cvp_hfi_resource_subcache_type rg_subcache_entries[1];
+};
+
+#define HFI_CMD_SYS_COMMON_START			\
+(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + HFI_CMD_START_OFFSET \
+	+ 0x0000)
+#define HFI_CMD_SYS_INIT		(HFI_CMD_SYS_COMMON_START + 0x001)
+#define HFI_CMD_SYS_PC_PREP		(HFI_CMD_SYS_COMMON_START + 0x002)
+#define HFI_CMD_SYS_SET_RESOURCE	(HFI_CMD_SYS_COMMON_START + 0x003)
+#define HFI_CMD_SYS_RELEASE_RESOURCE (HFI_CMD_SYS_COMMON_START + 0x004)
+#define HFI_CMD_SYS_SET_PROPERTY	(HFI_CMD_SYS_COMMON_START + 0x005)
+#define HFI_CMD_SYS_GET_PROPERTY	(HFI_CMD_SYS_COMMON_START + 0x006)
+#define HFI_CMD_SYS_SESSION_INIT	(HFI_CMD_SYS_COMMON_START + 0x007)
+#define HFI_CMD_SYS_SESSION_END		(HFI_CMD_SYS_COMMON_START + 0x008)
+#define HFI_CMD_SYS_SET_BUFFERS		(HFI_CMD_SYS_COMMON_START + 0x009)
+#define HFI_CMD_SYS_SESSION_ABORT	(HFI_CMD_SYS_COMMON_START + 0x00A)
+#define HFI_CMD_SYS_TEST_START		(HFI_CMD_SYS_COMMON_START + 0x100)
+
+#define HFI_MSG_SYS_COMMON_START			\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET +	\
+	HFI_MSG_START_OFFSET + 0x0000)
+#define HFI_MSG_SYS_INIT_DONE			(HFI_MSG_SYS_COMMON_START + 0x1)
+#define HFI_MSG_SYS_PC_PREP_DONE		(HFI_MSG_SYS_COMMON_START + 0x2)
+#define HFI_MSG_SYS_RELEASE_RESOURCE	(HFI_MSG_SYS_COMMON_START + 0x3)
+#define HFI_MSG_SYS_DEBUG			(HFI_MSG_SYS_COMMON_START + 0x4)
+#define HFI_MSG_SYS_SESSION_INIT_DONE	(HFI_MSG_SYS_COMMON_START + 0x6)
+#define HFI_MSG_SYS_SESSION_END_DONE	(HFI_MSG_SYS_COMMON_START + 0x7)
+#define HFI_MSG_SYS_IDLE		(HFI_MSG_SYS_COMMON_START + 0x8)
+#define HFI_MSG_SYS_COV                 (HFI_MSG_SYS_COMMON_START + 0x9)
+#define HFI_MSG_SYS_PROPERTY_INFO	(HFI_MSG_SYS_COMMON_START + 0xA)
+#define HFI_MSG_SYS_SESSION_ABORT_DONE	(HFI_MSG_SYS_COMMON_START + 0xC)
+#define HFI_MSG_SESSION_SYNC_DONE      (HFI_MSG_SESSION_OX_START + 0xD)
+
+#define HFI_MSG_SESSION_COMMON_START		\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET +	\
+	HFI_MSG_START_OFFSET + 0x1000)
+#define HFI_MSG_EVENT_NOTIFY	(HFI_MSG_SESSION_COMMON_START + 0x1)
+#define HFI_MSG_SESSION_GET_SEQUENCE_HEADER_DONE	\
+	(HFI_MSG_SESSION_COMMON_START + 0x2)
+
+#define HFI_CMD_SYS_TEST_SSR	(HFI_CMD_SYS_TEST_START + 0x1)
+#define HFI_TEST_SSR_SW_ERR_FATAL	0x1
+#define HFI_TEST_SSR_SW_DIV_BY_ZERO	0x2
+#define HFI_TEST_SSR_HW_WDOG_IRQ	0x3
+
+struct cvp_hal_cmd_pkt_hdr {
+	u32 size;
+	u32 packet_type;
+};
+
+struct cvp_hal_msg_pkt_hdr {
+	u32 size;
+	u32 packet;
+};
+
+struct cvp_hal_session_cmd_pkt {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+};
+
+struct cvp_hfi_cmd_sys_init_packet {
+	u32 size;
+	u32 packet_type;
+	u32 arch_type;
+};
+
+struct cvp_hfi_cmd_sys_pc_prep_packet {
+	u32 size;
+	u32 packet_type;
+};
+
+struct cvp_hfi_cmd_sys_set_resource_packet {
+	u32 size;
+	u32 packet_type;
+	u32 resource_handle;
+	u32 resource_type;
+	u32 rg_resource_data[1];
+};
+
+struct cvp_hfi_cmd_sys_release_resource_packet {
+	u32 size;
+	u32 packet_type;
+	u32 resource_type;
+	u32 resource_handle;
+};
+
+struct cvp_hfi_cmd_sys_set_property_packet {
+	u32 size;
+	u32 packet_type;
+	u32 num_properties;
+	u32 rg_property_data[1];
+};
+
+struct cvp_hfi_cmd_sys_get_property_packet {
+	u32 size;
+	u32 packet_type;
+	u32 num_properties;
+	u32 rg_property_data[1];
+};
+
+struct cvp_hfi_cmd_sys_session_init_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 session_type;
+	u32 session_kmask;
+	u32 session_prio;
+	u32 is_secure;
+	u32 dsp_ac_mask;
+};
+
+struct cvp_hfi_cmd_sys_session_end_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+};
+
+struct cvp_hfi_cmd_sys_set_buffers_packet {
+	u32 size;
+	u32 packet_type;
+	u32 buffer_type;
+	u32 buffer_size;
+	u32 num_buffers;
+	u32 rg_buffer_addr[1];
+};
+
+struct cvp_hfi_cmd_sys_set_ubwc_config_packet_type {
+	u32 size;
+	u32 packet_type;
+	struct {
+		u32 max_channel_override : 1;
+		u32 mal_length_override : 1;
+		u32 hb_override : 1;
+		u32 bank_swzl_level_override : 1;
+		u32 bank_spreading_override : 1;
+		u32 reserved : 27;
+	} override_bit_info;
+	u32 max_channels;
+	u32 mal_length;
+	u32 highest_bank_bit;
+	u32 bank_swzl_level;
+	u32 bank_spreading;
+	u32 reserved[2];
+};
+
+struct cvp_hfi_cmd_session_set_property_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 num_properties;
+	u32 rg_property_data[1];
+};
+
+struct cvp_hfi_client {
+	u32 transaction_id;
+	u32 data1;
+	u32 data2;
+	u64 kdata;
+	u32 reserved1;
+	u32 reserved2;
+} __packed;
+
+struct cvp_hfi_buf_type {
+	u32 iova;
+	u32 size;
+	u32 offset;
+	u32 flags;
+	u32 reserved1;
+	u32 reserved2;
+	u32 fence_type;
+	u32 input_handle;
+	u32 output_handle;
+};
+
+struct cvp_hfi_cmd_session_set_buffers_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	struct cvp_hfi_client client_data;
+	struct cvp_hfi_buf_type buf_type;
+} __packed;
+
+struct cvp_session_release_buffers_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	struct cvp_hfi_client client_data;
+	u32 kernel_type;
+	u32 buffer_type;
+	u32 num_buffers;
+	u32 buffer_idx;
+} __packed;
+
+struct cvp_hfi_cmd_session_hdr {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	struct cvp_hfi_client client_data;
+	u32 stream_idx;
+} __packed;
+
+struct cvp_hfi_msg_session_hdr {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+	struct cvp_hfi_client client_data;
+	u32 stream_idx;
+} __packed;
+
+struct cvp_hfi_dumpmsg_session_hdr {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+	struct cvp_hfi_client client_data;
+	u32 dump_offset;
+	u32 dump_size;
+} __packed;
+
+#define HFI_MAX_HW_ACTIVATIONS_PER_FRAME (6)
+
+enum hfi_hw_thread {
+	HFI_HW_FDU,
+	HFI_HW_MPU,
+	HFI_HW_OD,
+	HFI_HW_ICA,
+	HFI_HW_VADL,
+	HFI_HW_TOF,
+	HFI_HW_RGE,
+	HFI_HW_XRA,
+	HFI_HW_LSR,
+	HFI_MAX_HW_THREADS
+};
+
+struct cvp_hfi_msg_session_hdr_ext {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+	struct cvp_hfi_client client_data;
+	u32 stream_idx;
+	u32 busy_cycles;
+	u32 total_cycles;
+	u32 hw_cycles[HFI_MAX_HW_THREADS][HFI_MAX_HW_ACTIVATIONS_PER_FRAME];
+	u32 fw_cycles[HFI_MAX_HW_ACTIVATIONS_PER_FRAME];
+} __packed;
+
+struct cvp_hfi_buffer_mapping_type {
+	u32 index;
+	u32 device_addr;
+	u32 size;
+};
+
+struct cvp_hfi_cmd_session_sync_process_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 sync_id;
+	u32 rg_data[1];
+};
+
+struct cvp_hfi_msg_event_notify_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 event_id;
+	u32 event_data1;
+	u32 event_data2;
+	u32 rg_ext_event_data[1];
+};
+
+struct cvp_hfi_msg_session_op_cfg_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+	struct cvp_hfi_client client_data;
+	u32 stream_idx;
+	u32 op_conf_id;
+} __packed;
+
+struct cvp_hfi_msg_sys_init_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 error_type;
+	u32 num_properties;
+	u32 rg_property_data[1];
+};
+
+struct cvp_hfi_msg_sys_pc_prep_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 error_type;
+};
+
+struct cvp_hfi_msg_sys_release_resource_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 resource_handle;
+	u32 error_type;
+};
+
+struct cvp_hfi_msg_sys_session_init_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+	u32 num_properties;
+	u32 rg_property_data[1];
+};
+
+struct cvp_hfi_msg_sys_session_end_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+};
+
+struct cvp_hfi_msg_session_get_sequence_header_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+	u32 header_len;
+	u32 sequence_header;
+};
+
+struct cvp_hfi_msg_sys_debug_packet {
+	u32 size;
+	u32 packet_type;
+	u32 msg_type;
+	u32 msg_size;
+	u32 time_stamp_hi;
+	u32 time_stamp_lo;
+	u8 rg_msg_data[1];
+};
+
+struct cvp_hfi_packet_header {
+	u32 size;
+	u32 packet_type;
+};
+
+struct cvp_hfi_sfr_struct {
+	u32 bufSize;
+	u8 rg_data[1];
+};
+
+struct cvp_hfi_cmd_sys_test_ssr_packet {
+	u32 size;
+	u32 packet_type;
+	u32 trigger_type;
+};
+
+struct cvp_hfi_msg_sys_session_ctrl_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+	struct cvp_hfi_client client_data;
+};
+
+#endif

+ 311 - 0
qcom/opensource/eva-kernel/msm/eva/cvp_hfi_io.h

@@ -0,0 +1,311 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __CVP_HFI_IO_H__
+#define __CVP_HFI_IO_H__
+
+#include <linux/io.h>
+
+#define CVP_TOP_BASE_OFFS			0x00000000
+#define CVP_SS_IDLE_STATUS		(CVP_TOP_BASE_OFFS + 0x50)
+
+#define CVP_CPU_BASE_OFFS			0x000A0000
+#define CVP_AON_BASE_OFFS			0x000E0000
+
+#define CVP_CPU_CS_A2HSOFTINTEN		(CVP_CPU_BASE_OFFS + 0x10)
+#define CVP_CPU_CS_A2HSOFTINTENCLR	(CVP_CPU_BASE_OFFS + 0x14)
+#define CVP_CPU_CS_A2HSOFTINT		(CVP_CPU_BASE_OFFS + 0x18)
+#define CVP_CPU_CS_A2HSOFTINTCLR	(CVP_CPU_BASE_OFFS + 0x1C)
+#define CVP_CPU_CS_VMIMSG		(CVP_CPU_BASE_OFFS + 0x34)
+#define CVP_CPU_CS_VMIMSGAG0		(CVP_CPU_BASE_OFFS + 0x38)
+#define CVP_CPU_CS_VMIMSGAG1		(CVP_CPU_BASE_OFFS + 0x3C)
+#define CVP_CPU_CS_VMIMSGAG2		(CVP_CPU_BASE_OFFS + 0x40)
+#define CVP_CPU_CS_VMIMSGAG3		(CVP_CPU_BASE_OFFS + 0x44)
+#define CVP_CPU_CS_SCIACMD			(CVP_CPU_BASE_OFFS + 0x48)
+#define CVP_CPU_CS_AXI4_QOS		(CVP_CPU_BASE_OFFS + 0x13C)
+#define CVP_CPU_CS_H2XSOFTINTEN		(CVP_CPU_BASE_OFFS + 0x148)
+
+/* CVP_CTRL_STATUS */
+#define CVP_CPU_CS_SCIACMDARG0		(CVP_CPU_BASE_OFFS + 0x4C)
+#define CVP_CPU_CS_SCIACMDARG0_BMSK	0xff
+#define CVP_CPU_CS_SCIACMDARG0_SHFT	0x0
+#define CVP_CPU_CS_SCIACMDARG0_HFI_CTRL_ERROR_STATUS_BMSK	0xfe
+#define CVP_CPU_CS_SCIACMDARG0_HFI_CTRL_ERROR_STATUS_SHFT	0x1
+#define CVP_CPU_CS_SCIACMDARG0_HFI_CTRL_INIT_STATUS_BMSK	0x1
+#define CVP_CPU_CS_SCIACMDARG0_HFI_CTRL_INIT_STATUS_SHFT	0x0
+#define CVP_CPU_CS_SCIACMDARG0_HFI_CTRL_PC_READY           0x100
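+
+/*
+ * Illustrative status check (a sketch; __read_register() is the accessor
+ * used elsewhere in this driver):
+ *
+ *	u32 ctrl = __read_register(device, CVP_CPU_CS_SCIACMDARG0);
+ *	bool pc_ready = ctrl & CVP_CPU_CS_SCIACMDARG0_HFI_CTRL_PC_READY;
+ */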
+
+/* HFI_QTBL_INFO */
+#define CVP_CPU_CS_SCIACMDARG1		(CVP_CPU_BASE_OFFS + 0x50)
+
+/* HFI_QTBL_ADDR */
+#define CVP_CPU_CS_SCIACMDARG2		(CVP_CPU_BASE_OFFS + 0x54)
+
+/* HFI_VERSION_INFO */
+#define CVP_CPU_CS_SCIACMDARG3		(CVP_CPU_BASE_OFFS + 0x58)
+
+/* CVP_SFR_ADDR */
+#define CVP_CPU_CS_SCIBCMD		(CVP_CPU_BASE_OFFS + 0x5C)
+
+/* CVP_MMAP_ADDR */
+#define CVP_CPU_CS_SCIBCMDARG0		(CVP_CPU_BASE_OFFS + 0x60)
+
+/* CVP_UC_REGION_ADDR */
+#define CVP_CPU_CS_SCIBARG1		(CVP_CPU_BASE_OFFS + 0x64)
+
+/* CVP_UC_REGION_ADDR */
+#define CVP_CPU_CS_SCIBARG2		(CVP_CPU_BASE_OFFS + 0x68)
+
+#define CVP_CPU_CS_SCIBARG3		(CVP_CPU_BASE_OFFS + 0x6C)
+
+#define CVP_CPU_CS_H2ASOFTINTEN		(CVP_CPU_BASE_OFFS + 0x148)
+#define CVP_CPU_CS_H2ASOFTINTENCLR	(CVP_CPU_BASE_OFFS + 0x14c)
+#define CVP_CPU_CS_H2ASOFTINT		(CVP_CPU_BASE_OFFS + 0x150)
+#define CVP_CPU_CS_H2ASOFTINTCLR	(CVP_CPU_BASE_OFFS + 0x154)
+
+#define CVP_AHB_BRIDGE_SYNC_RESET	(CVP_CPU_BASE_OFFS + 0x160)
+
+/* FAL10 Feature Control */
+#define CVP_CPU_CS_X2RPMh		(CVP_CPU_BASE_OFFS + 0x168)
+#define CVP_CPU_CS_X2RPMh_MASK0_BMSK	0x1
+#define CVP_CPU_CS_X2RPMh_MASK0_SHFT	0x0
+#define CVP_CPU_CS_X2RPMh_MASK1_BMSK	0x2
+#define CVP_CPU_CS_X2RPMh_MASK1_SHFT	0x1
+#define CVP_CPU_CS_X2RPMh_SWOVERRIDE_BMSK	0x4
+#define CVP_CPU_CS_X2RPMh_SWOVERRIDE_SHFT	0x3
+
+#define CVP_CPU_CS_X2RPMh_STATUS	(CVP_CPU_BASE_OFFS + 0x170)
+
+/*
+ * --------------------------------------------------------------------------
+ * MODULE: cvp_wrapper
+ * --------------------------------------------------------------------------
+ */
+#define CVP_WRAPPER_BASE_OFFS		0x000B0000
+
+#define CVP_WRAPPER_HW_VERSION		(CVP_WRAPPER_BASE_OFFS + 0x00)
+#define CVP_WRAPPER_HW_VERSION_MAJOR_VERSION_MASK  0x78000000
+#define CVP_WRAPPER_HW_VERSION_MAJOR_VERSION_SHIFT 28
+#define CVP_WRAPPER_HW_VERSION_MINOR_VERSION_MASK  0xFFF0000
+#define CVP_WRAPPER_HW_VERSION_MINOR_VERSION_SHIFT 16
+#define CVP_WRAPPER_HW_VERSION_STEP_VERSION_MASK   0xFFFF
+
+#define CVP_WRAPPER_INTR_STATUS	(CVP_WRAPPER_BASE_OFFS + 0x0C)
+#define CVP_WRAPPER_INTR_STATUS_A2HWD_BMSK	0x8
+#define CVP_WRAPPER_INTR_STATUS_A2H_BMSK	0x4
+
+#define CVP_SS_IRQ_MASK    (CVP_TOP_BASE_OFFS + 0x04)
+#define CVP_SS_INTR_BMASK  (0x100)
+#define CVP_WRAPPER_INTR_MASK		(CVP_WRAPPER_BASE_OFFS + 0x10)
+#define CVP_FATAL_INTR_BMSK	(CVP_WRAPPER_INTR_MASK_CPU_NOC_BMSK | \
+				CVP_WRAPPER_INTR_MASK_CORE_NOC_BMSK)
+#define CVP_WRAPPER_INTR_MASK_CPU_NOC_BMSK	0x40
+#define CVP_WRAPPER_INTR_MASK_CORE_NOC_BMSK	0x20
+#define CVP_WRAPPER_INTR_MASK_A2HWD_BMSK	0x8
+#define CVP_WRAPPER_INTR_MASK_A2HCPU_BMSK	0x4
+#define CVP_WRAPPER_INTR_MASK_A2HCPU_SHFT	0x2
+
+#define CVP_WRAPPER_INTR_CLEAR		(CVP_WRAPPER_BASE_OFFS + 0x14)
+
+#define CVP_WRAPPER_TZ_BASE_OFFS		0x000C0000
+
+#define CVP_WRAPPER_TZ_CPU_CLOCK_CONFIG	(CVP_WRAPPER_TZ_BASE_OFFS)
+#define CVP_WRAPPER_INTR_CLEAR_A2HWD_BMSK	0x10
+#define CVP_WRAPPER_INTR_CLEAR_A2HWD_SHFT	0x4
+#define CVP_WRAPPER_INTR_CLEAR_A2H_BMSK	0x4
+#define CVP_WRAPPER_INTR_CLEAR_A2H_SHFT	0x2
+#define CVP_WRAPPER_CPU_STATUS		(CVP_WRAPPER_TZ_BASE_OFFS + 0x10)
+#define CVP_WRAPPER_AXI_CLOCK_CONFIG	(CVP_WRAPPER_TZ_BASE_OFFS + 0x14)
+#define CVP_WRAPPER_QNS4PDXFIFO_RESET	(CVP_WRAPPER_TZ_BASE_OFFS + 0x18)
+#define CVP_WRAPPER_CPU_CGC_DIS	(CVP_WRAPPER_BASE_OFFS + 0x2010)
+
+#define CVP_WRAPPER_CPU_CLOCK_CONFIG	(CVP_WRAPPER_BASE_OFFS + 0x50)
+#define CVP_WRAPPER_DEBUG_BRIDGE_LPI_CONTROL	(CVP_WRAPPER_BASE_OFFS + 0x54)
+#define CVP_WRAPPER_DEBUG_BRIDGE_LPI_STATUS	(CVP_WRAPPER_BASE_OFFS + 0x58)
+#define CVP_WRAPPER_CPU_NOC_LPI_CONTROL		(CVP_WRAPPER_BASE_OFFS + 0x5C)
+#define CVP_WRAPPER_CPU_NOC_LPI_STATUS		(CVP_WRAPPER_BASE_OFFS + 0x60)
+#define CVP_WRAPPER_CORE_CLOCK_CONFIG		(CVP_WRAPPER_BASE_OFFS + 0x88)
+
+#define CVP_CTRL_INIT		CVP_CPU_CS_SCIACMD
+
+#define CVP_CTRL_STATUS	CVP_CPU_CS_SCIACMDARG0
+#define CVP_CTRL_INIT_STATUS__M \
+		CVP_CPU_CS_SCIACMDARG0_HFI_CTRL_INIT_STATUS_BMSK
+#define CVP_CTRL_ERROR_STATUS__M \
+		CVP_CPU_CS_SCIACMDARG0_HFI_CTRL_ERROR_STATUS_BMSK
+#define CVP_CTRL_INIT_IDLE_MSG_BMSK \
+		CVP_CPU_CS_SCIACMDARG0_HFI_CTRL_INIT_IDLE_MSG_BMSK
+#define CVP_CTRL_STATUS_PC_READY \
+		CVP_CPU_CS_SCIACMDARG0_HFI_CTRL_PC_READY
+
+
+#define CVP_QTBL_INFO		CVP_CPU_CS_SCIACMDARG1
+
+#define CVP_QTBL_ADDR		CVP_CPU_CS_SCIACMDARG2
+
+#define CVP_VERSION_INFO	CVP_CPU_CS_SCIACMDARG3
+
+#define CVP_SFR_ADDR		CVP_CPU_CS_SCIBCMD
+#define CVP_MMAP_ADDR		CVP_CPU_CS_SCIBCMDARG0
+#define CVP_UC_REGION_ADDR	CVP_CPU_CS_SCIBARG1
+#define CVP_UC_REGION_SIZE	CVP_CPU_CS_SCIBARG2
+
+/* HFI_DSP_QTBL_ADDR
+ * 31:3 - HFI_DSP_QTBL_ADDR
+ *        4-byte aligned Address
+ */
+#define HFI_DSP_QTBL_ADDR	CVP_CPU_CS_VMIMSG
+
+/* HFI_DSP_UC_REGION_ADDR
+ * 31:20 - HFI_DSP_UC_REGION_ADDR
+ *         1MB aligned address.
+ *         Uncached Region start Address. This region covers
+ *         HFI DSP QTable,
+ *         HFI DSP Queue Headers,
+ *         HFI DSP Queues,
+ */
+#define HFI_DSP_UC_REGION_ADDR	CVP_CPU_CS_VMIMSGAG0
+
+/* HFI_DSP_UC_REGION_SIZE
+ * 31:20 - HFI_DSP_UC_REGION_SIZE
+ *         Multiples of 1MB.
+ *         Size of the DSP_UC_REGION Uncached Region
+ */
+#define HFI_DSP_UC_REGION_SIZE	CVP_CPU_CS_VMIMSGAG1
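+
+/*
+ * Example programming of the DSP uncached region (illustrative values; per
+ * the comments above, the base must be 1MB aligned and the size a multiple
+ * of 1MB):
+ *
+ *	__write_register(device, HFI_DSP_UC_REGION_ADDR, 0x90000000);
+ *	__write_register(device, HFI_DSP_UC_REGION_SIZE, 0x00200000);
+ */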
+
+/*
+ * --------------------------------------------------------------------------
+ * MODULE: vcodec noc error log registers
+ * --------------------------------------------------------------------------
+ */
+#define CVP_NOC_BASE_OFFS		0x000D0000
+#define CVP_NOC_ERR_SWID_LOW_OFFS	(CVP_NOC_BASE_OFFS + 0x0)
+#define CVP_NOC_ERR_SWID_HIGH_OFFS	(CVP_NOC_BASE_OFFS + 0x4)
+#define CVP_NOC_ERR_MAINCTL_LOW_OFFS	(CVP_NOC_BASE_OFFS + 0x8)
+#define CVP_NOC_ERR_ERRVLD_LOW_OFFS	(CVP_NOC_BASE_OFFS + 0x10)
+#define CVP_NOC_ERR_ERRCLR_LOW_OFFS	(CVP_NOC_BASE_OFFS + 0x18)
+#define CVP_NOC_ERR_ERRLOG0_LOW_OFFS	(CVP_NOC_BASE_OFFS + 0x20)
+#define CVP_NOC_ERR_ERRLOG0_HIGH_OFFS	(CVP_NOC_BASE_OFFS + 0x24)
+#define CVP_NOC_ERR_ERRLOG1_LOW_OFFS	(CVP_NOC_BASE_OFFS + 0x28)
+#define CVP_NOC_ERR_ERRLOG1_HIGH_OFFS	(CVP_NOC_BASE_OFFS + 0x2C)
+#define CVP_NOC_ERR_ERRLOG2_LOW_OFFS	(CVP_NOC_BASE_OFFS + 0x30)
+#define CVP_NOC_ERR_ERRLOG2_HIGH_OFFS	(CVP_NOC_BASE_OFFS + 0x34)
+#define CVP_NOC_ERR_ERRLOG3_LOW_OFFS	(CVP_NOC_BASE_OFFS + 0x38)
+#define CVP_NOC_ERR_ERRLOG3_HIGH_OFFS	(CVP_NOC_BASE_OFFS + 0x3C)
+#define CVP_NOC_SBM_FAULTINEN0_LOW	(CVP_NOC_BASE_OFFS + 0x240)
+#define CVP_NOC_SBM_FAULTINSTATUS0_LOW	(CVP_NOC_BASE_OFFS + 0x248)
+#define CVP_NOC_SBM_SENSELN0_LOW	(CVP_NOC_BASE_OFFS + 0x300)
+
+#define CVP_NOC_CORE_BASE_OFFS			0x00010000
+#define CVP_NOC_RGE_NIU_DECCTL_LOW \
+		(CVP_NOC_CORE_BASE_OFFS + 0x3108)
+#define CVP_NOC_RGE_NIU_ENCCTL_LOW \
+		(CVP_NOC_CORE_BASE_OFFS + 0x3188)
+#define CVP_NOC_GCE_VADL_TOF_NIU_DECCTL_LOW \
+		(CVP_NOC_CORE_BASE_OFFS + 0x3508)
+#define CVP_NOC_GCE_VADL_TOF_NIU_ENCCTL_LOW \
+		(CVP_NOC_CORE_BASE_OFFS + 0x3588)
+#define CVP_NOC_MAIN_SIDEBANDMANAGER_FAULTINEN0_LOW \
+		(CVP_NOC_CORE_BASE_OFFS + 0x0240)
+#define CVP_NOC_MAIN_SIDEBANDMANAGER_SENSELN0_LOW \
+		(CVP_NOC_CORE_BASE_OFFS + 0x0300)
+#define CVP_NOC_MAIN_SIDEBANDMANAGER_SENSELN0_HIGH \
+		(CVP_NOC_CORE_BASE_OFFS + 0x0304)
+#define CVP_NOC_MAIN_SIDEBANDMANAGER_SENSELN1_HIGH \
+		(CVP_NOC_CORE_BASE_OFFS + 0x030C)
+#define CVP_NOC_CORE_ERR_SWID_LOW_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x0000)
+#define CVP_NOC_CORE_ERR_SWID_HIGH_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x0004)
+#define CVP_NOC_CORE_ERR_MAINCTL_LOW_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x0008)
+#define CVP_NOC_CORE_ERR_ERRVLD_LOW_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x0010)
+#define CVP_NOC_CORE_ERR_ERRCLR_LOW_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x0018)
+#define CVP_NOC_CORE_ERR_ERRLOG0_LOW_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x0020)
+#define CVP_NOC_CORE_ERR_ERRLOG0_HIGH_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x0024)
+#define CVP_NOC_CORE_ERR_ERRLOG1_LOW_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x0028)
+#define CVP_NOC_CORE_ERR_ERRLOG1_HIGH_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x002C)
+#define CVP_NOC_CORE_ERR_ERRLOG2_LOW_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x0030)
+#define CVP_NOC_CORE_ERR_ERRLOG2_HIGH_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x0034)
+#define CVP_NOC_CORE_ERR_ERRLOG3_LOW_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x0038)
+#define CVP_NOC_CORE_ERR_ERRLOG3_HIGH_OFFS \
+		(CVP_NOC_CORE_BASE_OFFS + 0x003C)
+
+#define CVP_NOC_RCG_VNOC_NOC_CLK_FORCECLOCKON_LOW \
+		(CVP_NOC_CORE_BASE_OFFS + 0x2018)
+/* NoC QoS registers */
+#define CVP_NOC_RGE_PRIORITYLUT_LOW \
+		(CVP_NOC_CORE_BASE_OFFS + 0x3030)
+#define CVP_NOC_RGE_PRIORITYLUT_HIGH \
+		(CVP_NOC_CORE_BASE_OFFS + 0x3034)
+#define CVP_NOC_RGE_URGENCY_LOW \
+		(CVP_NOC_CORE_BASE_OFFS + 0x3038)
+#define CVP_NOC_RGE_DANGERLUT_LOW \
+		(CVP_NOC_CORE_BASE_OFFS + 0x3040)
+#define CVP_NOC_RGE_SAFELUT_LOW \
+		(CVP_NOC_CORE_BASE_OFFS + 0x3048)
+#define CVP_NOC_GCE_PRIORITYLUT_LOW \
+		(CVP_NOC_CORE_BASE_OFFS + 0x3430)
+#define CVP_NOC_GCE_PRIORITYLUT_HIGH \
+		(CVP_NOC_CORE_BASE_OFFS + 0x3434)
+#define CVP_NOC_GCE_URGENCY_LOW \
+		(CVP_NOC_CORE_BASE_OFFS + 0x3438)
+#define CVP_NOC_GCE_DANGERLUT_LOW \
+		(CVP_NOC_CORE_BASE_OFFS + 0x3440)
+#define CVP_NOC_GCE_SAFELUT_LOW \
+		(CVP_NOC_CORE_BASE_OFFS + 0x3448)
+#define CVP_NOC_CDM_PRIORITYLUT_LOW \
+		(CVP_NOC_CORE_BASE_OFFS + 0x3830)
+#define CVP_NOC_CDM_PRIORITYLUT_HIGH \
+		(CVP_NOC_CORE_BASE_OFFS + 0x3834)
+#define CVP_NOC_CDM_URGENCY_LOW \
+		(CVP_NOC_CORE_BASE_OFFS + 0x3838)
+#define CVP_NOC_CDM_DANGERLUT_LOW \
+		(CVP_NOC_CORE_BASE_OFFS + 0x3840)
+#define CVP_NOC_CDM_SAFELUT_LOW \
+		(CVP_NOC_CORE_BASE_OFFS + 0x3848)
+
+
+/* End of NoC Qos */
+
+#define CVP_NOC_RCGCONTROLLER_MAINCTL_LOW \
+		(CVP_NOC_CORE_BASE_OFFS + 0xC008)
+#define CVP_NOC_RCGCONTROLLER_HYSTERESIS_LOW \
+		(CVP_NOC_CORE_BASE_OFFS + 0xC010)
+#define CVP_NOC_RESET_REQ \
+		(CVP_NOC_CORE_BASE_OFFS + 0xf000)
+#define CVP_NOC_RESET_ACK \
+		(CVP_NOC_CORE_BASE_OFFS + 0xf004)
+
+
+#define CVP_AON_WRAPPER_CVP_NOC_LPI_CONTROL	(CVP_AON_BASE_OFFS + 0x8)
+#define CVP_AON_WRAPPER_CVP_NOC_LPI_STATUS	(CVP_AON_BASE_OFFS + 0xC)
+#define CVP_AON_WRAPPER_CVP_NOC_ARCG_CONTROL	(CVP_AON_BASE_OFFS + 0x14)
+#define CVP_AON_WRAPPER_CVP_NOC_CORE_CLK_CONTROL	(CVP_AON_BASE_OFFS + 0x24)
+#define CVP_AON_WRAPPER_CVP_NOC_CORE_SW_RESET	(CVP_AON_BASE_OFFS + 0x1C)
+#define CVP_AON_WRAPPER_SPARE		(CVP_AON_BASE_OFFS + 0x28)
+
+#define CVP_CC_BASE_OFFS			0xF8000
+#define CVP_CC_MVS1C_GDSCR			(CVP_CC_BASE_OFFS + 0x78)
+#define CVP_CC_MVS1C_CBCR			(CVP_CC_BASE_OFFS + 0x90)
+#define CVP_CC_MVS1_GDSCR			(CVP_CC_BASE_OFFS + 0xCC)
+#define CVP_CC_MVS1_CBCR			(CVP_CC_BASE_OFFS + 0xE0)
+#define CVP_CC_AHB_CBCR			(CVP_CC_BASE_OFFS + 0xF4)
+#define CVP_CC_XO_CBCR				(CVP_CC_BASE_OFFS + 0x124)
+#define CVP_CC_SLEEP_CBCR			(CVP_CC_BASE_OFFS + 0x150)
+
+#define CVP_GCC_VIDEO_AXI1_CBCR		(0x22024)
+
+#endif

+ 343 - 0
qcom/opensource/eva-kernel/msm/eva/cvp_power.c

@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ */
+
+#include "msm_cvp.h"
+#include "cvp_power.h"
+
+static inline unsigned long find_max(unsigned long *array, unsigned int num)
+{
+	unsigned long max = 0;
+	unsigned int i;
+
+	for (i = 0; i < num; i++)
+		max = array[i] > max ? array[i] : max;
+
+	return max;
+}
+
+static bool is_subblock_profile_existed(struct msm_cvp_inst *inst)
+{
+	return (inst->prop.cycles[HFI_HW_OD] ||
+			inst->prop.cycles[HFI_HW_MPU] ||
+			inst->prop.cycles[HFI_HW_FDU] ||
+			inst->prop.cycles[HFI_HW_ICA] ||
+			inst->prop.cycles[HFI_HW_VADL] ||
+			inst->prop.cycles[HFI_HW_TOF] ||
+			inst->prop.cycles[HFI_HW_RGE] ||
+			inst->prop.cycles[HFI_HW_XRA] ||
+			inst->prop.cycles[HFI_HW_LSR]);
+}
+
+static char hw_names[HFI_MAX_HW_THREADS][8] = {{"FDU"}, {"MPU"}, {"OD"}, {"ICA"},
+				{"VADL"}, {"TOF"}, {"RGE"}, {"XRA"},
+				{"LSR"}};
+static void aggregate_power_update(struct msm_cvp_core *core,
+	struct cvp_power_level *nrt_pwr,
+	struct cvp_power_level *rt_pwr,
+	unsigned int max_clk_rate)
+{
+	struct msm_cvp_inst *inst;
+	int i, j;
+	unsigned long blocks_sum[2][HFI_MAX_HW_THREADS] = {0};
+	unsigned long fw_sum[2] = {0}, max_cycle[2] = {0}, op_max_cycle[2] = {0};
+	unsigned long op_blocks_max[2][HFI_MAX_HW_THREADS] = {0};
+	unsigned long op_fw_max[2] = {0}, bw_sum[2] = {0}, op_bw_max[2] = {0};
+
+	list_for_each_entry(inst, &core->instances, list) {
+		if (inst->state == MSM_CVP_CORE_INVALID ||
+			inst->state == MSM_CVP_CORE_UNINIT ||
+			!is_subblock_profile_existed(inst))
+			continue;
+		if (inst->prop.priority <= CVP_RT_PRIO_THRESHOLD) {
+			/* Non-realtime session use index 0 */
+			i = 0;
+		} else {
+			i = 1;
+		}
+		for (j = 0; j < HFI_MAX_HW_THREADS; j++)
+			if (inst->prop.cycles[j])
+				dprintk(CVP_PWR, "pwrUpdate %s %u\n",
+					hw_names[j], inst->prop.cycles[j]);
+
+		for (j = 0; j < HFI_MAX_HW_THREADS; j++)
+			if (inst->prop.op_cycles[j])
+				dprintk(CVP_PWR, "pwrUpdate_OP %s %u\n",
+					hw_names[j], inst->prop.op_cycles[j]);
+
+		dprintk(CVP_PWR, " fw %u fw_o %u\n", inst->prop.fw_cycles,
+						inst->prop.fw_op_cycles);
+
+		for (j = 0; j < HFI_MAX_HW_THREADS; j++)
+			blocks_sum[i][j] += inst->prop.cycles[j];
+
+		fw_sum[i] += inst->prop.fw_cycles;
+
+		for (j = 0; j < HFI_MAX_HW_THREADS; j++)
+			op_blocks_max[i][j] =
+				(op_blocks_max[i][j] >= inst->prop.op_cycles[j]) ?
+				op_blocks_max[i][j] : inst->prop.op_cycles[j];
+
+		op_fw_max[i] =
+			(op_fw_max[i] >= inst->prop.fw_op_cycles) ?
+			op_fw_max[i] : inst->prop.fw_op_cycles;
+
+		bw_sum[i] += inst->prop.ddr_bw;
+
+		op_bw_max[i] =
+			(op_bw_max[i] >= inst->prop.ddr_op_bw) ?
+			op_bw_max[i] : inst->prop.ddr_op_bw;
+
+		for (j = 0; j < HFI_MAX_HW_THREADS; j++) {
+			if (inst->prop.fps[j])
+				dprintk(CVP_PWR, "fps %s %d ", hw_names[j],
+						inst->prop.fps[j]);
+		}
+
+	}
+
+	for (i = 0; i < 2; i++) {
+		max_cycle[i] = find_max(&blocks_sum[i][0], HFI_MAX_HW_THREADS);
+		op_max_cycle[i] = find_max(&op_blocks_max[i][0], HFI_MAX_HW_THREADS);
+
+		op_max_cycle[i] =
+			(op_max_cycle[i] > max_clk_rate) ?
+			max_clk_rate : op_max_cycle[i];
+		bw_sum[i] = (bw_sum[i] >= op_bw_max[i]) ?
+			bw_sum[i] : op_bw_max[i];
+	}
+
+	nrt_pwr->core_sum += max_cycle[0];
+	nrt_pwr->op_core_sum = (nrt_pwr->op_core_sum >= op_max_cycle[0]) ?
+			nrt_pwr->op_core_sum : op_max_cycle[0];
+	nrt_pwr->bw_sum += bw_sum[0];
+	rt_pwr->core_sum += max_cycle[1];
+	rt_pwr->op_core_sum = (rt_pwr->op_core_sum >= op_max_cycle[1]) ?
+			rt_pwr->op_core_sum : op_max_cycle[1];
+	rt_pwr->bw_sum += bw_sum[1];
+}
+
+/**
+ * adjust_bw_freqs() - calculate the CVP clock frequency and bandwidth
+ * required to sustain the aggregated use case.
+ *
+ * The bandwidth vote is best-effort: no error is returned if the requested
+ * bandwidth exceeds the maximum limit.
+ * Clock votes from non-realtime sessions are also best-effort: no error is
+ * returned if the aggregated session clock request exceeds the maximum limit.
+ * Clock votes from realtime sessions are hard requests: if the aggregated
+ * session clock request exceeds the maximum limit, the function returns an
+ * error.
+ *
+ * The caller must hold clk_lock.
+ */
+static int adjust_bw_freqs(unsigned int max_bw, unsigned int min_bw)
+{
+	struct msm_cvp_core *core;
+	struct iris_hfi_device *hdev;
+	struct allowed_clock_rates_table *tbl = NULL;
+	unsigned int tbl_size;
+	unsigned int cvp_min_rate, cvp_max_rate;
+	struct cvp_power_level rt_pwr = {0}, nrt_pwr = {0};
+	unsigned long tmp, core_sum, op_core_sum, bw_sum;
+	int i;
+
+	core = cvp_driver->cvp_core;
+
+	hdev = core->dev_ops->hfi_device_data;
+	tbl = core->resources.allowed_clks_tbl;
+	tbl_size = core->resources.allowed_clks_tbl_size;
+	cvp_min_rate = tbl[0].clock_rate;
+	cvp_max_rate = tbl[tbl_size - 1].clock_rate;
+
+	aggregate_power_update(core, &nrt_pwr, &rt_pwr, cvp_max_rate);
+	dprintk(CVP_PWR, "PwrUpdate nrt %u %u rt %u %u\n",
+		nrt_pwr.core_sum, nrt_pwr.op_core_sum,
+		rt_pwr.core_sum, rt_pwr.op_core_sum);
+
+	if (rt_pwr.core_sum > cvp_max_rate) {
+		dprintk(CVP_WARN, "%s clk vote out of range %lu\n",
+			__func__, rt_pwr.core_sum);
+		return -ENOTSUPP;
+	}
+
+	core_sum = rt_pwr.core_sum + nrt_pwr.core_sum;
+	op_core_sum = (rt_pwr.op_core_sum >= nrt_pwr.op_core_sum) ?
+		rt_pwr.op_core_sum : nrt_pwr.op_core_sum;
+
+	core_sum = (core_sum >= op_core_sum) ?
+		core_sum : op_core_sum;
+
+	if (core_sum > cvp_max_rate) {
+		core_sum = cvp_max_rate;
+	} else if (core_sum <= cvp_min_rate) {
+		core_sum = cvp_min_rate;
+	} else {
+		for (i = 1; i < tbl_size; i++)
+			if (core_sum <= tbl[i].clock_rate)
+				break;
+		core_sum = tbl[i].clock_rate;
+	}
+
+	bw_sum = rt_pwr.bw_sum + nrt_pwr.bw_sum;
+	bw_sum = bw_sum >> 10;
+	bw_sum = (bw_sum > max_bw) ? max_bw : bw_sum;
+	bw_sum = (bw_sum < min_bw) ? min_bw : bw_sum;
+
+	dprintk(CVP_PWR, "%s %lu %lu\n", __func__,
+		core_sum, bw_sum);
+
+	tmp = core->curr_freq;
+	core->curr_freq = core_sum;
+	core->orig_core_sum = tmp;
+
+	hdev->clk_freq = core->curr_freq;
+	core->bw_sum = bw_sum;
+
+	return 0;
+}
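+
+/*
+ * Worked example of the rounding in adjust_bw_freqs() (table values
+ * hypothetical): with allowed_clks_tbl = {240, 338, 366, 444} MHz, an
+ * aggregated core_sum of 350 MHz rounds up to the first sustaining
+ * entry, 366 MHz; 500 MHz is capped to cvp_max_rate (444 MHz); and
+ * 100 MHz is raised to cvp_min_rate (240 MHz).
+ */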
+
+int msm_cvp_update_power(struct msm_cvp_inst *inst)
+{
+	int rc = 0;
+	struct msm_cvp_core *core;
+	struct msm_cvp_inst *s;
+	struct bus_info *bus = NULL;
+	struct clock_set *clocks;
+	struct clock_info *cl;
+	int bus_count = 0;
+	unsigned int max_bw = 0, min_bw = 0;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	s = cvp_get_inst_validate(inst->core, inst);
+	if (!s)
+		return -ECONNRESET;
+
+	core = inst->core;
+	if (!core || core->state == CVP_CORE_UNINIT) {
+		rc = -ECONNRESET;
+		goto adjust_exit;
+	}
+
+	clocks = &core->resources.clock_set;
+	cl = &clocks->clock_tbl[clocks->count - 1];
+	if (!cl->has_scaling) {
+		dprintk(CVP_ERR, "Cannot scale CVP clock\n");
+		rc = -EINVAL;
+		goto adjust_exit;
+	}
+	for (bus_count = 0; bus_count < core->resources.bus_set.count; bus_count++) {
+		if (!strcmp(core->resources.bus_set.bus_tbl[bus_count].name, "cvp-ddr")) {
+			bus = &core->resources.bus_set.bus_tbl[bus_count];
+			max_bw = bus->range[1];
+			min_bw = max_bw/10;
+		}
+	}
+	if (!bus) {
+		dprintk(CVP_ERR, "bus node is NULL for cvp-ddr\n");
+		rc = -EINVAL;
+		goto adjust_exit;
+	}
+	mutex_lock(&core->clk_lock);
+	rc = adjust_bw_freqs(max_bw, min_bw);
+	mutex_unlock(&core->clk_lock);
+	if (rc)
+		goto adjust_exit;
+
+	rc = msm_cvp_set_clocks(core);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"Failed to set clock rate %u %s: %d %s\n",
+			core->curr_freq, cl->name, rc, __func__);
+		core->curr_freq = core->orig_core_sum;
+		goto adjust_exit;
+	}
+	rc = msm_cvp_set_bw(core, bus, core->bw_sum);
+
+adjust_exit:
+	cvp_put_inst(s);
+
+	return rc;
+}
+
+unsigned int msm_cvp_get_hw_aggregate_cycles(enum hfi_hw_thread hwblk)
+{
+	struct msm_cvp_core *core;
+	struct msm_cvp_inst *inst;
+	unsigned long cycles_sum = 0;
+
+	core = cvp_driver->cvp_core;
+
+	if (!core) {
+		dprintk(CVP_ERR, "%s: invalid core\n", __func__);
+		/* Return type is unsigned, so report 0 cycles on error */
+		return 0;
+	}
+
+	mutex_lock(&core->clk_lock);
+	list_for_each_entry(inst, &core->instances, list) {
+		if (inst->state == MSM_CVP_CORE_INVALID ||
+			inst->state == MSM_CVP_CORE_UNINIT ||
+			!is_subblock_profile_existed(inst))
+			continue;
+		switch (hwblk) {
+		case HFI_HW_FDU:
+		case HFI_HW_ICA:
+		case HFI_HW_MPU:
+		case HFI_HW_OD:
+		case HFI_HW_VADL:
+		case HFI_HW_TOF:
+		case HFI_HW_RGE:
+		case HFI_HW_XRA:
+		case HFI_HW_LSR:
+			/* cycles[] is indexed directly by the HW thread id */
+			cycles_sum += inst->prop.cycles[hwblk];
+			break;
+		default:
+			dprintk(CVP_ERR, "unrecognized hw block %d\n",
+				hwblk);
+			break;
+		}
+	}
+	mutex_unlock(&core->clk_lock);
+	cycles_sum &= 0xFFFFFFFF;
+	return (unsigned int)cycles_sum;
+}
+

+ 23 - 0
qcom/opensource/eva-kernel/msm/eva/cvp_power.h

@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _CVP_POWER_H_
+#define _CVP_POWER_H_
+
+#include "msm_cvp_internal.h"
+#include "msm_cvp_common.h"
+#include "msm_cvp_clocks.h"
+#include "msm_cvp_debug.h"
+#include "msm_cvp_dsp.h"
+
+struct cvp_power_level {
+	unsigned long core_sum;
+	unsigned long op_core_sum;
+	unsigned long bw_sum;
+};
+
+int msm_cvp_update_power(struct msm_cvp_inst *inst);
+unsigned int msm_cvp_get_hw_aggregate_cycles(enum hfi_hw_thread hwblk);
+#endif

+ 18 - 0
qcom/opensource/eva-kernel/msm/eva/cvp_private.h

@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _MSM_V4L2_PRIVATE_H_
+#define _MSM_V4L2_PRIVATE_H_
+
+#include <media/msm_eva_private.h>
+#include "msm_cvp_debug.h"
+
+long cvp_unblocked_ioctl(struct file *filp,
+		unsigned int cmd, unsigned long arg);
+
+long cvp_compat_ioctl(struct file *filp,
+		unsigned int cmd, unsigned long arg);
+
+#endif

+ 663 - 0
qcom/opensource/eva-kernel/msm/eva/cvp_smem.c

@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
+#include <linux/dma-direction.h>
+#include <linux/iommu.h>
+#include <linux/msm_dma_iommu_mapping.h>
+#include <soc/qcom/secure_buffer.h>
+#include <linux/mem-buf.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/qcom-dma-mapping.h>
+#include <linux/version.h>
+#include "msm_cvp_core.h"
+#include "msm_cvp_debug.h"
+#include "msm_cvp_resources.h"
+#include "cvp_core_hfi.h"
+#include "msm_cvp_dsp.h"
+
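+/*
+ * Version shims: on newer kernels dma_buf_vmap()/dma_buf_vunmap() take a
+ * struct iosys_map (formerly struct dma_buf_map), so these wrappers hide
+ * the difference and deal in plain kernel virtual addresses.
+ */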
+static void * __cvp_dma_buf_vmap(struct dma_buf *dbuf)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0))
+	struct dma_buf_map map;
+#else
+	struct iosys_map map;
+#endif
+	void *dma_map;
+	int err;
+
+	err = dma_buf_vmap(dbuf, &map);
+	dma_map = err ? NULL : map.vaddr;
+	if (!dma_map)
+		dprintk(CVP_ERR, "map to kvaddr failed\n");
+
+	return dma_map;
+}
+
+static void __cvp_dma_buf_vunmap(struct dma_buf *dbuf, void *vaddr)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0))
+	struct dma_buf_map map = {
+		.vaddr = vaddr,
+		.is_iomem = false,
+	};
+#else
+	struct iosys_map map = {
+		.vaddr = vaddr,
+		.is_iomem = false,
+	};
+#endif
+	if (vaddr)
+		dma_buf_vunmap(dbuf, &map);
+}
+
+static int msm_dma_get_device_address(struct dma_buf *dbuf, u32 align,
+	dma_addr_t *iova, u32 flags, struct msm_cvp_platform_resources *res,
+	struct cvp_dma_mapping_info *mapping_info)
+{
+	int rc = 0;
+	struct dma_buf_attachment *attach;
+	struct sg_table *table = NULL;
+	struct context_bank_info *cb = NULL;
+
+	if (!dbuf || !iova || !mapping_info) {
+		dprintk(CVP_ERR, "Invalid params: %pK, %pK, %pK\n",
+			dbuf, iova, mapping_info);
+		return -EINVAL;
+	}
+
+	if (is_iommu_present(res)) {
+		cb = msm_cvp_smem_get_context_bank(res, flags);
+		if (!cb) {
+			dprintk(CVP_ERR,
+				"%s: Failed to get context bank device\n",
+				 __func__);
+			rc = -EIO;
+			goto mem_map_failed;
+		}
+
+		/* Prepare a dma buf for dma on the given device */
+		attach = dma_buf_attach(dbuf, cb->dev);
+		if (IS_ERR_OR_NULL(attach)) {
+			rc = PTR_ERR(attach) ?: -ENOMEM;
+			dprintk(CVP_ERR, "Failed to attach dmabuf\n");
+			goto mem_buf_attach_failed;
+		}
+		dprintk(CVP_MEM, "%s: CB dev: %s, attach dev: %s, attach: 0x%lx, dbuf: 0x%lx",
+			__func__, dev_name(cb->dev), dev_name(attach->dev), attach, dbuf);
+
+		/*
+		 * Get the scatterlist for the given attachment; mapping of
+		 * the sg list is handled by dma_buf_map_attachment().
+		 *
+		 * dma_map does not need to perform cache operations on the
+		 * whole buffer, so pass the skip-sync flag; the required
+		 * cache operations are done separately, on only the required
+		 * buffer size.
+		 */
+		attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+		if (flags & SMEM_CAMERA)
+			attach->dma_map_attrs |= DMA_ATTR_QTI_SMMU_PROXY_MAP;
+		if (res->sys_cache_present)
+			attach->dma_map_attrs |=
+				DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
+
+		table = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+		if (IS_ERR_OR_NULL(table)) {
+			dprintk(CVP_ERR, "Failed to map table %d\n", PTR_ERR(table));
+			dprintk(CVP_ERR,
+				"Mapping detail dma_buf 0x%llx, %s, size %#x\n",
+				dbuf, dbuf->name, dbuf->size);
+			rc = PTR_ERR(table) ?: -ENOMEM;
+			goto mem_map_table_failed;
+		}
+
+		if (table->sgl) {
+			*iova = table->sgl->dma_address;
+		} else {
+			dprintk(CVP_ERR, "sgl is NULL\n");
+			rc = -ENOMEM;
+			goto mem_map_sg_failed;
+		}
+
+		mapping_info->dev = cb->dev;
+		mapping_info->domain = cb->domain;
+		mapping_info->table = table;
+		mapping_info->attach = attach;
+		mapping_info->buf = dbuf;
+		mapping_info->cb_info = (void *)cb;
+
+		dprintk(CVP_MEM, "%s: sg-table: 0x%lx, dbuf: 0x%lx, table->sgl->dma_address: 0x%lx",
+			__func__, table, dbuf, table->sgl->dma_address);
+	} else {
+		dprintk(CVP_MEM, "iommu not present, use phys mem addr\n");
+	}
+
+	return 0;
+mem_map_sg_failed:
+	dma_buf_unmap_attachment(attach, table, DMA_BIDIRECTIONAL);
+mem_map_table_failed:
+	dma_buf_detach(dbuf, attach);
+mem_buf_attach_failed:
+mem_map_failed:
+	return rc;
+}
+
+static int msm_dma_put_device_address(u32 flags,
+	struct cvp_dma_mapping_info *mapping_info)
+{
+	int rc = 0;
+	struct dma_buf_attachment *attach = NULL;
+	struct sg_table *table = NULL;
+	struct context_bank_info *cb = NULL;
+	struct dma_buf *dbuf = NULL;
+
+	if (!mapping_info) {
+		dprintk(CVP_WARN, "Invalid mapping_info\n");
+		return -EINVAL;
+	}
+
+	if (!mapping_info->dev || !mapping_info->table ||
+		!mapping_info->buf || !mapping_info->attach ||
+		!mapping_info->cb_info) {
+		dprintk(CVP_WARN, "Invalid params\n");
+		return -EINVAL;
+	}
+
+	attach = mapping_info->attach;
+	table = mapping_info->table;
+	cb = (struct context_bank_info *) mapping_info->cb_info;
+	dbuf = mapping_info->buf;
+	dprintk(CVP_MEM, "%s: CB dev_name: %s, attach dev_name: %s, attach: 0x%lx, dbuf: 0x%lx",
+		__func__, dev_name(cb->dev), dev_name(attach->dev), attach, dbuf);
+	dprintk(CVP_MEM, "%s: sg-table: 0x%lx, table->sgl->dma_address: 0x%lx",
+		__func__, table, dbuf, table->sgl->dma_address);
+
+	dma_buf_unmap_attachment(mapping_info->attach,
+		mapping_info->table, DMA_BIDIRECTIONAL);
+	dma_buf_detach(mapping_info->buf, mapping_info->attach);
+
+	mapping_info->dev = NULL;
+	mapping_info->domain = NULL;
+	mapping_info->table = NULL;
+	mapping_info->attach = NULL;
+	mapping_info->buf = NULL;
+	mapping_info->cb_info = NULL;
+
+	return rc;
+}
+
+struct dma_buf *msm_cvp_smem_get_dma_buf(int fd)
+{
+	struct dma_buf *dma_buf;
+
+	dma_buf = dma_buf_get(fd);
+	if (IS_ERR_OR_NULL(dma_buf)) {
+		dprintk(CVP_ERR, "Failed to get dma_buf for %d, error %ld\n",
+				fd, PTR_ERR(dma_buf));
+		dma_buf = NULL;
+	}
+
+	return dma_buf;
+}
+
+void msm_cvp_smem_put_dma_buf(void *dma_buf)
+{
+	if (!dma_buf) {
+		dprintk(CVP_ERR, "%s: NULL dma_buf\n", __func__);
+		return;
+	}
+
+	dma_heap_buffer_free((struct dma_buf *)dma_buf);
+}
+
+int msm_cvp_map_smem(struct msm_cvp_inst *inst,
+			struct msm_cvp_smem *smem,
+			const char *str)
+{
+	int *vmid_list;
+	int *perms_list;
+	int nelems = 0;
+	int i, rc = 0;
+
+	dma_addr_t iova = 0;
+	u32 temp = 0, checksum = 0;
+	u32 align = SZ_4K;
+	struct dma_buf *dma_buf;
+	bool is_config_pkt = false;
+
+	if (!inst || !smem) {
+		dprintk(CVP_ERR, "%s: Invalid params: %pK %pK\n",
+				__func__, inst, smem);
+		return -EINVAL;
+	}
+
+	dma_buf = smem->dma_buf;
+	rc = mem_buf_dma_buf_copy_vmperm(dma_buf,
+			&vmid_list, &perms_list, &nelems);
+	if (rc) {
+		dprintk(CVP_ERR, "%s fail to get vmid and perms %d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	for (temp = 0; temp < nelems; temp++) {
+		if (vmid_list[temp] == VMID_CP_PIXEL)
+			smem->flags |= (SMEM_SECURE | SMEM_PIXEL);
+		else if (vmid_list[temp] == VMID_CP_NON_PIXEL)
+			smem->flags |= (SMEM_SECURE | SMEM_NON_PIXEL);
+		else if (vmid_list[temp] == VMID_CP_CAMERA ||
+				/* To-do: what if the EVA driver runs in TVM */
+				vmid_list[temp] == VMID_TVM)
+			smem->flags |= (SMEM_SECURE | SMEM_CAMERA);
+		dprintk(CVP_MEM, "inst %pK VM idx %d VM_ID %d fd %d pkt_type %#x\n",
+			inst, temp, vmid_list[temp], smem->fd, smem->pkt_type);
+	}
+
+	rc = msm_dma_get_device_address(dma_buf, align, &iova, smem->flags,
+			&(inst->core->resources), &smem->mapping_info);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to get device address: %d\n", rc);
+		goto exit;
+	}
+	temp = (u32)iova;
+	if ((dma_addr_t)temp != iova) {
+		dprintk(CVP_ERR, "iova(%pa) truncated to %#x", &iova, temp);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	smem->size = dma_buf->size;
+	smem->device_addr = (u32)iova;
+	i = get_pkt_index_from_type(smem->pkt_type);
+	if (i > 0 && smem->pkt_type != HFI_CMD_SESSION_CVP_SET_PERSIST_BUFFERS
+		&& smem->pkt_type != HFI_CMD_SESSION_CVP_SET_MODEL_BUFFERS
+		&& smem->pkt_type != HFI_CMD_SESSION_EVA_DLFL_CONFIG)
+		/* User persist buffer has no feature config info */
+		is_config_pkt = cvp_hfi_defs[i].is_config_pkt;
+
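+	/*
+	 * Debug aid: for packet types with checksum_enabled, map the buffer
+	 * into the kernel and sum its first 256 words (1 KB) so the value
+	 * logged here can be compared with the one logged at unmap time.
+	 */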
+	if (i > 0 && cvp_hfi_defs[i].checksum_enabled) {
+		dma_buf_begin_cpu_access(dma_buf, DMA_BIDIRECTIONAL);
+		smem->kvaddr = __cvp_dma_buf_vmap(dma_buf);
+		if (!smem->kvaddr) {
+			dprintk(CVP_WARN, "%s Fail map into kernel\n",
+					__func__);
+			dma_buf_end_cpu_access(dma_buf, DMA_BIDIRECTIONAL);
+		} else {
+			for (i = 0; i < 256; i++)
+				checksum += *(u32 *)(smem->kvaddr + i*sizeof(u32));
+			dprintk(CVP_MEM, "Map checksum %#x fd=%d\n",
+				checksum, smem->fd);
+		}
+	}
+	print_smem(CVP_MEM, str, inst, smem);
+	atomic_inc(&inst->smem_count);
+	goto success;
+exit:
+	smem->device_addr = 0x0;
+success:
+	kfree(vmid_list);
+	kfree(perms_list);
+	return rc;
+}
+
+int msm_cvp_unmap_smem(struct msm_cvp_inst *inst,
+		struct msm_cvp_smem *smem,
+		const char *str)
+{
+	int i, rc = 0;
+	u32 checksum = 0;
+	struct dma_buf *dma_buf;
+
+	if (!smem) {
+		dprintk(CVP_ERR, "%s: Invalid params: %pK\n", __func__, smem);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	print_smem(CVP_MEM, str, inst, smem);
+	dma_buf = smem->dma_buf;
+	i = get_pkt_index_from_type(smem->pkt_type);
+	if (i > 0 && cvp_hfi_defs[i].checksum_enabled) {
+		if (!smem->kvaddr) {
+			dprintk(CVP_WARN, "%s DS buf Fail map into kernel\n",
+					__func__);
+			dma_buf_end_cpu_access(dma_buf, DMA_BIDIRECTIONAL);
+		} else {
+			for (i = 0; i < 256; i++)
+				checksum += *(u32 *)(smem->kvaddr + i*sizeof(u32));
+			dprintk(CVP_MEM, "Unmap checksum %#x fd=%d\n",
+				checksum, smem->fd);
+			__cvp_dma_buf_vunmap(dma_buf, smem->kvaddr);
+			smem->kvaddr = 0;
+			dma_buf_end_cpu_access(dma_buf, DMA_BIDIRECTIONAL);
+		}
+	}
+	rc = msm_dma_put_device_address(smem->flags, &smem->mapping_info);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to put device address: %d\n", rc);
+		goto exit;
+	}
+
+	smem->device_addr = 0x0;
+	atomic_dec(&inst->smem_count);
+
+exit:
+	return rc;
+}
+
+static int alloc_dma_mem(size_t size, u32 align, int map_kernel,
+	struct msm_cvp_platform_resources *res, struct msm_cvp_smem *mem)
+{
+	dma_addr_t iova = 0;
+	int rc = 0;
+	struct dma_buf *dbuf = NULL;
+	struct dma_heap *heap = NULL;
+	struct mem_buf_lend_kernel_arg arg;
+	int vmids[1];
+	int perms[1];
+
+	if (!res) {
+		dprintk(CVP_ERR, "%s: NULL res\n", __func__);
+		return -EINVAL;
+	}
+
+	align = ALIGN(align, SZ_4K);
+	size = ALIGN(size, SZ_4K);
+
+	if (is_iommu_present(res)) {
+		heap = dma_heap_find("qcom,system");
+		dprintk(CVP_MEM, "%s size %zx align %d flag %d\n",
+			__func__, size, align, mem->flags);
+	} else {
+		/* heap would stay NULL here; bail out instead of crashing */
+		dprintk(CVP_ERR,
+			"No IOMMU CB: cannot allocate shared memory, size %zx align %d\n",
+			size, align);
+		return -EINVAL;
+	}
+
+	dbuf = dma_heap_buffer_alloc(heap, size, 0, 0);
+	if (IS_ERR_OR_NULL(dbuf)) {
+		dprintk(CVP_ERR,
+			"Failed to allocate shared memory = %zx bytes, %x %ld\n",
+			size, mem->flags, PTR_ERR(dbuf));
+		rc = -ENOMEM;
+		goto fail_shared_mem_alloc;
+	}
+
+	perms[0] = PERM_READ | PERM_WRITE;
+	arg.nr_acl_entries = 1;
+	arg.vmids = vmids;
+	arg.perms = perms;
+
+	if (mem->flags & SMEM_NON_PIXEL) {
+		vmids[0] = VMID_CP_NON_PIXEL;
+		rc = mem_buf_lend(dbuf, &arg);
+	} else if (mem->flags & SMEM_PIXEL) {
+		vmids[0] = VMID_CP_PIXEL;
+		rc = mem_buf_lend(dbuf, &arg);
+	}
+
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to lend dmabuf %d, vmid %d\n",
+			rc, vmids[0]);
+		goto fail_device_address;
+	}
+
+	if (!gfa_cv.dmabuf_f_op)
+		gfa_cv.dmabuf_f_op = (const struct file_operations *)dbuf->file->f_op;
+
+	mem->size = size;
+	mem->dma_buf = dbuf;
+	mem->kvaddr = NULL;
+
+	rc = msm_dma_get_device_address(dbuf, align, &iova, mem->flags,
+			res, &mem->mapping_info);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to get device address: %d\n",
+			rc);
+		goto fail_device_address;
+	}
+	mem->device_addr = (u32)iova;
+	if ((dma_addr_t)mem->device_addr != iova) {
+		dprintk(CVP_ERR, "iova(%pa) truncated to %#x",
+			&iova, mem->device_addr);
+		rc = -EINVAL;
+		goto fail_device_address;
+	}
+
+	if (map_kernel) {
+		dma_buf_begin_cpu_access(dbuf, DMA_BIDIRECTIONAL);
+		mem->kvaddr = __cvp_dma_buf_vmap(dbuf);
+		if (!mem->kvaddr) {
+			dprintk(CVP_ERR,
+				"Failed to map shared mem in kernel\n");
+			rc = -EIO;
+			goto fail_map;
+		}
+	}
+
+	dprintk(CVP_MEM,
+		"%s: dma_buf=%pK,iova=%x,size=%d,kvaddr=%pK,flags=%#lx\n",
+		__func__, mem->dma_buf, mem->device_addr, mem->size,
+		mem->kvaddr, mem->flags);
+	return rc;
+
+fail_map:
+	if (map_kernel)
+		dma_buf_end_cpu_access(dbuf, DMA_BIDIRECTIONAL);
+fail_device_address:
+	dma_heap_buffer_free(dbuf);
+fail_shared_mem_alloc:
+	return rc;
+}
+
+static int free_dma_mem(struct msm_cvp_smem *mem)
+{
+	dprintk(CVP_MEM,
+		"%s: dma_buf = %pK, device_addr = %x, size = %d, kvaddr = %pK\n",
+		__func__, mem->dma_buf, mem->device_addr, mem->size, mem->kvaddr);
+
+	if (mem->device_addr) {
+		msm_dma_put_device_address(mem->flags, &mem->mapping_info);
+		mem->device_addr = 0x0;
+	}
+
+	if (mem->kvaddr) {
+		__cvp_dma_buf_vunmap(mem->dma_buf, mem->kvaddr);
+		mem->kvaddr = NULL;
+		dma_buf_end_cpu_access(mem->dma_buf, DMA_BIDIRECTIONAL);
+	}
+
+	if (mem->dma_buf) {
+		dma_heap_buffer_free(mem->dma_buf);
+		mem->dma_buf = NULL;
+	}
+
+	return 0;
+}
+
+int msm_cvp_smem_alloc(size_t size, u32 align, int map_kernel,
+		void *res, struct msm_cvp_smem *smem)
+{
+	int rc = 0;
+
+	if (!smem || !size) {
+		dprintk(CVP_ERR, "%s: NULL smem or %d size\n",
+			__func__, (u32)size);
+		return -EINVAL;
+	}
+
+	rc = alloc_dma_mem(size, align, map_kernel,
+		(struct msm_cvp_platform_resources *)res, smem);
+
+	return rc;
+}
+
+int msm_cvp_smem_free(struct msm_cvp_smem *smem)
+{
+	int rc = 0;
+
+	if (!smem) {
+		dprintk(CVP_ERR, "NULL smem passed\n");
+		return -EINVAL;
+	}
+	rc = free_dma_mem(smem);
+
+	return rc;
+}
+
+int msm_cvp_smem_cache_operations(struct dma_buf *dbuf,
+	enum smem_cache_ops cache_op, unsigned long offset, unsigned long size)
+{
+	int rc = 0;
+
+	if (!dbuf) {
+		dprintk(CVP_ERR, "%s: Invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (cache_op) {
+	case SMEM_CACHE_CLEAN:
+	case SMEM_CACHE_CLEAN_INVALIDATE:
+		rc = dma_buf_begin_cpu_access_partial(dbuf, DMA_BIDIRECTIONAL,
+				offset, size);
+		if (rc)
+			break;
+		rc = dma_buf_end_cpu_access_partial(dbuf, DMA_BIDIRECTIONAL,
+				offset, size);
+		break;
+	case SMEM_CACHE_INVALIDATE:
+		rc = dma_buf_begin_cpu_access_partial(dbuf, DMA_TO_DEVICE,
+				offset, size);
+		if (rc)
+			break;
+		rc = dma_buf_end_cpu_access_partial(dbuf, DMA_FROM_DEVICE,
+				offset, size);
+		break;
+	default:
+		dprintk(CVP_ERR, "%s: cache (%d) operation not supported\n",
+			__func__, cache_op);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
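+
+/*
+ * Hypothetical call site: to clean and invalidate an entire dma-buf
+ * around firmware access, a caller could do
+ *
+ *	rc = msm_cvp_smem_cache_operations(dbuf, SMEM_CACHE_CLEAN_INVALIDATE,
+ *			0, dbuf->size);
+ */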
+
+struct context_bank_info *msm_cvp_smem_get_context_bank(
+	struct msm_cvp_platform_resources *res,
+	unsigned int flags)
+{
+	struct context_bank_info *cb = NULL, *match = NULL;
+	char *search_str;
+	char *non_secure_cb = "cvp_hlos";
+	char *secure_nonpixel_cb = "cvp_sec_nonpixel";
+	char *secure_pixel_cb = "cvp_sec_pixel";
+	char *camera_cb = "cvp_camera";
+	char *dsp_cb = "cvp_dsp";
+	bool is_secure = (flags & SMEM_SECURE) ? true : false;
+
+	if (flags & SMEM_PIXEL)
+		search_str = secure_pixel_cb;
+	else if (flags & SMEM_NON_PIXEL)
+		search_str = secure_nonpixel_cb;
+	else if (flags & SMEM_CAMERA)
+		/* Secure Camera pixel buffer */
+		search_str = camera_cb;
+	else if (flags & SMEM_CDSP)
+		search_str = dsp_cb;
+	else
+		search_str = non_secure_cb;
+
+	list_for_each_entry(cb, &res->context_banks, list) {
+		if (cb->is_secure == is_secure &&
+			!strcmp(search_str, cb->name)) {
+			match = cb;
+			break;
+		}
+	}
+
+	if (!match)
+		dprintk(CVP_ERR,
+			"%s: cb not found for flags %x, is_secure %d\n",
+			__func__, flags, is_secure);
+
+	return match;
+}
+
+int msm_cvp_map_ipcc_regs(u32 *iova)
+{
+	struct context_bank_info *cb;
+	struct msm_cvp_core *core;
+	struct cvp_hfi_ops *ops_tbl;
+	struct iris_hfi_device *dev = NULL;
+	phys_addr_t paddr;
+	u32 size;
+
+	core = cvp_driver->cvp_core;
+	if (core) {
+		ops_tbl = core->dev_ops;
+		if (ops_tbl)
+			dev = ops_tbl->hfi_device_data;
+	}
+
+	if (!dev)
+		return -EINVAL;
+
+	paddr = dev->res->ipcc_reg_base;
+	size = dev->res->ipcc_reg_size;
+
+	if (!paddr || !size)
+		return -EINVAL;
+
+	cb = msm_cvp_smem_get_context_bank(dev->res, 0);
+	if (!cb) {
+		dprintk(CVP_ERR, "%s: fail to get context bank\n", __func__);
+		return -EINVAL;
+	}
+	*iova = dma_map_resource(cb->dev, paddr, size, DMA_BIDIRECTIONAL, 0);
+	if (*iova == DMA_MAPPING_ERROR) {
+		dprintk(CVP_WARN, "%s: fail to map IPCC regs\n", __func__);
+		return -EFAULT;
+	}
+	return 0;
+}
+
+int msm_cvp_unmap_ipcc_regs(u32 iova)
+{
+	struct context_bank_info *cb;
+	struct msm_cvp_core *core;
+	struct cvp_hfi_ops *ops_tbl;
+	struct iris_hfi_device *dev = NULL;
+	u32 size;
+
+	core = cvp_driver->cvp_core;
+	if (core) {
+		ops_tbl = core->dev_ops;
+		if (ops_tbl)
+			dev = ops_tbl->hfi_device_data;
+	}
+
+	if (!dev)
+		return -EINVAL;
+
+	size = dev->res->ipcc_reg_size;
+
+	if (!iova || !size)
+		return -EINVAL;
+
+	cb = msm_cvp_smem_get_context_bank(dev->res, 0);
+	if (!cb) {
+		dprintk(CVP_ERR, "%s: fail to get context bank\n", __func__);
+		return -EINVAL;
+	}
+	dma_unmap_resource(cb->dev, iova, size, DMA_BIDIRECTIONAL, 0);
+
+	return 0;
+}

+ 86 - 0
qcom/opensource/eva-kernel/msm/eva/eva_shared_def.h

@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/**
+ * This file contains definitions that are common to UMD and KMD
+ * but shall not be added to the UAPI to allow for better UAPI
+ * backward compatibility. Identical copies of this file must be
+ * used by both UMD and KMD for desired functioning.
+ */
+
+#ifndef _EVA_SHARED_DEF_H_
+#define _EVA_SHARED_DEF_H_
+
+/**
+ * Structure corresponding to HFI_CVP_BUFFER_TYPE
+ */
+
+struct cvp_buf_type {
+	__s32 fd;
+	__u32 size;
+	__u32 offset;
+	__u32 flags;
+	__u32 reserved1;
+	__u32 reserved2;
+	__u32 fence_type;
+	__u32 input_handle;
+	__u32 output_handle;
+	__u32 debug_flags;
+	__u32 crc;
+};
+
+/**
+ * Structures and macros for Warp-NCC Out-of-Band (OOB) buffer
+ */
+
+#define EVA_KMD_WNCC_MAX_LAYERS               4
+#define EVA_KMD_WNCC_MAX_ADDRESSES            4095
+#define EVA_KMD_WNCC_MAX_SRC_BUFS             2400
+#define EVA_KMD_WNCC_SRC_BUF_ID_OFFSET        1
+
+struct eva_kmd_wncc_metadata {
+	__u64 loc_x_dec : 12;
+	__u64 loc_x_frac : 9;
+	__u64 loc_y_dec : 12;
+	__u64 loc_y_frac : 9;
+	__u64 iova_lsb : 22; /* Populated in KMD */
+	__u64 iova_msb : 10; /* Populated in KMD */
+	__u64 scale_idx : 2;
+	__s64 aff_coeff_3 : 13;
+	__s64 aff_coeff_2 : 13;
+	__s64 aff_coeff_1 : 13;
+	__s64 aff_coeff_0 : 13;
+};
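+
+/*
+ * Note: iova_lsb/iova_msb are filled in by the KMD and together hold a
+ * 32-bit device address. Assuming the split matches the field names, the
+ * full address would be recovered as:
+ *
+ *	iova = (iova_msb << 22) | iova_lsb;
+ */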
+
+struct eva_kmd_oob_wncc {
+	__u32 metadata_bufs_offset;
+	__u32 num_layers;
+	struct eva_kmd_wncc_layer {
+		__u32 num_addrs;
+		struct eva_kmd_wncc_addr {
+			__u32 buffer_id;
+			__u32 offset;
+		} addrs[EVA_KMD_WNCC_MAX_ADDRESSES];
+	} layers[EVA_KMD_WNCC_MAX_LAYERS];
+};
+
+/**
+ * Structure and macros for Out-of-Band (OOB) buffer
+ * that may accompany HFI packet data
+ */
+
+#define EVA_KMD_OOB_INVALID 0
+#define EVA_KMD_OOB_WNCC    1
+
+struct eva_kmd_oob_buf {
+	__u32 oob_type;
+	union {
+		struct eva_kmd_oob_wncc wncc;
+	};
+};
+
+#endif

+ 463 - 0
qcom/opensource/eva-kernel/msm/eva/hfi_packetization.c

@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "hfi_packetization.h"
+#include "msm_cvp_debug.h"
+
+/* Set up look-up tables to convert HAL_* to HFI_*.
+ *
+ * The tables below mostly take advantage of the fact that most
+ * HAL_* types are defined bitwise, so indexing them directly would
+ * produce huge arrays with wasted space. Before indexing, log2 is
+ * applied to obtain a more compact index.
+ */
+
+int cvp_create_pkt_cmd_sys_init(struct cvp_hfi_cmd_sys_init_packet *pkt,
+			   u32 arch_type)
+{
+	int rc = 0;
+
+	if (!pkt)
+		return -EINVAL;
+
+	pkt->packet_type = HFI_CMD_SYS_INIT;
+	pkt->size = sizeof(struct cvp_hfi_cmd_sys_init_packet);
+	pkt->arch_type = arch_type;
+	return rc;
+}
+
+int cvp_create_pkt_cmd_sys_pc_prep(struct cvp_hfi_cmd_sys_pc_prep_packet *pkt)
+{
+	int rc = 0;
+
+	if (!pkt)
+		return -EINVAL;
+
+	pkt->packet_type = HFI_CMD_SYS_PC_PREP;
+	pkt->size = sizeof(struct cvp_hfi_cmd_sys_pc_prep_packet);
+	return rc;
+}
+
+int cvp_create_pkt_cmd_sys_debug_config(
+	struct cvp_hfi_cmd_sys_set_property_packet *pkt,
+	u32 mode)
+{
+	struct cvp_hfi_debug_config *hfi;
+
+	if (!pkt)
+		return -EINVAL;
+
+	pkt->size = sizeof(struct cvp_hfi_cmd_sys_set_property_packet) +
+		sizeof(struct cvp_hfi_debug_config) + sizeof(u32);
+	pkt->packet_type = HFI_CMD_SYS_SET_PROPERTY;
+	pkt->num_properties = 1;
+	pkt->rg_property_data[0] = HFI_PROPERTY_SYS_DEBUG_CONFIG;
+	hfi = (struct cvp_hfi_debug_config *) &pkt->rg_property_data[1];
+	hfi->debug_config = mode;
+	hfi->debug_mode = HFI_DEBUG_MODE_QUEUE;
+	if (msm_cvp_fw_debug_mode
+			<= (HFI_DEBUG_MODE_QUEUE | HFI_DEBUG_MODE_QDSS))
+		hfi->debug_mode = msm_cvp_fw_debug_mode;
+	return 0;
+}
+
+int cvp_create_pkt_cmd_sys_coverage_config(
+	struct cvp_hfi_cmd_sys_set_property_packet *pkt,
+	u32 mode)
+{
+	if (!pkt) {
+		dprintk(CVP_ERR, "In %s(), No input packet\n", __func__);
+		return -EINVAL;
+	}
+
+	pkt->size = sizeof(struct cvp_hfi_cmd_sys_set_property_packet) +
+		sizeof(u32);
+	pkt->packet_type = HFI_CMD_SYS_SET_PROPERTY;
+	pkt->num_properties = 1;
+	pkt->rg_property_data[0] = HFI_PROPERTY_SYS_CONFIG_COVERAGE;
+	pkt->rg_property_data[1] = mode;
+	dprintk(CVP_PKT, "Firmware coverage mode %d\n",
+			pkt->rg_property_data[1]);
+	return 0;
+}
+
+int cvp_create_pkt_cmd_sys_set_idle_indicator(
+	struct cvp_hfi_cmd_sys_set_property_packet *pkt,
+	u32 mode)
+{
+	if (!pkt) {
+		dprintk(CVP_ERR, "In %s(), No input packet\n", __func__);
+		return -EINVAL;
+	}
+
+	pkt->size = sizeof(struct cvp_hfi_cmd_sys_set_property_packet) +
+		sizeof(u32);
+	pkt->packet_type = HFI_CMD_SYS_SET_PROPERTY;
+	pkt->num_properties = 1;
+	pkt->rg_property_data[0] = HFI_PROPERTY_SYS_IDLE_INDICATOR;
+	pkt->rg_property_data[1] = mode;
+	dprintk(CVP_PKT, "Firmware idle indicator mode %d\n",
+			pkt->rg_property_data[1]);
+	return 0;
+}
+
+int cvp_create_pkt_cmd_sys_set_resource(
+		struct cvp_hfi_cmd_sys_set_resource_packet *pkt,
+		struct cvp_resource_hdr *res_hdr,
+		void *res_value)
+{
+	int rc = 0;
+	u32 i = 0;
+
+	if (!pkt || !res_hdr || !res_value) {
+		dprintk(CVP_ERR,
+			"Invalid paramas pkt %pK res_hdr %pK res_value %pK\n",
+				pkt, res_hdr, res_value);
+		return -EINVAL;
+	}
+
+	pkt->packet_type = HFI_CMD_SYS_SET_RESOURCE;
+	pkt->size = sizeof(struct cvp_hfi_cmd_sys_set_resource_packet);
+	pkt->resource_handle = hash32_ptr(res_hdr->resource_handle);
+
+	switch (res_hdr->resource_id) {
+	case CVP_RESOURCE_SYSCACHE:
+	{
+		struct cvp_hfi_resource_syscache_info_type *res_sc_info =
+			(struct cvp_hfi_resource_syscache_info_type *)res_value;
+		struct cvp_hfi_resource_subcache_type *res_sc =
+			(struct cvp_hfi_resource_subcache_type *)
+				&(res_sc_info->rg_subcache_entries[0]);
+
+		struct cvp_hfi_resource_syscache_info_type *hfi_sc_info =
+			(struct cvp_hfi_resource_syscache_info_type *)
+				&pkt->rg_resource_data[0];
+
+		struct cvp_hfi_resource_subcache_type *hfi_sc =
+			(struct cvp_hfi_resource_subcache_type *)
+			&(hfi_sc_info->rg_subcache_entries[0]);
+
+		pkt->resource_type = HFI_RESOURCE_SYSCACHE;
+		hfi_sc_info->num_entries = res_sc_info->num_entries;
+
+		pkt->size += (sizeof(struct cvp_hfi_resource_subcache_type))
+				 * hfi_sc_info->num_entries;
+
+		for (i = 0; i < hfi_sc_info->num_entries; i++) {
+			hfi_sc[i] = res_sc[i];
+			dprintk(CVP_PKT, "entry hfi#%d, sc_id %d, size %d\n",
+				i, hfi_sc[i].sc_id, hfi_sc[i].size);
+		}
+		break;
+	}
+	default:
+		dprintk(CVP_ERR,
+			"Invalid resource_id %d\n", res_hdr->resource_id);
+		rc = -ENOTSUPP;
+	}
+
+	return rc;
+}
+
+int cvp_create_pkt_cmd_sys_release_resource(
+		struct cvp_hfi_cmd_sys_release_resource_packet *pkt,
+		struct cvp_resource_hdr *res_hdr)
+{
+	int rc = 0;
+
+	if (!pkt || !res_hdr) {
+		dprintk(CVP_ERR,
+			"Invalid paramas pkt %pK res_hdr %pK\n",
+				pkt, res_hdr);
+		return -EINVAL;
+	}
+
+	pkt->size = sizeof(struct cvp_hfi_cmd_sys_release_resource_packet);
+	pkt->packet_type = HFI_CMD_SYS_RELEASE_RESOURCE;
+	pkt->resource_handle = hash32_ptr(res_hdr->resource_handle);
+
+	switch (res_hdr->resource_id) {
+	case CVP_RESOURCE_SYSCACHE:
+		pkt->resource_type = HFI_RESOURCE_SYSCACHE;
+		break;
+	default:
+		dprintk(CVP_ERR,
+			 "Invalid resource_id %d\n", res_hdr->resource_id);
+		rc = -ENOTSUPP;
+	}
+
+	dprintk(CVP_PKT,
+		"rel_res: pkt_type 0x%x res_type 0x%x prepared\n",
+		pkt->packet_type, pkt->resource_type);
+
+	return rc;
+}
+
+inline int cvp_create_pkt_cmd_sys_session_init(
+		struct cvp_hfi_cmd_sys_session_init_packet *pkt,
+		struct cvp_hal_session *session)
+{
+	int rc = 0;
+	struct msm_cvp_inst *inst = session->session_id;
+
+	if (!pkt || !inst)
+		return -EINVAL;
+
+	pkt->size = sizeof(struct cvp_hfi_cmd_sys_session_init_packet);
+	pkt->packet_type = HFI_CMD_SYS_SESSION_INIT;
+	pkt->session_id = hash32_ptr(session);
+	pkt->session_type = inst->prop.type;
+	pkt->session_kmask = inst->prop.kernel_mask;
+	pkt->session_prio = inst->prop.priority;
+	pkt->is_secure = inst->prop.is_secure;
+	pkt->dsp_ac_mask = inst->prop.dsp_mask;
+
+	return rc;
+}
+
+static int create_pkt_cmd_sys_ubwc_config(
+		struct cvp_hfi_cmd_sys_set_property_packet *pkt,
+		struct msm_cvp_ubwc_config_data *ubwc_config)
+{
+	int rc = 0;
+	struct cvp_hfi_cmd_sys_set_ubwc_config_packet_type *hfi;
+
+	if (!pkt)
+		return -EINVAL;
+
+	pkt->size = sizeof(struct cvp_hfi_cmd_sys_set_property_packet) +
+		sizeof(struct cvp_hfi_cmd_sys_set_ubwc_config_packet_type)
+		+ sizeof(u32);
+
+	pkt->packet_type = HFI_CMD_SYS_SET_PROPERTY;
+	pkt->num_properties = 1;
+	pkt->rg_property_data[0] = HFI_PROPERTY_SYS_UBWC_CONFIG;
+	hfi = (struct cvp_hfi_cmd_sys_set_ubwc_config_packet_type *)
+		&pkt->rg_property_data[1];
+
+	hfi->max_channels = ubwc_config->max_channels;
+	hfi->override_bit_info.max_channel_override =
+		ubwc_config->override_bit_info.max_channel_override;
+
+	hfi->mal_length = ubwc_config->mal_length;
+	hfi->override_bit_info.mal_length_override =
+		ubwc_config->override_bit_info.mal_length_override;
+
+	hfi->highest_bank_bit = ubwc_config->highest_bank_bit;
+	hfi->override_bit_info.hb_override =
+		ubwc_config->override_bit_info.hb_override;
+
+	hfi->bank_swzl_level = ubwc_config->bank_swzl_level;
+	hfi->override_bit_info.bank_swzl_level_override =
+		ubwc_config->override_bit_info.bank_swzl_level_override;
+
+	hfi->bank_spreading = ubwc_config->bank_spreading;
+	hfi->override_bit_info.bank_spreading_override =
+		ubwc_config->override_bit_info.bank_spreading_override;
+	hfi->size = sizeof(struct cvp_hfi_cmd_sys_set_ubwc_config_packet_type);
+
+	return rc;
+}
+
+int cvp_create_pkt_cmd_session_cmd(struct cvp_hal_session_cmd_pkt *pkt,
+			int pkt_type, struct cvp_hal_session *session)
+{
+	int rc = 0;
+
+	if (!pkt)
+		return -EINVAL;
+
+	pkt->size = sizeof(struct cvp_hal_session_cmd_pkt);
+	pkt->packet_type = pkt_type;
+	pkt->session_id = hash32_ptr(session);
+
+	return rc;
+}
+
+int cvp_create_pkt_cmd_sys_power_control(
+	struct cvp_hfi_cmd_sys_set_property_packet *pkt, u32 enable)
+{
+	struct cvp_hfi_enable *hfi;
+
+	if (!pkt) {
+		dprintk(CVP_ERR, "No input packet\n");
+		return -EINVAL;
+	}
+
+	pkt->size = sizeof(struct cvp_hfi_cmd_sys_set_property_packet) +
+		sizeof(struct cvp_hfi_enable) + sizeof(u32);
+	pkt->packet_type = HFI_CMD_SYS_SET_PROPERTY;
+	pkt->num_properties = 1;
+	pkt->rg_property_data[0] = HFI_PROPERTY_SYS_CODEC_POWER_PLANE_CTRL;
+	hfi = (struct cvp_hfi_enable *) &pkt->rg_property_data[1];
+	hfi->enable = enable;
+	return 0;
+}
+
+int cvp_create_pkt_cmd_session_set_buffers(
+		void *cmd,
+		struct cvp_hal_session *session,
+		u32 iova,
+		u32 size)
+{
+	int rc = 0;
+	struct cvp_hfi_cmd_session_set_buffers_packet *pkt;
+
+	if (!cmd || !session)
+		return -EINVAL;
+
+	pkt = (struct cvp_hfi_cmd_session_set_buffers_packet *)cmd;
+	pkt->packet_type = HFI_CMD_SESSION_CVP_SET_BUFFERS;
+	pkt->session_id = hash32_ptr(session);
+	pkt->buf_type.iova = iova;
+	pkt->buf_type.size = size;
+	pkt->size = sizeof(struct cvp_hfi_cmd_session_set_buffers_packet);
+
+	return rc;
+}
+
+int cvp_create_pkt_cmd_session_release_buffers(
+		void *cmd,
+		struct cvp_hal_session *session)
+{
+	struct cvp_session_release_buffers_packet *pkt;
+
+	if (!cmd || !session || session == (void *)0xdeadbeef)
+		return -EINVAL;
+
+	pkt = (struct cvp_session_release_buffers_packet *)cmd;
+	pkt->packet_type = HFI_CMD_SESSION_CVP_RELEASE_BUFFERS;
+	pkt->session_id = hash32_ptr(session);
+	pkt->num_buffers = 1;
+	pkt->buffer_type = 0;
+	pkt->size = sizeof(struct cvp_session_release_buffers_packet) +
+			((pkt->num_buffers - 1) * sizeof(u32));
+
+	return 0;
+}
+
+int cvp_create_pkt_cmd_session_send(
+		struct eva_kmd_hfi_packet *out_pkt,
+		struct cvp_hal_session *session,
+		struct eva_kmd_hfi_packet *in_pkt)
+{
+	int def_idx;
+	struct cvp_hal_session_cmd_pkt *ptr =
+		(struct cvp_hal_session_cmd_pkt *)in_pkt;
+
+	if (!out_pkt || !in_pkt || !session)
+		return -EINVAL;
+
+	if (ptr->size > MAX_HFI_PKT_SIZE * sizeof(unsigned int))
+		goto error_hfi_packet;
+
+	if (ptr->session_id != hash32_ptr(session))
+		goto error_hfi_packet;
+
+	def_idx = get_pkt_index(ptr);
+	if (def_idx < 0) {
+		memcpy(out_pkt, in_pkt, ptr->size);
+		return 0;
+	}
+
+	if (cvp_hfi_defs[def_idx].type != ptr->packet_type)
+		goto error_hfi_packet;
+
+	memcpy(out_pkt, in_pkt, ptr->size);
+
+	return 0;
+
+error_hfi_packet:
+	dprintk(CVP_ERR, "%s incorrect packet: size=%d type=%d sessionid=%d\n",
+		__func__, ptr->size, ptr->packet_type, ptr->session_id);
+
+	return -EINVAL;
+}
+
+static int get_hfi_ssr_type(enum hal_ssr_trigger_type type)
+{
+	int rc = HFI_TEST_SSR_HW_WDOG_IRQ;
+
+	switch (type) {
+	case SSR_ERR_FATAL:
+		rc = HFI_TEST_SSR_SW_ERR_FATAL;
+		break;
+	case SSR_SW_DIV_BY_ZERO:
+		rc = HFI_TEST_SSR_SW_DIV_BY_ZERO;
+		break;
+	case SSR_HW_WDOG_IRQ:
+		rc = HFI_TEST_SSR_HW_WDOG_IRQ;
+		break;
+	default:
+		dprintk(CVP_WARN,
+			"SSR trigger type not recognized, using WDOG.\n");
+	}
+	return rc;
+}
+
+int cvp_create_pkt_ssr_cmd(enum hal_ssr_trigger_type type,
+		struct cvp_hfi_cmd_sys_test_ssr_packet *pkt)
+{
+	if (!pkt) {
+		dprintk(CVP_ERR, "Invalid params, device: %pK\n", pkt);
+		return -EINVAL;
+	}
+	pkt->size = sizeof(struct cvp_hfi_cmd_sys_test_ssr_packet);
+	pkt->packet_type = HFI_CMD_SYS_TEST_SSR;
+	pkt->trigger_type = get_hfi_ssr_type(type);
+	return 0;
+}
+
+int cvp_create_pkt_cmd_sys_image_version(
+		struct cvp_hfi_cmd_sys_get_property_packet *pkt)
+{
+	if (!pkt) {
+		dprintk(CVP_ERR, "%s invalid param :%pK\n", __func__, pkt);
+		return -EINVAL;
+	}
+	pkt->size = sizeof(struct cvp_hfi_cmd_sys_get_property_packet);
+	pkt->packet_type = HFI_CMD_SYS_GET_PROPERTY;
+	pkt->num_properties = 1;
+	pkt->rg_property_data[0] = HFI_PROPERTY_SYS_IMAGE_VERSION;
+	return 0;
+}
+
+static struct cvp_hfi_packetization_ops hfi_default = {
+	.sys_init = cvp_create_pkt_cmd_sys_init,
+	.sys_pc_prep = cvp_create_pkt_cmd_sys_pc_prep,
+	.sys_power_control = cvp_create_pkt_cmd_sys_power_control,
+	.sys_set_resource = cvp_create_pkt_cmd_sys_set_resource,
+	.sys_debug_config = cvp_create_pkt_cmd_sys_debug_config,
+	.sys_coverage_config = cvp_create_pkt_cmd_sys_coverage_config,
+	.sys_set_idle_indicator = cvp_create_pkt_cmd_sys_set_idle_indicator,
+	.sys_release_resource = cvp_create_pkt_cmd_sys_release_resource,
+	.sys_image_version = cvp_create_pkt_cmd_sys_image_version,
+	.sys_ubwc_config = create_pkt_cmd_sys_ubwc_config,
+	.ssr_cmd = cvp_create_pkt_ssr_cmd,
+	.session_init = cvp_create_pkt_cmd_sys_session_init,
+	.session_cmd = cvp_create_pkt_cmd_session_cmd,
+	.session_set_buffers =
+		cvp_create_pkt_cmd_session_set_buffers,
+	.session_release_buffers =
+		cvp_create_pkt_cmd_session_release_buffers,
+	.session_send = cvp_create_pkt_cmd_session_send,
+};
+
+struct cvp_hfi_packetization_ops *cvp_hfi_get_pkt_ops_handle(
+			enum hfi_packetization_type type)
+{
+	dprintk(CVP_HFI, "%s selected\n",
+		type == HFI_PACKETIZATION_4XX ?
+		"4xx packetization" : "Unknown hfi");
+
+	switch (type) {
+	case HFI_PACKETIZATION_4XX:
+		return &hfi_default;
+	}
+
+	return NULL;
+}

+ 75 - 0
qcom/opensource/eva-kernel/msm/eva/hfi_packetization.h

@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ */
+#ifndef __HFI_PACKETIZATION__
+#define __HFI_PACKETIZATION__
+
+#include <linux/types.h>
+#include "cvp_hfi_helper.h"
+#include "cvp_hfi.h"
+#include "cvp_hfi_api.h"
+
+#define call_hfi_pkt_op(q, op, ...)			\
+	(((q) && (q)->pkt_ops && (q)->pkt_ops->op) ?	\
+	((q)->pkt_ops->op(__VA_ARGS__)) : 0)
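+
+/*
+ * call_hfi_pkt_op() dispatches through the packetization ops table and
+ * evaluates to 0 when the device, its pkt_ops table, or the named op is
+ * missing. Hypothetical usage:
+ *
+ *	rc = call_hfi_pkt_op(dev, sys_pc_prep, &pkt);
+ */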
+
+enum hfi_packetization_type {
+	HFI_PACKETIZATION_4XX,
+};
+
+struct cvp_hfi_packetization_ops {
+	int (*sys_init)(struct cvp_hfi_cmd_sys_init_packet *pkt, u32 arch_type);
+	int (*sys_pc_prep)(struct cvp_hfi_cmd_sys_pc_prep_packet *pkt);
+	int (*sys_power_control)(
+		struct cvp_hfi_cmd_sys_set_property_packet *pkt,
+		u32 enable);
+	int (*sys_set_resource)(
+		struct cvp_hfi_cmd_sys_set_resource_packet *pkt,
+		struct cvp_resource_hdr *resource_hdr,
+		void *resource_value);
+	int (*sys_debug_config)(struct cvp_hfi_cmd_sys_set_property_packet *pkt,
+			u32 mode);
+	int (*sys_coverage_config)(
+			struct cvp_hfi_cmd_sys_set_property_packet *pkt,
+			u32 mode);
+	int (*sys_set_idle_indicator)(
+		struct cvp_hfi_cmd_sys_set_property_packet *pkt,
+		u32 mode);
+	int (*sys_release_resource)(
+		struct cvp_hfi_cmd_sys_release_resource_packet *pkt,
+		struct cvp_resource_hdr *resource_hdr);
+	int (*sys_image_version)(
+			struct cvp_hfi_cmd_sys_get_property_packet *pkt);
+	int (*sys_ubwc_config)(struct cvp_hfi_cmd_sys_set_property_packet *pkt,
+		struct msm_cvp_ubwc_config_data *ubwc_config);
+	int (*ssr_cmd)(enum hal_ssr_trigger_type type,
+		struct cvp_hfi_cmd_sys_test_ssr_packet *pkt);
+	int (*session_init)(
+		struct cvp_hfi_cmd_sys_session_init_packet *pkt,
+		struct cvp_hal_session *session);
+	int (*session_cmd)(struct cvp_hal_session_cmd_pkt *pkt,
+		int pkt_type, struct cvp_hal_session *session);
+	int (*session_set_buffers)(
+		void *pkt,
+		struct cvp_hal_session *session,
+		u32 iova,
+		u32 size);
+	int (*session_release_buffers)(
+		void *pkt,
+		struct cvp_hal_session *session);
+	int (*session_get_buf_req)(
+		struct cvp_hfi_cmd_session_get_property_packet *pkt,
+		struct cvp_hal_session *session);
+	int (*session_sync_process)(
+		struct cvp_hfi_cmd_session_sync_process_packet *pkt,
+		struct cvp_hal_session *session);
+	int (*session_send)(
+			struct eva_kmd_hfi_packet *out_pkt,
+			struct cvp_hal_session *session,
+			struct eva_kmd_hfi_packet *in_pkt);
+};
+
+struct cvp_hfi_packetization_ops *cvp_hfi_get_pkt_ops_handle(
+		enum hfi_packetization_type);
+#endif

+ 748 - 0
qcom/opensource/eva-kernel/msm/eva/hfi_response_handler.c

@@ -0,0 +1,748 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/hash.h>
+#include <linux/soc/qcom/smem.h>
+#include "cvp_hfi_helper.h"
+#include "cvp_hfi_io.h"
+#include "msm_cvp_debug.h"
+#include "cvp_hfi.h"
+#include "msm_cvp_common.h"
+
+extern struct msm_cvp_drv *cvp_driver;
+
+static enum cvp_status hfi_map_err_status(u32 hfi_err)
+{
+	enum cvp_status cvp_err;
+
+	switch (hfi_err) {
+	case HFI_ERR_NONE:
+		cvp_err = CVP_ERR_NONE;
+		break;
+	case HFI_ERR_SYS_FATAL:
+		cvp_err = CVP_ERR_HW_FATAL;
+		break;
+	case HFI_ERR_SYS_NOC_ERROR:
+		cvp_err = CVP_ERR_NOC_ERROR;
+		break;
+	case HFI_ERR_SYS_VERSION_MISMATCH:
+	case HFI_ERR_SYS_INVALID_PARAMETER:
+	case HFI_ERR_SYS_SESSION_ID_OUT_OF_RANGE:
+	case HFI_ERR_SESSION_INVALID_PARAMETER:
+	case HFI_ERR_SESSION_INVALID_SESSION_ID:
+	case HFI_ERR_SESSION_INVALID_STREAM_ID:
+		cvp_err = CVP_ERR_BAD_PARAM;
+		break;
+	case HFI_ERR_SYS_INSUFFICIENT_RESOURCES:
+	case HFI_ERR_SESSION_UNSUPPORTED_PROPERTY:
+	case HFI_ERR_SESSION_UNSUPPORTED_SETTING:
+	case HFI_ERR_SESSION_INSUFFICIENT_RESOURCES:
+	case HFI_ERR_SESSION_UNSUPPORTED_STREAM:
+		cvp_err = CVP_ERR_NOT_SUPPORTED;
+		break;
+	case HFI_ERR_SYS_MAX_SESSIONS_REACHED:
+		cvp_err = CVP_ERR_MAX_CLIENTS;
+		break;
+	case HFI_ERR_SYS_SESSION_IN_USE:
+		cvp_err = CVP_ERR_CLIENT_PRESENT;
+		break;
+	case HFI_ERR_SESSION_FATAL:
+		cvp_err = CVP_ERR_CLIENT_FATAL;
+		break;
+	case HFI_ERR_SESSION_BAD_POINTER:
+		cvp_err = CVP_ERR_BAD_PARAM;
+		break;
+	case HFI_ERR_SESSION_INCORRECT_STATE_OPERATION:
+		cvp_err = CVP_ERR_BAD_STATE;
+		break;
+	default:
+		cvp_err = CVP_ERR_FAIL;
+		break;
+	}
+	return cvp_err;
+}
+
+static int hfi_process_sys_error(u32 device_id,
+	struct cvp_hfi_msg_event_notify_packet *pkt,
+	struct msm_cvp_cb_info *info)
+{
+	struct msm_cvp_cb_cmd_done cmd_done = {0};
+
+	cmd_done.device_id = device_id;
+	cmd_done.status = hfi_map_err_status(pkt->event_data1);
+
+	info->response_type = HAL_SYS_ERROR;
+	info->response.cmd = cmd_done;
+	dprintk(CVP_ERR, "Received FW sys error %#x\n", pkt->event_data1);
+
+	return 0;
+}
+
+static int hfi_process_session_error(u32 device_id,
+		struct cvp_hfi_msg_event_notify_packet *pkt,
+		struct msm_cvp_cb_info *info)
+{
+	struct msm_cvp_cb_cmd_done cmd_done = {0};
+
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
+	cmd_done.status = hfi_map_err_status(pkt->event_data1);
+	cmd_done.size = pkt->event_data2;
+	info->response.cmd = cmd_done;
+	dprintk(CVP_WARN, "Received: SESSION_ERROR with event data 1 2: %#x %#x\n",
+		pkt->event_data1, pkt->event_data2);
+	switch (pkt->event_data1) {
+	/* Ignore below errors */
+	case HFI_ERR_SESSION_INVALID_SCALE_FACTOR:
+	case HFI_ERR_SESSION_UPSCALE_NOT_SUPPORTED:
+		dprintk(CVP_INFO, "Non Fatal: HFI_EVENT_SESSION_ERROR\n");
+		info->response_type = HAL_RESPONSE_UNUSED;
+		break;
+	default:
+		dprintk(CVP_ERR,
+			"%s: session %#x event %#x, data1 %#x, data2 %#x\n",
+			__func__, pkt->session_id, pkt->event_id,
+			pkt->event_data1, pkt->event_data2);
+		info->response_type = HAL_SESSION_ERROR;
+		break;
+	}
+
+	return 0;
+}
+
+static int hfi_process_event_notify(u32 device_id,
+		void *hdr, struct msm_cvp_cb_info *info)
+{
+	struct cvp_hfi_msg_event_notify_packet *pkt =
+			(struct cvp_hfi_msg_event_notify_packet *)hdr;
+
+	dprintk(CVP_HFI, "Received: EVENT_NOTIFY\n");
+
+	if (pkt->size < sizeof(struct cvp_hfi_msg_event_notify_packet)) {
+		dprintk(CVP_ERR, "Invalid Params\n");
+		return -E2BIG;
+	}
+
+	switch (pkt->event_id) {
+	case HFI_EVENT_SYS_ERROR:
+		dprintk(CVP_ERR, "HFI_EVENT_SYS_ERROR: %d, %#x\n",
+			pkt->event_data1, pkt->event_data2);
+		return hfi_process_sys_error(device_id, pkt, info);
+
+	case HFI_EVENT_SESSION_ERROR:
+		return hfi_process_session_error(device_id, pkt, info);
+
+	default:
+		*info = (struct msm_cvp_cb_info) {
+			.response_type = HAL_RESPONSE_UNUSED,
+		};
+
+		return 0;
+	}
+}
+
+static int hfi_process_sys_init_done(u32 device_id,
+		void *hdr, struct msm_cvp_cb_info *info)
+{
+	struct cvp_hfi_msg_sys_init_done_packet *pkt =
+			(struct cvp_hfi_msg_sys_init_done_packet *)hdr;
+	struct msm_cvp_cb_cmd_done cmd_done = {0};
+	enum cvp_status status = CVP_ERR_NONE;
+
+	dprintk(CVP_CORE, "RECEIVED: SYS_INIT_DONE\n");
+	if (sizeof(struct cvp_hfi_msg_sys_init_done_packet) > pkt->size) {
+		dprintk(CVP_ERR, "%s: bad_pkt_size: %d\n", __func__,
+				pkt->size);
+		return -E2BIG;
+	}
+	if (!pkt->num_properties) {
+		dprintk(CVP_CORE,
+				"hal_process_sys_init_done: no_properties\n");
+		goto err_no_prop;
+	}
+
+	status = hfi_map_err_status(pkt->error_type);
+	if (status) {
+		dprintk(CVP_ERR, "%s: status %#x hfi type %#x err %#x\n",
+			__func__, status, pkt->packet_type, pkt->error_type);
+		goto err_no_prop;
+	}
+
+err_no_prop:
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = NULL;
+	cmd_done.status = (u32)status;
+	cmd_done.size = sizeof(struct cvp_hal_sys_init_done);
+
+	info->response_type = HAL_SYS_INIT_DONE;
+	info->response.cmd = cmd_done;
+
+	return 0;
+}
+
+enum cvp_status cvp_hfi_process_sys_init_done_prop_read(
+	struct cvp_hfi_msg_sys_init_done_packet *pkt,
+	struct cvp_hal_sys_init_done *sys_init_done)
+{
+	enum cvp_status status = CVP_ERR_NONE;
+	u32 rem_bytes, num_properties;
+	u8 *data_ptr;
+
+	if (!pkt || !sys_init_done) {
+		dprintk(CVP_ERR,
+			"hfi_msg_sys_init_done: Invalid input\n");
+		return CVP_ERR_FAIL;
+	}
+
+	rem_bytes = pkt->size + sizeof(u32) -
+		sizeof(struct cvp_hfi_msg_sys_init_done_packet);
+
+	if (!rem_bytes) {
+		dprintk(CVP_ERR,
+			"hfi_msg_sys_init_done: missing_prop_info\n");
+		return CVP_ERR_FAIL;
+	}
+
+	status = hfi_map_err_status(pkt->error_type);
+	if (status) {
+		dprintk(CVP_ERR, "%s: status %#x hfi type %#x err %#x\n",
+			__func__, status, pkt->packet_type, pkt->error_type);
+		return status;
+	}
+
+	data_ptr = (u8 *) &pkt->rg_property_data[0];
+	num_properties = pkt->num_properties;
+	dprintk(CVP_HFI,
+		"%s: data_start %pK, num_properties %#x\n",
+		__func__, data_ptr, num_properties);
+
+	sys_init_done->capabilities = NULL;
+	return status;
+}
+
+static int hfi_process_session_init_done(u32 device_id,
+		void *hdr, struct msm_cvp_cb_info *info)
+{
+	struct cvp_hfi_msg_sys_session_init_done_packet *pkt =
+			(struct cvp_hfi_msg_sys_session_init_done_packet *)hdr;
+	struct msm_cvp_cb_cmd_done cmd_done = {0};
+	struct cvp_hal_session_init_done session_init_done = { {0} };
+
+	dprintk(CVP_SESS, "RECEIVED: SESSION_INIT_DONE[%x]\n", pkt->session_id);
+
+	if (sizeof(struct cvp_hfi_msg_sys_session_init_done_packet)
+			> pkt->size) {
+		dprintk(CVP_ERR,
+			"hal_process_session_init_done: bad_pkt_size\n");
+		return -E2BIG;
+	}
+
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
+	cmd_done.status = hfi_map_err_status(pkt->error_type);
+	if (cmd_done.status)
+		dprintk(CVP_ERR, "%s: status %#x hfi type %#x err %#x\n",
+			__func__, cmd_done.status, pkt->packet_type, pkt->error_type);
+	cmd_done.data.session_init_done = session_init_done;
+	cmd_done.size = sizeof(struct cvp_hal_session_init_done);
+
+	info->response_type = HAL_SESSION_INIT_DONE;
+	info->response.cmd = cmd_done;
+
+	return 0;
+}
+
+static int hfi_process_session_end_done(u32 device_id,
+		void *hdr, struct msm_cvp_cb_info *info)
+{
+	struct cvp_hfi_msg_sys_session_end_done_packet *pkt =
+			(struct cvp_hfi_msg_sys_session_end_done_packet *)hdr;
+	struct msm_cvp_cb_cmd_done cmd_done = {0};
+
+	dprintk(CVP_SESS, "RECEIVED: SESSION_END_DONE[%#x]\n", pkt->session_id);
+
+	if (!pkt || pkt->size !=
+		sizeof(struct cvp_hfi_msg_sys_session_end_done_packet)) {
+		dprintk(CVP_ERR, "%s: bad packet/packet size\n", __func__);
+		return -E2BIG;
+	}
+
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
+	cmd_done.status = hfi_map_err_status(pkt->error_type);
+	if (cmd_done.status)
+		dprintk(CVP_ERR, "%s: status %#x hfi type %#x err %#x\n",
+			__func__, cmd_done.status, pkt->packet_type, pkt->error_type);
+	cmd_done.size = 0;
+
+	info->response_type = HAL_SESSION_END_DONE;
+	info->response.cmd = cmd_done;
+
+	return 0;
+}
+
+static int hfi_process_session_abort_done(u32 device_id,
+		void *hdr, struct msm_cvp_cb_info *info)
+{
+	struct cvp_hfi_msg_sys_session_abort_done_packet *pkt =
+		(struct cvp_hfi_msg_sys_session_abort_done_packet *)hdr;
+	struct msm_cvp_cb_cmd_done cmd_done = {0};
+
+	dprintk(CVP_SESS, "RECEIVED: SESSION_ABORT_DONE[%#x]\n",
+			pkt->session_id);
+
+	if (!pkt || pkt->size !=
+		sizeof(struct cvp_hfi_msg_sys_session_abort_done_packet)) {
+		dprintk(CVP_ERR, "%s: bad packet/packet size: %d\n",
+				__func__, pkt ? pkt->size : 0);
+		return -E2BIG;
+	}
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
+	cmd_done.status = hfi_map_err_status(pkt->error_type);
+	if (cmd_done.status)
+		dprintk(CVP_ERR, "%s: status %#x hfi type %#x err %#x\n",
+			__func__, cmd_done.status, pkt->packet_type, pkt->error_type);
+	cmd_done.size = 0;
+
+	info->response_type = HAL_SESSION_ABORT_DONE;
+	info->response.cmd = cmd_done;
+
+	return 0;
+}
+
+static int hfi_process_session_set_buf_done(u32 device_id,
+		void *hdr, struct msm_cvp_cb_info *info)
+{
+	struct cvp_hfi_msg_session_hdr *pkt =
+			(struct cvp_hfi_msg_session_hdr *)hdr;
+	struct msm_cvp_cb_cmd_done cmd_done = {0};
+	unsigned int pkt_size;
+
+	if (!pkt) {
+		dprintk(CVP_ERR, "%s: NULL packet\n", __func__);
+		return -E2BIG;
+	}
+	/* Do not dereference pkt (in get_msg_size()) before the NULL check */
+	pkt_size = get_msg_size(pkt);
+	if (pkt->size < pkt_size) {
+		dprintk(CVP_ERR, "bad packet size %d\n", pkt->size);
+		return -E2BIG;
+	}
+	dprintk(CVP_SESS, "RECEIVED:CVP_SET_BUFFER_DONE[%#x]\n",
+			pkt->session_id);
+
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = (void *)(uintptr_t)get_msg_session_id(pkt);
+	cmd_done.status = hfi_map_err_status(get_msg_errorcode(pkt));
+	if (cmd_done.status)
+		dprintk(CVP_ERR, "%s: status %#x hfi type %#x err %#x\n",
+			__func__, cmd_done.status, pkt->packet_type, pkt->error_type);
+	cmd_done.size = 0;
+
+	info->response_type = HAL_SESSION_SET_BUFFER_DONE;
+	info->response.cmd = cmd_done;
+
+	return 0;
+}
+
+static int hfi_process_session_flush_done(u32 device_id,
+		void *hdr, struct msm_cvp_cb_info *info)
+{
+	struct cvp_hfi_msg_sys_session_ctrl_done_packet *pkt =
+		(struct cvp_hfi_msg_sys_session_ctrl_done_packet *)hdr;
+	struct msm_cvp_cb_cmd_done cmd_done = {0};
+
+	dprintk(CVP_SESS, "RECEIVED: SESSION_FLUSH_DONE[%#x]\n",
+			pkt->session_id);
+
+	if (!pkt || pkt->size <
+		sizeof(struct cvp_hfi_msg_sys_session_ctrl_done_packet)) {
+		dprintk(CVP_ERR, "%s: bad packet/packet size: %d\n",
+				__func__, pkt ? pkt->size : 0);
+		return -E2BIG;
+	}
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
+	cmd_done.status = hfi_map_err_status(pkt->error_type);
+	if (cmd_done.status)
+		dprintk(CVP_ERR, "%s: status %#x hfi type %#x err %#x\n",
+			__func__, cmd_done.status, pkt->packet_type, pkt->error_type);
+	cmd_done.size = 0;
+
+	info->response_type = HAL_SESSION_FLUSH_DONE;
+	info->response.cmd = cmd_done;
+
+	return 0;
+}
+
+static int hfi_process_session_start_done(u32 device_id,
+		void *hdr, struct msm_cvp_cb_info *info)
+{
+	struct cvp_hfi_msg_sys_session_ctrl_done_packet *pkt =
+		(struct cvp_hfi_msg_sys_session_ctrl_done_packet *)hdr;
+	struct msm_cvp_cb_cmd_done cmd_done = {0};
+
+	dprintk(CVP_SESS, "RECEIVED: SESSION_START_DONE[%#x]\n",
+			pkt->session_id);
+
+	if (!pkt || pkt->size <
+		sizeof(struct cvp_hfi_msg_sys_session_ctrl_done_packet)) {
+		dprintk(CVP_ERR, "%s: bad packet/packet size: %d\n",
+				__func__, pkt ? pkt->size : 0);
+		return -E2BIG;
+	}
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
+	cmd_done.status = hfi_map_err_status(pkt->error_type);
+	if (cmd_done.status)
+		dprintk(CVP_ERR, "%s: status %#x hfi type %#x err %#x\n",
+			__func__, cmd_done.status, pkt->packet_type, pkt->error_type);
+	cmd_done.size = 0;
+
+	info->response_type = HAL_SESSION_START_DONE;
+	info->response.cmd = cmd_done;
+
+	return 0;
+}
+
+static int hfi_process_session_stop_done(u32 device_id,
+		void *hdr, struct msm_cvp_cb_info *info)
+{
+	struct cvp_hfi_msg_sys_session_ctrl_done_packet *pkt =
+		(struct cvp_hfi_msg_sys_session_ctrl_done_packet *)hdr;
+	struct msm_cvp_cb_cmd_done cmd_done = {0};
+
+	if (!pkt || pkt->size <
+		sizeof(struct cvp_hfi_msg_sys_session_ctrl_done_packet)) {
+		dprintk(CVP_ERR, "%s: bad packet/packet size: %d\n",
+				__func__, pkt ? pkt->size : 0);
+		return -E2BIG;
+	}
+	dprintk(CVP_SESS, "RECEIVED: SESSION_STOP_DONE[%#x]\n",
+			pkt->session_id);
+
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
+	cmd_done.status = hfi_map_err_status(pkt->error_type);
+	if (cmd_done.status)
+		dprintk(CVP_ERR, "%s: status %#x hfi type %#x err %#x\n",
+			__func__, cmd_done.status, pkt->packet_type, pkt->error_type);
+	cmd_done.size = 0;
+
+	info->response_type = HAL_SESSION_STOP_DONE;
+	info->response.cmd = cmd_done;
+
+	return 0;
+}
+
+static int hfi_process_session_rel_buf_done(u32 device_id,
+		void *hdr, struct msm_cvp_cb_info *info)
+{
+	struct cvp_hfi_msg_session_hdr *pkt =
+			(struct cvp_hfi_msg_session_hdr *)hdr;
+	struct msm_cvp_cb_cmd_done cmd_done = {0};
+	unsigned int pkt_size;
+
+	if (!pkt) {
+		dprintk(CVP_ERR, "%s: invalid packet\n", __func__);
+		return -EINVAL;
+	}
+	pkt_size = get_msg_size(pkt);
+	if (pkt->size < pkt_size) {
+		dprintk(CVP_ERR, "bad packet/packet size %d\n", pkt->size);
+		return -E2BIG;
+	}
+	dprintk(CVP_SESS, "RECEIVED:CVP_RELEASE_BUFFER_DONE[%#x]\n",
+			pkt->session_id);
+
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = (void *)(uintptr_t)get_msg_session_id(pkt);
+	cmd_done.status = hfi_map_err_status(get_msg_errorcode(pkt));
+	if (cmd_done.status)
+		dprintk(CVP_ERR, "%s: status %#x hfi type %#x err %#x\n",
+			__func__, cmd_done.status, pkt->packet_type, pkt->error_type);
+	cmd_done.size = 0;
+
+	info->response_type = HAL_SESSION_RELEASE_BUFFER_DONE;
+	info->response.cmd = cmd_done;
+
+	return 0;
+}
+
+static struct msm_cvp_inst *cvp_get_inst_from_id(struct msm_cvp_core *core,
+	unsigned int session_id)
+{
+	struct msm_cvp_inst *inst = NULL;
+	bool match = false;
+	int count = 0;
+
+	if (!core || !session_id)
+		return NULL;
+
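+	/*
+	 * Poll with mutex_trylock() instead of blocking: this runs on the
+	 * message-handling path, so bail out after ~1000 attempts
+	 * (100-200us apart) rather than stalling on core->lock forever.
+	 */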
+retry:
+	if (mutex_trylock(&core->lock)) {
+		list_for_each_entry(inst, &core->instances, list) {
+			if (hash32_ptr(inst->session) == session_id) {
+				match = true;
+				break;
+			}
+		}
+
+		inst = match && kref_get_unless_zero(&inst->kref) ? inst : NULL;
+		mutex_unlock(&core->lock);
+	} else {
+		if (core->state == CVP_CORE_UNINIT)
+			return NULL;
+		usleep_range(100, 200);
+		count++;
+		if (count < 1000)
+			goto retry;
+		else
+			dprintk(CVP_ERR, "timeout locking core mutex\n");
+	}
+
+	return inst;
+}
+
+static int hfi_process_session_dump_notify(u32 device_id,
+		void *hdr, struct msm_cvp_cb_info *info)
+{
+	struct msm_cvp_inst *inst = NULL;
+	struct msm_cvp_core *core;
+	struct cvp_session_prop *session_prop;
+	unsigned int session_id;
+	struct msm_cvp_cb_cmd_done cmd_done = {0};
+	struct cvp_hfi_dumpmsg_session_hdr *pkt =
+			(struct cvp_hfi_dumpmsg_session_hdr *)hdr;
+
+	if (!pkt) {
+		dprintk(CVP_ERR, "%s: invalid param\n", __func__);
+		return -EINVAL;
+	} else if (pkt->size > sizeof(struct cvp_hfi_dumpmsg_session_hdr)) {
+		dprintk(CVP_ERR, "%s: bad_pkt_size %d\n", __func__, pkt->size);
+		return -E2BIG;
+	}
+	session_id = get_msg_session_id(pkt);
+	core = cvp_driver->cvp_core;
+	inst = cvp_get_inst_from_id(core, session_id);
+	if (!inst) {
+		dprintk(CVP_ERR, "%s: invalid session\n", __func__);
+		return -EINVAL;
+	}
+	session_prop = &inst->prop;
+	session_prop->dump_offset = pkt->dump_offset;
+	session_prop->dump_size = pkt->dump_size;
+
+	dprintk(CVP_SESS, "RECEIVED: SESSION_DUMP[%x]\n", session_id);
+
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
+	cmd_done.status = hfi_map_err_status(pkt->error_type);
+	if (cmd_done.status)
+		dprintk(CVP_ERR, "%s: status %#x hfi type %#x err %#x\n",
+			__func__, cmd_done.status, pkt->packet_type, pkt->error_type);
+	cmd_done.size = 0;
+
+	info->response_type = HAL_SESSION_DUMP_NOTIFY;
+	info->response.cmd = cmd_done;
+
+	cvp_put_inst(inst);
+	return 0;
+}
+
+static int hfi_process_session_cvp_msg(u32 device_id,
+		void *hdr, struct msm_cvp_cb_info *info)
+{
+	struct cvp_hfi_msg_session_hdr *pkt =
+			(struct cvp_hfi_msg_session_hdr *)hdr;
+	struct cvp_session_msg *sess_msg;
+	struct msm_cvp_inst *inst = NULL;
+	struct msm_cvp_core *core;
+	unsigned int session_id;
+	struct cvp_session_queue *sq;
+
+	if (!pkt) {
+		dprintk(CVP_ERR, "%s: invalid param\n", __func__);
+		return -EINVAL;
+	} else if (pkt->size > MAX_HFI_PKT_SIZE * sizeof(unsigned int)) {
+		dprintk(CVP_ERR, "%s: bad_pkt_size %d\n", __func__, pkt->size);
+		return -E2BIG;
+	}
+	session_id = get_msg_session_id(pkt);
+	core = cvp_driver->cvp_core;
+	inst = cvp_get_inst_from_id(core, session_id);
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s: invalid session\n", __func__);
+		return -EINVAL;
+	}
+
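+	/* FENCE_BIT in kdata marks packets submitted on the fenced path,
+	 * whose responses go to the dedicated fence session queue.
+	 */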
+	if (pkt->client_data.kdata & FENCE_BIT)
+		sq = &inst->session_queue_fence;
+	else
+		sq = &inst->session_queue;
+
+	sess_msg = cvp_kmem_cache_zalloc(&cvp_driver->msg_cache, GFP_KERNEL);
+	if (sess_msg == NULL) {
+		dprintk(CVP_ERR, "%s ran out of msg cache memory\n", __func__);
+		goto error_no_mem;
+	}
+
+	memcpy(&sess_msg->pkt, pkt, get_msg_size(pkt));
+
+	dprintk(CVP_HFI,
+		"%s: Received msg %x cmd_done.status=%d sessionid=%x\n",
+		__func__, pkt->packet_type,
+		hfi_map_err_status(get_msg_errorcode(pkt)), session_id);
+
+	spin_lock(&sq->lock);
+	if (sq->msg_count >= MAX_NUM_MSGS_PER_SESSION) {
+		dprintk(CVP_ERR, "Reached session queue size limit\n");
+		goto error_handle_msg;
+	}
+	list_add_tail(&sess_msg->node, &sq->msgs);
+	sq->msg_count++;
+	spin_unlock(&sq->lock);
+
+	wake_up_all(&sq->wq);
+
+	info->response_type = HAL_NO_RESP;
+
+	cvp_put_inst(inst);
+	return 0;
+
+error_handle_msg:
+	spin_unlock(&sq->lock);
+	cvp_kmem_cache_free(&cvp_driver->msg_cache, sess_msg);
+error_no_mem:
+	cvp_put_inst(inst);
+	return -ENOMEM;
+}
+
+static void hfi_process_sys_get_prop_image_version(
+		struct cvp_hfi_msg_sys_property_info_packet *pkt)
+{
+	int i = 0;
+	const u32 version_string_size = 128;
+	u8 *str_image_version;
+	int req_bytes;
+
+	req_bytes = pkt->size - sizeof(*pkt);
+	if (req_bytes < (signed int)version_string_size ||
+			!pkt->rg_property_data[1] ||
+			pkt->num_properties > 1) {
+		dprintk(CVP_ERR, "%s: bad_pkt: %d\n", __func__, req_bytes);
+		return;
+	}
+	str_image_version = (u8 *)&pkt->rg_property_data[1];
+	/*
+	 * The version string returned by firmware includes null
+	 * characters at the start and in between. Replace the null
+	 * characters with space, to print the version info.
+	 */
+	for (i = 0; i < version_string_size; i++) {
+		if (str_image_version[i] != '\0')
+			cvp_driver->fw_version[i] = str_image_version[i];
+		else
+			cvp_driver->fw_version[i] = ' ';
+	}
+	cvp_driver->fw_version[i - 1] = '\0';
+	dprintk(CVP_HFI, "F/W version: %s\n", cvp_driver->fw_version);
+}
+
+static int hfi_process_sys_property_info(u32 device_id,
+		void *hdr, struct msm_cvp_cb_info *info)
+{
+	struct cvp_hfi_msg_sys_property_info_packet *pkt =
+			(struct cvp_hfi_msg_sys_property_info_packet *)hdr;
+	if (!pkt) {
+		dprintk(CVP_ERR, "%s: invalid param\n", __func__);
+		return -EINVAL;
+	} else if (pkt->size > sizeof(*pkt)) {
+		dprintk(CVP_ERR,
+				"%s: bad_pkt_size %d\n", __func__, pkt->size);
+		return -E2BIG;
+	} else if (!pkt->num_properties) {
+		dprintk(CVP_WARN,
+				"%s: no_properties\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (pkt->rg_property_data[0]) {
+	case HFI_PROPERTY_SYS_IMAGE_VERSION:
+		hfi_process_sys_get_prop_image_version(pkt);
+
+		*info = (struct msm_cvp_cb_info) {
+			.response_type =  HAL_RESPONSE_UNUSED,
+		};
+		return 0;
+	default:
+		dprintk(CVP_WARN,
+				"%s: unknown_prop_id: %x\n",
+				__func__, pkt->rg_property_data[0]);
+		return -ENOTSUPP;
+	}
+}
+
+int cvp_hfi_process_msg_packet(u32 device_id, void *hdr,
+			struct msm_cvp_cb_info *info)
+{
+	typedef int (*pkt_func_def)(u32, void *, struct msm_cvp_cb_info *info);
+	pkt_func_def pkt_func = NULL;
+	struct cvp_hal_msg_pkt_hdr *msg_hdr = (struct cvp_hal_msg_pkt_hdr *)hdr;
+
+	if (!info || !msg_hdr || msg_hdr->size < CVP_IFACEQ_MIN_PKT_SIZE) {
+		dprintk(CVP_ERR, "%s: bad packet/packet size\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	dprintk(CVP_HFI, "Received HFI MSG with type %#x\n", msg_hdr->packet);
+	switch (msg_hdr->packet) {
+	case HFI_MSG_EVENT_NOTIFY:
+		pkt_func = (pkt_func_def)hfi_process_event_notify;
+		break;
+	case  HFI_MSG_SYS_INIT_DONE:
+		pkt_func = (pkt_func_def)hfi_process_sys_init_done;
+		break;
+	case HFI_MSG_SYS_SESSION_INIT_DONE:
+		pkt_func = (pkt_func_def)hfi_process_session_init_done;
+		break;
+	case HFI_MSG_SYS_PROPERTY_INFO:
+		pkt_func = (pkt_func_def)hfi_process_sys_property_info;
+		break;
+	case HFI_MSG_SYS_SESSION_END_DONE:
+		pkt_func = (pkt_func_def)hfi_process_session_end_done;
+		break;
+	case HFI_MSG_SESSION_CVP_SET_BUFFERS:
+		pkt_func = (pkt_func_def) hfi_process_session_set_buf_done;
+		break;
+	case HFI_MSG_SESSION_CVP_RELEASE_BUFFERS:
+		pkt_func = (pkt_func_def)hfi_process_session_rel_buf_done;
+		break;
+	case HFI_MSG_SYS_SESSION_ABORT_DONE:
+		pkt_func = (pkt_func_def)hfi_process_session_abort_done;
+		break;
+	case HFI_MSG_SESSION_CVP_FLUSH:
+		pkt_func = (pkt_func_def)hfi_process_session_flush_done;
+		break;
+	case HFI_MSG_SESSION_EVA_START:
+		pkt_func = (pkt_func_def)hfi_process_session_start_done;
+		break;
+	case HFI_MSG_SESSION_EVA_STOP:
+		pkt_func = (pkt_func_def)hfi_process_session_stop_done;
+		break;
+	case HFI_MSG_EVENT_NOTIFY_SNAPSHOT_READY:
+		pkt_func = (pkt_func_def)hfi_process_session_dump_notify;
+		break;
+	default:
+		dprintk(CVP_HFI, "Use default msg handler: %#x\n",
+				msg_hdr->packet);
+		pkt_func = (pkt_func_def)hfi_process_session_cvp_msg;
+		break;
+	}
+
+	return pkt_func ?
+		pkt_func(device_id, hdr, info) : -ENOTSUPP;
+}

+ 1708 - 0
qcom/opensource/eva-kernel/msm/eva/msm_cvp.c

@@ -0,0 +1,1708 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "msm_cvp.h"
+#include "cvp_hfi.h"
+#include "cvp_core_hfi.h"
+#include "msm_cvp_buf.h"
+#include "cvp_comm_def.h"
+#include "cvp_power.h"
+#include "cvp_hfi_api.h"
+
+static int cvp_enqueue_pkt(struct msm_cvp_inst *inst,
+	struct eva_kmd_hfi_packet *in_pkt,
+	unsigned int in_offset,
+	unsigned int in_buf_num);
+
+int msm_cvp_get_session_info(struct msm_cvp_inst *inst, u32 *session)
+{
+	int rc = 0;
+	struct msm_cvp_inst *s;
+
+	if (!inst || !inst->core || !session) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	s = cvp_get_inst_validate(inst->core, inst);
+	if (!s)
+		return -ECONNRESET;
+
+	*session = hash32_ptr(inst->session);
+	dprintk(CVP_SESS, "%s: id 0x%x\n", __func__, *session);
+
+	cvp_put_inst(s);
+	return rc;
+}
+
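+/*
+ * Dequeue the next session message, or the message whose kdata matches
+ * *ktid when one is given. Returns true to wake the waiter; waking with
+ * *msg == NULL means the queue is in QUEUE_INIT/QUEUE_INVALID state,
+ * i.e. the session is being deleted.
+ */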
+static bool cvp_msg_pending(struct cvp_session_queue *sq,
+				struct cvp_session_msg **msg, u64 *ktid)
+{
+	struct cvp_session_msg *mptr = NULL, *dummy;
+	bool result = false;
+
+	if (!sq)
+		return false;
+	spin_lock(&sq->lock);
+	if (sq->state == QUEUE_INIT || sq->state == QUEUE_INVALID) {
+		/* The session is being deleted */
+		spin_unlock(&sq->lock);
+		*msg = NULL;
+		return true;
+	}
+	result = list_empty(&sq->msgs);
+	if (!result) {
+		mptr = list_first_entry(&sq->msgs,
+				struct cvp_session_msg,
+				node);
+		if (!ktid) {
+			if (mptr) {
+				list_del_init(&mptr->node);
+				sq->msg_count--;
+			}
+		} else {
+			result = true;
+			list_for_each_entry_safe(mptr, dummy, &sq->msgs, node) {
+				if (*ktid == mptr->pkt.client_data.kdata) {
+					list_del_init(&mptr->node);
+					sq->msg_count--;
+					result = false;
+					break;
+				}
+			}
+			if (result)
+				mptr = NULL;
+		}
+	}
+	spin_unlock(&sq->lock);
+	*msg = mptr;
+	return !result;
+}
+
+static int cvp_wait_process_message(struct msm_cvp_inst *inst,
+				struct cvp_session_queue *sq, u64 *ktid,
+				unsigned long timeout,
+				struct eva_kmd_hfi_packet *out)
+{
+	struct cvp_session_msg *msg = NULL;
+	struct cvp_hfi_msg_session_hdr *hdr;
+	int rc = 0;
+
+	if (wait_event_timeout(sq->wq,
+		cvp_msg_pending(sq, &msg, ktid), timeout) == 0) {
+		dprintk(CVP_WARN, "session queue wait timeout\n");
+		if (inst && inst->core && inst->core->dev_ops &&
+				inst->state != MSM_CVP_CORE_INVALID)
+			print_hfi_queue_info(inst->core->dev_ops);
+		rc = -ETIMEDOUT;
+		goto exit;
+	}
+
+	if (msg == NULL) {
+		dprintk(CVP_WARN, "%s: queue state %d, msg cnt %d\n", __func__,
+					sq->state, sq->msg_count);
+
+		if (inst->state >= MSM_CVP_CLOSE_DONE ||
+				(sq->state != QUEUE_ACTIVE &&
+				sq->state != QUEUE_START)) {
+			rc = -ECONNRESET;
+			goto exit;
+		}
+
+		msm_cvp_comm_kill_session(inst);
+		goto exit;
+	}
+
+	if (!out) {
+		cvp_kmem_cache_free(&cvp_driver->msg_cache, msg);
+		goto exit;
+	}
+
+	hdr = (struct cvp_hfi_msg_session_hdr *)&msg->pkt;
+	memcpy(out, &msg->pkt, get_msg_size(hdr));
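+	/* kdata values below ARRAY_SIZE(cvp_hfi_defs) do not encode a frame
+	 * ktid, so there is no frame mapping to release for them.
+	 */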
+	if (hdr->client_data.kdata >= ARRAY_SIZE(cvp_hfi_defs))
+		msm_cvp_unmap_frame(inst, hdr->client_data.kdata);
+	cvp_kmem_cache_free(&cvp_driver->msg_cache, msg);
+
+exit:
+	return rc;
+}
+
+static int msm_cvp_session_receive_hfi(struct msm_cvp_inst *inst,
+			struct eva_kmd_hfi_packet *out_pkt)
+{
+	unsigned long wait_time;
+	struct cvp_session_queue *sq;
+	struct msm_cvp_inst *s;
+	int rc = 0;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s invalid session\n", __func__);
+		return -EINVAL;
+	}
+
+	s = cvp_get_inst_validate(inst->core, inst);
+	if (!s)
+		return -ECONNRESET;
+
+	wait_time = msecs_to_jiffies(
+		inst->core->resources.msm_cvp_hw_rsp_timeout);
+	sq = &inst->session_queue;
+
+	rc = cvp_wait_process_message(inst, sq, NULL, wait_time, out_pkt);
+
+	cvp_put_inst(inst);
+	return rc;
+}
+
+static int msm_cvp_session_process_hfi(
+	struct msm_cvp_inst *inst,
+	struct eva_kmd_hfi_packet *in_pkt,
+	unsigned int in_offset,
+	unsigned int in_buf_num)
+{
+	int pkt_idx, rc = 0;
+
+	unsigned int offset = 0, buf_num = 0, signal;
+	struct cvp_session_queue *sq;
+	struct msm_cvp_inst *s;
+	struct cvp_hfi_cmd_session_hdr *pkt_hdr;
+	bool is_config_pkt;
+
+	if (!inst || !inst->core || !in_pkt) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	if (inst->state == MSM_CVP_CORE_INVALID) {
+		dprintk(CVP_ERR, "sess %pK is INVALID; rejecting new HFIs\n", inst);
+		return -ECONNRESET;
+	}
+
+	s = cvp_get_inst_validate(inst->core, inst);
+	if (!s)
+		return -ECONNRESET;
+
+	pkt_hdr = (struct cvp_hfi_cmd_session_hdr *)in_pkt;
+	dprintk(CVP_CMD, "%s: "
+		"pkt_type %08x sess_id %08x trans_id %u ktid %llu\n",
+		__func__, pkt_hdr->packet_type,
+		pkt_hdr->session_id,
+		pkt_hdr->client_data.transaction_id,
+		pkt_hdr->client_data.kdata & (FENCE_BIT - 1));
+
+	pkt_idx = get_pkt_index((struct cvp_hal_session_cmd_pkt *)in_pkt);
+	if (pkt_idx < 0) {
+		dprintk(CVP_ERR, "%s incorrect packet %d, %x\n", __func__,
+				in_pkt->pkt_data[0],
+				in_pkt->pkt_data[1]);
+		rc = -EINVAL;
+		goto exit;
+	}
+	signal = cvp_hfi_defs[pkt_idx].resp;
+	is_config_pkt = cvp_hfi_defs[pkt_idx].is_config_pkt;
+
+	if (is_config_pkt)
+		pr_info_ratelimited(CVP_DBG_TAG "inst %pK config %s\n", "sess",
+			inst, cvp_hfi_defs[pkt_idx].name);
+
+	if (signal == HAL_NO_RESP) {
+		/* Frame packets are not allowed before the session starts */
+		sq = &inst->session_queue;
+		spin_lock(&sq->lock);
+		if ((sq->state != QUEUE_START && !is_config_pkt) ||
+			(sq->state >= QUEUE_INVALID)) {
+			/*
+			 * An init packet is allowed in QUEUE_ACTIVE,
+			 * QUEUE_START and QUEUE_STOP states; a frame
+			 * packet is only allowed in QUEUE_START.
+			 */
+			spin_unlock(&sq->lock);
+			dprintk(CVP_ERR, "%s: invalid queue state %d\n",
+				__func__, sq->state);
+			rc = -EINVAL;
+			goto exit;
+		}
+		spin_unlock(&sq->lock);
+	}
+
+	if (in_offset && in_buf_num) {
+		offset = in_offset;
+		buf_num = in_buf_num;
+	}
+	if (!is_buf_param_valid(buf_num, offset)) {
+		dprintk(CVP_ERR, "Incorrect buffer num and offset in cmd\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	rc = msm_cvp_proc_oob(inst, in_pkt);
+	if (rc) {
+		dprintk(CVP_ERR, "%s: failed to process OOB buffer", __func__);
+		goto exit;
+	}
+
+	rc = cvp_enqueue_pkt(inst, in_pkt, offset, buf_num);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to enqueue pkt, inst %pK "
+			"pkt_type %08x ktid %llu transaction_id %u\n",
+			inst, pkt_hdr->packet_type,
+			pkt_hdr->client_data.kdata,
+			pkt_hdr->client_data.transaction_id);
+	}
+
+exit:
+	cvp_put_inst(inst);
+	return rc;
+}
+
+static bool cvp_fence_wait(struct cvp_fence_queue *q,
+			struct cvp_fence_command **fence,
+			enum queue_state *state)
+{
+	struct cvp_fence_command *f;
+
+	if (!q)
+		return false;
+
+	*fence = NULL;
+
+	while (!mutex_trylock(&q->lock))
+		usleep_range(100, 200);
+	*state = q->state;
+	if (*state != QUEUE_START) {
+		mutex_unlock(&q->lock);
+		return true;
+	}
+
+	if (list_empty(&q->wait_list)) {
+		mutex_unlock(&q->lock);
+		return false;
+	}
+
+	f = list_first_entry(&q->wait_list, struct cvp_fence_command, list);
+	list_del_init(&f->list);
+	list_add_tail(&f->list, &q->sched_list);
+
+	mutex_unlock(&q->lock);
+	*fence = f;
+
+	return true;
+}
+
+static int cvp_fence_proc(struct msm_cvp_inst *inst,
+			struct cvp_fence_command *fc,
+			struct cvp_hfi_cmd_session_hdr *pkt)
+{
+	int rc = 0;
+	unsigned long timeout;
+	u64 ktid;
+	int synx_state = SYNX_STATE_SIGNALED_SUCCESS;
+	struct cvp_hfi_ops *ops_tbl;
+	struct cvp_session_queue *sq;
+	u32 hfi_err = HFI_ERR_NONE;
+	struct cvp_hfi_msg_session_hdr_ext hdr;
+	struct iris_hfi_device *device;
+
+	dprintk(CVP_SYNX, "%s %s\n", current->comm, __func__);
+
+	if (!inst || !inst->core)
+		return -EINVAL;
+
+	ops_tbl = inst->core->dev_ops;
+	sq = &inst->session_queue_fence;
+	ktid = pkt->client_data.kdata;
+
+	rc = inst->core->synx_ftbl->cvp_synx_ops(inst, CVP_INPUT_SYNX,
+			fc, &synx_state);
+	if (rc) {
+		msm_cvp_unmap_frame(inst, pkt->client_data.kdata);
+		goto exit;
+	}
+
+	rc = call_hfi_op(ops_tbl, session_send, (void *)inst->session,
+			(struct eva_kmd_hfi_packet *)pkt);
+	if (rc) {
+		dprintk(CVP_ERR, "%s %s: Failed in call_hfi_op %d, %x\n",
+			current->comm, __func__, pkt->size, pkt->packet_type);
+		synx_state = SYNX_STATE_SIGNALED_CANCEL;
+		goto exit;
+	}
+
+	timeout = msecs_to_jiffies(
+			inst->core->resources.msm_cvp_hw_rsp_timeout);
+	rc = cvp_wait_process_message(inst, sq, &ktid, timeout,
+				(struct eva_kmd_hfi_packet *)&hdr);
+
+	hfi_err = hdr.error_type;
+	if (rc) {
+		dprintk(CVP_ERR, "%s %s: cvp_wait_process_message rc %d\n",
+			current->comm, __func__, rc);
+		synx_state = SYNX_STATE_SIGNALED_CANCEL;
+		goto exit;
+	}
+	if (hfi_err == HFI_ERR_SESSION_FLUSHED) {
+		dprintk(CVP_SYNX, "%s %s: cvp_wait_process_message flushed\n",
+			current->comm, __func__);
+		synx_state = SYNX_STATE_SIGNALED_CANCEL;
+	} else if (hfi_err == HFI_ERR_SESSION_STREAM_CORRUPT) {
+		dprintk(CVP_INFO, "%s %s: cvp_wait_process_msg non-fatal %d\n",
+		current->comm, __func__, hfi_err);
+		synx_state = SYNX_STATE_SIGNALED_SUCCESS;
+	} else if (hfi_err == HFI_ERR_SESSION_HW_HANG_DETECTED) {
+		dprintk(CVP_ERR, "%s %s: cvp_wait_process_message hfi HW hang err %d\n",
+			current->comm, __func__, hfi_err);
+		synx_state = SYNX_STATE_SIGNALED_CANCEL;
+		device = ops_tbl->hfi_device_data;
+		cvp_dump_csr(device);
+	} else if (hfi_err != HFI_ERR_NONE) {
+		dprintk(CVP_ERR, "%s %s: cvp_wait_process_message hfi err %d\n",
+			current->comm, __func__, hfi_err);
+		synx_state = SYNX_STATE_SIGNALED_CANCEL;
+	}
+
+exit:
+	rc = inst->core->synx_ftbl->cvp_synx_ops(inst, CVP_OUTPUT_SYNX,
+			fc, &synx_state);
+	return rc;
+}
+
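+/*
+ * Allocate a fence command whose packet buffer is sized to the larger of
+ * the queued command and the extended response header.
+ */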
+static int cvp_alloc_fence_data(struct cvp_fence_command **f, u32 size)
+{
+	struct cvp_fence_command *fcmd;
+	int alloc_size = sizeof(struct cvp_hfi_msg_session_hdr_ext);
+
+	fcmd = kzalloc(sizeof(struct cvp_fence_command), GFP_KERNEL);
+	if (!fcmd)
+		return -ENOMEM;
+
+	alloc_size = (alloc_size >= size) ? alloc_size : size;
+	fcmd->pkt = kzalloc(alloc_size, GFP_KERNEL);
+	if (!fcmd->pkt) {
+		kfree(fcmd);
+		return -ENOMEM;
+	}
+
+	*f = fcmd;
+	return 0;
+}
+
+static void cvp_free_fence_data(struct cvp_fence_command *f)
+{
+	kfree(f->pkt);
+	f->pkt = NULL;
+	kfree(f);
+}
+
+static int cvp_fence_thread(void *data)
+{
+	int rc = 0, num_fences;
+	struct msm_cvp_inst *inst;
+	struct cvp_fence_queue *q;
+	enum queue_state state;
+	struct cvp_fence_command *f;
+	struct cvp_hfi_cmd_session_hdr *pkt;
+	u32 *synx;
+	u64 ktid = 0;
+
+	dprintk(CVP_SYNX, "Enter %s\n", current->comm);
+
+	inst = (struct msm_cvp_inst *)data;
+	if (!inst || !inst->core || !inst->core->dev_ops) {
+		dprintk(CVP_ERR, "%s invalid inst %pK\n", current->comm, inst);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	q = &inst->fence_cmd_queue;
+
+wait:
+	dprintk(CVP_SYNX, "%s starts wait\n", current->comm);
+
+	f = NULL;
+	wait_event_interruptible(q->wq, cvp_fence_wait(q, &f, &state));
+	if (state != QUEUE_START)
+		goto exit;
+
+	if (!f) {
+		usleep_range(100, 200);
+		goto wait;
+	}
+
+	pkt = f->pkt;
+	synx = (u32 *)f->synx;
+
+	num_fences = f->num_fences - f->output_index;
+	/*
+	 * If there is output fence, go through fence path
+	 * Otherwise, go through non-fenced path
+	 */
+	if (num_fences)
+		ktid = pkt->client_data.kdata & (FENCE_BIT - 1);
+
+	dprintk(CVP_SYNX, "%s pkt type %d on ktid %llu frameID %llu\n",
+		current->comm, pkt->packet_type, ktid, f->frame_id);
+
+	rc = cvp_fence_proc(inst, f, pkt);
+
+	mutex_lock(&q->lock);
+	inst->core->synx_ftbl->cvp_release_synx(inst, f);
+	list_del_init(&f->list);
+	state = q->state;
+	mutex_unlock(&q->lock);
+
+	dprintk(CVP_SYNX, "%s done with %d ktid %llu frameID %llu rc %d\n",
+		current->comm, pkt->packet_type, ktid, f->frame_id, rc);
+
+	cvp_free_fence_data(f);
+
+	if (rc && state != QUEUE_START)
+		goto exit;
+
+	goto wait;
+
+exit:
+	dprintk(CVP_SYNX, "%s exit\n", current->comm);
+	cvp_put_inst(inst);
+	return rc;
+}
+
+static int msm_cvp_session_process_hfi_fence(struct msm_cvp_inst *inst,
+					struct eva_kmd_arg *arg)
+{
+	dprintk(CVP_WARN, "Deprecated IOCTL command %s\n", __func__);
+	return -EINVAL;
+}
+
+static int cvp_populate_fences(struct eva_kmd_hfi_packet *in_pkt,
+	unsigned int offset, unsigned int num, struct msm_cvp_inst *inst)
+{
+	u32 i, buf_offset, fence_cnt;
+	struct eva_kmd_fence fences[MAX_HFI_FENCE_SIZE];
+	struct cvp_fence_command *f;
+	struct cvp_hfi_cmd_session_hdr *cmd_hdr;
+	struct cvp_fence_queue *q;
+	enum op_mode mode;
+	struct cvp_buf_type *buf;
+	bool override;
+	unsigned int total_fence_count = 0;
+
+	int rc = 0;
+
+	if (!offset || !num)
+		return 0;
+
+	if (offset < (sizeof(struct cvp_hfi_cmd_session_hdr)/sizeof(u32))) {
+		dprintk(CVP_ERR, "%s: Incorrect offset in cmd %d\n", __func__, offset);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	override = get_pkt_fenceoverride((struct cvp_hal_session_cmd_pkt*)in_pkt);
+
+	dprintk(CVP_SYNX, "%s: Fence Override is %d\n", __func__, override);
+	dprintk(CVP_SYNX, "%s: Kernel Fence is %d\n", __func__, cvp_kernel_fence_enabled);
+
+	q = &inst->fence_cmd_queue;
+
+	mutex_lock(&q->lock);
+	mode = q->mode;
+	mutex_unlock(&q->lock);
+
+	if (mode == OP_DRAINING) {
+		dprintk(CVP_SYNX, "%s: flush in progress\n", __func__);
+		rc = -EBUSY;
+		goto exit;
+	}
+
+	cmd_hdr = (struct cvp_hfi_cmd_session_hdr *)in_pkt;
+	rc = cvp_alloc_fence_data(&f, cmd_hdr->size);
+	if (rc) {
+		dprintk(CVP_ERR, "%s: Failed to alloc fence data", __func__);
+		goto exit;
+	}
+
+	f->type = cmd_hdr->packet_type;
+	f->mode = OP_NORMAL;
+	f->signature = 0xFEEDFACE;
+	f->num_fences = 0;
+	f->output_index = 0;
+	buf_offset = offset;
+
+	switch (cvp_kernel_fence_enabled) {
+	case 0:
+		goto soc_fence;
+	case 1:
+		goto kernel_fence;
+	case 2:
+		if (override)
+			goto kernel_fence;
+		goto soc_fence;
+	default:
+		dprintk(CVP_ERR, "%s: invalid params", __func__);
+		rc = -EINVAL;
+		goto free_exit;
+	}
+
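+	/*
+	 * SOC fence path: only count the input/output handles here; no
+	 * kernel-side synx import is performed on this path.
+	 */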
+soc_fence:
+	for (i = 0; i < num; i++) {
+		buf = (struct cvp_buf_type*)&in_pkt->pkt_data[buf_offset];
+		buf_offset += sizeof(*buf) >> 2;
+
+		if (buf->input_handle || buf->output_handle) {
+			f->num_fences++;
+			if (buf->input_handle)
+				f->output_index++;
+		}
+	}
+	f->signature = 0xB0BABABE;
+	if (f->num_fences)
+		goto fence_cmd_queue;
+
+	goto free_exit;
+
+kernel_fence:
+	/* First pass to find INPUT synx handles */
+	for (i = 0; i < num; i++) {
+		buf = (struct cvp_buf_type *)&in_pkt->pkt_data[buf_offset];
+		buf_offset += sizeof(*buf) >> 2;
+
+		if (buf->input_handle) {
+			/* Bound check before writing: total_fence_count is
+			 * only validated after this loop.
+			 */
+			if (f->num_fences >= MAX_HFI_FENCE_SIZE) {
+				rc = -EINVAL;
+				goto free_exit;
+			}
+			/* Check fence_type? */
+			fences[f->num_fences].h_synx = buf->input_handle;
+			f->num_fences++;
+			buf->fence_type &= ~INPUT_FENCE_BITMASK;
+			buf->input_handle = 0;
+			total_fence_count++;
+		}
+		if (buf->output_handle)
+			total_fence_count++;
+	}
+	if (total_fence_count > MAX_HFI_FENCE_SIZE) {
+		dprintk(CVP_ERR, "Invalid total_fence_count %d\n", total_fence_count);
+		rc = -EINVAL;
+		goto free_exit;
+	}
+	f->output_index = f->num_fences;
+
+	dprintk(CVP_SYNX, "%s:Input Fence passed - Number of Fences is %d\n",
+			__func__, f->num_fences);
+
+	/*
+	 * Second pass to find OUTPUT synx handles.
+	 * If the fence count is 0, skip the synx import below and return 0.
+	 */
+	buf_offset = offset;
+	for (i = 0; i < num; i++) {
+		buf = (struct cvp_buf_type*)&in_pkt->pkt_data[buf_offset];
+		buf_offset += sizeof(*buf) >> 2;
+
+		if (buf->output_handle) {
+			/* Check fence_type? */
+			fences[f->num_fences].h_synx = buf->output_handle;
+			f->num_fences++;
+			buf->fence_type &= ~OUTPUT_FENCE_BITMASK;
+			buf->output_handle = 0;
+		}
+	}
+	dprintk(CVP_SYNX, "%s:Output Fence passed - Number of Fences is %d\n",
+			__func__, f->num_fences);
+
+	if (f->num_fences == 0)
+		goto free_exit;
+
+	rc = inst->core->synx_ftbl->cvp_import_synx(inst, f, (u32 *)fences);
+	if (rc) {
+		dprintk(CVP_ERR, "%s: Failed to import fences", __func__);
+		goto free_exit;
+	}
+
+fence_cmd_queue:
+	fence_cnt = f->num_fences;
+	memcpy(f->pkt, cmd_hdr, cmd_hdr->size);
+	f->pkt->client_data.kdata |= FENCE_BIT;
+
+	mutex_lock(&q->lock);
+	list_add_tail(&f->list, &inst->fence_cmd_queue.wait_list);
+	mutex_unlock(&q->lock);
+
+	wake_up(&inst->fence_cmd_queue.wq);
+
+	return fence_cnt;
+
+free_exit:
+	cvp_free_fence_data(f);
+exit:
+	return rc;
+}
+
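+/*
+ * Map the packet's buffers according to its type (persist map/unmap or
+ * frame map), then either hand the packet to a fence worker (when
+ * cvp_populate_fences() queued it) or send it to firmware directly.
+ */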
+static int cvp_enqueue_pkt(struct msm_cvp_inst *inst,
+	struct eva_kmd_hfi_packet *in_pkt,
+	unsigned int in_offset,
+	unsigned int in_buf_num)
+{
+	struct cvp_hfi_ops *ops_tbl;
+	struct cvp_hfi_cmd_session_hdr *cmd_hdr;
+	int pkt_type, rc = 0;
+	enum buf_map_type map_type;
+
+	ops_tbl = inst->core->dev_ops;
+
+	pkt_type = in_pkt->pkt_data[1];
+	map_type = cvp_find_map_type(pkt_type);
+
+	cmd_hdr = (struct cvp_hfi_cmd_session_hdr *)in_pkt;
+	/* The kdata will be overridden by transaction ID if the cmd has buf */
+	cmd_hdr->client_data.kdata = 0;
+	dprintk(CVP_CMD, "%s: "
+		"pkt_type %08x sess_id %08x trans_id %u ktid %llu\n",
+		__func__, cmd_hdr->packet_type,
+		cmd_hdr->session_id,
+		cmd_hdr->client_data.transaction_id,
+		cmd_hdr->client_data.kdata & (FENCE_BIT - 1));
+
+	if (map_type == MAP_PERSIST)
+		rc = msm_cvp_map_user_persist(inst, in_pkt, in_offset, in_buf_num);
+	else if (map_type == UNMAP_PERSIST)
+		rc = msm_cvp_unmap_user_persist(inst, in_pkt, in_offset, in_buf_num);
+	else
+		rc = msm_cvp_map_frame(inst, in_pkt, in_offset, in_buf_num);
+
+	if (rc)
+		return rc;
+
+	rc = cvp_populate_fences(in_pkt, in_offset, in_buf_num, inst);
+	if (rc == 0) {
+		rc = call_hfi_op(ops_tbl, session_send, (void *)inst->session,
+			in_pkt);
+		if (rc) {
+			dprintk(CVP_ERR, "%s: Failed in call_hfi_op %d, %x\n",
+					__func__, in_pkt->pkt_data[0],
+					in_pkt->pkt_data[1]);
+			if (map_type == MAP_FRAME)
+				msm_cvp_unmap_frame(inst,
+					cmd_hdr->client_data.kdata);
+		}
+	} else if (rc > 0) {
+		dprintk(CVP_SYNX, "Going fenced path\n");
+		rc = 0;
+	} else {
+		dprintk(CVP_ERR, "%s: Failed to populate fences\n",
+			__func__);
+		if (map_type == MAP_FRAME)
+			msm_cvp_unmap_frame(inst, cmd_hdr->client_data.kdata);
+	}
+
+	return rc;
+}
+
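+/* Divide by 1.5 without floating point: a / 1.5 == (2 * a) / 3 */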
+static inline int div_by_1dot5(unsigned int a)
+{
+	unsigned long i = (unsigned long)a << 1;
+
+	return (unsigned int) i/3;
+}
+
+int msm_cvp_session_delete(struct msm_cvp_inst *inst)
+{
+	return 0;
+}
+
+int msm_cvp_session_create(struct msm_cvp_inst *inst)
+{
+	int rc = 0, rc1 = 0;
+	struct cvp_session_queue *sq;
+
+	if (!inst || !inst->core)
+		return -EINVAL;
+
+	if (inst->state >= MSM_CVP_CLOSE_DONE)
+		return -ECONNRESET;
+
+	if (inst->state < MSM_CVP_CORE_INIT_DONE ||
+		inst->state > MSM_CVP_OPEN_DONE) {
+		dprintk(CVP_ERR,
+			"%s Incorrect CVP state %d to create session\n",
+			__func__, inst->state);
+		return -EINVAL;
+	}
+
+	rc = msm_cvp_comm_try_state(inst, MSM_CVP_OPEN_DONE);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"Failed to move instance to open done state\n");
+		goto fail_create;
+	}
+
+	rc = cvp_comm_set_arp_buffers(inst);
+	if (rc) {
+		dprintk(CVP_ERR,
+				"Failed to set ARP buffers\n");
+		goto fail_init;
+	}
+
+	inst->core->synx_ftbl->cvp_sess_init_synx(inst);
+	sq = &inst->session_queue;
+	spin_lock(&sq->lock);
+	sq->state = QUEUE_ACTIVE;
+	spin_unlock(&sq->lock);
+	return rc;
+
+fail_init:
+	rc1 = msm_cvp_comm_try_state(inst, MSM_CVP_CLOSE_DONE);
+	if (rc1)
+		dprintk(CVP_ERR, "%s: close failed\n", __func__);
+fail_create:
+	return rc;
+}
+
+static int session_state_check_init(struct msm_cvp_inst *inst)
+{
+	mutex_lock(&inst->lock);
+	if (inst->state == MSM_CVP_OPEN || inst->state == MSM_CVP_OPEN_DONE) {
+		mutex_unlock(&inst->lock);
+		return 0;
+	}
+	mutex_unlock(&inst->lock);
+
+	return msm_cvp_session_create(inst);
+}
+
+static int cvp_fence_thread_start(struct msm_cvp_inst *inst)
+{
+	u32 tnum = 0;
+	u32 i = 0;
+	int rc = 0;
+	char tname[16];
+	struct task_struct *thread;
+	struct cvp_fence_queue *q;
+	struct cvp_session_queue *sq;
+
+	if (!inst->prop.fthread_nr)
+		return 0;
+
+	q = &inst->fence_cmd_queue;
+	mutex_lock(&q->lock);
+	q->state = QUEUE_START;
+	mutex_unlock(&q->lock);
+
+	for (i = 0; i < inst->prop.fthread_nr; ++i) {
+		if (!cvp_get_inst_validate(inst->core, inst)) {
+			rc = -ECONNRESET;
+			goto exit;
+		}
+
+		snprintf(tname, sizeof(tname), "fthread_%d", tnum++);
+		thread = kthread_run(cvp_fence_thread, inst, tname);
+		if (IS_ERR(thread)) {
+			/* kthread_run() returns ERR_PTR(), never NULL */
+			dprintk(CVP_ERR, "%s create %s fail", __func__, tname);
+			cvp_put_inst(inst);
+			rc = -ECHILD;
+			goto exit;
+		}
+	}
+
+	sq = &inst->session_queue_fence;
+	spin_lock(&sq->lock);
+	sq->state = QUEUE_START;
+	spin_unlock(&sq->lock);
+
+exit:
+	if (rc) {
+		mutex_lock(&q->lock);
+		q->state = QUEUE_STOP;
+		mutex_unlock(&q->lock);
+		wake_up_all(&q->wq);
+	}
+	return rc;
+}
+
+static int cvp_fence_thread_stop(struct msm_cvp_inst *inst)
+{
+	struct cvp_fence_queue *q;
+	struct cvp_session_queue *sq;
+
+	if (!inst->prop.fthread_nr)
+		return 0;
+
+	q = &inst->fence_cmd_queue;
+
+	mutex_lock(&q->lock);
+	q->state = QUEUE_STOP;
+	mutex_unlock(&q->lock);
+
+	sq = &inst->session_queue_fence;
+	spin_lock(&sq->lock);
+	sq->state = QUEUE_STOP;
+	spin_unlock(&sq->lock);
+
+	wake_up_all(&q->wq);
+	wake_up_all(&sq->wq);
+
+	return 0;
+}
+
+int msm_cvp_session_start(struct msm_cvp_inst *inst,
+		struct eva_kmd_arg *arg)
+{
+	struct cvp_session_queue *sq;
+	struct cvp_hfi_ops *ops_tbl;
+	int rc;
+	enum queue_state old_state;
+
+	if (!inst || !inst->core) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	sq = &inst->session_queue;
+	spin_lock(&sq->lock);
+	if (sq->msg_count) {
+		dprintk(CVP_ERR, "session start failed, queue not empty %d\n",
+			sq->msg_count);
+		spin_unlock(&sq->lock);
+		rc = -EINVAL;
+		goto exit;
+	}
+	old_state = sq->state;
+	sq->state = QUEUE_START;
+	spin_unlock(&sq->lock);
+
+	ops_tbl = inst->core->dev_ops;
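+	/* FD and DMM sessions add a PM QoS off-vote before starting */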
+	if (inst->prop.type == HFI_SESSION_FD
+		|| inst->prop.type == HFI_SESSION_DMM) {
+		spin_lock(&inst->core->resources.pm_qos.lock);
+		inst->core->resources.pm_qos.off_vote_cnt++;
+		spin_unlock(&inst->core->resources.pm_qos.lock);
+		call_hfi_op(ops_tbl, pm_qos_update, ops_tbl->hfi_device_data);
+	}
+	/*
+	 * cvp_fence_thread_start will increment reference to instance.
+	 * It guarantees the EVA session won't be deleted. Use of session
+	 * functions, such as session_start requires the session to be valid.
+	 */
+	rc = cvp_fence_thread_start(inst);
+	if (rc)
+		goto restore_state;
+
+	/* Send SESSION_START command */
+	rc = call_hfi_op(ops_tbl, session_start, (void *)inst->session);
+	if (rc) {
+		dprintk(CVP_WARN, "%s: session start failed rc %d\n",
+				__func__, rc);
+		goto stop_thread;
+	}
+
+	/* Wait for FW response */
+	rc = wait_for_sess_signal_receipt(inst, HAL_SESSION_START_DONE);
+	if (rc) {
+		dprintk(CVP_WARN, "%s: wait for signal failed, rc %d\n",
+				__func__, rc);
+		goto stop_thread;
+	}
+
+	pr_info_ratelimited(CVP_DBG_TAG "session %llx (%#x) started\n",
+		"sess", inst, hash32_ptr(inst->session));
+
+	return 0;
+
+stop_thread:
+	cvp_fence_thread_stop(inst);
+restore_state:
+	spin_lock(&sq->lock);
+	sq->state = old_state;
+	spin_unlock(&sq->lock);
+exit:
+	return rc;
+}
+
+int msm_cvp_session_stop(struct msm_cvp_inst *inst,
+		struct eva_kmd_arg *arg)
+{
+	struct cvp_session_queue *sq;
+	struct eva_kmd_session_control *sc = NULL;
+	struct msm_cvp_inst *s;
+	struct cvp_hfi_ops *ops_tbl;
+	int rc = 0;
+	int curr_sq_state = -1;
+
+	if (!inst || !inst->core) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	if (arg)
+		sc = &arg->data.session_ctrl;
+
+	s = cvp_get_inst_validate(inst->core, inst);
+	if (!s)
+		return -ECONNRESET;
+
+	sq = &inst->session_queue;
+
+	spin_lock(&sq->lock);
+	curr_sq_state = sq->state;
+	if (sq->state == QUEUE_STOP) {
+		spin_unlock(&sq->lock);
+		dprintk(CVP_WARN,
+				"%s: Double stop session - inst %llx, sess %llx, %s of type %d\n",
+				__func__, inst, inst->session, inst->proc_name, inst->session_type);
+		goto exit;
+	}
+
+	if (sq->state != QUEUE_START) {
+		spin_unlock(&sq->lock);
+		dprintk(CVP_ERR,
+			"%s: Stop not allowed - curr state %d, inst %llx, sess %llx, %s type %d\n",
+			__func__, sq->state, inst, inst->session, inst->proc_name,
+			inst->session_type);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (sq->msg_count) {
+		dprintk(CVP_ERR, "session stop incorrect: queue not empty %d\n",
+			sq->msg_count);
+		if (sc)
+			sc->ctrl_data[0] = sq->msg_count;
+		spin_unlock(&sq->lock);
+		rc = -EUCLEAN;
+		goto exit;
+	}
+	sq->state = QUEUE_STOP;
+
+	pr_info_ratelimited(CVP_DBG_TAG "Stop session: %pK session_id = %#x\n",
+			"sess", inst, hash32_ptr(inst->session));
+	spin_unlock(&sq->lock);
+
+	ops_tbl = inst->core->dev_ops;
+	/* Send SESSION_STOP command */
+	rc = call_hfi_op(ops_tbl, session_stop, (void *)inst->session);
+	if (rc) {
+		dprintk(CVP_WARN, "%s: session stop failed rc %d\n",
+				__func__, rc);
+		spin_lock(&sq->lock);
+		sq->state = curr_sq_state;
+		spin_unlock(&sq->lock);
+		goto stop_thread;
+	}
+
+	/* Wait for FW response */
+	rc = wait_for_sess_signal_receipt(inst, HAL_SESSION_STOP_DONE);
+	if (rc) {
+		dprintk(CVP_WARN, "%s: wait for signal failed, rc %d\n",
+				__func__, rc);
+		spin_lock(&sq->lock);
+		sq->state = curr_sq_state;
+		spin_unlock(&sq->lock);
+		goto stop_thread;
+	}
+
+stop_thread:
+	wake_up_all(&inst->session_queue.wq);
+
+	cvp_fence_thread_stop(inst);
+exit:
+	cvp_put_inst(s);
+	return rc;
+}
+
+int msm_cvp_session_queue_stop(struct msm_cvp_inst *inst)
+{
+	struct cvp_session_queue *sq;
+
+	sq = &inst->session_queue;
+
+	spin_lock(&sq->lock);
+
+	if (sq->state == QUEUE_STOP) {
+		spin_unlock(&sq->lock);
+		return 0;
+	}
+
+	dprintk(CVP_SESS, "Stop session queue: %pK session_id = %#x\n",
+			inst, hash32_ptr(inst->session));
+	spin_unlock(&sq->lock);
+
+	wake_up_all(&inst->session_queue.wq);
+
+	return cvp_fence_thread_stop(inst);
+}
+
+static int msm_cvp_session_ctrl(struct msm_cvp_inst *inst,
+		struct eva_kmd_arg *arg)
+{
+	struct eva_kmd_session_control *ctrl = &arg->data.session_ctrl;
+	int rc = 0;
+	unsigned int ctrl_type;
+
+	ctrl_type = ctrl->ctrl_type;
+
+	if (!inst && ctrl_type != SESSION_CREATE) {
+		dprintk(CVP_ERR, "%s invalid session\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (ctrl_type) {
+	case SESSION_STOP:
+		rc = msm_cvp_session_stop(inst, arg);
+		break;
+	case SESSION_START:
+		rc = msm_cvp_session_start(inst, arg);
+		break;
+	case SESSION_CREATE:
+		rc = msm_cvp_session_create(inst);
+		break;
+	case SESSION_DELETE:
+		rc = msm_cvp_session_delete(inst);
+		break;
+	case SESSION_INFO:
+	default:
+		dprintk(CVP_ERR, "%s Unsupported session ctrl %d\n",
+			__func__, ctrl->ctrl_type);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+static int msm_cvp_get_sysprop(struct msm_cvp_inst *inst,
+		struct eva_kmd_arg *arg)
+{
+	struct eva_kmd_sys_properties *props = &arg->data.sys_properties;
+	struct cvp_hfi_ops *ops_tbl;
+	struct iris_hfi_device *hfi;
+	struct cvp_session_prop *session_prop;
+	int i, rc = 0;
+
+	if (!inst || !inst->core || !inst->core->dev_ops) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	ops_tbl = inst->core->dev_ops;
+	hfi = ops_tbl->hfi_device_data;
+
+	if (props->prop_num > MAX_KMD_PROP_NUM_PER_PACKET) {
+		dprintk(CVP_ERR, "Too many properties %d to get\n",
+			props->prop_num);
+		return -E2BIG;
+	}
+
+	session_prop = &inst->prop;
+
+	for (i = 0; i < props->prop_num; i++) {
+		switch (props->prop_data[i].prop_type) {
+		case EVA_KMD_PROP_HFI_VERSION:
+		{
+			props->prop_data[i].data = hfi->version;
+			break;
+		}
+		case EVA_KMD_PROP_SESSION_DUMPOFFSET:
+		{
+			props->prop_data[i].data =
+				session_prop->dump_offset;
+			break;
+		}
+		case EVA_KMD_PROP_SESSION_DUMPSIZE:
+		{
+			props->prop_data[i].data =
+				session_prop->dump_size;
+			break;
+		}
+		case EVA_KMD_PROP_SESSION_ERROR:
+		{
+			get_dma_buf(hfi->sfr.mem_data.dma_buf);
+			rc = dma_buf_fd(hfi->sfr.mem_data.dma_buf, O_RDONLY | O_CLOEXEC);
+			if (rc < 0) {
+				dprintk(CVP_WARN, "Failed get dma_buf fd %d\n", rc);
+				dma_buf_put(hfi->sfr.mem_data.dma_buf);
+				break;
+			}
+
+			props->prop_data[i].data = rc;
+			rc = 0;
+			break;
+		}
+		case EVA_KMD_PROP_PWR_FDU:
+		{
+			props->prop_data[i].data =
+				msm_cvp_get_hw_aggregate_cycles(HFI_HW_FDU);
+			break;
+		}
+		case EVA_KMD_PROP_PWR_ICA:
+		{
+			props->prop_data[i].data =
+				msm_cvp_get_hw_aggregate_cycles(HFI_HW_ICA);
+			break;
+		}
+		case EVA_KMD_PROP_PWR_OD:
+		{
+			props->prop_data[i].data =
+				msm_cvp_get_hw_aggregate_cycles(HFI_HW_OD);
+			break;
+		}
+		case EVA_KMD_PROP_PWR_MPU:
+		{
+			props->prop_data[i].data =
+				msm_cvp_get_hw_aggregate_cycles(HFI_HW_MPU);
+			break;
+		}
+		case EVA_KMD_PROP_PWR_VADL:
+		{
+			props->prop_data[i].data =
+				msm_cvp_get_hw_aggregate_cycles(HFI_HW_VADL);
+			break;
+		}
+		case EVA_KMD_PROP_PWR_TOF:
+		{
+			props->prop_data[i].data =
+				msm_cvp_get_hw_aggregate_cycles(HFI_HW_TOF);
+			break;
+		}
+		case EVA_KMD_PROP_PWR_RGE:
+		{
+			props->prop_data[i].data =
+				msm_cvp_get_hw_aggregate_cycles(HFI_HW_RGE);
+			break;
+		}
+		case EVA_KMD_PROP_PWR_XRA:
+		{
+			props->prop_data[i].data =
+				msm_cvp_get_hw_aggregate_cycles(HFI_HW_XRA);
+			break;
+		}
+		case EVA_KMD_PROP_PWR_LSR:
+		{
+			props->prop_data[i].data =
+				msm_cvp_get_hw_aggregate_cycles(HFI_HW_LSR);
+			break;
+		}
+		default:
+			dprintk(CVP_ERR, "unrecognized sys property %d\n",
+				props->prop_data[i].prop_type);
+			rc = -EFAULT;
+		}
+	}
+	return rc;
+}
+
+static int msm_cvp_set_sysprop(struct msm_cvp_inst *inst,
+		struct eva_kmd_arg *arg)
+{
+	struct eva_kmd_sys_properties *props = &arg->data.sys_properties;
+	struct eva_kmd_sys_property *prop_array;
+	struct cvp_session_prop *session_prop;
+	int i, rc = 0;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	if (props->prop_num > MAX_KMD_PROP_NUM_PER_PACKET) {
+		dprintk(CVP_ERR, "Too many properties %d to set\n",
+			props->prop_num);
+		return -E2BIG;
+	}
+
+	prop_array = &arg->data.sys_properties.prop_data[0];
+	session_prop = &inst->prop;
+
+	for (i = 0; i < props->prop_num; i++) {
+		switch (prop_array[i].prop_type) {
+		case EVA_KMD_PROP_SESSION_TYPE:
+			session_prop->type = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_SESSION_KERNELMASK:
+			session_prop->kernel_mask = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_SESSION_PRIORITY:
+			session_prop->priority = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_SESSION_SECURITY:
+			session_prop->is_secure = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_SESSION_DSPMASK:
+			session_prop->dsp_mask = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_PWR_FDU:
+			session_prop->cycles[HFI_HW_FDU] = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_PWR_ICA:
+			session_prop->cycles[HFI_HW_ICA] =
+				div_by_1dot5(prop_array[i].data);
+			break;
+		case EVA_KMD_PROP_PWR_OD:
+			session_prop->cycles[HFI_HW_OD] = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_PWR_MPU:
+			session_prop->cycles[HFI_HW_MPU] = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_PWR_VADL:
+			session_prop->cycles[HFI_HW_VADL] = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_PWR_TOF:
+			session_prop->cycles[HFI_HW_TOF] = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_PWR_RGE:
+			session_prop->cycles[HFI_HW_RGE] = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_PWR_XRA:
+			session_prop->cycles[HFI_HW_XRA] = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_PWR_LSR:
+			session_prop->cycles[HFI_HW_LSR] = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_PWR_FW:
+			session_prop->fw_cycles =
+				div_by_1dot5(prop_array[i].data);
+			break;
+		case EVA_KMD_PROP_PWR_DDR:
+			session_prop->ddr_bw = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_PWR_SYSCACHE:
+			session_prop->ddr_cache = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_PWR_FDU_OP:
+			session_prop->op_cycles[HFI_HW_FDU] = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_PWR_ICA_OP:
+			session_prop->op_cycles[HFI_HW_ICA] =
+				div_by_1dot5(prop_array[i].data);
+			break;
+		case EVA_KMD_PROP_PWR_OD_OP:
+			session_prop->op_cycles[HFI_HW_OD] = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_PWR_MPU_OP:
+			session_prop->op_cycles[HFI_HW_MPU] = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_PWR_VADL_OP:
+			session_prop->op_cycles[HFI_HW_VADL] = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_PWR_TOF_OP:
+			session_prop->op_cycles[HFI_HW_TOF] = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_PWR_RGE_OP:
+			session_prop->op_cycles[HFI_HW_RGE] = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_PWR_XRA_OP:
+			session_prop->op_cycles[HFI_HW_XRA] = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_PWR_LSR_OP:
+			session_prop->op_cycles[HFI_HW_LSR] = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_PWR_FW_OP:
+			session_prop->fw_op_cycles =
+				div_by_1dot5(prop_array[i].data);
+			break;
+		case EVA_KMD_PROP_PWR_DDR_OP:
+			session_prop->ddr_op_bw = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_PWR_SYSCACHE_OP:
+			session_prop->ddr_op_cache = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_PWR_FPS_FDU:
+			session_prop->fps[HFI_HW_FDU] = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_PWR_FPS_MPU:
+			session_prop->fps[HFI_HW_MPU] = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_PWR_FPS_OD:
+			session_prop->fps[HFI_HW_OD] = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_PWR_FPS_ICA:
+			session_prop->fps[HFI_HW_ICA] = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_PWR_FPS_VADL:
+			session_prop->fps[HFI_HW_VADL] = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_PWR_FPS_TOF:
+			session_prop->fps[HFI_HW_TOF] = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_PWR_FPS_RGE:
+			session_prop->fps[HFI_HW_RGE] = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_PWR_FPS_XRA:
+			session_prop->fps[HFI_HW_XRA] = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_PWR_FPS_LSR:
+			session_prop->fps[HFI_HW_LSR] = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_SESSION_DUMPOFFSET:
+			session_prop->dump_offset = prop_array[i].data;
+			break;
+		case EVA_KMD_PROP_SESSION_DUMPSIZE:
+			session_prop->dump_size = prop_array[i].data;
+			break;
+		default:
+			dprintk(CVP_ERR,
+				"unrecognized sys property to set %d\n",
+				prop_array[i].prop_type);
+			rc = -EFAULT;
+		}
+	}
+	return rc;
+}
+
+static int cvp_drain_fence_sched_list(struct msm_cvp_inst *inst)
+{
+	unsigned long wait_time;
+	struct cvp_fence_queue *q;
+	struct cvp_fence_command *f;
+	int rc = 0;
+	int count = 0, max_count = 0;
+	u64 ktid;
+
+	q = &inst->fence_cmd_queue;
+
+	if (!q)
+		return -EINVAL;
+
+	if (list_empty(&q->sched_list))
+		return rc;
+
+	mutex_lock(&q->lock);
+	list_for_each_entry(f, &q->sched_list, list) {
+		ktid = f->pkt->client_data.kdata & (FENCE_BIT - 1);
+		dprintk(CVP_SYNX, "%s: frame %llu %llu is in sched_list\n",
+			__func__, ktid, f->frame_id);
+		++count;
+	}
+	mutex_unlock(&q->lock);
+	wait_time = count * 1000;
+	wait_time *= inst->core->resources.msm_cvp_hw_rsp_timeout;
+
+	dprintk(CVP_SYNX, "%s: wait %lu us for %d fence commands\n",
+			__func__, wait_time, count);
+
+	count = 0;
+	max_count = wait_time / 100;
+
+retry:
+	mutex_lock(&q->lock);
+	if (list_empty(&q->sched_list)) {
+		mutex_unlock(&q->lock);
+		return rc;
+	}
+
+	mutex_unlock(&q->lock);
+	usleep_range(100, 200);
+	++count;
+	if (count < max_count) {
+		goto retry;
+	} else {
+		rc = -ETIMEDOUT;
+		dprintk(CVP_ERR, "%s: timed out!\n", __func__);
+	}
+
+	return rc;
+}
+
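+/*
+ * Drain the fence queue: wait_list entries were never sent to firmware,
+ * so cancel their output synx, release their frame mappings and free
+ * them; sched_list entries are still in flight, so only cancel their
+ * input synx and let the response path finish them.
+ */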
+static void cvp_clean_fence_queue(struct msm_cvp_inst *inst, int synx_state)
+{
+	struct cvp_fence_queue *q;
+	struct cvp_fence_command *f, *d;
+	u64 ktid;
+
+	q = &inst->fence_cmd_queue;
+
+	if (!q)
+		return;
+
+	mutex_lock(&q->lock);
+	q->mode = OP_DRAINING;
+
+	if (list_empty(&q->wait_list))
+		goto check_sched;
+
+	list_for_each_entry_safe(f, d, &q->wait_list, list) {
+		ktid = f->pkt->client_data.kdata & (FENCE_BIT - 1);
+
+		dprintk(CVP_SYNX, "%s: (%#x) flush frame %llu %llu wait_list\n",
+			__func__, hash32_ptr(inst->session), ktid, f->frame_id);
+
+		list_del_init(&f->list);
+		msm_cvp_unmap_frame(inst, f->pkt->client_data.kdata);
+		inst->core->synx_ftbl->cvp_cancel_synx(inst, CVP_OUTPUT_SYNX,
+			f, synx_state);
+		inst->core->synx_ftbl->cvp_release_synx(inst, f);
+		cvp_free_fence_data(f);
+	}
+
+check_sched:
+	if (list_empty(&q->sched_list)) {
+		mutex_unlock(&q->lock);
+		return;
+	}
+
+	list_for_each_entry(f, &q->sched_list, list) {
+		ktid = f->pkt->client_data.kdata & (FENCE_BIT - 1);
+
+		dprintk(CVP_SYNX, "%s: (%#x)flush frame %llu %llu sched_list\n",
+			__func__, hash32_ptr(inst->session), ktid, f->frame_id);
+		inst->core->synx_ftbl->cvp_cancel_synx(inst, CVP_INPUT_SYNX,
+			f, synx_state);
+	}
+
+	mutex_unlock(&q->lock);
+}
+
+int cvp_clean_session_queues(struct msm_cvp_inst *inst)
+{
+	struct cvp_fence_queue *q;
+	u32 count = 0, max_retries = 100;
+
+	q = &inst->fence_cmd_queue;
+	mutex_lock(&q->lock);
+	if (q->state == QUEUE_START || q->state == QUEUE_ACTIVE) {
+		mutex_unlock(&q->lock);
+		cvp_clean_fence_queue(inst, SYNX_STATE_SIGNALED_CANCEL);
+	} else {
+		dprintk(CVP_WARN, "Incorrect fence cmd queue state %d\n",
+			q->state);
+		mutex_unlock(&q->lock);
+	}
+
+	cvp_fence_thread_stop(inst);
+
+	/* Waiting for all output synx sent */
+retry:
+	mutex_lock(&q->lock);
+	if (list_empty(&q->sched_list)) {
+		mutex_unlock(&q->lock);
+		return 0;
+	}
+	mutex_unlock(&q->lock);
+	usleep_range(500, 1000);
+	if (++count > max_retries)
+		return -EBUSY;
+
+	goto retry;
+}
+
+static int cvp_flush_all(struct msm_cvp_inst *inst)
+{
+	int rc = 0;
+	struct msm_cvp_inst *s;
+	struct cvp_fence_queue *q;
+	struct cvp_hfi_ops *ops_tbl;
+
+	if (!inst || !inst->core) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	s = cvp_get_inst_validate(inst->core, inst);
+	if (!s)
+		return -ECONNRESET;
+
+	dprintk(CVP_SESS, "session %llx (%#x)flush all starts\n",
+			inst, hash32_ptr(inst->session));
+	q = &inst->fence_cmd_queue;
+	ops_tbl = inst->core->dev_ops;
+
+	cvp_clean_fence_queue(inst, SYNX_STATE_SIGNALED_CANCEL);
+
+	dprintk(CVP_SESS, "%s: (%#x) send flush to fw\n",
+			__func__, hash32_ptr(inst->session));
+
+	/* Send flush to FW */
+	rc = call_hfi_op(ops_tbl, session_flush, (void *)inst->session);
+	if (rc) {
+		dprintk(CVP_WARN, "%s: continue flush without fw. rc %d\n",
+		__func__, rc);
+		goto exit;
+	}
+
+	/* Wait for FW response */
+	rc = wait_for_sess_signal_receipt(inst, HAL_SESSION_FLUSH_DONE);
+	if (rc)
+		dprintk(CVP_WARN, "%s: wait for signal failed, rc %d\n",
+		__func__, rc);
+
+	dprintk(CVP_SESS, "%s: (%#x) received flush from fw\n",
+			__func__, hash32_ptr(inst->session));
+
+exit:
+	rc = cvp_drain_fence_sched_list(inst);
+
+	mutex_lock(&q->lock);
+	q->mode = OP_NORMAL;
+	mutex_unlock(&q->lock);
+
+	cvp_put_inst(s);
+	return rc;
+}
+
+int msm_cvp_handle_syscall(struct msm_cvp_inst *inst, struct eva_kmd_arg *arg)
+{
+	int rc = 0;
+
+	if (!inst || !arg) {
+		dprintk(CVP_ERR, "%s: invalid args\n", __func__);
+		return -EINVAL;
+	}
+	dprintk(CVP_HFI, "%s: arg->type = %x\n", __func__, arg->type);
+
+	if (arg->type != EVA_KMD_SESSION_CONTROL &&
+		arg->type != EVA_KMD_SET_SYS_PROPERTY &&
+		arg->type != EVA_KMD_GET_SYS_PROPERTY) {
+
+		rc = session_state_check_init(inst);
+		if (rc) {
+			dprintk(CVP_ERR,
+				"Incorrect session state %d for command %#x\n",
+				inst->state, arg->type);
+			return rc;
+		}
+	}
+
+	switch (arg->type) {
+	case EVA_KMD_GET_SESSION_INFO:
+	{
+		struct eva_kmd_session_info *session =
+			(struct eva_kmd_session_info *)&arg->data.session;
+
+		rc = msm_cvp_get_session_info(inst, &session->session_id);
+		break;
+	}
+	case EVA_KMD_UPDATE_POWER:
+	{
+		rc = msm_cvp_update_power(inst);
+		break;
+	}
+	case EVA_KMD_REGISTER_BUFFER:
+	{
+		struct eva_kmd_buffer *buf =
+			(struct eva_kmd_buffer *)&arg->data.regbuf;
+
+		rc = msm_cvp_register_buffer(inst, buf);
+		break;
+	}
+	case EVA_KMD_UNREGISTER_BUFFER:
+	{
+		struct eva_kmd_buffer *buf =
+			(struct eva_kmd_buffer *)&arg->data.unregbuf;
+
+		rc = msm_cvp_unregister_buffer(inst, buf);
+		break;
+	}
+	case EVA_KMD_RECEIVE_MSG_PKT:
+	{
+		struct eva_kmd_hfi_packet *out_pkt =
+			(struct eva_kmd_hfi_packet *)&arg->data.hfi_pkt;
+		rc = msm_cvp_session_receive_hfi(inst, out_pkt);
+		break;
+	}
+	case EVA_KMD_SEND_CMD_PKT:
+	{
+		struct eva_kmd_hfi_packet *in_pkt =
+			(struct eva_kmd_hfi_packet *)&arg->data.hfi_pkt;
+
+		rc = msm_cvp_session_process_hfi(inst, in_pkt,
+				arg->buf_offset, arg->buf_num);
+		break;
+	}
+	case EVA_KMD_SEND_FENCE_CMD_PKT:
+	{
+		rc = msm_cvp_session_process_hfi_fence(inst, arg);
+		break;
+	}
+	case EVA_KMD_SESSION_CONTROL:
+		rc = msm_cvp_session_ctrl(inst, arg);
+		break;
+	case EVA_KMD_GET_SYS_PROPERTY:
+		rc = msm_cvp_get_sysprop(inst, arg);
+		break;
+	case EVA_KMD_SET_SYS_PROPERTY:
+		rc = msm_cvp_set_sysprop(inst, arg);
+		break;
+	case EVA_KMD_FLUSH_ALL:
+		rc = cvp_flush_all(inst);
+		break;
+	case EVA_KMD_FLUSH_FRAME:
+		dprintk(CVP_WARN, "EVA_KMD_FLUSH_FRAME IOCTL deprecated\n");
+		rc = 0;
+		break;
+	default:
+		dprintk(CVP_HFI, "%s: unknown arg type %#x\n",
+				__func__, arg->type);
+		rc = -ENOTSUPP;
+		break;
+	}
+
+	return rc;
+}
+
+int msm_cvp_session_deinit(struct msm_cvp_inst *inst)
+{
+	int rc = 0;
+	struct cvp_hal_session *session;
+
+	if (!inst || !inst->core) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+	dprintk(CVP_SESS, "%s: inst %pK (%#x)\n", __func__,
+		inst, hash32_ptr(inst->session));
+
+	session = (struct cvp_hal_session *)inst->session;
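+	/* 0xdeadbeef is the poison value marking an already-freed session */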
+	if (!session || session == (void *)0xdeadbeef)
+		return rc;
+
+	rc = msm_cvp_comm_try_state(inst, MSM_CVP_CLOSE_DONE);
+	if (rc)
+		dprintk(CVP_ERR, "%s: close failed\n", __func__);
+
+	rc = msm_cvp_session_deinit_buffers(inst);
+	return rc;
+}
+
+int msm_cvp_session_init(struct msm_cvp_inst *inst)
+{
+	int rc = 0;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	dprintk(CVP_SESS, "%s: inst %pK (%#x)\n", __func__,
+		inst, hash32_ptr(inst->session));
+
+	/* set default frequency */
+	inst->clk_data.min_freq = 1000;
+	inst->clk_data.ddr_bw = 1000;
+	inst->clk_data.sys_cache_bw = 1000;
+
+	inst->prop.type = 1;
+	inst->prop.kernel_mask = 0xFFFFFFFF;
+	inst->prop.priority = 0;
+	inst->prop.is_secure = 0;
+	inst->prop.dsp_mask = 0;
+	inst->prop.fthread_nr = 3;
+
+	return rc;
+}

+ 49 - 0
qcom/opensource/eva-kernel/msm/eva/msm_cvp.h

@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _MSM_CVP_H_
+#define _MSM_CVP_H_
+
+#include "msm_cvp_internal.h"
+#include "msm_cvp_common.h"
+#include "msm_cvp_clocks.h"
+#include "msm_cvp_debug.h"
+#include "msm_cvp_dsp.h"
+#include "eva_shared_def.h"
+
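+/*
+ * Check that (offset, buf_num) describes a buffer array lying entirely
+ * inside struct eva_kmd_hfi_packet, guarding the intermediate multiply
+ * and add against u32 overflow before the final bounds comparison.
+ */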
+static inline bool is_buf_param_valid(u32 buf_num, u32 offset)
+{
+	int max_buf_num;
+
+	max_buf_num = sizeof(struct eva_kmd_hfi_packet) /
+			sizeof(struct cvp_buf_type);
+
+	if (buf_num > max_buf_num)
+		return false;
+	if ((offset > U32_MAX/sizeof(u32)) ||
+			(offset*sizeof(u32) > U32_MAX - buf_num * sizeof(struct cvp_buf_type)))
+		return false;
+	if ((offset * sizeof(u32) + buf_num * sizeof(struct cvp_buf_type)) >
+			sizeof(struct eva_kmd_hfi_packet))
+		return false;
+
+	return true;
+}
+
+int msm_cvp_handle_syscall(struct msm_cvp_inst *inst, struct eva_kmd_arg *arg);
+int msm_cvp_session_init(struct msm_cvp_inst *inst);
+int msm_cvp_session_deinit(struct msm_cvp_inst *inst);
+int msm_cvp_session_queue_stop(struct msm_cvp_inst *inst);
+int msm_cvp_session_create(struct msm_cvp_inst *inst);
+int msm_cvp_session_delete(struct msm_cvp_inst *inst);
+int msm_cvp_session_start(struct msm_cvp_inst *inst, struct eva_kmd_arg *arg);
+int msm_cvp_session_stop(struct msm_cvp_inst *inst, struct eva_kmd_arg *arg);
+int msm_cvp_get_session_info(struct msm_cvp_inst *inst, u32 *session);
+int msm_cvp_update_power(struct msm_cvp_inst *inst);
+int cvp_clean_session_queues(struct msm_cvp_inst *inst);
+#endif

+ 2480 - 0
qcom/opensource/eva-kernel/msm/eva/msm_cvp_buf.c

@@ -0,0 +1,2480 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/pid.h>
+#include <linux/fdtable.h>
+#include <linux/rcupdate.h>
+#include <linux/fs.h>
+#include <linux/dma-buf.h>
+#include <linux/sched/task.h>
+#include <linux/version.h>
+#include "msm_cvp_common.h"
+#include "cvp_hfi_api.h"
+#include "msm_cvp_debug.h"
+#include "msm_cvp_core.h"
+#include "msm_cvp_dsp.h"
+#include "eva_shared_def.h"
+
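+/* dma_buf_map was renamed to iosys_map in newer kernels; alias per version */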
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0))
+#define eva_buf_map dma_buf_map
+#define _buf_map_set_vaddr dma_buf_map_set_vaddr
+#else
+#define eva_buf_map iosys_map
+#define _buf_map_set_vaddr iosys_map_set_vaddr
+#endif
+
+#define CLEAR_USE_BITMAP(idx, inst) \
+	do { \
+		clear_bit(idx, &inst->dma_cache.usage_bitmap); \
+		dprintk(CVP_MEM, "clear %x bit %d dma_cache bitmap 0x%llx\n", \
+			hash32_ptr(inst->session), smem->bitmap_index, \
+			inst->dma_cache.usage_bitmap); \
+	} while (0)
+
+#define SET_USE_BITMAP(idx, inst) \
+	do { \
+		set_bit(idx, &inst->dma_cache.usage_bitmap); \
+		dprintk(CVP_MEM, "Set %x bit %d dma_cache bitmap 0x%llx\n", \
+			hash32_ptr(inst->session), idx, \
+			inst->dma_cache.usage_bitmap); \
+	} while (0)
+
+struct cvp_oob_pool wncc_buf_pool;
+
+static void _wncc_print_cvpwnccbufs_table(struct msm_cvp_inst* inst);
+static int _wncc_unmap_metadata_bufs(struct eva_kmd_hfi_packet* in_pkt,
+	struct eva_kmd_oob_wncc *wncc_oob,
+	struct eva_kmd_wncc_metadata** wncc_metadata);
+
+void msm_cvp_print_inst_bufs(struct msm_cvp_inst *inst, bool log);
+
+int print_smem(u32 tag, const char *str, struct msm_cvp_inst *inst,
+		struct msm_cvp_smem *smem)
+{
+	int i;
+	char name[PKT_NAME_LEN] = "Unknown";
+
+	if (!(tag & msm_cvp_debug))
+		return 0;
+
+	if (!inst || !smem) {
+		dprintk(CVP_ERR, "Invalid inst 0x%llx or smem 0x%llx\n",
+				inst, smem);
+		return -EINVAL;
+	}
+
+	if (smem->dma_buf) {
+		i = get_pkt_index_from_type(smem->pkt_type);
+		if (i > 0)
+			strlcpy(name, cvp_hfi_defs[i].name, PKT_NAME_LEN);
+
+		if (!atomic_read(&smem->refcount))
+			dprintk(tag,
+				" UNUSED mapping %s: 0x%llx size %d iova %#x idx %d pkt_type %s buf_idx %#x fd %d\n",
+				str, smem->dma_buf,
+				smem->size, smem->device_addr, smem->bitmap_index, name, smem->buf_idx, smem->fd);
+		else
+			dprintk(tag,
+				"%s: %x : 0x%llx size %d flags %#x iova %#x idx %d ref %d pkt_type %s buf_idx %#x fd %d\n",
+				str, hash32_ptr(inst->session), smem->dma_buf,
+				smem->size, smem->flags, smem->device_addr,
+				smem->bitmap_index, atomic_read(&smem->refcount),
+				name, smem->buf_idx, smem->fd);
+	}
+	return 0;
+}
+
+static void print_internal_buffer(u32 tag, const char *str,
+		struct msm_cvp_inst *inst, struct cvp_internal_buf *cbuf)
+{
+	if (!(tag & msm_cvp_debug) || !inst || !cbuf)
+		return;
+
+	if (cbuf->smem->dma_buf) {
+		dprintk(tag,
+		"%s: %x : fd %d off %d 0x%llx %s size %d iova %#x\n",
+		str, hash32_ptr(inst->session), cbuf->fd,
+		cbuf->offset, cbuf->smem->dma_buf, cbuf->smem->dma_buf->name,
+		cbuf->size, cbuf->smem->device_addr);
+	} else {
+		dprintk(tag,
+		"%s: %x : idx %2d fd %d off %d size %d iova %#x\n",
+		str, hash32_ptr(inst->session), cbuf->index, cbuf->fd,
+		cbuf->offset, cbuf->size, cbuf->smem->device_addr);
+	}
+}
+
+void print_cvp_buffer(u32 tag, const char *str, struct msm_cvp_inst *inst,
+		struct cvp_internal_buf *cbuf)
+{
+	if (!inst || !cbuf) {
+		dprintk(CVP_ERR,
+			"%s Invalid params inst %pK, cbuf %pK\n",
+			str, inst, cbuf);
+		return;
+	}
+
+	print_smem(tag, str, inst, cbuf->smem);
+}
+
+static void _log_smem(struct inst_snapshot *snapshot, struct msm_cvp_inst *inst,
+		struct msm_cvp_smem *smem, bool logging)
+{
+	if (print_smem(CVP_ERR, "bufdump", inst, smem))
+		return;
+	if (!logging || !snapshot)
+		return;
+	if (snapshot->smem_index < MAX_ENTRIES) {
+		struct smem_data *s;
+
+		s = &snapshot->smem_log[snapshot->smem_index];
+		snapshot->smem_index++;
+		s->size = smem->size;
+		s->flags = smem->flags;
+		s->device_addr = smem->device_addr;
+		s->bitmap_index = smem->bitmap_index;
+		s->refcount = atomic_read(&smem->refcount);
+		s->pkt_type = smem->pkt_type;
+		s->buf_idx = smem->buf_idx;
+	}
+}
+
+static void _log_buf(struct inst_snapshot *snapshot, enum smem_prop prop,
+		struct msm_cvp_inst *inst, struct cvp_internal_buf *cbuf,
+		bool logging)
+{
+	struct cvp_buf_data *buf = NULL;
+	u32 index;
+
+	print_cvp_buffer(CVP_ERR, "bufdump", inst, cbuf);
+	if (!logging)
+		return;
+	if (snapshot) {
+		if (prop == SMEM_CDSP && snapshot->dsp_index < MAX_ENTRIES) {
+			index = snapshot->dsp_index;
+			buf = &snapshot->dsp_buf_log[index];
+			snapshot->dsp_index++;
+		} else if (prop == SMEM_PERSIST &&
+				snapshot->persist_index < MAX_ENTRIES) {
+			index = snapshot->persist_index;
+			buf = &snapshot->persist_buf_log[index];
+			snapshot->persist_index++;
+		}
+		if (buf) {
+			buf->device_addr = cbuf->smem->device_addr;
+			buf->size = cbuf->size;
+		}
+	}
+}
+
+void print_client_buffer(u32 tag, const char *str,
+		struct msm_cvp_inst *inst, struct eva_kmd_buffer *cbuf)
+{
+	if (!(tag & msm_cvp_debug) || !str || !inst || !cbuf)
+		return;
+
+	dprintk(tag,
+		"%s: %x : idx %2d fd %d off %d size %d type %d flags 0x%x reserved[0] %u\n",
+		str, hash32_ptr(inst->session), cbuf->index, cbuf->fd,
+		cbuf->offset, cbuf->size, cbuf->type, cbuf->flags,
+		cbuf->reserved[0]);
+}
+
+static bool __is_buf_valid(struct msm_cvp_inst *inst,
+		struct eva_kmd_buffer *buf)
+{
+	struct cvp_hal_session *session;
+	struct cvp_internal_buf *cbuf = (struct cvp_internal_buf *)0xdeadbeef;
+	bool found = false;
+
+	if (!inst || !inst->core || !buf) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return false;
+	}
+
+	if (buf->fd < 0) {
+		dprintk(CVP_ERR, "%s: Invalid fd = %d", __func__, buf->fd);
+		return false;
+	}
+
+	if (buf->offset) {
+		dprintk(CVP_ERR,
+			"%s: offset is deprecated, set to 0.\n",
+			__func__);
+		return false;
+	}
+
+	session = (struct cvp_hal_session *)inst->session;
+
+	mutex_lock(&inst->cvpdspbufs.lock);
+	list_for_each_entry(cbuf, &inst->cvpdspbufs.list, list) {
+		if (cbuf->fd == buf->fd) {
+			if (cbuf->size != buf->size) {
+				dprintk(CVP_ERR, "%s: buf size mismatch\n",
+					__func__);
+				mutex_unlock(&inst->cvpdspbufs.lock);
+				return false;
+			}
+			found = true;
+			break;
+		}
+	}
+	mutex_unlock(&inst->cvpdspbufs.lock);
+	if (found) {
+		print_internal_buffer(CVP_ERR, "duplicate", inst, cbuf);
+		return false;
+	}
+
+	return true;
+}
+
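+/*
+ * Like fget(), but resolves the fd in another task's file table (the
+ * EVA client process) under RCU; files whose f_mode matches mask are
+ * rejected, and a file reference is taken on success.
+ */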
+static struct file *msm_cvp_fget(unsigned int fd, struct task_struct *task,
+			fmode_t mask, unsigned int refs)
+{
+	struct files_struct *files = task->files;
+	struct file *file;
+
+	if (!files)
+		return NULL;
+
+	rcu_read_lock();
+loop:
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0))
+	file = fcheck_files(files, fd);
+#else
+	file = files_lookup_fd_rcu(files, fd);
+#endif
+	if (file) {
+		/*
+		 * File object ref couldn't be taken.
+		 * dup2() atomicity guarantee is the reason
+		 * we loop to catch the new file (or NULL pointer).
+		 */
+		if (file->f_mode & mask)
+			file = NULL;
+		else if (!get_file_rcu(file))
+			goto loop;
+	}
+	rcu_read_unlock();
+
+	return file;
+}
+
+static struct dma_buf *cvp_dma_buf_get(struct file *file, int fd,
+			struct task_struct *task)
+{
+	if (file->f_op != gfa_cv.dmabuf_f_op) {
+		dprintk(CVP_WARN, "fd doesn't refer to dma_buf\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	return file->private_data;
+}
+
+int msm_cvp_map_buf_dsp(struct msm_cvp_inst *inst, struct eva_kmd_buffer *buf)
+{
+	int rc = 0;
+	struct cvp_internal_buf *cbuf = NULL;
+	struct msm_cvp_smem *smem = NULL;
+	struct dma_buf *dma_buf = NULL;
+	struct file *file;
+
+	if (!__is_buf_valid(inst, buf))
+		return -EINVAL;
+
+	if (!inst->task)
+		return -EINVAL;
+
+	file = msm_cvp_fget(buf->fd, inst->task, FMODE_PATH, 1);
+	if (file == NULL) {
+		dprintk(CVP_WARN, "%s fail to get file from fd %d %s\n", __func__, buf->fd, inst->proc_name);
+		return -EINVAL;
+	}
+
+	dma_buf = cvp_dma_buf_get(
+			file,
+			buf->fd,
+			inst->task);
+	if (dma_buf == ERR_PTR(-EINVAL)) {
+		dprintk(CVP_ERR, "%s: Invalid fd = %d", __func__, buf->fd);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (dma_buf->size < buf->size) {
+		dprintk(CVP_ERR, "%s DSP client buffer too large %d > %zu\n",
+			__func__, buf->size, dma_buf->size);
+		rc =  -EINVAL;
+		goto exit;
+	}
+
+	dprintk(CVP_MEM, "dma_buf from internal %llx\n", dma_buf);
+
+	cbuf = cvp_kmem_cache_zalloc(&cvp_driver->buf_cache, GFP_KERNEL);
+	if (!cbuf) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	smem = cvp_kmem_cache_zalloc(&cvp_driver->smem_cache, GFP_KERNEL);
+	if (!smem) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	smem->dma_buf = dma_buf;
+	smem->bitmap_index = MAX_DMABUF_NUMS;
+	smem->pkt_type = 0;
+	smem->buf_idx = 0;
+	smem->fd = buf->fd;
+	dprintk(CVP_MEM, "%s: dma_buf = %llx\n", __func__, dma_buf);
+	rc = msm_cvp_map_smem(inst, smem, "map dsp");
+	if (rc) {
+		print_client_buffer(CVP_ERR, "map failed", inst, buf);
+		goto exit;
+	}
+
+	atomic_inc(&smem->refcount);
+	cbuf->smem = smem;
+	cbuf->fd = buf->fd;
+	cbuf->size = buf->size;
+	cbuf->offset = buf->offset;
+	cbuf->ownership = CLIENT;
+	cbuf->index = buf->index;
+
+	buf->reserved[0] = (uint32_t)smem->device_addr;
+
+	mutex_lock(&inst->cvpdspbufs.lock);
+	list_add_tail(&cbuf->list, &inst->cvpdspbufs.list);
+	mutex_unlock(&inst->cvpdspbufs.lock);
+
+	return rc;
+
+exit:
+	fput(file);
+	if (smem) {
+		if (smem->device_addr)
+			msm_cvp_unmap_smem(inst, smem, "unmap dsp");
+		msm_cvp_smem_put_dma_buf(smem->dma_buf);
+		cvp_kmem_cache_free(&cvp_driver->smem_cache, smem);
+	}
+	if (cbuf)
+		cvp_kmem_cache_free(&cvp_driver->buf_cache, cbuf);
+	return rc;
+}
+
+int msm_cvp_unmap_buf_dsp(struct msm_cvp_inst *inst, struct eva_kmd_buffer *buf)
+{
+	int rc = 0;
+	bool found;
+	struct cvp_internal_buf *cbuf = (struct cvp_internal_buf *)0xdeadbeef;
+	struct cvp_hal_session *session;
+
+	if (!inst || !inst->core || !buf) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	session = (struct cvp_hal_session *)inst->session;
+	if (!session) {
+		dprintk(CVP_ERR, "%s: invalid session\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&inst->cvpdspbufs.lock);
+	found = false;
+	list_for_each_entry(cbuf, &inst->cvpdspbufs.list, list) {
+		if (cbuf->fd == buf->fd) {
+			found = true;
+			break;
+		}
+	}
+	if (!found) {
+		mutex_unlock(&inst->cvpdspbufs.lock);
+		print_client_buffer(CVP_ERR, "invalid", inst, buf);
+		return -EINVAL;
+	}
+
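+	/*
+	 * Stash a copy of the smem in the unused_dsp_bufs ring so debug
+	 * dumps can still report recently unmapped DSP buffers.
+	 */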
+	if (cbuf->smem->device_addr) {
+		u64 idx = inst->unused_dsp_bufs.ktid;
+		inst->unused_dsp_bufs.smem[idx] = *(cbuf->smem);
+		inst->unused_dsp_bufs.nr++;
+		inst->unused_dsp_bufs.nr =
+			(inst->unused_dsp_bufs.nr > MAX_FRAME_BUFFER_NUMS)?
+			MAX_FRAME_BUFFER_NUMS : inst->unused_dsp_bufs.nr;
+		inst->unused_dsp_bufs.ktid = ++idx % MAX_FRAME_BUFFER_NUMS;
+
+		msm_cvp_unmap_smem(inst, cbuf->smem, "unmap dsp");
+		msm_cvp_smem_put_dma_buf(cbuf->smem->dma_buf);
+		atomic_dec(&cbuf->smem->refcount);
+	}
+	list_del(&cbuf->list);
+	mutex_unlock(&inst->cvpdspbufs.lock);
+
+	cvp_kmem_cache_free(&cvp_driver->smem_cache, cbuf->smem);
+	cvp_kmem_cache_free(&cvp_driver->buf_cache, cbuf);
+	return rc;
+}
+
+int msm_cvp_map_buf_wncc(struct msm_cvp_inst *inst,
+	struct eva_kmd_buffer *buf)
+{
+	int rc = 0, i;
+	bool found = false;
+	struct cvp_internal_buf* cbuf = (struct cvp_internal_buf *)0xdeadbeef;
+	struct msm_cvp_smem* smem = NULL;
+	struct dma_buf* dma_buf = NULL;
+
+	if (!inst || !inst->core || !buf) {
+		dprintk(CVP_ERR, "%s: invalid params", __func__);
+		return -EINVAL;
+	}
+
+	if (!inst->session) {
+		dprintk(CVP_ERR, "%s: invalid session", __func__);
+		return -EINVAL;
+	}
+
+	if (buf->index) {
+		dprintk(CVP_ERR, "%s: buf index is NOT 0 fd=%d",
+			__func__, buf->fd);
+		return -EINVAL;
+	}
+
+	if (buf->fd < 0) {
+		dprintk(CVP_ERR, "%s: invalid fd = %d", __func__, buf->fd);
+		return -EINVAL;
+	}
+
+	if (buf->offset) {
+		dprintk(CVP_ERR, "%s: offset is not supported, set to 0.",
+			__func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&inst->cvpwnccbufs.lock);
+	list_for_each_entry(cbuf, &inst->cvpwnccbufs.list, list) {
+		if (cbuf->fd == buf->fd) {
+			if (cbuf->size != buf->size) {
+				dprintk(CVP_ERR, "%s: buf size mismatch",
+					__func__);
+				mutex_unlock(&inst->cvpwnccbufs.lock);
+				return -EINVAL;
+			}
+			found = true;
+			break;
+		}
+	}
+	mutex_unlock(&inst->cvpwnccbufs.lock);
+	if (found) {
+		print_internal_buffer(CVP_ERR, "duplicate", inst, cbuf);
+		return -EINVAL;
+	}
+
+	dma_buf = msm_cvp_smem_get_dma_buf(buf->fd);
+	if (!dma_buf) {
+		dprintk(CVP_ERR, "%s: invalid fd = %d", __func__, buf->fd);
+		return -EINVAL;
+	}
+
+	cbuf = cvp_kmem_cache_zalloc(&cvp_driver->buf_cache, GFP_KERNEL);
+	if (!cbuf) {
+		msm_cvp_smem_put_dma_buf(dma_buf);
+		return -ENOMEM;
+	}
+
+	smem = cvp_kmem_cache_zalloc(&cvp_driver->smem_cache, GFP_KERNEL);
+	if (!smem) {
+		cvp_kmem_cache_free(&cvp_driver->buf_cache, cbuf);
+		msm_cvp_smem_put_dma_buf(dma_buf);
+		return -ENOMEM;
+	}
+
+	smem->dma_buf = dma_buf;
+	smem->bitmap_index = MAX_DMABUF_NUMS;
+	smem->pkt_type = 0;
+	smem->buf_idx = 0;
+	smem->fd = buf->fd;
+	dprintk(CVP_MEM, "%s: dma_buf = %llx", __func__, dma_buf);
+	rc = msm_cvp_map_smem(inst, smem, "map wncc");
+	if (rc) {
+		dprintk(CVP_ERR, "%s: map failed", __func__);
+		print_client_buffer(CVP_ERR, __func__, inst, buf);
+		goto exit;
+	}
+
+	cbuf->smem = smem;
+	cbuf->fd = buf->fd;
+	cbuf->size = buf->size;
+	cbuf->offset = buf->offset;
+	cbuf->ownership = CLIENT;
+	cbuf->index = buf->index;
+
+	/* Added for PreSil/RUMI testing */
+#ifdef USE_PRESIL
+	dprintk(CVP_DBG,
+		"wncc buffer is %llx for cam_presil_send_buffer"
+		" with MAP_ADDR_OFFSET %x",
+		(u64)(smem->device_addr) - MAP_ADDR_OFFSET, MAP_ADDR_OFFSET);
+	cam_presil_send_buffer((u64)smem->dma_buf, 0,
+		(u32)cbuf->offset, (u32)cbuf->size,
+		(u64)(smem->device_addr) - MAP_ADDR_OFFSET);
+#endif
+
+	mutex_lock(&inst->cvpwnccbufs.lock);
+	if (inst->cvpwnccbufs_table == NULL) {
+		inst->cvpwnccbufs_table =
+			(struct msm_cvp_wncc_buffer*) kzalloc(
+				sizeof(struct msm_cvp_wncc_buffer) *
+				EVA_KMD_WNCC_MAX_SRC_BUFS,
+				GFP_KERNEL);
+		if (!inst->cvpwnccbufs_table) {
+			rc = -ENOMEM;
+			mutex_unlock(&inst->cvpwnccbufs.lock);
+			goto exit;
+		}
+	}
+
+	for (i = 0; i < EVA_KMD_WNCC_MAX_SRC_BUFS; i++)
+	{
+		if (inst->cvpwnccbufs_table[i].iova == 0)
+		{
+			list_add_tail(&cbuf->list, &inst->cvpwnccbufs.list);
+			inst->cvpwnccbufs_num++;
+			inst->cvpwnccbufs_table[i].fd = buf->fd;
+			inst->cvpwnccbufs_table[i].iova = smem->device_addr;
+			inst->cvpwnccbufs_table[i].size = smem->size;
+
+			/* buf reserved[0] used to store wncc src buf id */
+			buf->reserved[0] = i + EVA_KMD_WNCC_SRC_BUF_ID_OFFSET;
+			/* cbuf ktid used to store wncc src buf id */
+			cbuf->ktid = i + EVA_KMD_WNCC_SRC_BUF_ID_OFFSET;
+
+			dprintk(CVP_MEM, "%s: wncc buf iova: 0x%08X",
+				__func__, inst->cvpwnccbufs_table[i].iova);
+			break;
+		}
+	}
+	if (i == EVA_KMD_WNCC_MAX_SRC_BUFS) {
+		dprintk(CVP_ERR,
+			"%s: wncc buf table full - max (%u) already registered",
+			__func__, EVA_KMD_WNCC_MAX_SRC_BUFS);
+		/* _wncc_print_cvpwnccbufs_table(inst); */
+		mutex_unlock(&inst->cvpwnccbufs.lock);
+		rc = -EDQUOT;
+		goto exit;
+	}
+	mutex_unlock(&inst->cvpwnccbufs.lock);
+
+	return rc;
+
+exit:
+	if (smem->device_addr)
+		msm_cvp_unmap_smem(inst, smem, "unmap wncc");
+	msm_cvp_smem_put_dma_buf(smem->dma_buf);
+	cvp_kmem_cache_free(&cvp_driver->buf_cache, cbuf);
+	cbuf = NULL;
+	cvp_kmem_cache_free(&cvp_driver->smem_cache, smem);
+	smem = NULL;
+	return rc;
+}
+
+int msm_cvp_unmap_buf_wncc(struct msm_cvp_inst *inst,
+	struct eva_kmd_buffer *buf)
+{
+	int rc = 0;
+	bool found;
+	struct cvp_internal_buf *cbuf = (struct cvp_internal_buf *)0xdeadbeef;
+	uint32_t buf_id, buf_idx;
+
+	if (!inst || !inst->core || !buf) {
+		dprintk(CVP_ERR, "%s: invalid params", __func__);
+		return -EINVAL;
+	}
+
+	if (!inst->session) {
+		dprintk(CVP_ERR, "%s: invalid session", __func__);
+		return -EINVAL;
+	}
+
+	if (buf->index) {
+		dprintk(CVP_ERR, "%s: buf index is NOT 0 fd=%d",
+			__func__, buf->fd);
+		return -EINVAL;
+	}
+
+	buf_id = buf->reserved[0];
+	if (buf_id < EVA_KMD_WNCC_SRC_BUF_ID_OFFSET || buf_id >=
+		(EVA_KMD_WNCC_MAX_SRC_BUFS + EVA_KMD_WNCC_SRC_BUF_ID_OFFSET)) {
+		dprintk(CVP_ERR, "%s: invalid buffer id %d",
+			__func__, buf->reserved[0]);
+		return -EINVAL;
+	}
+
+	mutex_lock(&inst->cvpwnccbufs.lock);
+	if (inst->cvpwnccbufs_num == 0) {
+		dprintk(CVP_ERR, "%s: no wncc buffers currently mapped", __func__);
+		mutex_unlock(&inst->cvpwnccbufs.lock);
+		return -EINVAL;
+	}
+
+	buf_idx = buf_id - EVA_KMD_WNCC_SRC_BUF_ID_OFFSET;
+	if (inst->cvpwnccbufs_table[buf_idx].iova == 0) {
+		dprintk(CVP_ERR, "%s: buffer id %d not found",
+			__func__, buf_id);
+		mutex_unlock(&inst->cvpwnccbufs.lock);
+		return -EINVAL;
+	}
+
+	buf->fd = inst->cvpwnccbufs_table[buf_idx].fd;
+	found = false;
+	list_for_each_entry(cbuf, &inst->cvpwnccbufs.list, list) {
+		if (cbuf->fd == buf->fd) {
+			found = true;
+			break;
+		}
+	}
+	if (!found) {
+		dprintk(CVP_ERR, "%s: buffer id %d not found",
+			__func__, buf_id);
+		print_client_buffer(CVP_ERR, __func__, inst, buf);
+		_wncc_print_cvpwnccbufs_table(inst);
+		mutex_unlock(&inst->cvpwnccbufs.lock);
+		return -EINVAL;
+	}
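+	/*
+	 * As with DSP buffers, keep a copy in the unused_wncc_bufs ring
+	 * so post-mortem dumps can show recently unmapped entries.
+	 */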
+	if (cbuf->smem->device_addr) {
+		u64 idx = inst->unused_wncc_bufs.ktid;
+		inst->unused_wncc_bufs.smem[idx] = *(cbuf->smem);
+		inst->unused_wncc_bufs.nr++;
+		inst->unused_wncc_bufs.nr =
+			(inst->unused_wncc_bufs.nr > NUM_WNCC_BUFS)?
+			NUM_WNCC_BUFS : inst->unused_wncc_bufs.nr;
+		inst->unused_wncc_bufs.ktid = ++idx % NUM_WNCC_BUFS;
+	}
+
+	if (cbuf->smem->device_addr) {
+		msm_cvp_unmap_smem(inst, cbuf->smem, "unmap wncc");
+		msm_cvp_smem_put_dma_buf(cbuf->smem->dma_buf);
+	}
+
+	list_del(&cbuf->list);
+	inst->cvpwnccbufs_table[buf_idx].fd = 0;
+	inst->cvpwnccbufs_table[buf_idx].iova = 0;
+	inst->cvpwnccbufs_table[buf_idx].size = 0;
+	inst->cvpwnccbufs_num--;
+	if (inst->cvpwnccbufs_num == 0) {
+		kfree(inst->cvpwnccbufs_table);
+		inst->cvpwnccbufs_table = NULL;
+	}
+	mutex_unlock(&inst->cvpwnccbufs.lock);
+
+	cvp_kmem_cache_free(&cvp_driver->smem_cache, cbuf->smem);
+	cvp_kmem_cache_free(&cvp_driver->buf_cache, cbuf);
+	return rc;
+}
+
+static void _wncc_print_oob(struct eva_kmd_oob_wncc* wncc_oob)
+{
+	u32 i, j;
+
+	if (!wncc_oob) {
+		dprintk(CVP_ERR, "%s: invalid params", __func__);
+		return;
+	}
+
+	dprintk(CVP_DBG, "%s: wncc OOB --", __func__);
+	dprintk(CVP_DBG, "%s: num_layers: %u", __func__, wncc_oob->num_layers);
+	for (i = 0; i < wncc_oob->num_layers; i++) {
+		dprintk(CVP_DBG, "%s:   layers[%u].num_addrs: %u",
+			__func__, i, wncc_oob->layers[i].num_addrs);
+
+		for (j = 0; j < wncc_oob->layers[i].num_addrs; j++) {
+			dprintk(CVP_DBG,
+				"%s:    layers[%u].addrs[%u]: %04u 0x%08x",
+				__func__, i, j,
+				wncc_oob->layers[i].addrs[j].buffer_id,
+				wncc_oob->layers[i].addrs[j].offset);
+		}
+	}
+}
+
+static void _wncc_print_cvpwnccbufs_table(struct msm_cvp_inst* inst)
+{
+	u32 i, entries = 0;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s: invalid params", __func__);
+		return;
+	}
+
+	if (inst->cvpwnccbufs_num == 0) {
+		dprintk(CVP_DBG, "%s: wncc buffer look-up table is empty",
+			__func__);
+		return;
+	}
+
+	if (!inst->cvpwnccbufs_table) {
+		dprintk(CVP_ERR, "%s: invalid params", __func__);
+		return;
+	}
+
+	dprintk(CVP_DBG, "%s: wncc buffer table:", __func__);
+	for (i = 0; i < EVA_KMD_WNCC_MAX_SRC_BUFS &&
+		entries < inst->cvpwnccbufs_num; i++) {
+		if (inst->cvpwnccbufs_table[i].iova != 0) {
+			dprintk(CVP_DBG,
+				"%s: buf_idx=%04d --> "
+				"fd=%03d, iova=0x%08x, size=%d",
+				__func__, i,
+				inst->cvpwnccbufs_table[i].fd,
+				inst->cvpwnccbufs_table[i].iova,
+				inst->cvpwnccbufs_table[i].size);
+			entries++;
+		}
+	}
+}
+
+static void _wncc_print_metadata_buf(u32 num_layers, u32 num_addrs,
+	struct eva_kmd_wncc_metadata** wncc_metadata)
+{
+	u32 i, j, iova;
+
+	if (num_layers < 1 || num_layers > EVA_KMD_WNCC_MAX_LAYERS ||
+		!wncc_metadata) {
+		dprintk(CVP_ERR, "%s: invalid params", __func__);
+		return;
+	}
+
+	dprintk(CVP_DBG, "%s: wncc metadata buffers --", __func__);
+	dprintk(CVP_DBG, "%s: num_layers: %u", __func__, num_layers);
+	dprintk(CVP_DBG, "%s: num_addrs:  %u", __func__, num_addrs);
+	for (i = 0; i < num_layers; i++) {
+		for (j = 0; j < num_addrs; j++) {
+			iova = (wncc_metadata[i][j].iova_msb << 22) |
+				wncc_metadata[i][j].iova_lsb;
+			dprintk(CVP_DBG,
+				"%s:   wncc_metadata[%u][%u]: "
+				"%4u %3u %4u %3u 0x%08x %1u %4d %4d %4d %4d",
+				__func__, i, j,
+				wncc_metadata[i][j].loc_x_dec,
+				wncc_metadata[i][j].loc_x_frac,
+				wncc_metadata[i][j].loc_y_dec,
+				wncc_metadata[i][j].loc_y_frac,
+				iova,
+				wncc_metadata[i][j].scale_idx,
+				wncc_metadata[i][j].aff_coeff_3,
+				wncc_metadata[i][j].aff_coeff_2,
+				wncc_metadata[i][j].aff_coeff_1,
+				wncc_metadata[i][j].aff_coeff_0);
+		}
+	}
+}
+
+static int _wncc_copy_oob_from_user(struct eva_kmd_hfi_packet* in_pkt,
+	struct eva_kmd_oob_wncc* wncc_oob)
+{
+	int rc = 0;
+	u32 oob_type = 0;
+	struct eva_kmd_oob_buf* oob_buf_u;
+	struct eva_kmd_oob_wncc* wncc_oob_u;
+	struct eva_kmd_oob_wncc* wncc_oob_k;
+	unsigned int i;
+	u32 num_addrs;
+
+	if (!in_pkt || !wncc_oob) {
+		dprintk(CVP_ERR, "%s: invalid params", __func__);
+		return -EINVAL;
+	}
+
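+	/*
+	 * Pull the OOB struct in field by field: each user pointer is
+	 * checked with access_ok() before get_user()/copy_from_user(),
+	 * and each size field is range-checked before it sizes the
+	 * next copy.
+	 */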
+	oob_buf_u = in_pkt->oob_buf;
+	if (!access_ok(oob_buf_u, sizeof(*oob_buf_u))) {
+		dprintk(CVP_ERR, "%s: invalid OOB buf pointer", __func__);
+		return -EINVAL;
+	}
+
+	if (!access_ok(&oob_buf_u->oob_type, sizeof(oob_buf_u->oob_type))) {
+		dprintk(CVP_ERR,
+			"%s: bad OOB buf pointer, oob_type inaccessible",
+			__func__);
+		return -EINVAL;
+	}
+	rc = get_user(oob_type, &oob_buf_u->oob_type);
+	if (rc)
+		return rc;
+	if (oob_type != EVA_KMD_OOB_WNCC) {
+		dprintk(CVP_ERR, "%s: incorrect OOB type (%d) for wncc",
+			__func__, oob_type);
+		return -EINVAL;
+	}
+
+	wncc_oob_u = &oob_buf_u->wncc;
+	wncc_oob_k = wncc_oob;
+
+	if (!access_ok(&wncc_oob_u->metadata_bufs_offset,
+		sizeof(wncc_oob_u->metadata_bufs_offset))) {
+		dprintk(CVP_ERR,
+			"%s: bad OOB buf pointer, wncc.metadata_bufs_offset inaccessible",
+			__func__);
+		return -EINVAL;
+	}
+	rc = get_user(wncc_oob_k->metadata_bufs_offset,
+		&wncc_oob_u->metadata_bufs_offset);
+	if (rc)
+		return rc;
+	if (wncc_oob_k->metadata_bufs_offset > ((sizeof(in_pkt->pkt_data)
+		- sizeof(struct cvp_buf_type)) / sizeof(__u32))) {
+		dprintk(CVP_ERR, "%s: invalid wncc metadata bufs offset",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (!access_ok(&wncc_oob_u->num_layers,
+		sizeof(wncc_oob_u->num_layers))) {
+		dprintk(CVP_ERR,
+			"%s: bad OOB buf pointer, wncc.num_layers inaccessible",
+			__func__);
+		return -EINVAL;
+	}
+	rc = get_user(wncc_oob_k->num_layers, &wncc_oob_u->num_layers);
+	if (rc)
+		return rc;
+	if (wncc_oob_k->num_layers < 1 ||
+		wncc_oob_k->num_layers > EVA_KMD_WNCC_MAX_LAYERS) {
+		dprintk(CVP_ERR, "%s: invalid wncc num layers", __func__);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < wncc_oob_k->num_layers; i++) {
+
+		if (!access_ok(&wncc_oob_u->layers[i].num_addrs,
+			sizeof(wncc_oob_u->layers[i].num_addrs))) {
+			dprintk(CVP_ERR,
+				"%s: bad OOB buf pointer, wncc.layers[%u].num_addrs inaccessible",
+				__func__, i);
+			return -EINVAL;
+		}
+		rc = get_user(wncc_oob_k->layers[i].num_addrs,
+			&wncc_oob_u->layers[i].num_addrs);
+		if (rc)
+			break;
+
+		num_addrs = wncc_oob_k->layers[i].num_addrs;
+		if (num_addrs < 1 || num_addrs > EVA_KMD_WNCC_MAX_ADDRESSES) {
+			dprintk(CVP_ERR,
+				"%s: invalid wncc num addrs for layer %u",
+				__func__, i);
+			rc = -EINVAL;
+			break;
+		}
+
+		if (!access_ok(wncc_oob_u->layers[i].addrs,
+				num_addrs * sizeof(struct eva_kmd_wncc_addr)) ||
+			!access_ok(&wncc_oob_u->layers[i].addrs[num_addrs - 1],
+				sizeof(struct eva_kmd_wncc_addr))) {
+			dprintk(CVP_ERR,
+				"%s: bad OOB buf pointer, wncc.layers[%u].addrs inaccessible",
+				__func__, i);
+			return -EINVAL;
+		}
+		rc = copy_from_user(wncc_oob_k->layers[i].addrs,
+			wncc_oob_u->layers[i].addrs,
+			num_addrs * sizeof(struct eva_kmd_wncc_addr));
+		if (rc)
+			break;
+	}
+
+	if (false)
+		_wncc_print_oob(wncc_oob);
+
+	return rc;
+}
+
+static int _wncc_map_metadata_bufs(struct eva_kmd_hfi_packet* in_pkt,
+	struct eva_kmd_oob_wncc *wncc_oob,
+	struct eva_kmd_wncc_metadata** wncc_metadata)
+{
+	int rc = 0, i;
+	struct cvp_buf_type* wncc_metadata_bufs;
+	struct dma_buf* dmabuf;
+	struct eva_buf_map map;
+	__u32 num_layers, metadata_bufs_offset;
+	_buf_map_set_vaddr(&map, (void *)0xdeadbeef);
+
+	if (!in_pkt || !wncc_metadata || !wncc_oob) {
+		dprintk(CVP_ERR, "%s: invalid params", __func__);
+		return -EINVAL;
+	}
+
+	num_layers = wncc_oob->num_layers;
+	metadata_bufs_offset = wncc_oob->metadata_bufs_offset;
+	if (num_layers < 1 || num_layers > EVA_KMD_WNCC_MAX_LAYERS) {
+		dprintk(CVP_ERR, "%s: invalid wncc num layers", __func__);
+		return -EINVAL;
+	}
+	if (metadata_bufs_offset > ((sizeof(in_pkt->pkt_data)
+		- num_layers * sizeof(struct cvp_buf_type)) / sizeof(__u32))) {
+		dprintk(CVP_ERR, "%s: invalid wncc metadata bufs offset",
+			__func__);
+		return -EINVAL;
+	}
+
+	wncc_metadata_bufs = (struct cvp_buf_type*)
+		&in_pkt->pkt_data[metadata_bufs_offset];
+	for (i = 0; i < num_layers; i++) {
+		dmabuf = dma_buf_get(wncc_metadata_bufs[i].fd);
+		if (IS_ERR(dmabuf)) {
+			rc = PTR_ERR(dmabuf);
+			dprintk(CVP_ERR,
+				"%s: dma_buf_get() failed for "
+				"wncc_metadata_bufs[%d], rc %d",
+				__func__, i, rc);
+			break;
+		}
+
+		if (dmabuf->size < wncc_oob->layers[i].num_addrs *
+			sizeof(struct eva_kmd_wncc_metadata)) {
+			dprintk(CVP_ERR,
+				"%s: wncc_metadata_bufs[%d] size insufficient for num addrs in oob",
+				__func__, i);
+			dma_buf_put(dmabuf);
+			rc = -EINVAL;
+			break;
+		}
+
+		rc = dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
+		if (rc) {
+			dprintk(CVP_ERR,
+				"%s: dma_buf_begin_cpu_access() failed "
+				"for wncc_metadata_bufs[%d], rc %d",
+				__func__, i, rc);
+			dma_buf_put(dmabuf);
+			break;
+		}
+
+		rc = dma_buf_vmap(dmabuf, &map);
+		if (rc) {
+			dprintk(CVP_ERR,
+				"%s: dma_buf_vmap() failed for "
+				"wncc_metadata_bufs[%d]",
+				__func__, i);
+			dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
+			dma_buf_put(dmabuf);
+			break;
+		}
+		dprintk(CVP_DBG,
+			"%s: wncc_metadata_bufs[%d] map.is_iomem is %d",
+			__func__, i, map.is_iomem);
+		wncc_metadata[i] = (struct eva_kmd_wncc_metadata*)map.vaddr;
+
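+		/*
+		 * The vmap stays live after this put; the unmap path
+		 * re-acquires the dma_buf by fd before vunmap'ing it.
+		 */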
+		dma_buf_put(dmabuf);
+	}
+
+	if (rc)
+		_wncc_unmap_metadata_bufs(in_pkt, wncc_oob, wncc_metadata);
+
+	return rc;
+}
+
+static int _wncc_unmap_metadata_bufs(struct eva_kmd_hfi_packet* in_pkt,
+	struct eva_kmd_oob_wncc *wncc_oob,
+	struct eva_kmd_wncc_metadata** wncc_metadata)
+{
+	int rc = 0, i;
+	struct cvp_buf_type* wncc_metadata_bufs;
+	struct dma_buf* dmabuf;
+	struct eva_buf_map map;
+	__u32 num_layers, metadata_bufs_offset;
+
+	if (!in_pkt || !wncc_metadata || !wncc_oob) {
+		dprintk(CVP_ERR, "%s: invalid params", __func__);
+		return -EINVAL;
+	}
+
+	num_layers = wncc_oob->num_layers;
+	metadata_bufs_offset = wncc_oob->metadata_bufs_offset;
+	if (num_layers < 1 || num_layers > EVA_KMD_WNCC_MAX_LAYERS) {
+		dprintk(CVP_ERR, "%s: invalid wncc num layers", __func__);
+		return -EINVAL;
+	}
+	if (metadata_bufs_offset > ((sizeof(in_pkt->pkt_data)
+		- num_layers * sizeof(struct cvp_buf_type)) / sizeof(__u32))) {
+		dprintk(CVP_ERR, "%s: invalid wncc metadata bufs offset",
+			__func__);
+		return -EINVAL;
+	}
+
+	wncc_metadata_bufs = (struct cvp_buf_type*)
+		&in_pkt->pkt_data[metadata_bufs_offset];
+	for (i = 0; i < num_layers; i++) {
+		if (!wncc_metadata[i])
+			continue;
+
+		dmabuf = dma_buf_get(wncc_metadata_bufs[i].fd);
+		if (IS_ERR(dmabuf)) {
+			rc = PTR_ERR(dmabuf);
+			dprintk(CVP_ERR,
+				"%s: dma_buf_get() failed for "
+				"wncc_metadata_bufs[%d], rc %d",
+				__func__, i, rc);
+			break;
+		}
+
+		_buf_map_set_vaddr(&map, wncc_metadata[i]);
+		dma_buf_vunmap(dmabuf, &map);
+		wncc_metadata[i] = NULL;
+
+		rc = dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
+		dma_buf_put(dmabuf);
+		if (rc) {
+			dprintk(CVP_ERR,
+				"%s: dma_buf_end_cpu_access() failed "
+				"for wncc_metadata_bufs[%d], rc %d",
+				__func__, i, rc);
+			break;
+		}
+	}
+
+	return rc;
+}
+
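+/*
+ * wncc_buf_pool holds NUM_WNCC_BUFS preallocated OOB structs handed
+ * out via used_bitmap. When the pool is exhausted, alloc_wncc_buf()
+ * falls back to kzalloc and tags the entry with bitmap_idx 0xff so
+ * free_wncc_buf() knows to kfree it instead of clearing a pool bit.
+ */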
+static int init_wncc_bufs(void)
+{
+	int i;
+
+	for (i = 0; i < NUM_WNCC_BUFS; i++) {
+		wncc_buf_pool.bufs[i] = (struct eva_kmd_oob_wncc*)kzalloc(
+				sizeof(struct eva_kmd_oob_wncc), GFP_KERNEL);
+		if (!wncc_buf_pool.bufs[i]) {
+			i--;
+			goto exit_fail;
+		}
+	}
+	wncc_buf_pool.used_bitmap = 0;
+	wncc_buf_pool.allocated = true;
+	return 0;
+
+exit_fail:
+	while (i >= 0) {
+		kfree(wncc_buf_pool.bufs[i]);
+		i--;
+	}
+	return -ENOMEM;
+}
+
+static int alloc_wncc_buf(struct wncc_oob_buf *wob)
+{
+	int rc, i;
+
+	mutex_lock(&wncc_buf_pool.lock);
+	if (!wncc_buf_pool.allocated) {
+		rc = init_wncc_bufs();
+		if (rc) {
+			mutex_unlock(&wncc_buf_pool.lock);
+			return rc;
+		}
+	}
+
+	for (i = 0; i < NUM_WNCC_BUFS; i++) {
+		if (!(wncc_buf_pool.used_bitmap & BIT(i))) {
+			wncc_buf_pool.used_bitmap |= BIT(i);
+			wob->bitmap_idx = i;
+			wob->buf = wncc_buf_pool.bufs[i];
+			mutex_unlock(&wncc_buf_pool.lock);
+			return 0;
+		}
+	}
+	mutex_unlock(&wncc_buf_pool.lock);
+	wob->bitmap_idx = 0xff;
+	wob->buf = (struct eva_kmd_oob_wncc*)kzalloc(
+			sizeof(struct eva_kmd_oob_wncc), GFP_KERNEL);
+	if (!wob->buf)
+		rc = -ENOMEM;
+	else
+		rc = 0;
+
+	return rc;
+}
+
+static void free_wncc_buf(struct wncc_oob_buf *wob)
+{
+	if (!wob)
+		return;
+
+	if (wob->bitmap_idx == 0xff) {
+		kfree(wob->buf);
+		return;
+	}
+
+	if (wob->bitmap_idx < NUM_WNCC_BUFS) {
+		mutex_lock(&wncc_buf_pool.lock);
+		wncc_buf_pool.used_bitmap &= ~BIT(wob->bitmap_idx);
+		memset(wob->buf, 0, sizeof(struct eva_kmd_oob_wncc));
+		wob->buf = NULL;
+		mutex_unlock(&wncc_buf_pool.lock);
+	}
+}
+
+static int msm_cvp_proc_oob_wncc(struct msm_cvp_inst* inst,
+	struct eva_kmd_hfi_packet* in_pkt)
+{
+	int rc = 0;
+	struct eva_kmd_oob_wncc* wncc_oob;
+	struct wncc_oob_buf wob;
+	struct eva_kmd_wncc_metadata* wncc_metadata[EVA_KMD_WNCC_MAX_LAYERS];
+	unsigned int i, j;
+	bool empty = false;
+	u32 buf_id, buf_idx, buf_offset, iova;
+
+	if (!inst || !inst->core || !in_pkt) {
+		dprintk(CVP_ERR, "%s: invalid params", __func__);
+		return -EINVAL;
+	}
+
+	rc = alloc_wncc_buf(&wob);
+	if (rc)
+		return rc;
+
+	wncc_oob = wob.buf;
+	rc = _wncc_copy_oob_from_user(in_pkt, wncc_oob);
+	if (rc) {
+		dprintk(CVP_ERR, "%s: OOB buf copying failed", __func__);
+		goto exit;
+	}
+
+	memset(wncc_metadata, 0,
+		sizeof(*wncc_metadata) * EVA_KMD_WNCC_MAX_LAYERS);
+	rc = _wncc_map_metadata_bufs(in_pkt, wncc_oob, wncc_metadata);
+	if (rc) {
+		dprintk(CVP_ERR, "%s: failed to map wncc metadata bufs",
+			__func__);
+		goto exit;
+	}
+
+	mutex_lock(&inst->cvpwnccbufs.lock);
+	if (inst->cvpwnccbufs_num == 0 || inst->cvpwnccbufs_table == NULL) {
+		dprintk(CVP_ERR, "%s: no wncc bufs currently mapped", __func__);
+		empty = true;
+		rc = -EINVAL;
+	}
+
+	for (i = 0; !empty && i < wncc_oob->num_layers; i++) {
+		for (j = 0; j < wncc_oob->layers[i].num_addrs; j++) {
+			buf_id = wncc_oob->layers[i].addrs[j].buffer_id;
+			if (buf_id < EVA_KMD_WNCC_SRC_BUF_ID_OFFSET ||
+				buf_id >= (EVA_KMD_WNCC_SRC_BUF_ID_OFFSET +
+					EVA_KMD_WNCC_MAX_SRC_BUFS)) {
+				dprintk(CVP_ERR,
+					"%s: invalid wncc buf id %u "
+					"in layer #%u address #%u",
+					__func__, buf_id, i, j);
+				rc = -EINVAL;
+				break;
+			}
+
+			buf_idx = buf_id - EVA_KMD_WNCC_SRC_BUF_ID_OFFSET;
+			if (inst->cvpwnccbufs_table[buf_idx].iova == 0) {
+				dprintk(CVP_ERR,
+					"%s: unmapped wncc buf id %u "
+					"in layer #%u address #%u",
+					__func__, buf_id, i, j);
+				/* _wncc_print_cvpwnccbufs_table(inst); */
+				rc = -EINVAL;
+				break;
+			}
+
+			buf_offset = wncc_oob->layers[i].addrs[j].offset;
+			if (buf_offset >=
+				inst->cvpwnccbufs_table[buf_idx].size) {
+				/* NOTE: This buffer offset validation is
+				 * not comprehensive since wncc src image
+				 * resolution information is not known to
+				 * KMD. UMD is responsible for comprehensive
+				 * validation.
+				 */
+				dprintk(CVP_ERR,
+					"%s: invalid wncc buf offset %u "
+					"in layer #%u address #%u",
+					__func__, buf_offset, i, j);
+				rc = -EINVAL;
+				break;
+			}
+
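+			/*
+			 * Split the 32-bit iova across the packed
+			 * metadata fields; the printer reassembles it
+			 * as (iova_msb << 22) | iova_lsb.
+			 */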
+			iova = inst->cvpwnccbufs_table[buf_idx].iova +
+				buf_offset;
+			wncc_metadata[i][j].iova_lsb = iova;
+			wncc_metadata[i][j].iova_msb = iova >> 22;
+		}
+	}
+	mutex_unlock(&inst->cvpwnccbufs.lock);
+
+	if (false)
+		_wncc_print_metadata_buf(wncc_oob->num_layers,
+			wncc_oob->layers[0].num_addrs, wncc_metadata);
+
+	if (_wncc_unmap_metadata_bufs(in_pkt, wncc_oob, wncc_metadata)) {
+		dprintk(CVP_ERR, "%s: failed to unmap wncc metadata bufs",
+			__func__);
+	}
+
+exit:
+	free_wncc_buf(&wob);
+	return rc;
+}
+
+int msm_cvp_proc_oob(struct msm_cvp_inst* inst,
+	struct eva_kmd_hfi_packet* in_pkt)
+{
+	int rc = 0;
+	struct cvp_hfi_cmd_session_hdr* cmd_hdr =
+		(struct cvp_hfi_cmd_session_hdr*)in_pkt;
+
+	if (!inst || !inst->core || !in_pkt) {
+		dprintk(CVP_ERR, "%s: invalid params", __func__);
+		return -EINVAL;
+	}
+
+	switch (cmd_hdr->packet_type) {
+	case HFI_CMD_SESSION_CVP_WARP_NCC_FRAME:
+		rc = msm_cvp_proc_oob_wncc(inst, in_pkt);
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
+
+void msm_cvp_cache_operations(struct msm_cvp_smem *smem, u32 type,
+				u32 offset, u32 size)
+{
+	enum smem_cache_ops cache_op;
+
+	if (msm_cvp_cacheop_disabled)
+		return;
+
+	if (!smem) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return;
+	}
+
+	switch (type) {
+	case EVA_KMD_BUFTYPE_INPUT:
+		cache_op = SMEM_CACHE_CLEAN;
+		break;
+	case EVA_KMD_BUFTYPE_OUTPUT:
+		cache_op = SMEM_CACHE_INVALIDATE;
+		break;
+	default:
+		cache_op = SMEM_CACHE_CLEAN_INVALIDATE;
+	}
+
+	dprintk(CVP_MEM,
+		"%s: cache operation enabled for dma_buf: %llx, cache_op: %d, offset: %d, size: %d\n",
+		__func__, smem->dma_buf, cache_op, offset, size);
+	msm_cvp_smem_cache_operations(smem->dma_buf, cache_op, offset, size);
+}
+
+static struct msm_cvp_smem *msm_cvp_session_find_smem(struct msm_cvp_inst *inst,
+				struct dma_buf *dma_buf,
+				u32 pkt_type)
+{
+	struct msm_cvp_smem *smem;
+	struct msm_cvp_frame *frame = (struct msm_cvp_frame *)0xdeadbeef;
+	struct cvp_internal_buf *buf = (struct cvp_internal_buf *)0xdeadbeef;
+	int i;
+
+	if (inst->dma_cache.nr > MAX_DMABUF_NUMS)
+		return NULL;
+
+	mutex_lock(&inst->dma_cache.lock);
+	for (i = 0; i < inst->dma_cache.nr; i++)
+		if (inst->dma_cache.entries[i]->dma_buf == dma_buf) {
+			SET_USE_BITMAP(i, inst);
+			smem = inst->dma_cache.entries[i];
+			smem->bitmap_index = i;
+			smem->pkt_type = pkt_type;
+			atomic_inc(&smem->refcount);
+			/*
+			 * If we find it, it means we already increased
+			 * refcount before, so we put it to avoid double
+			 * incremental.
+			 */
+			msm_cvp_smem_put_dma_buf(smem->dma_buf);
+			mutex_unlock(&inst->dma_cache.lock);
+			print_smem(CVP_MEM, "found in cache", inst, smem);
+			return smem;
+		}
+
+	mutex_unlock(&inst->dma_cache.lock);
+
+	/* Search persist list */
+	mutex_lock(&inst->persistbufs.lock);
+	list_for_each_entry(buf, &inst->persistbufs.list, list) {
+		smem = buf->smem;
+		if (smem && smem->dma_buf == dma_buf) {
+			atomic_inc(&smem->refcount);
+			mutex_unlock(&inst->persistbufs.lock);
+			print_smem(CVP_MEM, "found in persist", inst, smem);
+			return smem;
+		}
+	}
+	mutex_unlock(&inst->persistbufs.lock);
+
+	/* Search frame list */
+	mutex_lock(&inst->frames.lock);
+	list_for_each_entry(frame, &inst->frames.list, list) {
+		for (i = 0; i < frame->nr; i++) {
+			smem = frame->bufs[i].smem;
+			if (smem && smem->dma_buf == dma_buf) {
+				atomic_inc(&smem->refcount);
+				mutex_unlock(&inst->frames.lock);
+				print_smem(CVP_MEM, "found in frame",
+					inst, smem);
+				return smem;
+			}
+		}
+	}
+	mutex_unlock(&inst->frames.lock);
+
+	return NULL;
+}
+
+static int msm_cvp_session_add_smem(struct msm_cvp_inst *inst,
+				struct msm_cvp_smem *smem)
+{
+	unsigned int i;
+	struct msm_cvp_smem *smem2;
+
+	mutex_lock(&inst->dma_cache.lock);
+	if (inst->dma_cache.nr < MAX_DMABUF_NUMS) {
+		inst->dma_cache.entries[inst->dma_cache.nr] = smem;
+		SET_USE_BITMAP(inst->dma_cache.nr, inst);
+		smem->bitmap_index = inst->dma_cache.nr;
+		inst->dma_cache.nr++;
+		i = smem->bitmap_index;
+	} else {
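+		/*
+		 * Cache is full: evict the first entry whose use bit is
+		 * clear, releasing its mapping, and reuse the slot.
+		 */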
+		i = find_first_zero_bit(&inst->dma_cache.usage_bitmap,
+				MAX_DMABUF_NUMS);
+		if (i < MAX_DMABUF_NUMS) {
+			smem2 = inst->dma_cache.entries[i];
+			msm_cvp_unmap_smem(inst, smem2, "unmap cpu");
+			msm_cvp_smem_put_dma_buf(smem2->dma_buf);
+			cvp_kmem_cache_free(&cvp_driver->smem_cache, smem2);
+
+			inst->dma_cache.entries[i] = smem;
+			smem->bitmap_index = i;
+			SET_USE_BITMAP(i, inst);
+		} else {
+			dprintk(CVP_WARN,
+				"%s: reached limit, fallback to buf mapping list\n",
+				__func__);
+			atomic_inc(&smem->refcount);
+			mutex_unlock(&inst->dma_cache.lock);
+			return -ENOMEM;
+		}
+	}
+
+	atomic_inc(&smem->refcount);
+	mutex_unlock(&inst->dma_cache.lock);
+	dprintk(CVP_MEM, "Add entry %d into cache\n", i);
+
+	return 0;
+}
+
+static struct msm_cvp_smem *msm_cvp_session_get_smem(struct msm_cvp_inst *inst,
+						struct cvp_buf_type *buf,
+						bool is_persist,
+						u32 pkt_type)
+{
+	int rc = 0, found = 1;
+	struct msm_cvp_smem *smem = NULL;
+	struct dma_buf *dma_buf = NULL;
+
+	if (buf->fd < 0) {
+		dprintk(CVP_ERR, "%s: Invalid fd = %d", __func__, buf->fd);
+		return NULL;
+	}
+
+	dma_buf = msm_cvp_smem_get_dma_buf(buf->fd);
+	if (!dma_buf) {
+		dprintk(CVP_ERR, "%s: Invalid fd = %d", __func__, buf->fd);
+		return NULL;
+	}
+
+	if (is_persist) {
+		smem = cvp_kmem_cache_zalloc(&cvp_driver->smem_cache, GFP_KERNEL);
+		if (!smem) {
+			msm_cvp_smem_put_dma_buf(dma_buf);
+			return NULL;
+		}
+
+		smem->dma_buf = dma_buf;
+		smem->bitmap_index = MAX_DMABUF_NUMS;
+		smem->pkt_type = pkt_type;
+		smem->flags |= SMEM_PERSIST;
+		smem->fd = buf->fd;
+		atomic_inc(&smem->refcount);
+		rc = msm_cvp_map_smem(inst, smem, "map cpu");
+		if (rc)
+			goto exit;
+		if (!IS_CVP_BUF_VALID(buf, smem)) {
+			dprintk(CVP_ERR,
+				"%s: invalid offset %d or size %d persist\n",
+				__func__, buf->offset, buf->size);
+			goto exit2;
+		}
+		return smem;
+	}
+
+	smem = msm_cvp_session_find_smem(inst, dma_buf, pkt_type);
+	if (!smem) {
+		found = 0;
+		smem = cvp_kmem_cache_zalloc(&cvp_driver->smem_cache, GFP_KERNEL);
+		if (!smem) {
+			msm_cvp_smem_put_dma_buf(dma_buf);
+			return NULL;
+		}
+
+		smem->dma_buf = dma_buf;
+		smem->bitmap_index = MAX_DMABUF_NUMS;
+		smem->pkt_type = pkt_type;
+		smem->fd = buf->fd;
+		if (is_params_pkt(pkt_type))
+			smem->flags |= SMEM_PERSIST;
+		rc = msm_cvp_map_smem(inst, smem, "map cpu");
+		if (rc)
+			goto exit;
+		if (!IS_CVP_BUF_VALID(buf, smem)) {
+			dprintk(CVP_ERR,
+				"%s: invalid buf %d %d fd %d dma 0x%llx %s %d type %#x\n",
+				__func__, buf->offset, buf->size, buf->fd,
+				dma_buf, dma_buf->name, dma_buf->size, pkt_type);
+			goto exit2;
+		}
+		rc = msm_cvp_session_add_smem(inst, smem);
+		if (rc && rc != -ENOMEM)
+			goto exit2;
+		return smem;
+	}
+
+	if (!IS_CVP_BUF_VALID(buf, smem)) {
+		dprintk(CVP_ERR, "%s: invalid offset %d or size %d found\n",
+			__func__, buf->offset, buf->size);
+		if (found) {
+			mutex_lock(&inst->dma_cache.lock);
+			atomic_dec(&smem->refcount);
+			mutex_unlock(&inst->dma_cache.lock);
+			return NULL;
+		}
+		goto exit2;
+	}
+
+	return smem;
+
+exit2:
+	msm_cvp_unmap_smem(inst, smem, "unmap cpu");
+exit:
+	msm_cvp_smem_put_dma_buf(dma_buf);
+	cvp_kmem_cache_free(&cvp_driver->smem_cache, smem);
+	smem = NULL;
+	return smem;
+}
+
+static int msm_cvp_unmap_user_persist_buf(struct msm_cvp_inst *inst,
+				struct cvp_buf_type *buf,
+				u32 pkt_type, u32 buf_idx, u32 *iova)
+{
+	struct list_head *ptr;
+	struct list_head *next;
+	struct cvp_internal_buf *pbuf;
+	struct dma_buf *dma_buf;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	dma_buf = msm_cvp_smem_get_dma_buf(buf->fd);
+	if (!dma_buf)
+		return -EINVAL;
+
+	mutex_lock(&inst->persistbufs.lock);
+	list_for_each_safe(ptr, next, &inst->persistbufs.list) {
+		if (!ptr) {
+			mutex_unlock(&inst->persistbufs.lock);
+			return -EINVAL;
+		}
+		pbuf = list_entry(ptr, struct cvp_internal_buf, list);
+		if (dma_buf == pbuf->smem->dma_buf && (pbuf->smem->flags & SMEM_PERSIST)) {
+			*iova = pbuf->smem->device_addr;
+			dprintk(CVP_MEM,
+				"Unmap persist fd %d, dma_buf %#llx iova %#x\n",
+				pbuf->fd, pbuf->smem->dma_buf, *iova);
+			list_del(&pbuf->list);
+			if (*iova) {
+				msm_cvp_unmap_smem(inst, pbuf->smem, "unmap user persist");
+				msm_cvp_smem_put_dma_buf(pbuf->smem->dma_buf);
+				pbuf->smem->device_addr = 0;
+			}
+			cvp_kmem_cache_free(&cvp_driver->smem_cache, pbuf->smem);
+			pbuf->smem = NULL;
+			cvp_kmem_cache_free(&cvp_driver->buf_cache, pbuf);
+			mutex_unlock(&inst->persistbufs.lock);
+			dma_buf_put(dma_buf);
+			return 0;
+		}
+	}
+	mutex_unlock(&inst->persistbufs.lock);
+	dma_buf_put(dma_buf);
+
+	return -EINVAL;
+}
+
+static int msm_cvp_map_user_persist_buf(struct msm_cvp_inst *inst,
+				struct cvp_buf_type *buf,
+				u32 pkt_type, u32 buf_idx, u32 *iova)
+{
+	struct msm_cvp_smem *smem = NULL;
+	struct list_head *ptr;
+	struct list_head *next;
+	struct cvp_internal_buf *pbuf;
+	struct dma_buf *dma_buf;
+	int ret;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	dma_buf = msm_cvp_smem_get_dma_buf(buf->fd);
+	if (!dma_buf)
+		return -EINVAL;
+
+	mutex_lock(&inst->persistbufs.lock);
+	if (!inst->persistbufs.list.next) {
+		mutex_unlock(&inst->persistbufs.lock);
+		return -EINVAL;
+	}
+	list_for_each_safe(ptr, next, &inst->persistbufs.list) {
+		if (!ptr) {
+			mutex_unlock(&inst->persistbufs.lock);
+			return -EINVAL;
+		}
+		pbuf = list_entry(ptr, struct cvp_internal_buf, list);
+		if (dma_buf == pbuf->smem->dma_buf) {
+			pbuf->size =
+				(pbuf->size >= buf->size) ?
+				pbuf->size : buf->size;
+			*iova = pbuf->smem->device_addr + buf->offset;
+			mutex_unlock(&inst->persistbufs.lock);
+			atomic_inc(&pbuf->smem->refcount);
+			dma_buf_put(dma_buf);
+			dprintk(CVP_MEM,
+				"map persist Reuse fd %d, dma_buf %#llx\n",
+				pbuf->fd, pbuf->smem->dma_buf);
+			return 0;
+		}
+	}
+	mutex_unlock(&inst->persistbufs.lock);
+
+	dma_buf_put(dma_buf);
+
+	pbuf = cvp_kmem_cache_zalloc(&cvp_driver->buf_cache, GFP_KERNEL);
+	if (!pbuf) {
+		dprintk(CVP_ERR, "%s failed to allocate kmem obj\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	if (is_params_pkt(pkt_type))
+		smem = msm_cvp_session_get_smem(inst, buf, false, pkt_type);
+	else
+		smem = msm_cvp_session_get_smem(inst, buf, true, pkt_type);
+
+	if (!smem) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	smem->pkt_type = pkt_type;
+	smem->buf_idx = buf_idx;
+	smem->fd = buf->fd;
+	pbuf->smem = smem;
+	pbuf->fd = buf->fd;
+	pbuf->size = buf->size;
+	pbuf->offset = buf->offset;
+	pbuf->ownership = CLIENT;
+
+	mutex_lock(&inst->persistbufs.lock);
+	list_add_tail(&pbuf->list, &inst->persistbufs.list);
+	mutex_unlock(&inst->persistbufs.lock);
+
+	print_internal_buffer(CVP_MEM, "map persist", inst, pbuf);
+
+	*iova = smem->device_addr + buf->offset;
+
+	return 0;
+
+exit:
+	cvp_kmem_cache_free(&cvp_driver->buf_cache, pbuf);
+	return ret;
+}
+
+static u32 msm_cvp_map_frame_buf(struct msm_cvp_inst *inst,
+			struct cvp_buf_type *buf,
+			struct msm_cvp_frame *frame,
+			u32 pkt_type, u32 buf_idx)
+{
+	u32 iova = 0;
+	struct msm_cvp_smem *smem = NULL;
+	u32 nr;
+	u32 type;
+
+	if (!inst || !frame) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return 0;
+	}
+
+	nr = frame->nr;
+	if (nr == MAX_FRAME_BUFFER_NUMS) {
+		dprintk(CVP_ERR, "%s: max frame buffer reached\n", __func__);
+		return 0;
+	}
+
+	smem = msm_cvp_session_get_smem(inst, buf, false, pkt_type);
+	if (!smem)
+		return 0;
+
+	smem->buf_idx = buf_idx;
+
+	frame->bufs[nr].fd = buf->fd;
+	frame->bufs[nr].smem = smem;
+	frame->bufs[nr].size = buf->size;
+	frame->bufs[nr].offset = buf->offset;
+
+	print_internal_buffer(CVP_MEM, "map cpu", inst, &frame->bufs[nr]);
+
+	frame->nr++;
+
+	type = EVA_KMD_BUFTYPE_INPUT | EVA_KMD_BUFTYPE_OUTPUT;
+	msm_cvp_cache_operations(smem, type, buf->offset, buf->size);
+
+	iova = smem->device_addr + buf->offset;
+
+	return iova;
+}
+
+static void msm_cvp_unmap_frame_buf(struct msm_cvp_inst *inst,
+			struct msm_cvp_frame *frame)
+{
+	u32 i;
+	u32 type;
+	struct msm_cvp_smem *smem = NULL;
+	struct cvp_internal_buf *buf;
+
+	type = EVA_KMD_BUFTYPE_OUTPUT;
+
+	for (i = 0; i < frame->nr; ++i) {
+		buf = &frame->bufs[i];
+		smem = buf->smem;
+		msm_cvp_cache_operations(smem, type, buf->offset, buf->size);
+
+		if (smem->bitmap_index >= MAX_DMABUF_NUMS) {
+			/* smem not in dmamap cache */
+			if (atomic_dec_and_test(&smem->refcount)) {
+				msm_cvp_unmap_smem(inst, smem, "unmap cpu");
+				dma_heap_buffer_free(smem->dma_buf);
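+				/*
+				 * Poison buf_idx so a stale reference to
+				 * this freed smem stands out in dumps.
+				 */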
+				smem->buf_idx |= 0xdead0000;
+				cvp_kmem_cache_free(&cvp_driver->smem_cache, smem);
+				buf->smem = NULL;
+			}
+		} else {
+			mutex_lock(&inst->dma_cache.lock);
+			if (atomic_dec_and_test(&smem->refcount)) {
+				CLEAR_USE_BITMAP(smem->bitmap_index, inst);
+				print_smem(CVP_MEM, "Map dereference",
+					inst, smem);
+				smem->buf_idx |= 0x10000000;
+			}
+			mutex_unlock(&inst->dma_cache.lock);
+		}
+	}
+
+	cvp_kmem_cache_free(&cvp_driver->frame_cache, frame);
+}
+
+static void backup_frame_buffers(struct msm_cvp_inst *inst,
+			struct msm_cvp_frame *frame)
+{
+	/* Save frame buffers before unmap them */
+	int i = frame->nr;
+
+	if (i == 0 || i > MAX_FRAME_BUFFER_NUMS)
+		return;
+
+	inst->last_frame.ktid = frame->ktid;
+	inst->last_frame.nr = frame->nr;
+
+	do {
+		i--;
+		if (frame->bufs[i].smem->bitmap_index < MAX_DMABUF_NUMS) {
+			/*
+			 * Frame buffer info can be found in dma_cache table,
+			 * Skip saving
+			 */
+			inst->last_frame.nr = 0;
+			return;
+		}
+
+		inst->last_frame.smem[i] = *(frame->bufs[i].smem);
+	} while (i);
+}
+
+void msm_cvp_unmap_frame(struct msm_cvp_inst *inst, u64 ktid)
+{
+	struct msm_cvp_frame *frame = (struct msm_cvp_frame *)0xdeadbeef, *dummy1;
+	bool found;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return;
+	}
+
+	ktid &= (FENCE_BIT - 1);
+	dprintk(CVP_MEM, "%s: (%#x) unmap frame %llu\n",
+			__func__, hash32_ptr(inst->session), ktid);
+
+	found = false;
+	mutex_lock(&inst->frames.lock);
+	list_for_each_entry_safe(frame, dummy1, &inst->frames.list, list) {
+		if (frame->ktid == ktid) {
+			found = true;
+			list_del(&frame->list);
+			dprintk(CVP_CMD, "%s: "
+				"pkt_type %08x sess_id %08x trans_id <> ktid %llu\n",
+				__func__, frame->pkt_type,
+				hash32_ptr(inst->session),
+				frame->ktid);
+			/* Save the previous frame mappings for debug */
+			backup_frame_buffers(inst, frame);
+			msm_cvp_unmap_frame_buf(inst, frame);
+			break;
+		}
+	}
+	mutex_unlock(&inst->frames.lock);
+
+	if (!found)
+		dprintk(CVP_WARN, "%s frame %llu not found!\n", __func__, ktid);
+}
+
+/*
+ * Unmap persistent buffers before sending RELEASE_PERSIST_BUFFERS to FW.
+ * This packet is sent after SESSION_STOP. The assumption is FW/HW will
+ * NOT access any of the 3 persist buffers.
+ */
+int msm_cvp_unmap_user_persist(struct msm_cvp_inst *inst,
+			struct eva_kmd_hfi_packet *in_pkt,
+			unsigned int offset, unsigned int buf_num)
+{
+	struct cvp_buf_type *buf;
+	struct cvp_hfi_cmd_session_hdr *cmd_hdr;
+	int i, ret;
+	u32 iova;
+
+	if (!offset || !buf_num)
+		return 0;
+
+	if (offset < (sizeof(struct cvp_hfi_cmd_session_hdr)/sizeof(u32))) {
+		dprintk(CVP_ERR, "%s: Incorrect offset in cmd %d\n", __func__, offset);
+		return -EINVAL;
+	}
+	cmd_hdr = (struct cvp_hfi_cmd_session_hdr *)in_pkt;
+	for (i = 0; i < buf_num; i++) {
+		buf = (struct cvp_buf_type *)&in_pkt->pkt_data[offset];
+		offset += sizeof(*buf) >> 2;
+
+		if (buf->fd < 0 || !buf->size)
+			continue;
+
+		ret = msm_cvp_unmap_user_persist_buf(inst, buf,
+				cmd_hdr->packet_type, i, &iova);
+		if (ret) {
+			dprintk(CVP_ERR,
+				"%s: buf %d unmap failed.\n",
+				__func__, i);
+
+			return ret;
+		}
+		buf->fd = iova;
+	}
+	return 0;
+}
+
+int msm_cvp_map_user_persist(struct msm_cvp_inst *inst,
+			struct eva_kmd_hfi_packet *in_pkt,
+			unsigned int offset, unsigned int buf_num)
+{
+	struct cvp_buf_type *buf;
+	struct cvp_hfi_cmd_session_hdr *cmd_hdr;
+	int i, ret;
+	u32 iova;
+
+	if (!offset || !buf_num)
+		return 0;
+
+	if (offset < (sizeof(struct cvp_hfi_cmd_session_hdr)/sizeof(u32))) {
+		dprintk(CVP_ERR, "%s: Incorrect offset in cmd %d\n", __func__, offset);
+		return -EINVAL;
+	}
+	cmd_hdr = (struct cvp_hfi_cmd_session_hdr *)in_pkt;
+	for (i = 0; i < buf_num; i++) {
+		buf = (struct cvp_buf_type *)&in_pkt->pkt_data[offset];
+		offset += sizeof(*buf) >> 2;
+
+		if (buf->fd < 0 || !buf->size)
+			continue;
+
+		ret = msm_cvp_map_user_persist_buf(inst, buf,
+				cmd_hdr->packet_type, i, &iova);
+		if (ret) {
+			dprintk(CVP_ERR,
+				"%s: buf %d map failed.\n",
+				__func__, i);
+
+			return ret;
+		}
+		buf->fd = iova;
+	}
+	return 0;
+}
+
+int msm_cvp_map_frame(struct msm_cvp_inst *inst,
+		struct eva_kmd_hfi_packet *in_pkt,
+		unsigned int offset, unsigned int buf_num)
+{
+	struct cvp_buf_type *buf;
+	int i;
+	u32 iova;
+	u64 ktid;
+	struct msm_cvp_frame *frame;
+	struct cvp_hfi_cmd_session_hdr *cmd_hdr;
+	struct msm_cvp_inst *instance = (struct msm_cvp_inst *)0xdeadbeef;
+	struct msm_cvp_core *core = NULL;
+
+	core = cvp_driver->cvp_core;
+	if (!core)
+		return -EINVAL;
+
+	if (!offset || !buf_num)
+		return 0;
+
+	if (offset < (sizeof(struct cvp_hfi_cmd_session_hdr)/sizeof(u32))) {
+		dprintk(CVP_ERR, "%s: Incorrect offset in cmd %d\n", __func__, offset);
+		return -EINVAL;
+	}
+	cmd_hdr = (struct cvp_hfi_cmd_session_hdr *)in_pkt;
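+	/*
+	 * Assign a per-frame kernel transaction id (masked below
+	 * FENCE_BIT) and stash it in the packet's client data so the
+	 * response path can locate this frame in msm_cvp_unmap_frame().
+	 */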
+	ktid = atomic64_inc_return(&inst->core->kernel_trans_id);
+	ktid &= (FENCE_BIT - 1);
+	cmd_hdr->client_data.kdata = ktid;
+
+	dprintk(CVP_CMD, "%s:   "
+		"pkt_type %08x sess_id %08x trans_id %u ktid %llu\n",
+		__func__, cmd_hdr->packet_type,
+		cmd_hdr->session_id,
+		cmd_hdr->client_data.transaction_id,
+		cmd_hdr->client_data.kdata & (FENCE_BIT - 1));
+
+	frame = cvp_kmem_cache_zalloc(&cvp_driver->frame_cache, GFP_KERNEL);
+	if (!frame)
+		return -ENOMEM;
+
+	frame->ktid = ktid;
+	frame->nr = 0;
+	frame->pkt_type = cmd_hdr->packet_type;
+
+	for (i = 0; i < buf_num; i++) {
+		buf = (struct cvp_buf_type *)&in_pkt->pkt_data[offset];
+		offset += sizeof(*buf) >> 2;
+
+		if (buf->fd < 0 || !buf->size) {
+			buf->fd = 0;
+			buf->size = 0;
+			continue;
+		}
+
+		iova = msm_cvp_map_frame_buf(inst, buf, frame, cmd_hdr->packet_type, i);
+		if (!iova) {
+			dprintk(CVP_ERR,
+				"%s: buf %d register failed.\n",
+				__func__, i);
+			dprintk(CVP_ERR, "smem_leak_count %d\n", core->smem_leak_count);
+			mutex_lock(&core->lock);
+			list_for_each_entry(instance, &core->instances, list) {
+				msm_cvp_print_inst_bufs(instance, false);
+			}
+			mutex_unlock(&core->lock);
+			msm_cvp_unmap_frame_buf(inst, frame);
+			return -EINVAL;
+		}
+		buf->fd = iova;
+	}
+
+	mutex_lock(&inst->frames.lock);
+	list_add_tail(&frame->list, &inst->frames.list);
+	mutex_unlock(&inst->frames.lock);
+	dprintk(CVP_MEM, "%s: map frame %llu\n", __func__, ktid);
+
+	return 0;
+}
+
+int msm_cvp_session_deinit_buffers(struct msm_cvp_inst *inst)
+{
+	int rc = 0, i;
+	struct cvp_internal_buf *cbuf, *dummy;
+	struct msm_cvp_frame *frame = (struct msm_cvp_frame *)0xdeadbeef, *dummy1;
+	struct msm_cvp_smem *smem;
+	struct cvp_hal_session *session;
+	struct eva_kmd_buffer buf;
+	struct list_head *ptr = (struct list_head *)0xdead;
+	struct list_head *next = (struct list_head *)0xdead;
+
+	session = (struct cvp_hal_session *)inst->session;
+
+	mutex_lock(&inst->frames.lock);
+	list_for_each_entry_safe(frame, dummy1, &inst->frames.list, list) {
+		list_del(&frame->list);
+		msm_cvp_unmap_frame_buf(inst, frame);
+	}
+	mutex_unlock(&inst->frames.lock);
+
+	mutex_lock(&inst->persistbufs.lock);
+	list_for_each_safe(ptr, next, &inst->persistbufs.list) {
+		if (!ptr) {
+			mutex_unlock(&inst->persistbufs.lock);
+			return -EINVAL;
+		}
+		cbuf = list_entry(ptr, struct cvp_internal_buf, list);
+		smem = cbuf->smem;
+		if (!smem) {
+			dprintk(CVP_ERR, "%s invalid persist smem\n", __func__);
+			mutex_unlock(&inst->persistbufs.lock);
+			return -EINVAL;
+		}
+		if (cbuf->ownership != DRIVER) {
+			dprintk(CVP_MEM,
+				"%s: %x : fd %d %pK size %d\n",
+				"free user persistent", hash32_ptr(inst->session),
+				cbuf->fd, smem->dma_buf, cbuf->size);
+			list_del(&cbuf->list);
+			if (smem->bitmap_index >= MAX_DMABUF_NUMS) {
+				/*
+				 * don't care refcount, has to remove mapping
+				 * this is user persistent buffer
+				 */
+				if (smem->device_addr) {
+					msm_cvp_unmap_smem(inst, smem,
+						"unmap persist");
+					msm_cvp_smem_put_dma_buf(
+						cbuf->smem->dma_buf);
+					smem->device_addr = 0;
+				}
+				cvp_kmem_cache_free(&cvp_driver->smem_cache, smem);
+				cbuf->smem = NULL;
+				cvp_kmem_cache_free(&cvp_driver->buf_cache, cbuf);
+			} else {
+				/*
+				 * DMM_PARAMS and WAP_NCC_PARAMS cases
+				 * Leave dma_cache cleanup to unmap
+				 */
+				cbuf->smem = NULL;
+				cvp_kmem_cache_free(&cvp_driver->buf_cache, cbuf);
+			}
+		}
+	}
+	mutex_unlock(&inst->persistbufs.lock);
+
+	mutex_lock(&inst->dma_cache.lock);
+	for (i = 0; i < inst->dma_cache.nr; i++) {
+		smem = inst->dma_cache.entries[i];
+		if (atomic_read(&smem->refcount) == 0) {
+			print_smem(CVP_MEM, "free", inst, smem);
+		} else if (!(smem->flags & SMEM_PERSIST)) {
+			print_smem(CVP_WARN, "in use", inst, smem);
+		}
+		msm_cvp_unmap_smem(inst, smem, "unmap cpu");
+		msm_cvp_smem_put_dma_buf(smem->dma_buf);
+		cvp_kmem_cache_free(&cvp_driver->smem_cache, smem);
+		inst->dma_cache.entries[i] = NULL;
+	}
+	mutex_unlock(&inst->dma_cache.lock);
+
+	cbuf = (struct cvp_internal_buf *)0xdeadbeef;
+	mutex_lock(&inst->cvpdspbufs.lock);
+	list_for_each_entry_safe(cbuf, dummy, &inst->cvpdspbufs.list, list) {
+		print_internal_buffer(CVP_MEM, "remove dspbufs", inst, cbuf);
+		if (cbuf->ownership == CLIENT) {
+			msm_cvp_unmap_smem(inst, cbuf->smem, "unmap dsp");
+			msm_cvp_smem_put_dma_buf(cbuf->smem->dma_buf);
+		} else if (cbuf->ownership == DSP) {
+			rc = cvp_dsp_fastrpc_unmap(inst->dsp_handle, cbuf);
+			if (rc)
+				dprintk(CVP_ERR,
+				"%s: failed to unmap buf from DSP\n",
+				__func__);
+
+			rc = cvp_release_dsp_buffers(inst, cbuf);
+			if (rc)
+				dprintk(CVP_ERR,
+					"%s Fail to free buffer 0x%x\n",
+					__func__, rc);
+		}
+		list_del(&cbuf->list);
+		cvp_kmem_cache_free(&cvp_driver->buf_cache, cbuf);
+	}
+	mutex_unlock(&inst->cvpdspbufs.lock);
+
+	mutex_lock(&inst->cvpwnccbufs.lock);
+	if (inst->cvpwnccbufs_num != 0)
+		dprintk(CVP_WARN, "%s: cvpwnccbufs not empty, contains %d bufs",
+			__func__, inst->cvpwnccbufs_num);
+	list_for_each_entry_safe(cbuf, dummy, &inst->cvpwnccbufs.list, list) {
+		print_internal_buffer(CVP_MEM, "remove wnccbufs", inst, cbuf);
+		buf.fd = cbuf->fd;
+		buf.reserved[0] = cbuf->ktid;
+
+		mutex_unlock(&inst->cvpwnccbufs.lock);
+		msm_cvp_unmap_buf_wncc(inst, &buf);
+		mutex_lock(&inst->cvpwnccbufs.lock);
+	}
+	mutex_unlock(&inst->cvpwnccbufs.lock);
+
+	return rc;
+}
+
+void msm_cvp_populate_dsp_buf_info(struct cvp_internal_buf *buf,
+								struct cvp_hal_session *session,
+								u32 session_id,
+								struct msm_cvp_core *core)
+{
+	struct cvp_hfi_ops *dev_ops = (struct cvp_hfi_ops *) core->dev_ops;
+	struct iris_hfi_device *cvp_device = (struct iris_hfi_device *) dev_ops->hfi_device_data;
+	struct cvp_iface_q_info dsp_debugQ_info = cvp_device->dsp_iface_queues[DEBUG_Q];
+	struct cvp_dsp_trace_buf *trace_buf;
+	struct cvp_dsp_trace *dsp_debug_trace;
+
+	dsp_debug_trace = (struct cvp_dsp_trace *) dsp_debugQ_info.q_array.align_virtual_addr;
+
+	if (!dsp_debug_trace) {
+		dprintk(CVP_ERR, "dsp trace is NULL\n");
+		return;
+	}
+	for (int session_idx = 0; session_idx < EVA_TRACE_MAX_SESSION_NUM; session_idx++) {
+		if (dsp_debug_trace->sessions[session_idx].session_id == session_id) {
+			u32 buf_cnt = dsp_debug_trace->sessions[session_idx].buf_cnt;
+
+			for (int buf_idx = 0; buf_idx < buf_cnt; buf_idx++) {
+				trace_buf = &dsp_debug_trace->sessions[session_idx].buf[buf_idx];
+				if (buf->smem->device_addr == trace_buf->iova) {
+					buf->smem->buf_idx = trace_buf->buf_idx;
+					buf->smem->pkt_type = trace_buf->pkt_type;
+					buf->smem->fd = trace_buf->fd;
+					return;
+				}
+			}
+		}
+	}
+}
+
+#define MAX_NUM_FRAMES_DUMP 4
+void msm_cvp_print_inst_bufs(struct msm_cvp_inst *inst, bool log)
+{
+	struct cvp_internal_buf *buf = (struct cvp_internal_buf *)0xdeadbeef;
+	struct msm_cvp_frame *frame = (struct msm_cvp_frame *)0xdeadbeef;
+	struct msm_cvp_core *core;
+	struct inst_snapshot *snap = NULL;
+	int i = 0, c = 0;
+
+	// DSP trace related variables
+	struct cvp_hal_session *session;
+	u32 session_id;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s - invalid param %pK\n",
+			__func__, inst);
+		return;
+	}
+
+	session = (struct cvp_hal_session *)inst->session;
+	session_id = hash32_ptr(session);
+
+	core = cvp_driver->cvp_core;
+	if (!core)
+		return;
+
+	if (log && core->log.snapshot_index < 16) {
+		snap = &core->log.snapshot[core->log.snapshot_index];
+		snap->session = inst->session;
+		core->log.snapshot_index++;
+	}
+
+	dprintk(CVP_ERR,
+			"---Buffer details for inst: %pK %s of type: %d---\n",
+			inst, inst->proc_name, inst->session_type);
+
+	dprintk(CVP_ERR, "dma_cache entries %d\n", inst->dma_cache.nr);
+	mutex_lock(&inst->dma_cache.lock);
+	if (inst->dma_cache.nr <= MAX_DMABUF_NUMS)
+		for (i = 0; i < inst->dma_cache.nr; i++)
+			_log_smem(snap, inst, inst->dma_cache.entries[i], log);
+	mutex_unlock(&inst->dma_cache.lock);
+
+	i = 0;
+	dprintk(CVP_ERR, "frame buffer list\n");
+	mutex_lock(&inst->frames.lock);
+	list_for_each_entry(frame, &inst->frames.list, list) {
+		i++;
+		if (i <= MAX_NUM_FRAMES_DUMP) {
+			dprintk(CVP_ERR, "frame no %d tid %llx bufs\n",
+					i, frame->ktid);
+			for (c = 0; c < frame->nr; c++)
+				_log_smem(snap, inst, frame->bufs[c].smem,
+						log);
+		}
+	}
+	if (i > MAX_NUM_FRAMES_DUMP)
+		dprintk(CVP_ERR, "Skipped %d frames' buffers\n",
+				(i - MAX_NUM_FRAMES_DUMP));
+	mutex_unlock(&inst->frames.lock);
+
+	mutex_lock(&inst->cvpdspbufs.lock);
+
+	dprintk(CVP_ERR, "dsp buffer list:\n");
+	list_for_each_entry(buf, &inst->cvpdspbufs.list, list) {
+		// Populate DSP buffer info from debug queue to kernel instance
+		msm_cvp_populate_dsp_buf_info(buf, session, session_id, core);
+		// Log print buffer info
+		_log_buf(snap, SMEM_CDSP, inst, buf, log);
+	}
+	mutex_unlock(&inst->cvpdspbufs.lock);
+
+	mutex_lock(&inst->cvpwnccbufs.lock);
+	dprintk(CVP_ERR, "wncc buffer list:\n");
+	list_for_each_entry(buf, &inst->cvpwnccbufs.list, list)
+		print_cvp_buffer(CVP_ERR, "bufdump", inst, buf);
+	mutex_unlock(&inst->cvpwnccbufs.lock);
+
+	mutex_lock(&inst->persistbufs.lock);
+	dprintk(CVP_ERR, "persist buffer list:\n");
+	list_for_each_entry(buf, &inst->persistbufs.list, list)
+		_log_buf(snap, SMEM_PERSIST, inst, buf, log);
+	mutex_unlock(&inst->persistbufs.lock);
+
+	dprintk(CVP_ERR, "last frame ktid %llx\n", inst->last_frame.ktid);
+	for (i = 0; i < inst->last_frame.nr; i++)
+		_log_smem(snap, inst, &inst->last_frame.smem[i], log);
+
+	dprintk(CVP_ERR, "unmapped wncc bufs\n");
+	for (i = 0; i < inst->unused_wncc_bufs.nr; i++)
+		_log_smem(snap, inst, &inst->unused_wncc_bufs.smem[i], log);
+
+	dprintk(CVP_ERR, "unmapped dsp bufs\n");
+	for (i = 0; i < inst->unused_dsp_bufs.nr; i++)
+		_log_smem(snap, inst, &inst->unused_dsp_bufs.smem[i], log);
+}
+
+struct cvp_internal_buf *cvp_allocate_arp_bufs(struct msm_cvp_inst *inst,
+			u32 buffer_size)
+{
+	struct cvp_internal_buf *buf;
+	struct msm_cvp_list *buf_list;
+	u32 smem_flags = SMEM_UNCACHED;
+	int rc = 0;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s Invalid input\n", __func__);
+		return NULL;
+	}
+
+	buf_list = &inst->persistbufs;
+
+	if (!buffer_size)
+		return NULL;
+
+	/* If PERSIST buffer requires secure mapping, uncomment
+	 * below flags setting
+	 * smem_flags |= SMEM_SECURE | SMEM_NON_PIXEL;
+	 */
+
+	buf = cvp_kmem_cache_zalloc(&cvp_driver->buf_cache, GFP_KERNEL);
+	if (!buf) {
+		dprintk(CVP_ERR, "%s Out of memory\n", __func__);
+		goto fail_kzalloc;
+	}
+
+	buf->smem = cvp_kmem_cache_zalloc(&cvp_driver->smem_cache, GFP_KERNEL);
+	if (!buf->smem) {
+		dprintk(CVP_ERR, "%s Out of memory\n", __func__);
+		goto err_no_smem;
+	}
+
+	buf->smem->flags = smem_flags;
+	rc = msm_cvp_smem_alloc(buffer_size, 1, 0, /* 0: no mapping in kernel space */
+		&(inst->core->resources), buf->smem);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to allocate ARP memory\n");
+		goto err_no_mem;
+	}
+
+	buf->smem->pkt_type = buf->smem->buf_idx = 0;
+	atomic_inc(&buf->smem->refcount);
+	buf->size = buf->smem->size;
+	buf->type = HFI_BUFFER_INTERNAL_PERSIST_1;
+	buf->ownership = DRIVER;
+
+	mutex_lock(&buf_list->lock);
+	list_add_tail(&buf->list, &buf_list->list);
+	mutex_unlock(&buf_list->lock);
+	return buf;
+
+err_no_mem:
+	cvp_kmem_cache_free(&cvp_driver->smem_cache, buf->smem);
+err_no_smem:
+	cvp_kmem_cache_free(&cvp_driver->buf_cache, buf);
+fail_kzalloc:
+	return NULL;
+}
+
+int cvp_release_arp_buffers(struct msm_cvp_inst *inst)
+{
+	struct msm_cvp_smem *smem;
+	struct list_head *ptr = (struct list_head *)0xdead;
+	struct list_head *next = (struct list_head *)0xdead;
+	struct cvp_internal_buf *buf;
+	int rc = 0;
+	struct msm_cvp_core *core;
+	struct cvp_hfi_ops *ops_tbl;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "Invalid instance pointer = %pK\n", inst);
+		return -EINVAL;
+	}
+
+	core = inst->core;
+	if (!core) {
+		dprintk(CVP_ERR, "Invalid core pointer = %pK\n", core);
+		return -EINVAL;
+	}
+	ops_tbl = core->dev_ops;
+	if (!ops_tbl) {
+		dprintk(CVP_ERR, "Invalid device pointer = %pK\n", ops_tbl);
+		return -EINVAL;
+	}
+
+	dprintk(CVP_MEM, "release persist buffer!\n");
+
+	mutex_lock(&inst->persistbufs.lock);
+	/* Workaround for FW: release buffer means release all */
+	if (inst->state > MSM_CVP_CORE_INIT_DONE && inst->state <= MSM_CVP_CLOSE_DONE) {
+		rc = call_hfi_op(ops_tbl, session_release_buffers,
+				(void *)inst->session);
+		if (!rc) {
+			mutex_unlock(&inst->persistbufs.lock);
+			rc = wait_for_sess_signal_receipt(inst,
+				HAL_SESSION_RELEASE_BUFFER_DONE);
+			if (rc)
+				dprintk(CVP_WARN,
+				"%s: wait release_arp signal failed, rc %d\n",
+				__func__, rc);
+			mutex_lock(&inst->persistbufs.lock);
+		} else {
+			dprintk_rl(CVP_WARN, "Fail to send Rel prst buf\n");
+		}
+	}
+
+	list_for_each_safe(ptr, next, &inst->persistbufs.list) {
+		if (!ptr) {
+			mutex_unlock(&inst->persistbufs.lock);
+			return -EINVAL;
+		}
+		buf = list_entry(ptr, struct cvp_internal_buf, list);
+		smem = buf->smem;
+		if (!smem) {
+			dprintk(CVP_ERR, "%s invalid smem\n", __func__);
+			mutex_unlock(&inst->persistbufs.lock);
+			return -EINVAL;
+		}
+
+		if (buf->ownership == DRIVER) {
+			dprintk(CVP_MEM,
+			"%s: %x : fd %d %pK size %d",
+			"free arp", hash32_ptr(inst->session), buf->fd,
+			smem->dma_buf, buf->size);
+			list_del(&buf->list);
+			atomic_dec(&smem->refcount);
+			msm_cvp_smem_free(smem);
+			cvp_kmem_cache_free(&cvp_driver->smem_cache, smem);
+			buf->smem = NULL;
+			cvp_kmem_cache_free(&cvp_driver->buf_cache, buf);
+		}
+	}
+	mutex_unlock(&inst->persistbufs.lock);
+	return rc;
+}
+
+int cvp_allocate_dsp_bufs(struct msm_cvp_inst *inst,
+			struct cvp_internal_buf *buf,
+			u32 buffer_size,
+			u32 secure_type)
+{
+	u32 smem_flags = SMEM_UNCACHED;
+	int rc = 0;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s Invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!buf)
+		return -EINVAL;
+
+	if (!buffer_size)
+		return -EINVAL;
+
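+	/* Map secure_type to smem flags: 0 = non-secure,
+	 * 1 = secure pixel, 2 = secure non-pixel.
+	 */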
+	switch (secure_type) {
+	case 0:
+		break;
+	case 1:
+		smem_flags |= SMEM_SECURE | SMEM_PIXEL;
+		break;
+	case 2:
+		smem_flags |= SMEM_SECURE | SMEM_NON_PIXEL;
+		break;
+	default:
+		dprintk(CVP_ERR, "%s Invalid secure_type %d\n",
+			__func__, secure_type);
+		return -EINVAL;
+	}
+
+	dprintk(CVP_MEM, "%s smem_flags 0x%x\n", __func__, smem_flags);
+	buf->smem = cvp_kmem_cache_zalloc(&cvp_driver->smem_cache, GFP_KERNEL);
+	if (!buf->smem) {
+		dprintk(CVP_ERR, "%s Out of memory\n", __func__);
+		goto fail_kzalloc_smem_cache;
+	}
+
+	buf->smem->flags = smem_flags;
+	rc = msm_cvp_smem_alloc(buffer_size, 1, 0,
+			&(inst->core->resources), buf->smem);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to allocate DSP buf\n");
+		goto err_no_mem;
+	}
+	buf->smem->pkt_type = buf->smem->buf_idx = 0;
+	atomic_inc(&buf->smem->refcount);
+
+	dprintk(CVP_MEM, "%s dma_buf %pK\n", __func__, buf->smem->dma_buf);
+
+	buf->size = buf->smem->size;
+	buf->type = HFI_BUFFER_INTERNAL_PERSIST_1;
+	buf->ownership = DSP;
+
+	return rc;
+
+err_no_mem:
+	cvp_kmem_cache_free(&cvp_driver->smem_cache, buf->smem);
+fail_kzalloc_smem_cache:
+	return rc;
+}
+
+int cvp_release_dsp_buffers(struct msm_cvp_inst *inst,
+			struct cvp_internal_buf *buf)
+{
+	struct msm_cvp_smem *smem;
+	int rc = 0;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "Invalid instance pointer = %pK\n", inst);
+		return -EINVAL;
+	}
+
+	if (!buf) {
+		dprintk(CVP_ERR, "Invalid buffer pointer = %pK\n", inst);
+		return -EINVAL;
+	}
+
+	smem = buf->smem;
+	if (!smem) {
+		dprintk(CVP_ERR, "%s invalid smem\n", __func__);
+		return -EINVAL;
+	}
+
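+	/* Only DSP-owned buffers are freed here; dropping the last
+	 * reference releases the underlying smem allocation.
+	 */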
+	if (buf->ownership == DSP) {
+		dprintk(CVP_MEM,
+			"%s: %x : fd %x %s size %d",
+			__func__, hash32_ptr(inst->session), buf->fd,
+			smem->dma_buf->name, buf->size);
+		if (atomic_dec_and_test(&smem->refcount)) {
+			msm_cvp_smem_free(smem);
+			cvp_kmem_cache_free(&cvp_driver->smem_cache, smem);
+		}
+	} else {
+		dprintk(CVP_ERR,
+			"%s: wrong owner %d %x : fd %x %s size %d",
+			__func__, buf->ownership, hash32_ptr(inst->session),
+			buf->fd, smem->dma_buf->name, buf->size);
+	}
+
+	return rc;
+}
+
+int msm_cvp_register_buffer(struct msm_cvp_inst *inst,
+		struct eva_kmd_buffer *buf)
+{
+	struct cvp_hfi_ops *ops_tbl;
+	struct cvp_hal_session *session;
+	struct msm_cvp_inst *s;
+	int rc = 0;
+
+	if (!inst || !inst->core || !buf) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	s = cvp_get_inst_validate(inst->core, inst);
+	if (!s)
+		return -ECONNRESET;
+
+	session = (struct cvp_hal_session *)inst->session;
+	if (!session) {
+		dprintk(CVP_ERR, "%s: invalid session\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+	ops_tbl = inst->core->dev_ops;
+	print_client_buffer(CVP_HFI, "register", inst, buf);
+
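+	/* A non-zero index selects the DSP mapping path; an index of 0
+	 * maps a WNCC buffer.
+	 */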
+	if (buf->index)
+		rc = msm_cvp_map_buf_dsp(inst, buf);
+	else
+		rc = msm_cvp_map_buf_wncc(inst, buf);
+	dprintk(CVP_DSP, "%s: fd %d, iova 0x%x\n", __func__,
+			buf->fd, buf->reserved[0]);
+exit:
+	cvp_put_inst(s);
+	return rc;
+}
+
+int msm_cvp_unregister_buffer(struct msm_cvp_inst *inst,
+		struct eva_kmd_buffer *buf)
+{
+	struct msm_cvp_inst *s;
+	int rc = 0;
+
+	if (!inst || !inst->core || !buf) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	s = cvp_get_inst_validate(inst->core, inst);
+	if (!s)
+		return -ECONNRESET;
+
+	print_client_buffer(CVP_HFI, "unregister", inst, buf);
+
+	if (buf->index)
+		rc = msm_cvp_unmap_buf_dsp(inst, buf);
+	else
+		rc = msm_cvp_unmap_buf_wncc(inst, buf);
+	cvp_put_inst(s);
+	return rc;
+}

+ 244 - 0
qcom/opensource/eva-kernel/msm/eva/msm_cvp_buf.h

@@ -0,0 +1,244 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _MSM_CVP_BUF_H_
+#define _MSM_CVP_BUF_H_
+
+#include <linux/poll.h>
+#include <linux/types.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
+#include <linux/refcount.h>
+#include <media/msm_eva_private.h>
+#include "cvp_comm_def.h"
+
+#define MAX_FRAME_BUFFER_NUMS 40
+#define MAX_DMABUF_NUMS 64
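+/* A client buffer is valid only if (offset + size) stays within the
+ * backing smem allocation.
+ */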
+#define IS_CVP_BUF_VALID(buf, smem) \
+	((buf->size <= smem->size) && \
+	(buf->size <= smem->size - buf->offset))
+
+struct msm_cvp_inst;
+struct msm_cvp_platform_resources;
+struct msm_cvp_list;
+
+enum smem_cache_ops {
+	SMEM_CACHE_CLEAN,
+	SMEM_CACHE_INVALIDATE,
+	SMEM_CACHE_CLEAN_INVALIDATE,
+};
+
+enum smem_prop {
+	SMEM_UNCACHED = 0x1,
+	SMEM_CACHED = 0x2,
+	SMEM_SECURE = 0x4,
+	SMEM_CDSP = 0x8,
+	SMEM_NON_PIXEL = 0x10,
+	SMEM_PIXEL = 0x20,
+	SMEM_CAMERA = 0x40,
+	SMEM_PERSIST = 0x100,
+};
+
+struct msm_cvp_list {
+	struct list_head list;
+	struct mutex lock;
+};
+
+static inline void INIT_MSM_CVP_LIST(struct msm_cvp_list *mlist)
+{
+	mutex_init(&mlist->lock);
+	INIT_LIST_HEAD(&mlist->list);
+}
+
+static inline void DEINIT_MSM_CVP_LIST(struct msm_cvp_list *mlist)
+{
+	mutex_destroy(&mlist->lock);
+}
+
+struct cvp_dma_mapping_info {
+	struct device *dev;
+	struct iommu_domain *domain;
+	struct sg_table *table;
+	struct dma_buf_attachment *attach;
+	struct dma_buf *buf;
+	void *cb_info;
+};
+
+struct msm_cvp_smem {
+	struct list_head list;
+	atomic_t refcount;
+	struct dma_buf *dma_buf;
+	void *kvaddr;
+	u32 device_addr;
+	dma_addr_t dma_handle;
+	u32 size;
+	u32 bitmap_index;
+	u32 flags;
+	u32 pkt_type;
+	u32 buf_idx;
+	u32 fd;
+	struct cvp_dma_mapping_info mapping_info;
+};
+
+struct msm_cvp_wncc_buffer {
+	u32 fd;
+	u32 iova;
+	u32 size;
+};
+
+struct cvp_dmamap_cache {
+	unsigned long usage_bitmap;
+	struct mutex lock;
+	struct msm_cvp_smem *entries[MAX_DMABUF_NUMS];
+	unsigned int nr;
+};
+
+static inline void INIT_DMAMAP_CACHE(struct cvp_dmamap_cache *cache)
+{
+	mutex_init(&cache->lock);
+	cache->usage_bitmap = 0;
+	cache->nr = 0;
+}
+
+static inline void DEINIT_DMAMAP_CACHE(struct cvp_dmamap_cache *cache)
+{
+	mutex_destroy(&cache->lock);
+	cache->usage_bitmap = 0;
+	cache->nr = 0;
+}
+
+#define INPUT_FENCE_BITMASK 0x1
+#define OUTPUT_FENCE_BITMASK 0x2
+
+/* Track source of dma_buf allocator/owner */
+enum buffer_owner {
+	DRIVER,		/* Allocated by KMD, for CPU driver */
+	CLIENT,		/* Allocated by Client (DSP or CPU) */
+	DSP,		/* Allocated by KMD, for DSP driver */
+	MAX_OWNER
+};
+
+struct cvp_internal_buf {
+	struct list_head list;
+	s32 fd;
+	u32 size;
+	u32 offset;
+	u32 type;
+	u32 index;
+	u64 ktid;
+	enum buffer_owner ownership;
+	struct msm_cvp_smem *smem;
+};
+
+struct msm_cvp_frame {
+	struct list_head list;
+	struct cvp_internal_buf bufs[MAX_FRAME_BUFFER_NUMS];
+	u32 nr;
+	u64 ktid;
+	u32 pkt_type;
+};
+
+struct cvp_frame_bufs {
+	u64 ktid;
+	u32 nr;
+	struct msm_cvp_smem smem[MAX_FRAME_BUFFER_NUMS];
+};
+
+struct wncc_oob_buf {
+	u32 bitmap_idx;
+	struct eva_kmd_oob_wncc *buf;
+};
+
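+/*
+ * Pool of pre-allocated WNCC out-of-band structures; used_bitmap marks
+ * which of the NUM_WNCC_BUFS entries are currently handed out.
+ */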
+#define NUM_WNCC_BUFS 8
+struct cvp_oob_pool {
+	struct mutex lock;
+	bool allocated;
+	u32 used_bitmap;
+	struct eva_kmd_oob_wncc *bufs[NUM_WNCC_BUFS];
+};
+
+extern struct cvp_oob_pool wncc_buf_pool;
+
+void print_cvp_buffer(u32 tag, const char *str,
+		struct msm_cvp_inst *inst,
+		struct cvp_internal_buf *cbuf);
+void print_client_buffer(u32 tag, const char *str,
+		struct msm_cvp_inst *inst,
+		struct eva_kmd_buffer *cbuf);
+int print_smem(u32 tag, const char *str,
+		struct msm_cvp_inst *inst,
+		struct msm_cvp_smem *smem);
+
+/*Kernel DMA buffer and IOMMU mapping functions*/
+int msm_cvp_smem_alloc(size_t size, u32 align, int map_kernel,
+			void  *res, struct msm_cvp_smem *smem);
+int msm_cvp_smem_free(struct msm_cvp_smem *smem);
+struct context_bank_info *msm_cvp_smem_get_context_bank(
+				struct msm_cvp_platform_resources *res,
+				unsigned int flags);
+int msm_cvp_map_smem(struct msm_cvp_inst *inst,
+			struct msm_cvp_smem *smem,
+			const char *str);
+int msm_cvp_unmap_smem(struct msm_cvp_inst *inst,
+			struct msm_cvp_smem *smem,
+			const char *str);
+struct dma_buf *msm_cvp_smem_get_dma_buf(int fd);
+void msm_cvp_smem_put_dma_buf(void *dma_buf);
+int msm_cvp_smem_cache_operations(struct dma_buf *dbuf,
+				enum smem_cache_ops cache_op,
+				unsigned long offset,
+				unsigned long size);
+int msm_cvp_map_ipcc_regs(u32 *iova);
+int msm_cvp_unmap_ipcc_regs(u32 iova);
+
+/* CVP driver internal buffer management functions*/
+struct cvp_internal_buf *cvp_allocate_arp_bufs(struct msm_cvp_inst *inst,
+					u32 buffer_size);
+int cvp_release_arp_buffers(struct msm_cvp_inst *inst);
+int msm_cvp_map_buf_dsp(struct msm_cvp_inst *inst,
+			struct eva_kmd_buffer *buf);
+int msm_cvp_unmap_buf_dsp(struct msm_cvp_inst *inst,
+			struct eva_kmd_buffer *buf);
+int msm_cvp_map_buf_dsp_new(struct msm_cvp_inst *inst,
+			struct eva_kmd_buffer *buf,
+			int32_t pid,
+			uint32_t *iova);
+int msm_cvp_unmap_buf_dsp_new(struct msm_cvp_inst *inst,
+			struct eva_kmd_buffer *buf);
+int msm_cvp_map_buf_wncc(struct msm_cvp_inst* inst,
+			struct eva_kmd_buffer* buf);
+int msm_cvp_unmap_buf_wncc(struct msm_cvp_inst* inst,
+			struct eva_kmd_buffer* buf);
+int msm_cvp_proc_oob(struct msm_cvp_inst* inst,
+			struct eva_kmd_hfi_packet* in_pkt);
+void msm_cvp_cache_operations(struct msm_cvp_smem *smem,
+			u32 type, u32 offset, u32 size);
+int msm_cvp_unmap_user_persist(struct msm_cvp_inst *inst,
+			struct eva_kmd_hfi_packet *in_pkt,
+			unsigned int offset, unsigned int buf_num);
+int msm_cvp_map_user_persist(struct msm_cvp_inst *inst,
+			struct eva_kmd_hfi_packet *in_pkt,
+			unsigned int offset, unsigned int buf_num);
+int msm_cvp_map_frame(struct msm_cvp_inst *inst,
+		struct eva_kmd_hfi_packet *in_pkt,
+		unsigned int offset, unsigned int buf_num);
+void msm_cvp_unmap_frame(struct msm_cvp_inst *inst, u64 ktid);
+int msm_cvp_register_buffer(struct msm_cvp_inst *inst,
+		struct eva_kmd_buffer *buf);
+int msm_cvp_unregister_buffer(struct msm_cvp_inst *inst,
+		struct eva_kmd_buffer *buf);
+int msm_cvp_session_deinit_buffers(struct msm_cvp_inst *inst);
+void msm_cvp_print_inst_bufs(struct msm_cvp_inst *inst, bool log);
+int cvp_allocate_dsp_bufs(struct msm_cvp_inst *inst,
+			struct cvp_internal_buf *buf,
+			u32 buffer_size,
+			u32 secure_type);
+int cvp_release_dsp_buffers(struct msm_cvp_inst *inst,
+			struct cvp_internal_buf *buf);
+#endif

+ 494 - 0
qcom/opensource/eva-kernel/msm/eva/msm_cvp_clocks.c

@@ -0,0 +1,494 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "msm_cvp_common.h"
+#include "cvp_hfi_api.h"
+#include "msm_cvp_debug.h"
+#include "msm_cvp_clocks.h"
+
+static bool __mmrm_client_check_scaling_supported(
+				struct mmrm_client_desc *client)
+{
+#ifdef CVP_MMRM_ENABLED
+	return mmrm_client_check_scaling_supported(
+				client->client_type,
+				client->client_info.desc.client_domain);
+#else
+	return false;
+#endif
+}
+
+static struct mmrm_client *__mmrm_client_register(
+				struct mmrm_client_desc *client)
+{
+#ifdef CVP_MMRM_ENABLED
+	return mmrm_client_register(client);
+#else
+	return NULL;
+#endif
+}
+
+static int __mmrm_client_deregister(struct mmrm_client *client)
+{
+#ifdef CVP_MMRM_ENABLED
+	return mmrm_client_deregister(client);
+#else
+	return -ENODEV;
+#endif
+}
+
+static int __mmrm_client_set_value_in_range(struct mmrm_client *client,
+					struct mmrm_client_data *data,
+					struct mmrm_client_res_value *val)
+{
+#ifdef CVP_MMRM_ENABLED
+	return mmrm_client_set_value_in_range(client, data, val);
+#else
+	return -ENODEV;
+#endif
+}
+
+int msm_cvp_mmrm_notifier_cb(
+	struct mmrm_client_notifier_data *notifier_data)
+{
+	if (!notifier_data) {
+		dprintk(CVP_WARN, "%s Invalid notifier data: %pK\n",
+			__func__, notifier_data);
+		return -EINVAL;
+	}
+
+	if (notifier_data->cb_type == MMRM_CLIENT_RESOURCE_VALUE_CHANGE) {
+		struct iris_hfi_device *dev = notifier_data->pvt_data;
+
+		dprintk(CVP_PWR,
+			"%s: Clock %s throttled from %ld to %ld \n",
+			__func__, dev->mmrm_desc.client_info.desc.name,
+			notifier_data->cb_data.val_chng.old_val,
+			notifier_data->cb_data.val_chng.new_val);
+
+		/*TODO: if need further handling to notify eva client */
+	} else {
+		dprintk(CVP_WARN, "%s Invalid cb type: %d\n",
+			__func__, notifier_data->cb_type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int msm_cvp_set_clocks(struct msm_cvp_core *core)
+{
+	struct cvp_hfi_ops *ops_tbl;
+	int rc;
+
+	if (!core || !core->dev_ops) {
+		dprintk(CVP_ERR, "%s Invalid args: %pK\n", __func__, core);
+		return -EINVAL;
+	}
+
+	ops_tbl = core->dev_ops;
+	rc = call_hfi_op(ops_tbl, scale_clocks,
+		ops_tbl->hfi_device_data, core->curr_freq);
+	return rc;
+}
+
+int msm_cvp_mmrm_register(struct iris_hfi_device *device)
+{
+	int rc = 0;
+	struct clock_info *cl = NULL;
+	char *name;
+	bool isSupport;
+
+	if (!device) {
+		dprintk(CVP_ERR, "%s invalid device\n", __func__);
+		return -EINVAL;
+	}
+
+	name = (char *)device->mmrm_desc.client_info.desc.name;
+	device->mmrm_cvp = NULL;
+	device->mmrm_desc.client_type = MMRM_CLIENT_CLOCK;
+	device->mmrm_desc.priority = MMRM_CLIENT_PRIOR_LOW;
+	device->mmrm_desc.pvt_data = device;
+	device->mmrm_desc.notifier_callback_fn = msm_cvp_mmrm_notifier_cb;
+	device->mmrm_desc.client_info.desc.client_domain = MMRM_CLIENT_DOMAIN_CVP;
+
+	iris_hfi_for_each_clock(device, cl) {
+		if (cl->has_scaling) {	/* only clk source enabled in dtsi */
+			device->mmrm_desc.client_info.desc.clk = cl->clk;
+			device->mmrm_desc.client_info.desc.client_id = cl->clk_id;
+			strlcpy(name, cl->name,
+				sizeof(device->mmrm_desc.client_info.desc.name));
+		}
+	}
+
+	isSupport = __mmrm_client_check_scaling_supported(&(device->mmrm_desc));
+
+	if (!isSupport) {
+		dprintk(CVP_PWR, "%s: mmrm not supported, flag: %d\n",
+			__func__, isSupport);
+		return rc;
+	}
+
+	dprintk(CVP_PWR,
+		"%s: Register for %s, clk_id %d\n",
+		__func__, device->mmrm_desc.client_info.desc.name,
+		device->mmrm_desc.client_info.desc.client_id);
+
+	device->mmrm_cvp = __mmrm_client_register(&(device->mmrm_desc));
+	if (device->mmrm_cvp == NULL) {
+		dprintk(CVP_ERR,
+			"%s: Failed mmrm_client_register with mmrm_cvp: %pK\n",
+			__func__, device->mmrm_cvp);
+		rc = -ENOENT;
+	} else {
+		dprintk(CVP_PWR,
+			"%s: mmrm_client_register done: %pK, type:%d, uid:%ld\n",
+			__func__, device->mmrm_cvp,
+			device->mmrm_cvp->client_type,
+			device->mmrm_cvp->client_uid);
+	}
+
+	return rc;
+}
+
+int msm_cvp_mmrm_deregister(struct iris_hfi_device *device)
+{
+	int rc = 0;
+	struct clock_info *cl = NULL;
+
+	if (!device) {
+		dprintk(CVP_ERR,
+			"%s invalid args: device %pK \n",
+			__func__, device);
+		return -EINVAL;
+	}
+
+	if (!device->mmrm_cvp) {	// when mmrm not supported
+		dprintk(CVP_ERR,
+			"%s device->mmrm_cvp not initialized \n",
+			__func__);
+		return rc;
+	}
+
+	/* set clk value to 0 before deregister */
+	iris_hfi_for_each_clock(device, cl) {
+		if ((cl->has_scaling) && (__clk_is_enabled(cl->clk))) {
+			// set min freq and cur freq to 0;
+			rc = msm_cvp_mmrm_set_value_in_range(device,
+				0, 0);
+			if (rc) {
+				dprintk(CVP_ERR,
+					"%s Failed set clock %s: %d\n",
+					__func__, cl->name, rc);
+			}
+		}
+	}
+
+	rc = __mmrm_client_deregister(device->mmrm_cvp);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"%s: Failed mmrm_client_deregister with rc: %d\n",
+			__func__, rc);
+	}
+
+	device->mmrm_cvp = NULL;
+
+	return rc;
+}
+
+int msm_cvp_mmrm_set_value_in_range(struct iris_hfi_device *device,
+	u32 freq_min, u32 freq_cur)
+{
+	int rc = 0;
+	struct mmrm_client_res_value val;
+	struct mmrm_client_data data;
+
+	if (!device) {
+		dprintk(CVP_ERR, "%s invalid device\n", __func__);
+		return -EINVAL;
+	}
+
+	dprintk(CVP_PWR,
+		"%s: set clock rate for mmrm_cvp: %pK, type :%d, uid: %ld\n",
+		__func__, device->mmrm_cvp,
+		device->mmrm_cvp->client_type, device->mmrm_cvp->client_uid);
+
+	val.min = freq_min;
+	val.cur = freq_cur;
+	data.num_hw_blocks = 1;
+	data.flags = 0;		/* Not MMRM_CLIENT_DATA_FLAG_RESERVE_ONLY */
+
+	dprintk(CVP_PWR,
+		"%s: set clock rate to min %u cur %u: %d\n",
+		__func__, val.min, val.cur, rc);
+
+	rc = __mmrm_client_set_value_in_range(device->mmrm_cvp, &data, &val);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"%s: Failed to set clock rate to min %u cur %u: %d\n",
+			__func__, val.min, val.cur, rc);
+	}
+	return rc;
+}
+
+int msm_cvp_set_clocks_impl(struct iris_hfi_device *device, u32 freq)
+{
+	struct clock_info *cl;
+	int rc = 0;
+	int fsrc2clk = 3;	/* ratio factor between clock source and clk */
+	u32 freq_min = device->res->allowed_clks_tbl[0].clock_rate * fsrc2clk;
+
+	dprintk(CVP_PWR, "%s: entering with freq: %u\n", __func__, freq);
+
+	iris_hfi_for_each_clock(device, cl) {
+		if (cl->has_scaling) {/* has_scaling */
+			device->clk_freq = freq;
+			if (msm_cvp_clock_voting)
+				freq = msm_cvp_clock_voting;
+
+			freq = freq * fsrc2clk;
+			dprintk(CVP_PWR,
+				"%s: clock source rate set to: %ld\n",
+				__func__, freq);
+
+			if (device->mmrm_cvp != NULL) {
+				/* min freq : 1st element value in the table */
+				rc = msm_cvp_mmrm_set_value_in_range(device,
+					freq_min, freq);
+				if (rc) {
+					dprintk(CVP_ERR,
+						"Failed set clock %s: %d\n",
+						cl->name, rc);
+					return rc;
+				}
+			} else {
+				dprintk(CVP_PWR,
+					"%s: set clock with clk_set_rate\n",
+					__func__);
+				rc = clk_set_rate(cl->clk, freq);
+				if (rc) {
+					dprintk(CVP_ERR,
+						"Failed set clock %u %s: %d\n",
+						freq, cl->name, rc);
+					return rc;
+				}
+
+				dprintk(CVP_PWR, "Scaling clock %s to %u\n",
+					cl->name, freq);
+			}
+		}
+	}
+
+	return 0;
+}
+
+int msm_cvp_scale_clocks(struct iris_hfi_device *device)
+{
+	int rc = 0;
+	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
+	u32 rate = 0;
+
+	allowed_clks_tbl = device->res->allowed_clks_tbl;
+
+	rate = device->clk_freq ? device->clk_freq :
+		allowed_clks_tbl[0].clock_rate;
+
+	dprintk(CVP_PWR, "%s: scale clock rate %d\n", __func__, rate);
+	rc = msm_cvp_set_clocks_impl(device, rate);
+	return rc;
+}
+
+int msm_cvp_prepare_enable_clk(struct iris_hfi_device *device,
+		const char *name)
+{
+	struct clock_info *cl = NULL;
+	int rc = 0;
+
+	if (!device) {
+		dprintk(CVP_ERR, "Invalid params: %pK\n", device);
+		return -EINVAL;
+	}
+
+	iris_hfi_for_each_clock(device, cl) {
+		if (strcmp(cl->name, name))
+			continue;
+		/*
+		 * For the clocks we control, set the rate prior to preparing
+		 * them. Since we don't really have a load at this point,
+		 * scale it to the lowest frequency possible.
+		 */
+		if (!cl->clk) {
+			dprintk(CVP_PWR, "%s %s already enabled by framework",
+				__func__, cl->name);
+			return 0;
+		}
+
+		if (cl->has_scaling) {
+			if (device->mmrm_cvp != NULL) {
+				// set min freq and cur freq to 0;
+				rc = msm_cvp_mmrm_set_value_in_range(device,
+						0, 0);
+				if (rc)
+					dprintk(CVP_ERR,
+						"%s Failed set clock %s: %d\n",
+						__func__, cl->name, rc);
+			} else {
+				dprintk(CVP_PWR,
+					"%s: set clock with clk_set_rate\n",
+					__func__);
+				clk_set_rate(cl->clk,
+						clk_round_rate(cl->clk, 0));
+			}
+		}
+		rc = clk_prepare_enable(cl->clk);
+		if (rc) {
+			dprintk(CVP_ERR, "Failed to enable clock %s\n",
+				cl->name);
+			return rc;
+		}
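+		/* Verify the hardware actually gated the clock on;
+		 * roll back the prepare_enable if it did not.
+		 */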
+		if (!__clk_is_enabled(cl->clk)) {
+			dprintk(CVP_ERR, "%s: clock %s not enabled\n",
+					__func__, cl->name);
+			clk_disable_unprepare(cl->clk);
+			return -EINVAL;
+		}
+
+		dprintk(CVP_PWR, "Clock: %s prepared and enabled\n",
+				cl->name);
+		return 0;
+	}
+
+	dprintk(CVP_ERR, "%s clock %s not found\n", __func__, name);
+	return -EINVAL;
+}
+
+int msm_cvp_disable_unprepare_clk(struct iris_hfi_device *device,
+		const char *name)
+{
+	struct clock_info *cl;
+	int rc = 0;
+
+	if (!device) {
+		dprintk(CVP_ERR, "Invalid params: %pK\n", device);
+		return -EINVAL;
+	}
+
+	iris_hfi_for_each_clock_reverse(device, cl) {
+		if (strcmp(cl->name, name))
+			continue;
+		if (!cl->clk) {
+			dprintk(CVP_PWR, "%s %s always enabled by framework",
+				__func__, cl->name);
+			return 0;
+		}
+		clk_disable_unprepare(cl->clk);
+		dprintk(CVP_PWR, "Clock: %s disable and unprepare\n",
+			cl->name);
+
+		if (cl->has_scaling) {
+			if (device->mmrm_cvp != NULL) {
+				// set min freq and cur freq to 0;
+				rc = msm_cvp_mmrm_set_value_in_range(device,
+					0, 0);
+				if (rc)
+					dprintk(CVP_ERR,
+						"%s Failed set clock %s: %d\n",
+						__func__, cl->name, rc);
+			}
+		}
+		return 0;
+	}
+
+	dprintk(CVP_ERR, "%s clock %s not found\n", __func__, name);
+	return -EINVAL;
+}
+
+int msm_cvp_init_clocks(struct iris_hfi_device *device)
+{
+	int rc = 0;
+	struct clock_info *cl = NULL;
+
+	if (!device) {
+		dprintk(CVP_ERR, "Invalid params: %pK\n", device);
+		return -EINVAL;
+	}
+
+	iris_hfi_for_each_clock(device, cl) {
+
+		dprintk(CVP_PWR, "%s: scalable? %d, count %d\n",
+			cl->name, cl->has_scaling, cl->count);
+	}
+
+	iris_hfi_for_each_clock(device, cl) {
+		if (!cl->clk) {
+			cl->clk = clk_get(&device->res->pdev->dev, cl->name);
+			if (IS_ERR(cl->clk)) {
+				rc = PTR_ERR(cl->clk);
+				dprintk(CVP_ERR,
+					"Failed to get clock: %s, rc %d\n",
+					cl->name, rc);
+				cl->clk = NULL;
+				goto err_clk_get;
+			}
+		}
+	}
+	device->clk_freq = 0;
+	return 0;
+
+err_clk_get:
+	msm_cvp_deinit_clocks(device);
+	return rc;
+}
+
+void msm_cvp_deinit_clocks(struct iris_hfi_device *device)
+{
+	struct clock_info *cl;
+
+	device->clk_freq = 0;
+	iris_hfi_for_each_clock_reverse(device, cl) {
+		if (cl->clk) {
+			clk_put(cl->clk);
+			cl->clk = NULL;
+		}
+	}
+}
+
+int msm_cvp_set_bw(struct msm_cvp_core *core, struct bus_info *bus, unsigned long bw)
+{
+	struct cvp_hfi_ops *ops_tbl;
+	int rc;
+
+	if (!core || !core->dev_ops) {
+		dprintk(CVP_ERR, "%s Invalid args: %pK\n", __func__, core);
+		return -EINVAL;
+	}
+
+	ops_tbl = core->dev_ops;
+	rc = call_hfi_op(ops_tbl, vote_bus, ops_tbl->hfi_device_data, bus, bw);
+	return rc;
+}
+
+int cvp_set_bw(struct bus_info *bus, unsigned long bw)
+{
+	int rc = 0;
+
+	if (!bus->client)
+		return -EINVAL;
+	dprintk(CVP_PWR, "bus->name = %s to bw = %u\n",
+			bus->name, bw);
+
+	rc = icc_set_bw(bus->client, bw, 0);
+	if (rc)
+		dprintk(CVP_ERR, "Failed voting bus %s to ab %u\n",
+			bus->name, bw);
+
+	return rc;
+}
+

+ 28 - 0
qcom/opensource/eva-kernel/msm/eva/msm_cvp_clocks.h

@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+
+#ifndef _MSM_CVP_CLOCKS_H_
+#define _MSM_CVP_CLOCKS_H_
+#include "msm_cvp_internal.h"
+#include "cvp_core_hfi.h"
+
+int msm_cvp_set_clocks(struct msm_cvp_core *core);
+int msm_cvp_mmrm_register(struct iris_hfi_device *device);
+int msm_cvp_mmrm_deregister(struct iris_hfi_device *device);
+int msm_cvp_mmrm_set_value_in_range(struct iris_hfi_device *device,
+		u32 freq_min, u32 freq_cur);
+int msm_cvp_set_clocks_impl(struct iris_hfi_device *device, u32 freq);
+int msm_cvp_scale_clocks(struct iris_hfi_device *device);
+int msm_cvp_prepare_enable_clk(struct iris_hfi_device *device,
+		const char *name);
+int msm_cvp_disable_unprepare_clk(struct iris_hfi_device *device,
+		const char *name);
+int msm_cvp_init_clocks(struct iris_hfi_device *device);
+void msm_cvp_deinit_clocks(struct iris_hfi_device *device);
+int msm_cvp_set_bw(struct msm_cvp_core *core, struct bus_info *bus, unsigned long bw);
+int cvp_set_bw(struct bus_info *bus, unsigned long bw);
+#endif

+ 1431 - 0
qcom/opensource/eva-kernel/msm/eva/msm_cvp_common.c

@@ -0,0 +1,1431 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <asm/div64.h>
+#include "msm_cvp_common.h"
+#include "cvp_hfi_api.h"
+#include "msm_cvp_debug.h"
+#include "msm_cvp_clocks.h"
+#include "msm_cvp.h"
+#include "cvp_core_hfi.h"
+
+#define IS_ALREADY_IN_STATE(__p, __d) (\
+	(__p >= __d)\
+)
+
+static void handle_session_error(enum hal_command_response cmd, void *data);
+
+static void dump_hfi_queue(struct iris_hfi_device *device)
+{
+	struct cvp_hfi_queue_header *queue;
+	struct cvp_iface_q_info *qinfo;
+	int i;
+	u32 *read_ptr, read_idx;
+
+	dprintk(CVP_ERR, "HFI queues in order of cmd(rd, wr), msg and dbg:\n");
+
+	/*
+	 * mb() to ensure driver reads the updated header values from
+	 * main memory.
+	 */
+	mb();
+	mutex_lock(&device->lock);
+	for (i = 0; i <= CVP_IFACEQ_DBGQ_IDX; i++) {
+		qinfo = &device->iface_queues[i];
+		queue = (struct cvp_hfi_queue_header *)qinfo->q_hdr;
+		if (!queue) {
+			mutex_unlock(&device->lock);
+			dprintk(CVP_ERR, "HFI queue not init, fail to dump\n");
+			return;
+		}
+		dprintk(CVP_ERR, "queue details: r:w %d:%d r:t %d %d\n",
+				queue->qhdr_read_idx, queue->qhdr_write_idx,
+				queue->qhdr_rx_req, queue->qhdr_tx_req);
+		if (queue->qhdr_read_idx != queue->qhdr_write_idx) {
+			read_idx = queue->qhdr_read_idx;
+			read_ptr = (u32 *)((qinfo->q_array.align_virtual_addr) +
+				(read_idx << 2));
+			dprintk(CVP_ERR,
+				"queue payload: %x %x %x %x %x %x %x %x %x\n",
+				read_ptr[0], read_ptr[1], read_ptr[2],
+				read_ptr[3], read_ptr[4], read_ptr[5],
+				read_ptr[6], read_ptr[7], read_ptr[8]);
+		}
+
+	}
+	mutex_unlock(&device->lock);
+}
+
+void print_hfi_queue_info(struct cvp_hfi_ops *ops_tbl)
+{
+	if (ops_tbl && ops_tbl->hfi_device_data) {
+		call_hfi_op(ops_tbl, flush_debug_queue, ops_tbl->hfi_device_data);
+		dump_hfi_queue(ops_tbl->hfi_device_data);
+	}
+}
+
+static void handle_sys_init_done(enum hal_command_response cmd, void *data)
+{
+	struct msm_cvp_cb_cmd_done *response = data;
+	struct msm_cvp_core *core;
+	struct cvp_hal_sys_init_done *sys_init_msg;
+	u32 index;
+
+	if (!IS_HAL_SYS_CMD(cmd)) {
+		dprintk(CVP_ERR, "%s - invalid cmd\n", __func__);
+		return;
+	}
+
+	index = SYS_MSG_INDEX(cmd);
+
+	if (!response) {
+		dprintk(CVP_ERR,
+			"Failed to get valid response for sys init\n");
+		return;
+	}
+	core = cvp_driver->cvp_core;
+	if (!core) {
+		dprintk(CVP_ERR, "Wrong device_id received\n");
+		return;
+	}
+	sys_init_msg = &response->data.sys_init_done;
+	if (!sys_init_msg) {
+		dprintk(CVP_ERR, "sys_init_done message not proper\n");
+		return;
+	}
+
+	/* This should come from sys_init_done */
+	core->resources.max_inst_count =
+		sys_init_msg->max_sessions_supported ?
+		min_t(u32, sys_init_msg->max_sessions_supported,
+		MAX_SUPPORTED_INSTANCES) : MAX_SUPPORTED_INSTANCES;
+
+	core->resources.max_secure_inst_count =
+		core->resources.max_secure_inst_count ?
+		core->resources.max_secure_inst_count :
+		core->resources.max_inst_count;
+
+	memcpy(core->capabilities, sys_init_msg->capabilities,
+		sys_init_msg->codec_count * sizeof(struct msm_cvp_capability));
+
+	dprintk(CVP_CORE,
+		"%s: max_inst_count %d, max_secure_inst_count %d\n",
+		__func__, core->resources.max_inst_count,
+		core->resources.max_secure_inst_count);
+
+	complete(&(core->completions[index]));
+}
+
+static void put_inst_helper(struct kref *kref)
+{
+	struct msm_cvp_inst *inst;
+
+	if (!kref)
+		return;
+
+	inst = container_of(kref,
+			struct msm_cvp_inst, kref);
+
+	msm_cvp_destroy(inst);
+}
+
+void cvp_put_inst(struct msm_cvp_inst *inst)
+{
+	if (!inst || (kref_read(&inst->kref) < 1)) {
+		dprintk(CVP_ERR, "Invalid session %llx\n", inst);
+		WARN_ON(true);
+		return;
+	}
+
+	kref_put(&inst->kref, put_inst_helper);
+}
+
+struct msm_cvp_inst *cvp_get_inst(struct msm_cvp_core *core,
+		void *session_id)
+{
+	struct msm_cvp_inst *inst = NULL;
+	bool matches = false;
+
+	if (!core || !session_id)
+		return NULL;
+
+	mutex_lock(&core->lock);
+	/*
+	 * This is as good as !list_empty(&inst->list), but at this point
+	 * we don't really know if inst was kfree'd via close syscall before
+	 * hardware could respond. So manually walk through the list of
+	 * active sessions
+	 */
+	list_for_each_entry(inst, &core->instances, list) {
+		if (inst == session_id) {
+			/*
+			 * Even if the instance is valid, we really shouldn't
+			 * be receiving or handling callbacks when we've deleted
+			 * our session with HFI
+			 */
+			matches = !!inst->session;
+			break;
+		}
+	}
+
+	/*
+	 * kref_* is atomic_int backed, so no need for inst->lock.  But we can
+	 * always acquire inst->lock and release it in cvp_put_inst
+	 * for a stronger locking system.
+	 */
+	inst = (matches && kref_get_unless_zero(&inst->kref)) ? inst : NULL;
+	mutex_unlock(&core->lock);
+
+	return inst;
+}
+
+struct msm_cvp_inst *cvp_get_inst_validate(struct msm_cvp_core *core,
+		void *session_id)
+{
+	int rc = 0;
+	struct cvp_hfi_ops *ops_tbl;
+	struct msm_cvp_inst *s;
+
+	s = cvp_get_inst(core, session_id);
+	if (!s) {
+		dprintk(CVP_WARN, "%s session doesn't exit\n", __func__);
+		return NULL;
+	}
+
+	ops_tbl = s->core->dev_ops;
+	rc = call_hfi_op(ops_tbl, validate_session, s->session, __func__);
+	if (rc) {
+		cvp_put_inst(s);
+		s = NULL;
+	}
+
+	return s;
+}
+
+static void handle_session_set_buf_done(enum hal_command_response cmd,
+	void *data)
+{
+	struct msm_cvp_cb_cmd_done *response = data;
+	struct msm_cvp_inst *inst;
+
+	if (!response) {
+		dprintk(CVP_ERR, "Invalid set_buf_done response\n");
+		return;
+	}
+
+	inst = cvp_get_inst(cvp_driver->cvp_core, response->session_id);
+	if (!inst) {
+		dprintk(CVP_WARN, "set_buf_done has an inactive session\n");
+		return;
+	}
+
+	if (response->status) {
+		dprintk(CVP_ERR,
+			"set ARP buffer error from FW : %#x\n",
+			response->status);
+	}
+
+	if (IS_HAL_SESSION_CMD(cmd))
+		complete(&inst->completions[SESSION_MSG_INDEX(cmd)]);
+	else
+		dprintk(CVP_ERR, "set_buf_done: invalid cmd: %d\n", cmd);
+	cvp_put_inst(inst);
+}
+
+static void handle_session_release_buf_done(enum hal_command_response cmd,
+	void *data)
+{
+	struct msm_cvp_cb_cmd_done *response = data;
+	struct msm_cvp_inst *inst;
+	struct cvp_internal_buf *buf;
+	struct list_head *ptr, *next;
+	u32 buf_found = false;
+	u32 address;
+
+	if (!response) {
+		dprintk(CVP_ERR, "Invalid release_buf_done response\n");
+		return;
+	}
+
+	inst = cvp_get_inst(cvp_driver->cvp_core, response->session_id);
+	if (!inst) {
+		dprintk(CVP_WARN,
+			"%s: Got a response for an inactive session\n",
+			__func__);
+		return;
+	}
+
+	address = response->data.buffer_addr;
+
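+	/* Match the released address against the persist buffer list,
+	 * purely for logging; the entry itself is freed elsewhere.
+	 */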
+	mutex_lock(&inst->persistbufs.lock);
+	list_for_each_safe(ptr, next, &inst->persistbufs.list) {
+		buf = list_entry(ptr, struct cvp_internal_buf, list);
+		if (address == buf->smem->device_addr + buf->offset) {
+			dprintk(CVP_SESS, "releasing persist: %#x\n",
+					buf->smem->device_addr);
+			buf_found = true;
+		}
+	}
+	mutex_unlock(&inst->persistbufs.lock);
+
+	if (response->status)
+		dprintk(CVP_ERR, "HFI release persist buf err 0x%x\n",
+			response->status);
+	inst->error_code = response->status;
+
+	if (IS_HAL_SESSION_CMD(cmd))
+		complete(&inst->completions[SESSION_MSG_INDEX(cmd)]);
+	else
+		dprintk(CVP_ERR, "Invalid inst cmd response: %d\n", cmd);
+
+	cvp_put_inst(inst);
+}
+
+static void handle_sys_release_res_done(
+		enum hal_command_response cmd, void *data)
+{
+	struct msm_cvp_cb_cmd_done *response = data;
+	struct msm_cvp_core *core;
+
+	if (!response) {
+		dprintk(CVP_ERR,
+			"Failed to get valid response for sys release resource\n");
+		return;
+	}
+	core = cvp_driver->cvp_core;
+	if (!core) {
+		dprintk(CVP_ERR, "Wrong device_id received\n");
+		return;
+	}
+	complete(&core->completions[
+			SYS_MSG_INDEX(HAL_SYS_RELEASE_RESOURCE_DONE)]);
+}
+
+void change_cvp_inst_state(struct msm_cvp_inst *inst, enum instance_state state)
+{
+	if (!inst) {
+		dprintk(CVP_ERR, "Invalid parameter %s\n", __func__);
+		return;
+	}
+	mutex_lock(&inst->lock);
+	if (inst->state == MSM_CVP_CORE_INVALID) {
+		dprintk(CVP_SESS,
+			"Inst: %pK is in bad state can't change state to %d\n",
+			inst, state);
+		goto exit;
+	}
+	dprintk(CVP_SESS, "Moved inst: %pK from state: %d to state: %d\n",
+		   inst, inst->state, state);
+	inst->state = state;
+exit:
+	mutex_unlock(&inst->lock);
+}
+
+static int signal_session_msg_receipt(enum hal_command_response cmd,
+		struct msm_cvp_inst *inst)
+{
+	if (!inst) {
+		dprintk(CVP_ERR, "Invalid(%pK) instance id\n", inst);
+		return -EINVAL;
+	}
+	if (IS_HAL_SESSION_CMD(cmd)) {
+		complete(&inst->completions[SESSION_MSG_INDEX(cmd)]);
+	} else {
+		dprintk(CVP_ERR, "Invalid inst cmd response: %d\n", cmd);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int wait_for_sess_signal_receipt(struct msm_cvp_inst *inst,
+	enum hal_command_response cmd)
+{
+	int rc = 0;
+	struct cvp_hfi_ops *ops_tbl;
+
+	if (!IS_HAL_SESSION_CMD(cmd)) {
+		dprintk(CVP_ERR, "Invalid inst cmd response: %d\n", cmd);
+		return -EINVAL;
+	}
+	ops_tbl = (struct cvp_hfi_ops *)(inst->core->dev_ops);
+	rc = wait_for_completion_timeout(
+		&inst->completions[SESSION_MSG_INDEX(cmd)],
+		msecs_to_jiffies(
+			inst->core->resources.msm_cvp_hw_rsp_timeout));
+	if (!rc) {
+		dprintk(CVP_WARN, "Wait interrupted or timed out: %d\n",
+				SESSION_MSG_INDEX(cmd));
+		if (inst->state != MSM_CVP_CORE_INVALID)
+			print_hfi_queue_info(ops_tbl);
+		rc = -ETIMEDOUT;
+	} else if (inst->state == MSM_CVP_CORE_INVALID) {
+		rc = -ECONNRESET;
+	} else {
+		rc = inst->error_code;
+		inst->prev_error_code = inst->error_code;
+		inst->error_code = CVP_ERR_NONE;
+	}
+	return rc;
+}
+
+static int wait_for_state(struct msm_cvp_inst *inst,
+	enum instance_state flipped_state,
+	enum instance_state desired_state,
+	enum hal_command_response hal_cmd)
+{
+	int rc = 0;
+
+	if (IS_ALREADY_IN_STATE(flipped_state, desired_state)) {
+		dprintk(CVP_INFO, "inst: %pK is already in state: %d\n",
+						inst, inst->state);
+		goto err_same_state;
+	}
+	dprintk(CVP_SESS, "Waiting for hal_cmd: %d\n", hal_cmd);
+	rc = wait_for_sess_signal_receipt(inst, hal_cmd);
+	if (!rc)
+		change_cvp_inst_state(inst, desired_state);
+err_same_state:
+	return rc;
+}
+
+static void handle_session_init_done(enum hal_command_response cmd, void *data)
+{
+	struct msm_cvp_cb_cmd_done *response = data;
+	struct msm_cvp_inst *inst = NULL;
+	struct msm_cvp_core *core;
+
+	if (!response) {
+		dprintk(CVP_ERR,
+				"Failed to get valid response for session init\n");
+		return;
+	}
+
+	core = cvp_driver->cvp_core;
+	inst = cvp_get_inst(cvp_driver->cvp_core, response->session_id);
+	if (!inst) {
+		dprintk(CVP_WARN, "%s:Got a response for an inactive session %#x\n",
+				__func__, response->session_id);
+		list_for_each_entry(inst, &core->instances, list)
+			cvp_print_inst(CVP_WARN, inst);
+		return;
+	}
+
+	if (response->status)
+		dprintk(CVP_ERR,
+			"Session %#x init err response from FW : 0x%x\n",
+			hash32_ptr(inst->session), response->status);
+	else
+		dprintk(CVP_SESS, "%s: cvp session %#x\n", __func__,
+			hash32_ptr(inst->session));
+
+	inst->error_code = response->status;
+	signal_session_msg_receipt(cmd, inst);
+	cvp_put_inst(inst);
+}
+
+static void handle_event_change(enum hal_command_response cmd, void *data)
+{
+	dprintk(CVP_WARN, "%s is not supported on CVP!\n", __func__);
+}
+
+static void handle_session_dump_notify(enum hal_command_response cmd,
+	void *data)
+{
+	struct msm_cvp_cb_cmd_done *response = data;
+	struct msm_cvp_inst *inst;
+	unsigned long flags = 0;
+
+	if (!response) {
+		dprintk(CVP_ERR,
+			"Failed to get valid response during dump notify\n");
+		return;
+	}
+
+	inst = cvp_get_inst(cvp_driver->cvp_core, response->session_id);
+	if (!inst) {
+		dprintk(CVP_WARN, "%s:Got a response for an inactive session\n",
+				__func__);
+		return;
+	}
+	spin_lock_irqsave(&inst->event_handler.lock, flags);
+	inst->event_handler.event = CVP_DUMP_EVENT;
+	spin_unlock_irqrestore(&inst->event_handler.lock, flags);
+	wake_up_all(&inst->event_handler.wq);
+	dprintk(CVP_ERR,"Event_handler woken up\n");
+	cvp_put_inst(inst);
+}
+
+static void handle_release_res_done(enum hal_command_response cmd, void *data)
+{
+	struct msm_cvp_cb_cmd_done *response = data;
+	struct msm_cvp_inst *inst;
+
+	if (!response) {
+		dprintk(CVP_ERR,
+			"Failed to get valid response for release resource\n");
+		return;
+	}
+
+	inst = cvp_get_inst(cvp_driver->cvp_core, response->session_id);
+	if (!inst) {
+		dprintk(CVP_WARN, "%s:Got a response for an inactive session\n",
+				__func__);
+		return;
+	}
+
+	signal_session_msg_receipt(cmd, inst);
+	cvp_put_inst(inst);
+}
+
+static void handle_session_ctrl(enum hal_command_response cmd, void *data)
+{
+	struct msm_cvp_cb_cmd_done *response = data;
+	struct msm_cvp_inst *inst;
+
+	if (!response) {
+		dprintk(CVP_ERR,
+			"Failed to get valid response for session ctrl\n");
+		return;
+	}
+
+	inst = cvp_get_inst(cvp_driver->cvp_core, response->session_id);
+	if (!inst) {
+		dprintk(CVP_WARN, "%s:Got a response for an inactive session\n",
+				__func__);
+		return;
+	}
+
+	if (response->status)
+		dprintk(CVP_ERR, "HFI sess ctrl err 0x%x HAL cmd %d\n",
+			response->status, cmd);
+
+	inst->error_code = response->status;
+	signal_session_msg_receipt(cmd, inst);
+	cvp_put_inst(inst);
+}
+
+static void handle_session_error(enum hal_command_response cmd, void *data)
+{
+	struct msm_cvp_cb_cmd_done *response = data;
+	struct cvp_hfi_ops *ops_tbl = NULL;
+	struct msm_cvp_inst *inst = NULL;
+	//unsigned long flags = 0;
+	//int i;
+
+	if (!response) {
+		dprintk(CVP_ERR,
+			"Failed to get valid response for session error\n");
+		return;
+	}
+
+	inst = cvp_get_inst(cvp_driver->cvp_core, response->session_id);
+	if (!inst) {
+		dprintk(CVP_WARN, "%s: response for an inactive session\n",
+				__func__);
+		return;
+	}
+
+	ops_tbl = inst->core->dev_ops;
+	dprintk(CVP_ERR, "Sess error 0x%x received for inst %pK sess %x\n",
+		response->status, inst, hash32_ptr(inst->session));
+	cvp_print_inst(CVP_WARN, inst);
+
+	print_hfi_queue_info(ops_tbl);
+	//if (inst->state != MSM_CVP_CORE_INVALID) {
+	//	change_cvp_inst_state(inst, MSM_CVP_CORE_INVALID);
+	//	if (cvp_clean_session_queues(inst))
+	//		dprintk(CVP_WARN, "Failed to clean sess queues\n");
+	//	for (i = 0; i < ARRAY_SIZE(inst->completions); i++)
+	//		complete(&inst->completions[i]);
+	//	spin_lock_irqsave(&inst->event_handler.lock, flags);
+	//	inst->event_handler.event = CVP_SSR_EVENT;
+	//	spin_unlock_irqrestore(
+	//		&inst->event_handler.lock, flags);
+	//	wake_up_all(&inst->event_handler.wq);
+	//}
+
+	cvp_put_inst(inst);
+}
+
+void handle_sys_error(enum hal_command_response cmd, void *data)
+{
+	struct msm_cvp_cb_cmd_done *response = data;
+	struct msm_cvp_core *core = NULL;
+	struct cvp_hfi_ops *ops_tbl = NULL;
+	struct iris_hfi_device *hfi_device;
+	struct msm_cvp_inst *inst = NULL;
+	int i, rc = 0;
+	unsigned long flags = 0;
+	enum cvp_core_state cur_state;
+
+	if (!response) {
+		dprintk(CVP_ERR,
+			"Failed to get valid response for sys error\n");
+		return;
+	}
+
+	core = cvp_driver->cvp_core;
+	if (!core) {
+		dprintk(CVP_ERR,
+				"Got SYS_ERR but unable to identify core\n");
+		return;
+	}
+	ops_tbl = core->dev_ops;
+
+	mutex_lock(&core->lock);
+	core->ssr_count++;
+	if (core->state == CVP_CORE_UNINIT) {
+		dprintk(CVP_ERR,
+			"%s: Core %pK already moved to state %d\n",
+			 __func__, core, core->state);
+		mutex_unlock(&core->lock);
+		return;
+	}
+
+	cur_state = core->state;
+	core->state = CVP_CORE_UNINIT;
+	dprintk(CVP_WARN, "SYS_ERROR from core %pK cmd %x total: %d\n",
+			core, cmd, core->ssr_count);
+	mutex_lock(&core->clk_lock);
+	hfi_device = ops_tbl->hfi_device_data;
+	call_hfi_op(ops_tbl, flush_debug_queue, ops_tbl->hfi_device_data);
+	if (hfi_device->error == CVP_ERR_NOC_ERROR) {
+		dprintk(CVP_WARN, "Got NOC error");
+		msm_cvp_noc_error_info(core);
+	}
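+	/* Invalidate every active instance, flush its session queues and
+	 * wake all waiters so clients observe the SSR event.
+	 */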
+	list_for_each_entry(inst, &core->instances, list) {
+		cvp_print_inst(CVP_WARN, inst);
+		if (inst->state != MSM_CVP_CORE_INVALID) {
+			change_cvp_inst_state(inst, MSM_CVP_CORE_INVALID);
+			if (cvp_clean_session_queues(inst))
+				dprintk(CVP_ERR, "Failed to clean fences\n");
+			for (i = 0; i < ARRAY_SIZE(inst->completions); i++)
+				complete(&inst->completions[i]);
+			spin_lock_irqsave(&inst->event_handler.lock, flags);
+			inst->event_handler.event = CVP_SSR_EVENT;
+			spin_unlock_irqrestore(
+				&inst->event_handler.lock, flags);
+			wake_up_all(&inst->event_handler.wq);
+		}
+
+		if (!core->trigger_ssr)
+			if (hfi_device->error != CVP_ERR_NOC_ERROR)
+				msm_cvp_print_inst_bufs(inst, false);
+	}
+
+	/* handle the hw error before core released to get full debug info */
+	msm_cvp_handle_hw_error(core);
+
+	dprintk(CVP_CORE, "Calling core_release\n");
+	rc = call_hfi_op(ops_tbl, core_release, ops_tbl->hfi_device_data);
+	if (rc) {
+		dprintk(CVP_ERR, "core_release failed\n");
+		core->state = cur_state;
+		mutex_unlock(&core->clk_lock);
+		mutex_unlock(&core->lock);
+		return;
+	}
+	mutex_unlock(&core->clk_lock);
+	mutex_unlock(&core->lock);
+
+	dprintk(CVP_WARN, "SYS_ERROR handled.\n");
+	BUG_ON(core->resources.fatal_ssr);
+}
+
+void msm_cvp_comm_session_clean(struct msm_cvp_inst *inst)
+{
+	int rc = 0;
+	struct cvp_hfi_ops *ops_tbl = NULL;
+
+	if (!inst || !inst->core || !inst->core->dev_ops) {
+		dprintk(CVP_ERR, "%s invalid params\n", __func__);
+		return;
+	}
+	if (!inst->session || inst->session == (void *)0xdeadbeef) {
+		dprintk(CVP_SESS, "%s: inst %pK session already cleaned\n",
+			__func__, inst);
+		return;
+	}
+
+	ops_tbl = inst->core->dev_ops;
+	mutex_lock(&inst->lock);
+	dprintk(CVP_SESS, "%s: inst %pK\n", __func__, inst);
+	rc = call_hfi_op(ops_tbl, session_clean,
+			(void *)inst->session);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"Session clean failed :%pK\n", inst);
+	}
+	inst->session = NULL;
+	mutex_unlock(&inst->lock);
+}
+
+static void handle_session_close(enum hal_command_response cmd, void *data)
+{
+	struct msm_cvp_cb_cmd_done *response = data;
+	struct msm_cvp_inst *inst;
+	struct msm_cvp_core *core;
+
+	if (!response) {
+		dprintk(CVP_ERR,
+			"Failed to get valid response for session close\n");
+		return;
+	}
+
+	core = cvp_driver->cvp_core;
+	inst = cvp_get_inst(cvp_driver->cvp_core, response->session_id);
+	if (!inst) {
+		dprintk(CVP_WARN, "%s: response for an inactive session %#x\n",
+				__func__, response->session_id);
+
+		list_for_each_entry(inst, &core->instances, list)
+			cvp_print_inst(CVP_WARN, inst);
+
+		return;
+	}
+
+	if (response->status)
+		dprintk(CVP_ERR, "HFI sess close fail 0x%x\n",
+			response->status);
+
+	inst->error_code = response->status;
+	signal_session_msg_receipt(cmd, inst);
+	show_stats(inst);
+	cvp_put_inst(inst);
+}
+
+void cvp_handle_cmd_response(enum hal_command_response cmd, void *data)
+{
+	dprintk(CVP_HFI, "Command response = %d\n", cmd);
+	switch (cmd) {
+	case HAL_SYS_INIT_DONE:
+		handle_sys_init_done(cmd, data);
+		break;
+	case HAL_SYS_RELEASE_RESOURCE_DONE:
+		handle_sys_release_res_done(cmd, data);
+		break;
+	case HAL_SESSION_INIT_DONE:
+		handle_session_init_done(cmd, data);
+		break;
+	case HAL_SESSION_RELEASE_RESOURCE_DONE:
+		handle_release_res_done(cmd, data);
+		break;
+	case HAL_SESSION_END_DONE:
+	case HAL_SESSION_ABORT_DONE:
+		handle_session_close(cmd, data);
+		break;
+	case HAL_SESSION_EVENT_CHANGE:
+		handle_event_change(cmd, data);
+		break;
+	case HAL_SESSION_FLUSH_DONE:
+	case HAL_SESSION_START_DONE:
+	case HAL_SESSION_STOP_DONE:
+		handle_session_ctrl(cmd, data);
+		break;
+	case HAL_SYS_WATCHDOG_TIMEOUT:
+	case HAL_SYS_ERROR:
+		handle_sys_error(cmd, data);
+		break;
+	case HAL_SESSION_ERROR:
+		handle_session_error(cmd, data);
+		break;
+	case HAL_SESSION_SET_BUFFER_DONE:
+		handle_session_set_buf_done(cmd, data);
+		break;
+	case HAL_SESSION_RELEASE_BUFFER_DONE:
+		handle_session_release_buf_done(cmd, data);
+		break;
+	case HAL_SESSION_DUMP_NOTIFY:
+		handle_session_dump_notify(cmd, data);
+		break;
+	default:
+		dprintk(CVP_HFI, "response unhandled: %d\n", cmd);
+		break;
+	}
+}
+
+static inline enum msm_cvp_thermal_level msm_comm_cvp_thermal_level(int level)
+{
+	switch (level) {
+	case 0:
+		return CVP_THERMAL_NORMAL;
+	case 1:
+		return CVP_THERMAL_LOW;
+	case 2:
+		return CVP_THERMAL_HIGH;
+	default:
+		return CVP_THERMAL_CRITICAL;
+	}
+}
+
+static int msm_comm_session_abort(struct msm_cvp_inst *inst)
+{
+	int rc = 0, abort_completion = 0;
+	struct cvp_hfi_ops *ops_tbl;
+
+	if (!inst || !inst->core || !inst->core->dev_ops) {
+		dprintk(CVP_ERR, "%s invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	ops_tbl = inst->core->dev_ops;
+	print_hfi_queue_info(ops_tbl);
+	/* Hard-coded early return; remove the "if (1)" below to exercise
+	 * the session-abort path for watchdog timeout testing.
+	 */
+	if (1)
+		return 0;
+
+	abort_completion = SESSION_MSG_INDEX(HAL_SESSION_ABORT_DONE);
+
+	dprintk(CVP_WARN, "%s: inst %pK session %x\n", __func__,
+		inst, hash32_ptr(inst->session));
+	rc = call_hfi_op(ops_tbl, session_abort, (void *)inst->session);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"%s session_abort failed rc: %d\n", __func__, rc);
+		goto exit;
+	}
+	rc = wait_for_completion_timeout(
+			&inst->completions[abort_completion],
+			msecs_to_jiffies(
+				inst->core->resources.msm_cvp_hw_rsp_timeout));
+	if (!rc) {
+		dprintk(CVP_ERR, "%s: inst %pK session %x abort timed out\n",
+				__func__, inst, hash32_ptr(inst->session));
+		print_hfi_queue_info(ops_tbl);
+		msm_cvp_comm_generate_sys_error(inst);
+		rc = -EBUSY;
+	} else {
+		rc = 0;
+	}
+exit:
+	return rc;
+}
+
+void msm_cvp_comm_handle_thermal_event(void)
+{
+	dprintk(CVP_WARN, "deprecated %s called\n", __func__);
+}
+
+int msm_cvp_comm_check_core_init(struct msm_cvp_core *core)
+{
+	int rc = 0;
+	struct cvp_hfi_ops *ops_tbl;
+
+	mutex_lock(&core->lock);
+	if (core->state >= CVP_CORE_INIT_DONE) {
+		dprintk(CVP_INFO, "CVP core: is already in state: %d\n",
+				core->state);
+		goto exit;
+	}
+	dprintk(CVP_CORE, "Waiting for SYS_INIT_DONE\n");
+	rc = wait_for_completion_timeout(
+		&core->completions[SYS_MSG_INDEX(HAL_SYS_INIT_DONE)],
+		msecs_to_jiffies(core->resources.msm_cvp_hw_rsp_timeout));
+	if (!rc) {
+		dprintk(CVP_ERR, "%s: Wait interrupted or timed out: %d\n",
+				__func__, SYS_MSG_INDEX(HAL_SYS_INIT_DONE));
+		ops_tbl = core->dev_ops;
+		print_hfi_queue_info(ops_tbl);
+		rc = -EIO;
+		goto exit;
+	} else {
+		core->state = CVP_CORE_INIT_DONE;
+		rc = 0;
+	}
+	dprintk(CVP_CORE, "SYS_INIT_DONE!!!\n");
+exit:
+	mutex_unlock(&core->lock);
+	return rc;
+}
+
+static int msm_comm_init_core_done(struct msm_cvp_inst *inst)
+{
+	int rc = 0;
+
+	rc = msm_cvp_comm_check_core_init(inst->core);
+	if (rc) {
+		dprintk(CVP_ERR, "%s - failed to initialize core\n", __func__);
+		msm_cvp_comm_generate_sys_error(inst);
+		return rc;
+	}
+	change_cvp_inst_state(inst, MSM_CVP_CORE_INIT_DONE);
+	return rc;
+}
+
+static int msm_comm_init_core(struct msm_cvp_inst *inst)
+{
+	int rc = 0;
+	struct cvp_hfi_ops *ops_tbl;
+	struct msm_cvp_core *core;
+
+	if (!inst || !inst->core || !inst->core->dev_ops)
+		return -EINVAL;
+
+	core = inst->core;
+	ops_tbl = core->dev_ops;
+	mutex_lock(&core->lock);
+	if (core->state >= CVP_CORE_INIT) {
+		dprintk(CVP_CORE, "CVP core: is already in state: %d\n",
+				core->state);
+		goto core_already_inited;
+	}
+	if (!core->capabilities) {
+		core->capabilities = kcalloc(CVP_MAX_SESSIONS,
+				sizeof(struct msm_cvp_capability), GFP_KERNEL);
+		if (!core->capabilities) {
+			dprintk(CVP_ERR,
+				"%s: failed to allocate capabilities\n",
+				__func__);
+			rc = -ENOMEM;
+			goto fail_cap_alloc;
+		}
+	} else {
+		dprintk(CVP_WARN,
+			"%s: capabilities memory is expected to be freed\n",
+			__func__);
+	}
+	dprintk(CVP_CORE, "%s: core %pK\n", __func__, core);
+	rc = call_hfi_op(ops_tbl, core_init, ops_tbl->hfi_device_data);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to init core\n");
+		goto fail_core_init;
+	}
+	core->state = CVP_CORE_INIT;
+	core->trigger_ssr = false;
+
+core_already_inited:
+	change_cvp_inst_state(inst, MSM_CVP_CORE_INIT);
+	mutex_unlock(&core->lock);
+
+	return rc;
+
+fail_core_init:
+	kfree(core->capabilities);
+fail_cap_alloc:
+	core->capabilities = NULL;
+	core->state = CVP_CORE_UNINIT;
+	mutex_unlock(&core->lock);
+	return rc;
+}
+
+int msm_cvp_deinit_core(struct msm_cvp_inst *inst)
+{
+	struct msm_cvp_core *core;
+	struct cvp_hfi_ops *ops_tbl;
+
+	if (!inst || !inst->core || !inst->core->dev_ops) {
+		dprintk(CVP_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	core = inst->core;
+	ops_tbl = core->dev_ops;
+
+	mutex_lock(&core->lock);
+	change_cvp_inst_state(inst, MSM_CVP_CORE_UNINIT);
+	mutex_unlock(&core->lock);
+	return 0;
+}
+
+static int msm_comm_session_init_done(int flipped_state,
+	struct msm_cvp_inst *inst)
+{
+	int rc;
+
+	dprintk(CVP_SESS, "inst %pK: waiting for session init done\n", inst);
+	rc = wait_for_state(inst, flipped_state, MSM_CVP_OPEN_DONE,
+			HAL_SESSION_INIT_DONE);
+	if (rc) {
+		dprintk(CVP_ERR, "Session init failed for inst %pK\n", inst);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int msm_comm_session_init(int flipped_state,
+	struct msm_cvp_inst *inst)
+{
+	int rc = 0;
+	struct cvp_hfi_ops *ops_tbl;
+
+	if (!inst || !inst->core || !inst->core->dev_ops) {
+		dprintk(CVP_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+	ops_tbl = inst->core->dev_ops;
+
+	if (IS_ALREADY_IN_STATE(flipped_state, MSM_CVP_OPEN)) {
+		dprintk(CVP_INFO, "inst: %pK is already in state: %d\n",
+						inst, inst->state);
+		goto exit;
+	}
+
+	dprintk(CVP_SESS, "%s: inst %pK\n", __func__, inst);
+	rc = call_hfi_op(ops_tbl, session_init, ops_tbl->hfi_device_data,
+			inst, &inst->session);
+
+	if (rc || !inst->session) {
+		dprintk(CVP_ERR,
+			"Failed to call session init for: %pK, %pK, %d\n",
+			inst->core->dev_ops, inst, inst->session_type);
+		rc = -EINVAL;
+		goto exit;
+	}
+	change_cvp_inst_state(inst, MSM_CVP_OPEN);
+
+exit:
+	return rc;
+}
+
+static int msm_comm_session_close(int flipped_state,
+			struct msm_cvp_inst *inst)
+{
+	int rc = 0;
+	struct cvp_hfi_ops *ops_tbl;
+
+	if (!inst || !inst->core || !inst->core->dev_ops) {
+		dprintk(CVP_ERR, "%s invalid params\n", __func__);
+		return -EINVAL;
+	}
+	if (IS_ALREADY_IN_STATE(flipped_state, MSM_CVP_CLOSE)) {
+		dprintk(CVP_INFO,
+			"inst: %pK is already in state: %d\n",
+						inst, inst->state);
+		goto exit;
+	}
+	ops_tbl = inst->core->dev_ops;
+	dprintk(CVP_SESS, "%s: inst %pK\n", __func__, inst);
+	rc = call_hfi_op(ops_tbl, session_end, (void *) inst->session);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"Failed to send close\n");
+		goto exit;
+	}
+	change_cvp_inst_state(inst, MSM_CVP_CLOSE);
+exit:
+	return rc;
+}
+
+int msm_cvp_comm_suspend(void)
+{
+	struct cvp_hfi_ops *ops_tbl;
+	struct msm_cvp_core *core;
+	int rc = 0;
+
+	core = cvp_driver->cvp_core;
+	if (!core) {
+		dprintk(CVP_ERR,
+			"%s: Failed to find cvp core\n", __func__);
+		return -EINVAL;
+	}
+
+	ops_tbl = (struct cvp_hfi_ops *)core->dev_ops;
+	if (!ops_tbl) {
+		dprintk(CVP_ERR, "%s Invalid device handle\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = call_hfi_op(ops_tbl, suspend, ops_tbl->hfi_device_data);
+
+	return rc;
+}
+
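+/*
+ * Derive the entry point for the state machine in msm_cvp_comm_try_state():
+ * an instance still at CORE_INIT_DONE with no session to tear down jumps
+ * straight to CORE_UNINIT, and an INVALID instance resumes from CLOSE.
+ */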
+static int get_flipped_state(int present_state, int desired_state)
+{
+	int flipped_state;
+
+	if (present_state == MSM_CVP_CORE_INIT_DONE && desired_state > MSM_CVP_CLOSE)
+		flipped_state = MSM_CVP_CORE_UNINIT;
+	else if (present_state == MSM_CVP_CORE_INVALID)
+		flipped_state = MSM_CVP_CLOSE;
+	else
+		flipped_state = present_state;
+
+	return flipped_state;
+}
+
+static char state_names[MSM_CVP_CORE_INVALID + 1][32] = {
+	"Invlid entry",
+	"CORE_UNINIT_DONE",
+	"CORE_INIT",
+	"CORE_INIT_DONE",
+	"OPEN",
+	"OPEN_DONE",
+	"CLOSE",
+	"CLOSE_DONE",
+	"CORE_UNINIT",
+	"CORE_INVALID"
+};
+
+int msm_cvp_comm_try_state(struct msm_cvp_inst *inst, int state)
+{
+	int rc = 0;
+	int flipped_state;
+	struct msm_cvp_core *core;
+
+	core = cvp_driver->cvp_core;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s: invalid params %pK", __func__, inst);
+		return -EINVAL;
+	}
+
+	mutex_lock(&inst->sync_lock);
+	if (inst->state == MSM_CVP_CORE_INVALID &&
+				core->state == CVP_CORE_UNINIT) {
+		dprintk(CVP_ERR, "%s: inst %pK & core are in invalid\n",
+			__func__, inst);
+		mutex_unlock(&inst->sync_lock);
+		return -EINVAL;
+	}
+
+	flipped_state = get_flipped_state(inst->state, state);
+	dprintk(CVP_SESS,
+		"inst: %pK (%#x) cur_state %s dest_state %s flipped_state = %s\n",
+		inst, hash32_ptr(inst->session), state_names[inst->state],
+		state_names[state], state_names[flipped_state]);
+
+	switch (flipped_state) {
+	case MSM_CVP_CORE_UNINIT_DONE:
+	case MSM_CVP_CORE_INIT:
+		rc = msm_comm_init_core(inst);
+		if (rc || state <= get_flipped_state(inst->state, state))
+			break;
+		/* defined in linux/compiler_attributes.h */
+		fallthrough;
+	case MSM_CVP_CORE_INIT_DONE:
+		rc = msm_comm_init_core_done(inst);
+		if (rc || state <= get_flipped_state(inst->state, state))
+			break;
+		fallthrough;
+	case MSM_CVP_OPEN:
+		rc = msm_comm_session_init(flipped_state, inst);
+		if (rc || state <= get_flipped_state(inst->state, state))
+			break;
+		fallthrough;
+	case MSM_CVP_OPEN_DONE:
+		rc = msm_comm_session_init_done(flipped_state, inst);
+		if (rc || state <= get_flipped_state(inst->state, state))
+			break;
+		fallthrough;
+	case MSM_CVP_CLOSE:
+		dprintk(CVP_INFO, "to CVP_CLOSE state\n");
+		rc = msm_comm_session_close(flipped_state, inst);
+		if (rc || state <= get_flipped_state(inst->state, state))
+			break;
+		fallthrough;
+	case MSM_CVP_CLOSE_DONE:
+		dprintk(CVP_INFO, "to CVP_CLOSE_DONE state\n");
+		rc = wait_for_state(inst, flipped_state, MSM_CVP_CLOSE_DONE,
+				HAL_SESSION_END_DONE);
+		if (rc || state <= get_flipped_state(inst->state, state))
+			break;
+		msm_cvp_comm_session_clean(inst);
+		fallthrough;
+	case MSM_CVP_CORE_UNINIT:
+	case MSM_CVP_CORE_INVALID:
+		dprintk(CVP_INFO, "Sending core uninit\n");
+		rc = msm_cvp_deinit_core(inst);
+		if (rc || state <= get_flipped_state(inst->state, state))
+			break;
+		fallthrough;
+	default:
+		dprintk(CVP_ERR, "State not recognized\n");
+		rc = -EINVAL;
+		break;
+	}
+
+	mutex_unlock(&inst->sync_lock);
+
+	if (rc == -ETIMEDOUT) {
+		dprintk(CVP_ERR,
+				"Timedout move from state: %s to %s\n",
+				state_names[inst->state],
+				state_names[state]);
+		if (inst->state != MSM_CVP_CORE_INVALID)
+			msm_cvp_comm_kill_session(inst);
+	}
+	return rc;
+}
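+
+/*
+ * Usage sketch (mirrors the callers in msm_cvp_core.c): the fallthrough
+ * cascade above lets a single call walk through several states, so
+ * bring-up and teardown are each one request:
+ *
+ *	rc = msm_cvp_comm_try_state(inst, MSM_CVP_CORE_INIT_DONE);
+ *	...
+ *	rc = msm_cvp_comm_try_state(inst, MSM_CVP_CORE_UNINIT);
+ */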
+
+int msm_cvp_noc_error_info(struct msm_cvp_core *core)
+{
+	struct cvp_hfi_ops *ops_tbl;
+	static u32 last_fault_count;
+
+	if (!core || !core->dev_ops) {
+		dprintk(CVP_WARN, "%s: Invalid parameters: %pK\n",
+			__func__, core);
+		return -EINVAL;
+	}
+
+	if (!core->smmu_fault_count ||
+			core->smmu_fault_count == last_fault_count)
+		return 0;
+
+	last_fault_count = core->smmu_fault_count;
+	dprintk(CVP_ERR, "cvp ssr count %d %d %d\n", core->ssr_count,
+			core->resources.max_ssr_allowed,
+			core->smmu_fault_count);
+	ops_tbl = core->dev_ops;
+	call_hfi_op(ops_tbl, noc_error_info, ops_tbl->hfi_device_data);
+
+	if (core->smmu_fault_count >= core->resources.max_ssr_allowed)
+		BUG_ON(!core->resources.non_fatal_pagefaults);
+
+	return 0;
+}
+
+int msm_cvp_trigger_ssr(struct msm_cvp_core *core,
+	enum hal_ssr_trigger_type type)
+{
+	if (!core) {
+		dprintk(CVP_WARN, "%s: Invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+	core->ssr_type = type;
+	schedule_work(&core->ssr_work);
+	return 0;
+}
+
+void msm_cvp_ssr_handler(struct work_struct *work)
+{
+	int rc;
+	struct msm_cvp_core *core;
+	struct cvp_hfi_ops *ops_tbl;
+
+	if (!work)
+		return;
+
+	core = container_of(work, struct msm_cvp_core, ssr_work);
+	if (!core || !core->dev_ops) {
+		dprintk(CVP_ERR, "%s: Invalid params\n", __func__);
+		return;
+	}
+	ops_tbl = core->dev_ops;
+
+	if (core->ssr_type == SSR_SESSION_ABORT) {
+		struct msm_cvp_inst *inst = NULL, *s;
+
+		dprintk(CVP_ERR, "Session abort triggered\n");
+		list_for_each_entry(inst, &core->instances, list) {
+			dprintk(CVP_WARN,
+				"Session to abort: inst %#x ref %x\n",
+				inst, kref_read(&inst->kref));
+			break;
+		}
+
+		if (inst != NULL) {
+			s = cvp_get_inst_validate(inst->core, inst);
+			if (!s)
+				return;
+			print_hfi_queue_info(ops_tbl);
+			cvp_put_inst(s);
+		} else {
+			dprintk(CVP_WARN, "No active CVP session to abort\n");
+		}
+
+		return;
+	}
+
+send_again:
+	mutex_lock(&core->lock);
+	if (core->state == CVP_CORE_INIT_DONE) {
+		dprintk(CVP_WARN, "%s: ssr type %d at %llu\n", __func__,
+			core->ssr_type, get_aon_time());
+		/*
+		 * In current implementation user-initiated SSR triggers
+		 * a fatal error from hardware. However, there is no way
+		 * to know if fatal error is due to SSR or not. Handle
+		 * user SSR as non-fatal.
+		 */
+		core->trigger_ssr = true;
+		rc = call_hfi_op(ops_tbl, core_trigger_ssr,
+				ops_tbl->hfi_device_data, core->ssr_type);
+		if (rc) {
+			if (rc == -EAGAIN) {
+				core->trigger_ssr = false;
+				mutex_unlock(&core->lock);
+				usleep_range(500, 1000);
+				dprintk(CVP_WARN, "Retry ssr\n");
+				goto send_again;
+			}
+			dprintk(CVP_ERR, "%s: trigger_ssr failed\n",
+				__func__);
+			core->trigger_ssr = false;
+		}
+	} else {
+		dprintk(CVP_WARN, "%s: cvp core %pK not initialized\n",
+			__func__, core);
+	}
+	mutex_unlock(&core->lock);
+}
+
+void msm_cvp_comm_generate_sys_error(struct msm_cvp_inst *inst)
+{
+	struct msm_cvp_core *core;
+	enum hal_command_response cmd = HAL_SYS_ERROR;
+	struct msm_cvp_cb_cmd_done response = {0};
+
+	if (!inst || !inst->core) {
+		dprintk(CVP_ERR, "%s: invalid input parameters\n", __func__);
+		return;
+	}
+	dprintk(CVP_WARN, "%s: inst %pK\n", __func__, inst);
+	core = inst->core;
+	handle_sys_error(cmd, (void *) &response);
+
+}
+
+int msm_cvp_comm_kill_session(struct msm_cvp_inst *inst)
+{
+	int rc = 0;
+	unsigned long flags = 0;
+
+	if (!inst || !inst->core || !inst->core->dev_ops) {
+		dprintk(CVP_ERR, "%s: invalid input parameters\n", __func__);
+		return -EINVAL;
+	} else if (!inst->session || inst->session == (void *)0xdeadbeef) {
+		dprintk(CVP_ERR, "%s: no session to kill for inst %pK\n",
+			__func__, inst);
+		return 0;
+	}
+	dprintk(CVP_WARN, "%s: inst %pK, session %x state %d\n", __func__,
+		inst, hash32_ptr(inst->session), inst->state);
+	/*
+	 * We're internally forcibly killing the session, if fw is aware of
+	 * the session send session_abort to firmware to clean up and release
+	 * the session, else just kill the session inside the driver.
+	 */
+	if (inst->state >= MSM_CVP_OPEN_DONE &&
+			inst->state < MSM_CVP_CLOSE_DONE) {
+		msm_comm_session_abort(inst);
+		change_cvp_inst_state(inst, MSM_CVP_CORE_INVALID);
+	}
+
+	if (inst->state >= MSM_CVP_CORE_UNINIT) {
+		spin_lock_irqsave(&inst->event_handler.lock, flags);
+		inst->event_handler.event = CVP_SSR_EVENT;
+		spin_unlock_irqrestore(&inst->event_handler.lock, flags);
+		wake_up_all(&inst->event_handler.wq);
+	}
+
+	return rc;
+}
+
+static int set_internal_buf_on_fw(struct msm_cvp_inst *inst,
+				struct msm_cvp_smem *handle)
+{
+	struct cvp_hfi_ops *ops_tbl;
+	int rc = 0;
+	u32 iova;
+	u32 size;
+
+	if (!inst || !inst->core || !inst->core->dev_ops || !handle) {
+		dprintk(CVP_ERR, "%s - invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	ops_tbl = inst->core->dev_ops;
+
+	iova = handle->device_addr;
+	size = handle->size;
+
+	dprintk(CVP_SESS, "%s: allocated ARP buffer : %x\n", __func__, iova);
+
+	rc = call_hfi_op(ops_tbl, session_set_buffers,
+			(void *) inst->session, iova, size);
+	if (rc) {
+		dprintk(CVP_ERR, "cvp_session_set_buffers failed\n");
+		return rc;
+	}
+	return 0;
+}
+
+/* Set ARP buffer for CVP firmware to handle concurrency */
+int cvp_comm_set_arp_buffers(struct msm_cvp_inst *inst)
+{
+	int rc = 0;
+	struct cvp_internal_buf *buf;
+
+	if (!inst || !inst->core || !inst->core->dev_ops) {
+		dprintk(CVP_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	buf = cvp_allocate_arp_bufs(inst, ARP_BUF_SIZE);
+	if (!buf) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	rc = set_internal_buf_on_fw(inst, buf->smem);
+	if (rc)
+		goto error;
+
+	rc = wait_for_sess_signal_receipt(inst, HAL_SESSION_SET_BUFFER_DONE);
+	if (rc) {
+		dprintk(CVP_WARN, "wait for set_buffer_done timeout %d\n", rc);
+		goto error;
+	}
+
+	return rc;
+
+error:
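+	/* On -ENOMEM the ARP buffer was never allocated, so skip the release */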
+	if (rc != -ENOMEM)
+		cvp_release_arp_buffers(inst);
+	return rc;
+}
+
+
+bool is_cvp_inst_valid(struct msm_cvp_inst *inst)
+{
+	struct msm_cvp_core *core;
+	struct msm_cvp_inst *sess;
+
+	core = cvp_driver->cvp_core;
+	if (!core)
+		return false;
+
+	mutex_lock(&core->lock);
+	list_for_each_entry(sess, &core->instances, list) {
+		if (inst == sess) {
+			if (kref_read(&inst->kref)) {
+				mutex_unlock(&core->lock);
+				return true;
+			}
+		}
+	}
+	mutex_unlock(&core->lock);
+	return false;
+}
+
+int cvp_print_inst(u32 tag, struct msm_cvp_inst *inst)
+{
+	if (!inst) {
+		dprintk(CVP_ERR, "%s invalid inst %pK\n", __func__, inst);
+		return -EINVAL;
+	}
+
+	dprintk(tag, "%s inst stype %d %pK id = %#x ptype %#x prio %#x secure %#x kmask %#x dmask %#x, kref %#x state %#x\n",
+		inst->proc_name, inst->session_type, inst, hash32_ptr(inst->session),
+		inst->prop.type, inst->prop.priority, inst->prop.is_secure,
+		inst->prop.kernel_mask, inst->prop.dsp_mask,
+		kref_read(&inst->kref), inst->state);
+
+	return 0;
+}

+ 36 - 0
qcom/opensource/eva-kernel/msm/eva/msm_cvp_common.h

@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ */
+
+
+#ifndef _MSM_CVP_COMMON_H_
+#define _MSM_CVP_COMMON_H_
+#include "msm_cvp_internal.h"
+
+void cvp_put_inst(struct msm_cvp_inst *inst);
+struct msm_cvp_inst *cvp_get_inst(struct msm_cvp_core *core,
+		void *session_id);
+struct msm_cvp_inst *cvp_get_inst_validate(struct msm_cvp_core *core,
+		void *session_id);
+bool is_cvp_inst_valid(struct msm_cvp_inst *inst);
+void cvp_change_inst_state(struct msm_cvp_inst *inst,
+		enum instance_state state);
+int msm_cvp_comm_try_state(struct msm_cvp_inst *inst, int state);
+int msm_cvp_deinit_core(struct msm_cvp_inst *inst);
+int msm_cvp_comm_suspend(void);
+void msm_cvp_comm_session_clean(struct msm_cvp_inst *inst);
+int msm_cvp_comm_kill_session(struct msm_cvp_inst *inst);
+void msm_cvp_comm_generate_sys_error(struct msm_cvp_inst *inst);
+void handle_sys_error(enum hal_command_response cmd, void *data);
+int msm_cvp_comm_smem_cache_operations(struct msm_cvp_inst *inst,
+		struct msm_cvp_smem *mem, enum smem_cache_ops cache_ops);
+int msm_cvp_comm_check_core_init(struct msm_cvp_core *core);
+int wait_for_sess_signal_receipt(struct msm_cvp_inst *inst,
+	enum hal_command_response cmd);
+int cvp_comm_set_arp_buffers(struct msm_cvp_inst *inst);
+int cvp_comm_release_persist_buffers(struct msm_cvp_inst *inst);
+int msm_cvp_noc_error_info(struct msm_cvp_core *core);
+int cvp_print_inst(u32 tag, struct msm_cvp_inst *inst);
+unsigned long long get_aon_time(void);
+#endif

+ 544 - 0
qcom/opensource/eva-kernel/msm/eva/msm_cvp_core.c

@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/dma-direction.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include "msm_cvp_core.h"
+#include "msm_cvp_internal.h"
+#include "msm_cvp_debug.h"
+#include "msm_cvp.h"
+#include "msm_cvp_common.h"
+#include <linux/delay.h>
+#include "cvp_hfi_api.h"
+#include "msm_cvp_clocks.h"
+#include <linux/dma-buf.h>
+
+#define MAX_EVENTS 30
+#define NUM_CYCLES16X16_HCD_FRAME 95
+#define NUM_CYCLES16X16_DMM_FRAME 600
+#define NUM_CYCLES16X16_NCC_FRAME 400
+#define NUM_CYCLES16X16_DS_FRAME  80
+#define NUM_CYCLESFW_FRAME  1680000
+#define NUM_DMM_MAX_FEATURE_POINTS 500
+#define CYCLES_MARGIN_IN_POWEROF2 3
+
+static atomic_t nr_insts;
+
+void *cvp_kmem_cache_zalloc(struct cvp_kmem_cache *k, gfp_t flags)
+{
+	atomic_inc(&k->nr_objs);
+	return kmem_cache_zalloc(k->cache, flags);
+}
+
+void cvp_kmem_cache_free(struct cvp_kmem_cache *k, void *obj)
+{
+	atomic_dec(&k->nr_objs);
+	kmem_cache_free(k->cache, obj);
+}
+
+int msm_cvp_poll(void *instance, struct file *filp,
+		struct poll_table_struct *wait)
+{
+	return 0;
+}
+EXPORT_SYMBOL(msm_cvp_poll);
+
+int msm_cvp_private(void *cvp_inst, unsigned int cmd,
+		struct eva_kmd_arg *arg)
+{
+	int rc = 0;
+	struct msm_cvp_inst *inst = (struct msm_cvp_inst *)cvp_inst;
+
+	if (!inst || !arg) {
+		dprintk(CVP_ERR, "%s: invalid args\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = msm_cvp_handle_syscall(inst, arg);
+
+	return rc;
+}
+EXPORT_SYMBOL(msm_cvp_private);
+
+static bool msm_cvp_check_for_inst_overload(struct msm_cvp_core *core,
+		u32 *instance_count)
+{
+	u32 secure_instance_count = 0;
+	struct msm_cvp_inst *inst = NULL;
+	bool overload = false;
+
+	mutex_lock(&core->lock);
+	list_for_each_entry(inst, &core->instances, list) {
+		(*instance_count)++;
+		/* This flag is not updated yet for the current instance */
+		if (inst->flags & CVP_SECURE)
+			secure_instance_count++;
+	}
+	mutex_unlock(&core->lock);
+
+	/* Instance count includes current instance as well. */
+
+	if ((*instance_count >= core->resources.max_inst_count) ||
+		(secure_instance_count >=
+			core->resources.max_secure_inst_count))
+		overload = true;
+	return overload;
+}
+
+static int __init_session_queue(struct msm_cvp_inst *inst)
+{
+	spin_lock_init(&inst->session_queue.lock);
+	INIT_LIST_HEAD(&inst->session_queue.msgs);
+	inst->session_queue.msg_count = 0;
+	init_waitqueue_head(&inst->session_queue.wq);
+	inst->session_queue.state = QUEUE_ACTIVE;
+	return 0;
+}
+
+static void __init_fence_queue(struct msm_cvp_inst *inst)
+{
+	mutex_init(&inst->fence_cmd_queue.lock);
+	INIT_LIST_HEAD(&inst->fence_cmd_queue.wait_list);
+	INIT_LIST_HEAD(&inst->fence_cmd_queue.sched_list);
+	init_waitqueue_head(&inst->fence_cmd_queue.wq);
+	inst->fence_cmd_queue.state = QUEUE_ACTIVE;
+	inst->fence_cmd_queue.mode = OP_NORMAL;
+
+	spin_lock_init(&inst->session_queue_fence.lock);
+	INIT_LIST_HEAD(&inst->session_queue_fence.msgs);
+	inst->session_queue_fence.msg_count = 0;
+	init_waitqueue_head(&inst->session_queue_fence.wq);
+	inst->session_queue_fence.state = QUEUE_ACTIVE;
+}
+
+static void __deinit_fence_queue(struct msm_cvp_inst *inst)
+{
+	mutex_destroy(&inst->fence_cmd_queue.lock);
+	inst->fence_cmd_queue.state = QUEUE_INVALID;
+	inst->fence_cmd_queue.mode = OP_INVALID;
+}
+
+static void __deinit_session_queue(struct msm_cvp_inst *inst)
+{
+	struct cvp_session_msg *msg, *tmpmsg;
+
+	/* free all messages */
+	spin_lock(&inst->session_queue.lock);
+	list_for_each_entry_safe(msg, tmpmsg, &inst->session_queue.msgs, node) {
+		list_del_init(&msg->node);
+		cvp_kmem_cache_free(&cvp_driver->msg_cache, msg);
+	}
+	inst->session_queue.msg_count = 0;
+	inst->session_queue.state = QUEUE_INVALID;
+	spin_unlock(&inst->session_queue.lock);
+
+	wake_up_all(&inst->session_queue.wq);
+}
+
+struct msm_cvp_inst *msm_cvp_open(int session_type, struct task_struct *task)
+{
+	struct msm_cvp_inst *inst = NULL;
+	struct msm_cvp_core *core = NULL;
+	int rc = 0;
+	int i = 0;
+	u32 instance_count;
+
+	core = cvp_driver->cvp_core;
+	if (!core) {
+		dprintk(CVP_ERR, "%s CVP core not initialized\n", __func__);
+		goto err_invalid_core;
+	}
+
+	if (!msm_cvp_auto_pil && session_type == MSM_CVP_BOOT) {
+		dprintk(CVP_SESS, "Auto PIL disabled, bypass CVP init at boot");
+		goto err_invalid_core;
+	}
+
+	core->resources.max_inst_count = MAX_SUPPORTED_INSTANCES;
+	if (msm_cvp_check_for_inst_overload(core, &instance_count)) {
+		dprintk(CVP_ERR, "Instance num reached Max, rejecting session");
+		mutex_lock(&core->lock);
+		list_for_each_entry(inst, &core->instances, list)
+			cvp_print_inst(CVP_ERR, inst);
+		mutex_unlock(&core->lock);
+
+		return NULL;
+	}
+
+	inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+	if (!inst) {
+		dprintk(CVP_ERR, "Failed to allocate memory\n");
+		rc = -ENOMEM;
+		goto err_invalid_core;
+	}
+
+	pr_info(CVP_DBG_TAG "%s opening cvp instance: %pK type %d cnt %d\n",
+		"sess", task->comm, inst, session_type, instance_count);
+	mutex_init(&inst->sync_lock);
+	mutex_init(&inst->lock);
+	spin_lock_init(&inst->event_handler.lock);
+
+	INIT_MSM_CVP_LIST(&inst->persistbufs);
+	INIT_DMAMAP_CACHE(&inst->dma_cache);
+	INIT_MSM_CVP_LIST(&inst->cvpdspbufs);
+	INIT_MSM_CVP_LIST(&inst->cvpwnccbufs);
+	INIT_MSM_CVP_LIST(&inst->frames);
+
+	inst->cvpwnccbufs_num = 0;
+	inst->cvpwnccbufs_table = NULL;
+
+	init_waitqueue_head(&inst->event_handler.wq);
+
+	kref_init(&inst->kref);
+
+	inst->session_type = session_type;
+	inst->state = MSM_CVP_CORE_UNINIT_DONE;
+	inst->core = core;
+	inst->clk_data.min_freq = 0;
+	inst->clk_data.curr_freq = 0;
+	inst->clk_data.ddr_bw = 0;
+	inst->clk_data.sys_cache_bw = 0;
+	inst->clk_data.bitrate = 0;
+
+	for (i = SESSION_MSG_INDEX(SESSION_MSG_START);
+		i <= SESSION_MSG_INDEX(SESSION_MSG_END); i++) {
+		init_completion(&inst->completions[i]);
+	}
+
+	msm_cvp_session_init(inst);
+
+	__init_fence_queue(inst);
+	mutex_lock(&core->lock);
+	mutex_lock(&core->clk_lock);
+	list_add_tail(&inst->list, &core->instances);
+	atomic_inc(&nr_insts);
+	mutex_unlock(&core->clk_lock);
+	mutex_unlock(&core->lock);
+
+	rc = __init_session_queue(inst);
+	if (rc)
+		goto fail_init;
+
+	rc = msm_cvp_comm_try_state(inst, MSM_CVP_CORE_INIT_DONE);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"Failed to move cvp instance to init state\n");
+		goto fail_init;
+	}
+
+	inst->debugfs_root =
+		msm_cvp_debugfs_init_inst(inst, core->debugfs_root);
+	strlcpy(inst->proc_name, task->comm, TASK_COMM_LEN);
+
+	return inst;
+fail_init:
+	__deinit_session_queue(inst);
+	__deinit_fence_queue(inst);
+	mutex_lock(&core->lock);
+	list_del(&inst->list);
+	mutex_unlock(&core->lock);
+	mutex_destroy(&inst->sync_lock);
+	mutex_destroy(&inst->lock);
+
+	DEINIT_MSM_CVP_LIST(&inst->persistbufs);
+	DEINIT_DMAMAP_CACHE(&inst->dma_cache);
+	DEINIT_MSM_CVP_LIST(&inst->cvpdspbufs);
+	DEINIT_MSM_CVP_LIST(&inst->cvpwnccbufs);
+	DEINIT_MSM_CVP_LIST(&inst->frames);
+
+	kfree(inst);
+	inst = NULL;
+err_invalid_core:
+	return inst;
+}
+EXPORT_SYMBOL(msm_cvp_open);
+
+static void msm_cvp_clean_sess_queue(struct msm_cvp_inst *inst,
+		struct cvp_session_queue *sq)
+{
+	struct cvp_session_msg *mptr, *dummy;
+	u64 ktid = 0;
+
+check_again:
+	spin_lock(&sq->lock);
+	if (sq->msg_count && sq->state != QUEUE_ACTIVE) {
+		list_for_each_entry_safe(mptr, dummy, &sq->msgs, node) {
+			ktid = mptr->pkt.client_data.kdata;
+			if (ktid) {
+				list_del_init(&mptr->node);
+				sq->msg_count--;
+				break;
+			}
+		}
+	}
+	spin_unlock(&sq->lock);
+
+	if (ktid) {
+		msm_cvp_unmap_frame(inst, ktid);
+		cvp_kmem_cache_free(&cvp_driver->msg_cache, mptr);
+		mptr = NULL;
+		ktid = 0LL;
+		goto check_again;
+	}
+}
+
+static int msm_cvp_cleanup_instance(struct msm_cvp_inst *inst)
+{
+	bool empty;
+	int rc, max_retries;
+	struct msm_cvp_frame *frame;
+	struct cvp_session_queue *sq, *sqf;
+	struct cvp_hfi_ops *ops_tbl;
+	struct msm_cvp_inst *tmp;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	sqf = &inst->session_queue_fence;
+	sq = &inst->session_queue;
+
+	max_retries = inst->core->resources.msm_cvp_hw_rsp_timeout >> 5;
+	msm_cvp_session_queue_stop(inst);
+
+wait_dsp:
+	mutex_lock(&inst->cvpdspbufs.lock);
+	empty = list_empty(&inst->cvpdspbufs.list);
+	if (!empty && max_retries > 0) {
+		mutex_unlock(&inst->cvpdspbufs.lock);
+		usleep_range(2000, 3000);
+		max_retries--;
+		goto wait_dsp;
+	}
+	mutex_unlock(&inst->cvpdspbufs.lock);
+
+	if (!empty) {
+		dprintk(CVP_WARN, "Failed sess %pK DSP frame pending\n", inst);
+		/*
+		 * A session is either DSP session or CPU session, cannot have both
+		 * DSP and frame buffers
+		 */
+		goto stop_session;
+	}
+
+	max_retries = inst->core->resources.msm_cvp_hw_rsp_timeout >> 1;
+wait_frame:
+	mutex_lock(&inst->frames.lock);
+	empty = list_empty(&inst->frames.list);
+	if (!empty && max_retries > 0) {
+		mutex_unlock(&inst->frames.lock);
+		usleep_range(1000, 2000);
+		msm_cvp_clean_sess_queue(inst, sqf);
+		msm_cvp_clean_sess_queue(inst, sq);
+		max_retries--;
+		goto wait_frame;
+	}
+	mutex_unlock(&inst->frames.lock);
+
+	if (!empty) {
+		dprintk(CVP_WARN,
+			"Failed to process frames before session %pK close\n",
+			inst);
+		mutex_lock(&inst->frames.lock);
+		list_for_each_entry(frame, &inst->frames.list, list)
+			dprintk(CVP_WARN, "Unprocessed frame %08x ktid %llu\n",
+				frame->pkt_type, frame->ktid);
+		mutex_unlock(&inst->frames.lock);
+		inst->core->synx_ftbl->cvp_dump_fence_queue(inst);
+	}
+
+stop_session:
+	tmp = cvp_get_inst_validate(inst->core, inst);
+	if (!tmp) {
+		dprintk(CVP_ERR, "%s has a invalid session %llx\n",
+			__func__, inst);
+		goto exit;
+	}
+	spin_lock(&sq->lock);
+	if (sq->state == QUEUE_STOP) {
+		spin_unlock(&sq->lock);
+		dprintk(CVP_WARN,
+			"%s: Double stop session - inst %llx, sess %llx, %s of type %d\n",
+			__func__, inst, inst->session, inst->proc_name, inst->session_type);
+		goto release_arp;
+	}
+	spin_unlock(&sq->lock);
+
+	if (!empty) {
+		/* STOP SESSION to avoid SMMU fault after releasing ARP */
+		ops_tbl = inst->core->dev_ops;
+		rc = call_hfi_op(ops_tbl, session_stop, (void *)inst->session);
+		if (rc) {
+			dprintk(CVP_WARN, "%s: cannot stop session rc %d\n",
+				__func__, rc);
+			goto release_arp;
+		}
+
+		/* If stopping the session failed, releasing ARP later may cause an SMMU fault */
+		rc = wait_for_sess_signal_receipt(inst, HAL_SESSION_STOP_DONE);
+		if (rc) {
+			dprintk(CVP_WARN, "%s: wait for sess_stop fail, rc %d\n",
+					__func__, rc);
+		} else {
+			spin_lock(&sq->lock);
+			sq->state = QUEUE_STOP;
+			spin_unlock(&sq->lock);
+		}
+		/* Continue to release ARP anyway */
+	}
+release_arp:
+	cvp_put_inst(tmp);
+exit:
+	if (cvp_release_arp_buffers(inst))
+		dprintk_rl(CVP_WARN,
+			"Failed to release persist buffers\n");
+
+	if (inst->prop.type == HFI_SESSION_FD
+		|| inst->prop.type == HFI_SESSION_DMM) {
+		spin_lock(&inst->core->resources.pm_qos.lock);
+		if (inst->core->resources.pm_qos.off_vote_cnt > 0)
+			inst->core->resources.pm_qos.off_vote_cnt--;
+		else
+			dprintk(CVP_INFO, "%s Unexpected pm_qos off vote %d\n",
+				__func__,
+				inst->core->resources.pm_qos.off_vote_cnt);
+		spin_unlock(&inst->core->resources.pm_qos.lock);
+		ops_tbl = inst->core->dev_ops;
+		call_hfi_op(ops_tbl, pm_qos_update, ops_tbl->hfi_device_data);
+	}
+	return 0;
+}
+
+int msm_cvp_destroy(struct msm_cvp_inst *inst)
+{
+	struct msm_cvp_core *core;
+
+	if (!inst || !inst->core) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	core = inst->core;
+
+	if (inst->session_type == MSM_CVP_DSP) {
+		cvp_dsp_del_sess(inst->dsp_handle, inst);
+		inst->task = NULL;
+	}
+
+	/* Ensure no path has core->clk_lock and core->lock sequence */
+	mutex_lock(&core->lock);
+	mutex_lock(&core->clk_lock);
+	/* inst->list lives in core->instances */
+	list_del(&inst->list);
+	atomic_dec(&nr_insts);
+	mutex_unlock(&core->clk_lock);
+	mutex_unlock(&core->lock);
+
+	DEINIT_MSM_CVP_LIST(&inst->persistbufs);
+	DEINIT_DMAMAP_CACHE(&inst->dma_cache);
+	DEINIT_MSM_CVP_LIST(&inst->cvpdspbufs);
+	DEINIT_MSM_CVP_LIST(&inst->cvpwnccbufs);
+	DEINIT_MSM_CVP_LIST(&inst->frames);
+
+	kfree(inst->cvpwnccbufs_table);
+	inst->cvpwnccbufs_table = NULL;
+
+	mutex_destroy(&inst->sync_lock);
+	mutex_destroy(&inst->lock);
+
+	msm_cvp_debugfs_deinit_inst(inst);
+
+	__deinit_session_queue(inst);
+	__deinit_fence_queue(inst);
+	core->synx_ftbl->cvp_sess_deinit_synx(inst);
+
+	pr_info(CVP_DBG_TAG
+		"closed cvp instance: %pK session_id = %d type %d %d\n",
+		inst->proc_name, inst, hash32_ptr(inst->session),
+		inst->session_type, core->smem_leak_count);
+	inst->session = (void *)0xdeadbeef;
+	if (atomic_read(&inst->smem_count) > 0) {
+		dprintk(CVP_WARN, "Session closed with %d unmapped smems\n",
+			atomic_read(&inst->smem_count));
+		core->smem_leak_count += atomic_read(&inst->smem_count);
+	}
+	kfree(inst);
+	inst = NULL;
+	dprintk(CVP_SESS,
+		"sys-stat: nr_insts %d msgs %d, frames %d, bufs %d, smems %d\n",
+		atomic_read(&nr_insts),
+		atomic_read(&cvp_driver->msg_cache.nr_objs),
+		atomic_read(&cvp_driver->frame_cache.nr_objs),
+		atomic_read(&cvp_driver->buf_cache.nr_objs),
+		atomic_read(&cvp_driver->smem_cache.nr_objs));
+	return 0;
+}
+
+static void close_helper(struct kref *kref)
+{
+	struct msm_cvp_inst *inst;
+
+	if (!kref)
+		return;
+	inst = container_of(kref, struct msm_cvp_inst, kref);
+
+	msm_cvp_destroy(inst);
+}
+
+int msm_cvp_close(void *instance)
+{
+	struct msm_cvp_inst *inst = instance;
+	int rc = 0;
+
+	if (!inst || !inst->core) {
+		dprintk_rl(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	pr_info(CVP_DBG_TAG
+		"to close instance: %pK session_id = %d type %d state %d\n",
+		inst->proc_name, inst, hash32_ptr(inst->session),
+		inst->session_type, inst->state);
+
+	if (!inst->session) {
+		if (inst->state >= MSM_CVP_CORE_INIT_DONE &&
+			inst->state < MSM_CVP_OPEN_DONE) {
+			/* Session is not created, no ARP */
+			inst->state = MSM_CVP_CORE_UNINIT;
+			goto exit;
+		}
+		if (inst->state == MSM_CVP_CORE_UNINIT)
+			return -EINVAL;
+	}
+
+	if (inst->session_type != MSM_CVP_BOOT) {
+		rc = msm_cvp_cleanup_instance(inst);
+		if (rc)
+			return -EINVAL;
+		msm_cvp_session_deinit(inst);
+	}
+
+	rc = msm_cvp_comm_try_state(inst, MSM_CVP_CORE_UNINIT);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"Failed to move inst %pK to uninit state\n", inst);
+		rc = msm_cvp_deinit_core(inst);
+	}
+
+	msm_cvp_comm_session_clean(inst);
+exit:
+	kref_put(&inst->kref, close_helper);
+	return 0;
+}
+EXPORT_SYMBOL(msm_cvp_close);
+
+int msm_cvp_suspend(void)
+{
+	return msm_cvp_comm_suspend();
+}
+EXPORT_SYMBOL(msm_cvp_suspend);

+ 40 - 0
qcom/opensource/eva-kernel/msm/eva/msm_cvp_core.h

@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _MSM_CVP_CORE_H_
+#define _MSM_CVP_CORE_H_
+
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/dma-buf.h>
+#include <linux/refcount.h>
+#include <media/msm_eva_private.h>
+#include "msm_cvp_buf.h"
+#include "msm_cvp_synx.h"
+
+#define DDR_TYPE_LPDDR4 0x6
+#define DDR_TYPE_LPDDR4X 0x7
+#define DDR_TYPE_LPDDR4Y 0x8
+#define DDR_TYPE_LPDDR5 0x9
+
+enum session_type {
+	MSM_CVP_USER = 1,
+	MSM_CVP_KERNEL,
+	MSM_CVP_BOOT,
+	MSM_CVP_DSP,
+	MSM_CVP_UNKNOWN,
+	MSM_CVP_MAX_DEVICES = MSM_CVP_UNKNOWN,
+};
+
+struct msm_cvp_inst *msm_cvp_open(int session_type, struct task_struct *task);
+int msm_cvp_close(void *instance);
+int msm_cvp_suspend(void);
+int msm_cvp_poll(void *instance, struct file *filp,
+		struct poll_table_struct *pt);
+int msm_cvp_private(void *cvp_inst, unsigned int cmd,
+		struct eva_kmd_arg *arg);
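+
+/*
+ * Minimal kernel-client sketch (illustrative only; real callers must
+ * check errors, and the eva_kmd_arg payload layout, defined in
+ * media/msm_eva_private.h, is elided here):
+ *
+ *	struct msm_cvp_inst *inst;
+ *	struct eva_kmd_arg arg = {0};
+ *
+ *	inst = msm_cvp_open(MSM_CVP_KERNEL, current);
+ *	if (inst) {
+ *		msm_cvp_private(inst, 0, &arg);
+ *		msm_cvp_close(inst);
+ *	}
+ */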
+
+#endif

+ 630 - 0
qcom/opensource/eva-kernel/msm/eva/msm_cvp_debug.c

@@ -0,0 +1,630 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include "msm_cvp_debug.h"
+#include "msm_cvp_common.h"
+#include "cvp_core_hfi.h"
+#include "cvp_hfi_api.h"
+#include "msm_cvp_dsp.h"
+
+#define MAX_SSR_STRING_LEN 10
+int msm_cvp_debug = CVP_ERR | CVP_WARN | CVP_FW;
+EXPORT_SYMBOL(msm_cvp_debug);
+
+int msm_cvp_debug_out = CVP_OUT_PRINTK;
+EXPORT_SYMBOL(msm_cvp_debug_out);
+
+int msm_cvp_fw_debug = 0x18;
+int msm_cvp_fw_debug_mode = 1;
+int msm_cvp_fw_low_power_mode = 1;
+bool msm_cvp_fw_coverage = false;
+bool msm_cvp_auto_pil = true;
+bool msm_cvp_cacheop_enabled = true;
+bool msm_cvp_thermal_mitigation_disabled = false;
+bool msm_cvp_cacheop_disabled = false;
+int msm_cvp_clock_voting = 0;
+bool msm_cvp_syscache_disable = false;
+bool msm_cvp_dsp_disable = false;
+#ifdef CVP_MMRM_ENABLED
+bool msm_cvp_mmrm_enabled = true;
+#else
+bool msm_cvp_mmrm_enabled = false;
+#endif
+bool msm_cvp_dcvs_disable = false;
+int msm_cvp_minidump_enable = 0;
+int cvp_kernel_fence_enabled = 2;
+int msm_cvp_hw_wd_recovery = 1;
+
+#define MAX_DBG_BUF_SIZE 4096
+
+struct cvp_core_inst_pair {
+	struct msm_cvp_core *core;
+	struct msm_cvp_inst *inst;
+};
+
+static int core_info_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	dprintk(CVP_INFO, "%s: Enter\n", __func__);
+	return 0;
+}
+
+static u32 write_str(char *buffer,
+		size_t size, const char *fmt, ...)
+{
+	va_list args;
+	u32 len;
+
+	va_start(args, fmt);
+	len = vscnprintf(buffer, size, fmt, args);
+	va_end(args);
+	return len;
+}
+
+static ssize_t core_info_read(struct file *file, char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	struct msm_cvp_core *core = file->private_data;
+	struct cvp_hfi_ops *ops_tbl;
+	struct cvp_hal_fw_info fw_info = { {0} };
+	char *dbuf, *cur, *end;
+	int i = 0, rc = 0;
+	ssize_t len = 0;
+
+	if (!core || !core->dev_ops) {
+		dprintk(CVP_ERR, "Invalid params, core: %pK\n", core);
+		return 0;
+	}
+
+	dbuf = kzalloc(MAX_DBG_BUF_SIZE, GFP_KERNEL);
+	if (!dbuf) {
+		dprintk(CVP_ERR, "%s: Allocation failed!\n", __func__);
+		return -ENOMEM;
+	}
+	cur = dbuf;
+	end = cur + MAX_DBG_BUF_SIZE;
+	ops_tbl = core->dev_ops;
+
+	cur += write_str(cur, end - cur, "===============================\n");
+	cur += write_str(cur, end - cur, "CORE %d: %pK\n", 0, core);
+	cur += write_str(cur, end - cur, "===============================\n");
+	cur += write_str(cur, end - cur, "Core state: %d\n", core->state);
+	rc = call_hfi_op(ops_tbl, get_fw_info, ops_tbl->hfi_device_data, &fw_info);
+	if (rc) {
+		dprintk(CVP_WARN, "Failed to read FW info\n");
+		goto err_fw_info;
+	}
+
+	cur += write_str(cur, end - cur,
+		"FW version : %s\n", &fw_info.version);
+	cur += write_str(cur, end - cur,
+		"base addr: 0x%x\n", fw_info.base_addr);
+	cur += write_str(cur, end - cur,
+		"register_base: 0x%x\n", fw_info.register_base);
+	cur += write_str(cur, end - cur,
+		"register_size: %u\n", fw_info.register_size);
+	cur += write_str(cur, end - cur, "irq: %u\n", fw_info.irq);
+
+err_fw_info:
+	for (i = SYS_MSG_START; i < SYS_MSG_END; i++) {
+		cur += write_str(cur, end - cur, "completions[%d]: %s\n", i,
+			completion_done(&core->completions[SYS_MSG_INDEX(i)]) ?
+			"pending" : "done");
+	}
+	len = simple_read_from_buffer(buf, count, ppos,
+			dbuf, cur - dbuf);
+
+	kfree(dbuf);
+	return len;
+}
+
+static const struct file_operations core_info_fops = {
+	.open = core_info_open,
+	.read = core_info_read,
+};
+
+static int trigger_ssr_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	dprintk(CVP_INFO, "%s: Enter\n", __func__);
+	return 0;
+}
+
+static ssize_t trigger_ssr_write(struct file *filp, const char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	unsigned long ssr_trigger_val = 0;
+	int rc = 0;
+	struct msm_cvp_core *core = filp->private_data;
+	size_t size = MAX_SSR_STRING_LEN;
+	char kbuf[MAX_SSR_STRING_LEN + 1] = {0};
+
+	if (!buf)
+		return -EINVAL;
+
+	if (!count)
+		goto exit;
+
+	if (count < size)
+		size = count;
+
+	if (copy_from_user(kbuf, buf, size)) {
+		dprintk(CVP_WARN, "%s User memory fault\n", __func__);
+		rc = -EFAULT;
+		goto exit;
+	}
+
+	rc = kstrtoul(kbuf, 0, &ssr_trigger_val);
+	if (rc) {
+		dprintk(CVP_WARN, "returning error err %d\n", rc);
+		rc = -EINVAL;
+	} else {
+		msm_cvp_trigger_ssr(core, ssr_trigger_val);
+		rc = count;
+	}
+exit:
+	return rc;
+}
+
+static const struct file_operations ssr_fops = {
+	.open = trigger_ssr_open,
+	.write = trigger_ssr_write,
+};
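+
+/*
+ * Illustration (assumes debugfs is mounted at /sys/kernel/debug):
+ * writes to the per-core "trigger_ssr" node created further down in
+ * msm_cvp_debugfs_init_core() are parsed by trigger_ssr_write() and
+ * forwarded to msm_cvp_trigger_ssr(), e.g.
+ *
+ *	echo 1 > /sys/kernel/debug/msm_cvp/core0/trigger_ssr
+ */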
+
+static int cvp_power_get(void *data, u64 *val)
+{
+	struct cvp_hfi_ops *ops_tbl;
+	struct msm_cvp_core *core;
+	struct iris_hfi_device *hfi_device;
+
+	core = cvp_driver->cvp_core;
+	if (!core)
+		return 0;
+	ops_tbl = core->dev_ops;
+	if (!ops_tbl)
+		return 0;
+
+	hfi_device = ops_tbl->hfi_device_data;
+	if (!hfi_device)
+		return 0;
+
+	*val = hfi_device->power_enabled;
+	return 0;
+}
+
+#define MIN_PC_INTERVAL 1000
+#define MAX_PC_INTERVAL 1000000
+
+static int cvp_power_set(void *data, u64 val)
+{
+	struct cvp_hfi_ops *ops_tbl;
+	struct msm_cvp_core *core;
+	struct iris_hfi_device *hfi_device;
+	int rc = 0;
+
+	core = cvp_driver->cvp_core;
+	if (!core)
+		return -EINVAL;
+
+	ops_tbl = core->dev_ops;
+	if (!ops_tbl)
+		return -EINVAL;
+
+	hfi_device = ops_tbl->hfi_device_data;
+	if (!hfi_device)
+		return -EINVAL;
+
+	if (val >= MAX_PC_INTERVAL) {
+		hfi_device->res->sw_power_collapsible = 0;
+	} else if (val > MIN_PC_INTERVAL) {
+		hfi_device->res->sw_power_collapsible = 1;
+		hfi_device->res->msm_cvp_pwr_collapse_delay =
+			(unsigned int)val;
+	}
+
+	if (core->state == CVP_CORE_UNINIT)
+		return -EINVAL;
+
+	if (val > 0) {
+		rc = call_hfi_op(ops_tbl, resume, ops_tbl->hfi_device_data);
+		if (rc)
+			dprintk(CVP_ERR, "debugfs fail to power on cvp\n");
+	}
+	return rc;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(cvp_pwr_fops, cvp_power_get, cvp_power_set, "%llu\n");
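+
+/*
+ * Illustration: the "cvp_power" node created in msm_cvp_debugfs_init_drv()
+ * below routes writes to cvp_power_set(), so with debugfs mounted at
+ * /sys/kernel/debug,
+ *
+ *	echo 5000 > /sys/kernel/debug/msm_cvp/cvp_power
+ *
+ * keeps SW power collapse enabled, sets msm_cvp_pwr_collapse_delay to
+ * 5000 and resumes the core, while any value >= 1000000 disables SW
+ * power collapse.
+ */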
+
+struct dentry *msm_cvp_debugfs_init_drv(void)
+{
+	struct dentry *dir = NULL;
+
+	dir = debugfs_create_dir("msm_cvp", NULL);
+	if (IS_ERR_OR_NULL(dir)) {
+		dir = NULL;
+		goto failed_create_dir;
+	}
+
+	debugfs_create_x32("debug_level", 0644, dir, &msm_cvp_debug);
+	debugfs_create_x32("fw_level", 0644, dir, &msm_cvp_fw_debug);
+	debugfs_create_u32("fw_debug_mode", 0644, dir, &msm_cvp_fw_debug_mode);
+	debugfs_create_u32("fw_low_power_mode", 0644, dir,
+		&msm_cvp_fw_low_power_mode);
+	debugfs_create_u32("debug_output", 0644, dir, &msm_cvp_debug_out);
+	debugfs_create_u32("minidump_enable", 0644, dir,
+			&msm_cvp_minidump_enable);
+	debugfs_create_bool("fw_coverage", 0644, dir, &msm_cvp_fw_coverage);
+	debugfs_create_bool("auto_pil", 0644, dir, &msm_cvp_auto_pil);
+	debugfs_create_u32("kernel_fence", 0644, dir, &cvp_kernel_fence_enabled);
+	debugfs_create_bool("disable_thermal_mitigation", 0644, dir,
+			&msm_cvp_thermal_mitigation_disabled);
+	debugfs_create_bool("enable_cacheop", 0644, dir,
+			&msm_cvp_cacheop_enabled);
+	debugfs_create_bool("disable_cvp_syscache", 0644, dir,
+			&msm_cvp_syscache_disable);
+	debugfs_create_bool("disable_dcvs", 0644, dir,
+			&msm_cvp_dcvs_disable);
+
+	debugfs_create_file("cvp_power", 0644, dir, NULL, &cvp_pwr_fops);
+
+	return dir;
+
+failed_create_dir:
+	if (dir)
+		debugfs_remove_recursive(cvp_driver->debugfs_root);
+
+	dprintk(CVP_WARN, "Failed to create debugfs\n");
+	return NULL;
+}
+
+static int _clk_rate_set(void *data, u64 val)
+{
+	struct msm_cvp_core *core;
+	struct cvp_hfi_ops *ops_tbl;
+	struct allowed_clock_rates_table *tbl = NULL;
+	unsigned int tbl_size, i;
+
+	core = cvp_driver->cvp_core;
+	ops_tbl = core->dev_ops;
+	tbl = core->resources.allowed_clks_tbl;
+	tbl_size = core->resources.allowed_clks_tbl_size;
+
+	if (val == 0) {
+		struct iris_hfi_device *hdev = ops_tbl->hfi_device_data;
+
+		msm_cvp_clock_voting = 0;
+		call_hfi_op(ops_tbl, scale_clocks, hdev, hdev->clk_freq);
+		return 0;
+	}
+
+	for (i = 0; i < tbl_size; i++)
+		if (val <= tbl[i].clock_rate)
+			break;
+
+	if (i == tbl_size)
+		msm_cvp_clock_voting = tbl[tbl_size-1].clock_rate;
+	else
+		msm_cvp_clock_voting = tbl[i].clock_rate;
+
+	dprintk(CVP_WARN, "Override cvp_clk_rate with %d\n",
+			msm_cvp_clock_voting);
+
+	call_hfi_op(ops_tbl, scale_clocks, ops_tbl->hfi_device_data,
+		msm_cvp_clock_voting);
+
+	return 0;
+}
+
+static int _clk_rate_get(void *data, u64 *val)
+{
+	struct msm_cvp_core *core;
+	struct iris_hfi_device *hdev;
+
+	core = cvp_driver->cvp_core;
+	hdev = core->dev_ops->hfi_device_data;
+	if (msm_cvp_clock_voting)
+		*val = msm_cvp_clock_voting;
+	else
+		*val = hdev->clk_freq;
+
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(clk_rate_fops, _clk_rate_get, _clk_rate_set, "%llu\n");
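+
+/*
+ * Illustration: the per-core "clock_rate" node created in
+ * msm_cvp_debugfs_init_core() below drives _clk_rate_set(). A nonzero
+ * write is snapped up to the nearest rate in allowed_clks_tbl (or to
+ * the highest entry if it exceeds the table) and voted immediately;
+ * zero removes the override:
+ *
+ *	echo 400000000 > /sys/kernel/debug/msm_cvp/core0/clock_rate
+ *	echo 0 > /sys/kernel/debug/msm_cvp/core0/clock_rate
+ */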
+
+static int _dsp_dbg_set(void *data, u64 val)
+{
+	gfa_cv.debug_mask = (uint32_t)val;
+
+	cvp_dsp_send_debug_mask();
+
+	return 0;
+}
+
+static int _dsp_dbg_get(void *data, u64 *val)
+{
+	*val = gfa_cv.debug_mask;
+
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(dsp_debug_fops, _dsp_dbg_get, _dsp_dbg_set, "%llu\n");
+
+static int _max_ssr_set(void *data, u64 val)
+{
+	struct msm_cvp_core *core;
+
+	core = cvp_driver->cvp_core;
+	if (core) {
+		if (val < 1) {
+			dprintk(CVP_WARN,
+				"Invalid max_ssr_allowed value %llx\n", val);
+			return 0;
+		}
+
+		core->resources.max_ssr_allowed = (unsigned int)val;
+	}
+	return 0;
+}
+
+static int _max_ssr_get(void *data, u64 *val)
+{
+	struct msm_cvp_core *core;
+
+	core = cvp_driver->cvp_core;
+	if (core)
+		*val = core->resources.max_ssr_allowed;
+
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(max_ssr_fops, _max_ssr_get, _max_ssr_set, "%llu\n");
+
+static int _ssr_stall_set(void *data, u64 val)
+{
+	struct msm_cvp_core *core;
+
+	core = cvp_driver->cvp_core;
+	if (core)
+		core->resources.fatal_ssr = (val >= 1);
+
+	return 0;
+}
+
+static int _ssr_stall_get(void *data, u64 *val)
+{
+	struct msm_cvp_core *core;
+
+	core = cvp_driver->cvp_core;
+	if (core)
+		*val = core->resources.fatal_ssr ? 1 : 0;
+
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(ssr_stall_fops, _ssr_stall_get, _ssr_stall_set, "%llu\n");
+
+
+struct dentry *msm_cvp_debugfs_init_core(struct msm_cvp_core *core,
+		struct dentry *parent)
+{
+	struct dentry *dir = NULL;
+	char debugfs_name[MAX_DEBUGFS_NAME];
+
+	if (!core) {
+		dprintk(CVP_ERR, "Invalid params, core: %pK\n", core);
+		goto failed_create_dir;
+	}
+
+	snprintf(debugfs_name, MAX_DEBUGFS_NAME, "core%d", 0);
+	dir = debugfs_create_dir(debugfs_name, parent);
+	if (IS_ERR_OR_NULL(dir)) {
+		dir = NULL;
+		dprintk(CVP_ERR, "Failed to create debugfs for msm_cvp\n");
+		goto failed_create_dir;
+	}
+	if (!debugfs_create_file("info", 0444, dir, core, &core_info_fops)) {
+		dprintk(CVP_ERR, "debugfs_create_file: fail\n");
+		goto failed_create_dir;
+	}
+	if (!debugfs_create_file("trigger_ssr", 0200,
+			dir, core, &ssr_fops)) {
+		dprintk(CVP_ERR, "debugfs_create_file: fail\n");
+		goto failed_create_dir;
+	}
+	if (!debugfs_create_file("clock_rate", 0644, dir,
+			NULL, &clk_rate_fops)) {
+		dprintk(CVP_ERR, "debugfs_create_file: clock_rate fail\n");
+		goto failed_create_dir;
+	}
+	if (!debugfs_create_file("dsp_debug_level", 0644, dir,
+			NULL, &dsp_debug_fops)) {
+		dprintk(CVP_ERR, "debugfs_create: dsp_debug_level fail\n");
+		goto failed_create_dir;
+	}
+
+	if (!debugfs_create_file("max_ssr_allowed", 0644, dir,
+			NULL, &max_ssr_fops)) {
+		dprintk(CVP_ERR, "debugfs_create: max_ssr_allowed fail\n");
+		goto failed_create_dir;
+	}
+
+	if (!debugfs_create_file("ssr_stall", 0644, dir,
+			NULL, &ssr_stall_fops)) {
+		dprintk(CVP_ERR, "debugfs_create: ssr_stall fail\n");
+		goto failed_create_dir;
+	}
+	debugfs_create_u32("hw_wd_recovery", 0644, dir,
+		&msm_cvp_hw_wd_recovery);
+failed_create_dir:
+	return dir;
+}
+
+static int inst_info_open(struct inode *inode, struct file *file)
+{
+	dprintk(CVP_INFO, "Open inode ptr: %pK\n", inode->i_private);
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static int publish_unreleased_reference(struct msm_cvp_inst *inst,
+		char **dbuf, char *end)
+{
+	dprintk(CVP_SESS, "%s deprecated function\n", __func__);
+	return 0;
+}
+
+static void put_inst_helper(struct kref *kref)
+{
+	struct msm_cvp_inst *inst;
+
+	if (!kref)
+		return;
+	inst = container_of(kref, struct msm_cvp_inst, kref);
+
+	msm_cvp_destroy(inst);
+}
+
+static ssize_t inst_info_read(struct file *file, char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	struct cvp_core_inst_pair *idata = file->private_data;
+	struct msm_cvp_core *core;
+	struct msm_cvp_inst *inst, *temp = NULL;
+	char *dbuf, *cur, *end;
+	int i;
+	ssize_t len = 0;
+
+	if (!idata || !idata->core || !idata->inst) {
+		dprintk(CVP_ERR, "%s: Invalid params\n", __func__);
+		return 0;
+	}
+
+	core = idata->core;
+	inst = idata->inst;
+
+	mutex_lock(&core->lock);
+	list_for_each_entry(temp, &core->instances, list) {
+		if (temp == inst)
+			break;
+	}
+	inst = ((temp == inst) && kref_get_unless_zero(&inst->kref)) ?
+		inst : NULL;
+	mutex_unlock(&core->lock);
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s: Instance has become obsolete", __func__);
+		return 0;
+	}
+
+	dbuf = kzalloc(MAX_DBG_BUF_SIZE, GFP_KERNEL);
+	if (!dbuf) {
+		dprintk(CVP_ERR, "%s: Allocation failed!\n", __func__);
+		len = -ENOMEM;
+		goto failed_alloc;
+	}
+	cur = dbuf;
+	end = cur + MAX_DBG_BUF_SIZE;
+
+	cur += write_str(cur, end - cur, "==============================\n");
+	cur += write_str(cur, end - cur, "INSTANCE: %pK (%s)\n", inst,
+		inst->session_type == MSM_CVP_USER ? "User" : "Kernel");
+	cur += write_str(cur, end - cur, "==============================\n");
+	cur += write_str(cur, end - cur, "core: %pK\n", inst->core);
+	cur += write_str(cur, end - cur, "state: %d\n", inst->state);
+	cur += write_str(cur, end - cur, "secure: %d\n",
+		!!(inst->flags & CVP_SECURE));
+	for (i = SESSION_MSG_START; i < SESSION_MSG_END; i++) {
+		cur += write_str(cur, end - cur, "completions[%d]: %s\n", i,
+		completion_done(&inst->completions[SESSION_MSG_INDEX(i)]) ?
+		"pending" : "done");
+	}
+
+	publish_unreleased_reference(inst, &cur, end);
+	len = simple_read_from_buffer(buf, count, ppos,
+		dbuf, cur - dbuf);
+
+	kfree(dbuf);
+failed_alloc:
+	kref_put(&inst->kref, put_inst_helper);
+	return len;
+}
+
+static int inst_info_release(struct inode *inode, struct file *file)
+{
+	dprintk(CVP_INFO, "Release inode ptr: %pK\n", inode->i_private);
+	file->private_data = NULL;
+	return 0;
+}
+
+static const struct file_operations inst_info_fops = {
+	.open = inst_info_open,
+	.read = inst_info_read,
+	.release = inst_info_release,
+};
+
+struct dentry *msm_cvp_debugfs_init_inst(struct msm_cvp_inst *inst,
+		struct dentry *parent)
+{
+	struct dentry *dir = NULL, *info = NULL;
+	char debugfs_name[MAX_DEBUGFS_NAME];
+	struct cvp_core_inst_pair *idata = NULL;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "Invalid params, inst: %pK\n", inst);
+		goto exit;
+	}
+	snprintf(debugfs_name, MAX_DEBUGFS_NAME, "inst_%pK", inst);
+
+	idata = kzalloc(sizeof(*idata), GFP_KERNEL);
+	if (!idata) {
+		dprintk(CVP_ERR, "%s: Allocation failed!\n", __func__);
+		goto exit;
+	}
+
+	idata->core = inst->core;
+	idata->inst = inst;
+
+	dir = debugfs_create_dir(debugfs_name, parent);
+	if (IS_ERR_OR_NULL(dir)) {
+		dir = NULL;
+		dprintk(CVP_ERR, "Failed to create debugfs for msm_cvp\n");
+		goto failed_create_dir;
+	}
+
+	info = debugfs_create_file("info", 0444, dir,
+			idata, &inst_info_fops);
+	if (!info) {
+		dprintk(CVP_ERR, "debugfs_create_file: info fail\n");
+		goto failed_create_file;
+	}
+
+	dir->d_inode->i_private = info->d_inode->i_private;
+	inst->debug.pdata[FRAME_PROCESSING].sampling = true;
+	return dir;
+
+failed_create_file:
+	debugfs_remove_recursive(dir);
+	dir = NULL;
+failed_create_dir:
+	kfree(idata);
+exit:
+	return dir;
+}
+
+void msm_cvp_debugfs_deinit_inst(struct msm_cvp_inst *inst)
+{
+	struct dentry *dentry = NULL;
+
+	if (!inst || !inst->debugfs_root)
+		return;
+
+	dentry = inst->debugfs_root;
+	if (dentry->d_inode) {
+		dprintk(CVP_INFO, "Destroy %pK\n", dentry->d_inode->i_private);
+		kfree(dentry->d_inode->i_private);
+		dentry->d_inode->i_private = NULL;
+	}
+	debugfs_remove_recursive(dentry);
+	inst->debugfs_root = NULL;
+}

+ 205 - 0
qcom/opensource/eva-kernel/msm/eva/msm_cvp_debug.h

@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __MSM_CVP_DEBUG__
+#define __MSM_CVP_DEBUG__
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include "msm_cvp_internal.h"
+#include "msm_cvp_events.h"
+
+#ifndef CVP_DBG_LABEL
+#define CVP_DBG_LABEL "msm_cvp"
+#endif
+
+#define CVP_DBG_TAG CVP_DBG_LABEL ": %4s: "
+
+/* To enable a message category, OR the values below and
+ * echo the result to the debugfs "debug_level" file.
+ *
+ * To enable all messages set debug_level = 0xffff
+ */
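+
+/*
+ * Example (assumes debugfs is mounted at /sys/kernel/debug): enable
+ * only error, warning, session and DSP messages:
+ *
+ *	echo 0x2803 > /sys/kernel/debug/msm_cvp/debug_level
+ */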
+
+enum cvp_msg_prio {
+	CVP_ERR  = 0x000001,
+	CVP_WARN = 0x000002,
+	CVP_INFO = 0x000004,
+	CVP_CMD  = 0x000008,
+	CVP_PROF = 0x000010,
+	CVP_PKT  = 0x000020,
+	CVP_MEM  = 0x000040,
+	CVP_SYNX = 0x000080,
+	CVP_CORE = 0x000100,
+	CVP_REG  = 0x000200,
+	CVP_PWR  = 0x000400,
+	CVP_DSP  = 0x000800,
+	CVP_FW   = 0x001000,
+	CVP_SESS = 0x002000,
+	CVP_HFI  = 0x004000,
+	CVP_VM   = 0x008000,
+	CVP_DBG  = CVP_MEM | CVP_SYNX | CVP_CORE | CVP_REG | CVP_CMD |
+		CVP_PWR | CVP_DSP | CVP_SESS | CVP_HFI | CVP_PKT | CVP_VM,
+};
+
+enum cvp_msg_out {
+	CVP_OUT_PRINTK = 0,
+};
+
+enum msm_cvp_debugfs_event {
+	MSM_CVP_DEBUGFS_EVENT_ETB,
+	MSM_CVP_DEBUGFS_EVENT_EBD,
+	MSM_CVP_DEBUGFS_EVENT_FTB,
+	MSM_CVP_DEBUGFS_EVENT_FBD,
+};
+
+extern int msm_cvp_debug;
+extern int msm_cvp_debug_out;
+extern int msm_cvp_fw_debug;
+extern int msm_cvp_fw_debug_mode;
+extern int msm_cvp_fw_low_power_mode;
+extern bool msm_cvp_fw_coverage;
+extern bool msm_cvp_auto_pil;
+extern bool msm_cvp_thermal_mitigation_disabled;
+extern bool msm_cvp_cacheop_disabled;
+extern int msm_cvp_clock_voting;
+extern bool msm_cvp_syscache_disable;
+extern bool msm_cvp_dsp_disable;
+extern bool msm_cvp_mmrm_enabled;
+extern bool msm_cvp_dcvs_disable;
+extern int msm_cvp_minidump_enable;
+extern int cvp_kernel_fence_enabled;
+extern int msm_cvp_hw_wd_recovery;
+
+#define dprintk(__level, __fmt, arg...)	\
+	do { \
+		if (msm_cvp_debug & __level) { \
+			if (msm_cvp_debug_out == CVP_OUT_PRINTK) { \
+				pr_info(CVP_DBG_TAG __fmt, \
+					get_debug_level_str(__level),   \
+					## arg); \
+			} \
+		} \
+	} while (0)
+
+/* dprintk_rl is designed for printing frequent recurring errors */
+#define dprintk_rl(__level, __fmt, arg...)	\
+	do { \
+		if (msm_cvp_debug & __level) { \
+			if (msm_cvp_debug_out == CVP_OUT_PRINTK) { \
+				pr_info_ratelimited(CVP_DBG_TAG __fmt, \
+					get_debug_level_str(__level),   \
+					## arg); \
+			} \
+		} \
+	} while (0)
+
+#define MSM_CVP_ERROR(value)					\
+	do {	if (value)					\
+			dprintk(CVP_ERR, "BugOn");		\
+		WARN_ON(value);					\
+	} while (0)
+
+
+struct dentry *msm_cvp_debugfs_init_drv(void);
+struct dentry *msm_cvp_debugfs_init_core(struct msm_cvp_core *core,
+		struct dentry *parent);
+struct dentry *msm_cvp_debugfs_init_inst(struct msm_cvp_inst *inst,
+		struct dentry *parent);
+void msm_cvp_debugfs_deinit_inst(struct msm_cvp_inst *inst);
+
+static inline char *get_debug_level_str(int level)
+{
+	switch (level) {
+	case CVP_ERR:
+		return "err";
+	case CVP_WARN:
+		return "warn";
+	case CVP_INFO:
+		return "info";
+	case CVP_CMD:
+		return "cmd";
+	case CVP_DBG:
+		return "dbg";
+	case CVP_PROF:
+		return "prof";
+	case CVP_PKT:
+		return "pkt";
+	case CVP_MEM:
+		return "mem";
+	case CVP_SYNX:
+		return "synx";
+	case CVP_CORE:
+		return "core";
+	case CVP_REG:
+		return "reg";
+	case CVP_PWR:
+		return "pwr";
+	case CVP_DSP:
+		return "dsp";
+	case CVP_FW:
+		return "fw";
+	case CVP_SESS:
+		return "sess";
+	case CVP_HFI:
+		return "hfi";
+	case CVP_VM:
+		return "vm";
+	default:
+		return "???";
+	}
+}
+
+static inline void show_stats(struct msm_cvp_inst *i)
+{
+	int x;
+
+	for (x = 0; x < MAX_PROFILING_POINTS; x++) {
+		if (i->debug.pdata[x].name[0] &&
+				(msm_cvp_debug & CVP_PROF)) {
+			if (i->debug.samples) {
+				dprintk(CVP_PROF, "%s averaged %d ms/sample\n",
+						i->debug.pdata[x].name,
+						i->debug.pdata[x].cumulative /
+						i->debug.samples);
+			}
+
+			dprintk(CVP_PROF, "%s Samples: %d\n",
+					i->debug.pdata[x].name,
+					i->debug.samples);
+		}
+	}
+}
+
+static inline void msm_cvp_res_handle_fatal_hw_error(
+	struct msm_cvp_platform_resources *resources,
+	bool enable_fatal)
+{
+	enable_fatal &= resources->debug_timeout;
+	MSM_CVP_ERROR(enable_fatal);
+}
+
+static inline void msm_cvp_handle_hw_error(struct msm_cvp_core *core)
+{
+	bool enable_fatal = true;
+
+	/*
+	 * In current implementation user-initiated SSR triggers
+	 * a fatal error from hardware. However, there is no way
+	 * to know if fatal error is due to SSR or not. Handle
+	 * user SSR as non-fatal.
+	 */
+	if (core->trigger_ssr) {
+		core->trigger_ssr = false;
+		enable_fatal = false;
+	}
+
+	/* CVP driver can decide FATAL handling of HW errors
+	 * based on multiple factors. This condition check will
+	 * be enhanced later.
+	 */
+	msm_cvp_res_handle_fatal_hw_error(&core->resources, enable_fatal);
+}
+
+#endif

+ 2234 - 0
qcom/opensource/eva-kernel/msm/eva/msm_cvp_dsp.c

@@ -0,0 +1,2234 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include <linux/module.h>
+#include <linux/rpmsg.h>
+#include <linux/of_platform.h>
+#include <linux/of_fdt.h>
+#include <linux/qcom_scm.h>
+#include <soc/qcom/secure_buffer.h>
+#include "msm_cvp_core.h"
+#include "msm_cvp.h"
+#include "cvp_hfi.h"
+#include "cvp_dump.h"
+
+static atomic_t nr_maps;
+struct cvp_dsp_apps gfa_cv;
+
+static int cvp_reinit_dsp(void);
+
+static void cvp_remove_dsp_sessions(void);
+
+static int __fastrpc_driver_register(struct fastrpc_driver *driver)
+{
+#ifdef CVP_FASTRPC_ENABLED
+	return fastrpc_driver_register(driver);
+#else
+	return -ENODEV;
+#endif
+}
+
+static void __fastrpc_driver_unregister(struct fastrpc_driver *driver)
+{
+#ifdef CVP_FASTRPC_ENABLED
+	return fastrpc_driver_unregister(driver);
+#endif
+}
+
+#ifdef CVP_FASTRPC_ENABLED
+static int __fastrpc_driver_invoke(struct fastrpc_device *dev,
+				enum fastrpc_driver_invoke_nums invoke_num,
+				unsigned long invoke_param)
+{
+	return fastrpc_driver_invoke(dev, invoke_num, invoke_param);
+}
+#endif	/* End of CVP_FASTRPC_ENABLED */
+
+static int cvp_dsp_send_cmd(struct cvp_dsp_cmd_msg *cmd, uint32_t len)
+{
+	int rc = 0;
+	struct cvp_dsp_apps *me = &gfa_cv;
+
+	dprintk(CVP_DSP, "%s: cmd = %d\n", __func__, cmd->type);
+
+	if (IS_ERR_OR_NULL(me->chan)) {
+		dprintk(CVP_ERR, "%s: DSP GLink is not ready\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+	rc = rpmsg_send(me->chan->ept, cmd, len);
+	if (rc) {
+		dprintk(CVP_ERR, "%s: DSP rpmsg_send failed rc=%d\n",
+			__func__, rc);
+		goto exit;
+	}
+
+exit:
+	return rc;
+}
+
+static int cvp_dsp_send_cmd_sync(struct cvp_dsp_cmd_msg *cmd,
+		uint32_t len, struct cvp_dsp_rsp_msg *rsp)
+{
+	int rc = 0;
+	struct cvp_dsp_apps *me = &gfa_cv;
+
+	dprintk(CVP_DSP, "%s: cmd = %d\n", __func__, cmd->type);
+
+	me->pending_dsp2cpu_rsp.type = cmd->type;
+	rc = cvp_dsp_send_cmd(cmd, len);
+	if (rc) {
+		dprintk(CVP_ERR, "%s: cvp_dsp_send_cmd failed rc=%d\n",
+			__func__, rc);
+		goto exit;
+	}
+
+	if (!wait_for_completion_timeout(&me->completions[cmd->type],
+			msecs_to_jiffies(CVP_DSP_RESPONSE_TIMEOUT))) {
+		dprintk(CVP_ERR, "%s cmd %d timeout\n", __func__, cmd->type);
+		rc = -ETIMEDOUT;
+		goto exit;
+	}
+
+exit:
+	rsp->ret = me->pending_dsp2cpu_rsp.ret;
+	rsp->dsp_state = me->pending_dsp2cpu_rsp.dsp_state;
+	me->pending_dsp2cpu_rsp.type = CVP_INVALID_RPMSG_TYPE;
+	return rc;
+}
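+
+/*
+ * Note on the handshake above: the caller latches the expected response
+ * type in gfa_cv.pending_dsp2cpu_rsp before sending, and
+ * cvp_dsp_rpmsg_callback() further down completes
+ * gfa_cv.completions[cmd->type] when a matching response arrives,
+ * bounded by CVP_DSP_RESPONSE_TIMEOUT.
+ */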
+
+static int cvp_dsp_send_cmd_hfi_queue(phys_addr_t *phys_addr,
+				uint32_t size_in_bytes,
+				struct cvp_dsp_rsp_msg *rsp)
+{
+	int rc = 0;
+	struct cvp_dsp_cmd_msg cmd;
+
+	cmd.type = CPU2DSP_SEND_HFI_QUEUE;
+	cmd.msg_ptr = (uint64_t)phys_addr;
+	cmd.msg_ptr_len = size_in_bytes;
+	cmd.ddr_type = cvp_of_fdt_get_ddrtype();
+	if (cmd.ddr_type < 0) {
+		dprintk(CVP_WARN,
+			"%s: Incorrect DDR type value %d, use default %d\n",
+			__func__, cmd.ddr_type, DDR_TYPE_LPDDR5);
+		/*return -EINVAL;*/
+		cmd.ddr_type =  DDR_TYPE_LPDDR5;
+	}
+
+	dprintk(CVP_DSP,
+		"%s: address of buffer, PA=0x%pK  size_buff=%d ddr_type=%d\n",
+		__func__, phys_addr, size_in_bytes, cmd.ddr_type);
+
+	rc = cvp_dsp_send_cmd_sync(&cmd, sizeof(struct cvp_dsp_cmd_msg), rsp);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"%s: cvp_dsp_send_cmd failed rc = %d\n",
+			__func__, rc);
+		goto exit;
+	}
+exit:
+	return rc;
+}
+
+static int cvp_hyp_assign_to_dsp(uint64_t addr, uint32_t size)
+{
+	int rc = 0;
+	struct cvp_dsp_apps *me = &gfa_cv;
+
+	uint64_t hlosVMid = BIT(VMID_HLOS);
+	struct qcom_scm_vmperm dspVM[DSP_VM_NUM] = {
+		{VMID_HLOS, PERM_READ | PERM_WRITE | PERM_EXEC},
+		{VMID_CDSP_Q6, PERM_READ | PERM_WRITE | PERM_EXEC}
+	};
+
+	if (!me->hyp_assigned) {
+		rc = qcom_scm_assign_mem(addr, size, &hlosVMid, dspVM, DSP_VM_NUM);
+		if (rc) {
+			dprintk(CVP_ERR, "%s failed. rc=%d\n", __func__, rc);
+			return rc;
+		}
+		me->addr = addr;
+		me->size = size;
+		me->hyp_assigned = true;
+	}
+
+	return rc;
+}
+
+static int cvp_hyp_assign_from_dsp(void)
+{
+	int rc = 0;
+	struct cvp_dsp_apps *me = &gfa_cv;
+
+	uint64_t dspVMids = BIT(VMID_HLOS) | BIT(VMID_CDSP_Q6);
+	struct qcom_scm_vmperm hlosVM[HLOS_VM_NUM] = {
+		{VMID_HLOS, PERM_READ | PERM_WRITE | PERM_EXEC},
+	};
+
+	if (me->hyp_assigned) {
+		rc = qcom_scm_assign_mem(me->addr, me->size, &dspVMids, hlosVM, HLOS_VM_NUM);
+		if (rc) {
+			dprintk(CVP_ERR, "%s failed. rc=%d\n", __func__, rc);
+			return rc;
+		}
+		me->addr = 0;
+		me->size = 0;
+		me->hyp_assigned = false;
+	}
+
+	return rc;
+}
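+
+/*
+ * cvp_hyp_assign_to_dsp() and cvp_hyp_assign_from_dsp() are strict
+ * inverses gated by gfa_cv.hyp_assigned, so memory lent to the CDSP VM
+ * is reclaimed for HLOS exactly once, e.g. from cvp_dsp_rpmsg_remove()
+ * on CDSP SSR.
+ */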
+
+static int cvp_dsp_rpmsg_probe(struct rpmsg_device *rpdev)
+{
+	struct cvp_dsp_apps *me = &gfa_cv;
+	const char *edge_name = NULL;
+	int ret = 0;
+
+	ret = of_property_read_string(rpdev->dev.parent->of_node,
+			"label", &edge_name);
+	if (ret) {
+		dprintk(CVP_ERR, "glink edge 'label' not found in node\n");
+		return ret;
+	}
+
+	if (strcmp(edge_name, "cdsp")) {
+		dprintk(CVP_ERR,
+			"%s: Failed to probe rpmsg device.Node name:%s\n",
+			__func__, edge_name);
+		return -EINVAL;
+	}
+
+	mutex_lock(&me->tx_lock);
+	me->chan = rpdev;
+	me->state = DSP_PROBED;
+	mutex_unlock(&me->tx_lock);
+	complete(&me->completions[CPU2DSP_MAX_CMD]);
+
+	return ret;
+}
+
+static int eva_fastrpc_dev_unmap_dma(
+		struct fastrpc_device *frpc_device,
+		struct cvp_internal_buf *buf);
+
+static int delete_dsp_session(struct msm_cvp_inst *inst,
+		struct cvp_dsp_fastrpc_driver_entry *frpc_node)
+{
+	struct msm_cvp_list *buf_list = NULL;
+	struct list_head *ptr_dsp_buf = NULL, *next_dsp_buf = NULL;
+	struct cvp_internal_buf *buf = NULL;
+	struct task_struct *task = NULL;
+	struct cvp_hfi_ops *ops_tbl;
+	int rc;
+
+	if (!inst)
+		return -EINVAL;
+
+	buf_list = &inst->cvpdspbufs;
+
+	mutex_lock(&buf_list->lock);
+	ptr_dsp_buf = &buf_list->list;
+	list_for_each_safe(ptr_dsp_buf, next_dsp_buf, &buf_list->list) {
+		buf = list_entry(ptr_dsp_buf, struct cvp_internal_buf, list);
+		if (buf) {
+			dprintk(CVP_DSP, "fd in list 0x%x\n", buf->fd);
+
+			if (!buf->smem) {
+				dprintk(CVP_DSP, "Empyt smem\n");
+				continue;
+			}
+
+			dprintk(CVP_DSP, "%s find device addr 0x%x\n",
+				__func__, buf->smem->device_addr);
+
+			rc = eva_fastrpc_dev_unmap_dma(
+					frpc_node->cvp_fastrpc_device,
+					buf);
+			if (rc)
+				dprintk_rl(CVP_WARN,
+					"%s Failed to unmap buffer 0x%x\n",
+					__func__, rc);
+
+			rc = cvp_release_dsp_buffers(inst, buf);
+			if (rc)
+				dprintk(CVP_ERR,
+					"%s Failed to free buffer 0x%x\n",
+					__func__, rc);
+
+			list_del(&buf->list);
+
+			cvp_kmem_cache_free(&cvp_driver->buf_cache, buf);
+		}
+	}
+
+	mutex_unlock(&buf_list->lock);
+
+	task = inst->task;
+
+	spin_lock(&inst->core->resources.pm_qos.lock);
+	if (inst->core->resources.pm_qos.off_vote_cnt > 0)
+		inst->core->resources.pm_qos.off_vote_cnt--;
+	else
+		dprintk(CVP_WARN, "%s Unexpected pm_qos off vote %d\n",
+			__func__,
+			inst->core->resources.pm_qos.off_vote_cnt);
+	spin_unlock(&inst->core->resources.pm_qos.lock);
+
+	ops_tbl = inst->core->dev_ops;
+	call_hfi_op(ops_tbl, pm_qos_update, ops_tbl->hfi_device_data);
+
+	rc = msm_cvp_close(inst);
+	if (rc)
+		dprintk(CVP_ERR, "Warning: Failed to close cvp instance\n");
+
+	if (task)
+		put_task_struct(task);
+
+	dprintk(CVP_DSP, "%s DSP2CPU_DETELE_SESSION Done\n", __func__);
+	return rc;
+}
+
+static int eva_fastrpc_driver_get_name(
+		struct cvp_dsp_fastrpc_driver_entry *frpc_node)
+{
+	int i = 0;
+	struct cvp_dsp_apps *me = &gfa_cv;
+
+	for (i = 0; i < MAX_FASTRPC_DRIVER_NUM; i++) {
+		if (me->cvp_fastrpc_name[i].status == DRIVER_NAME_AVAILABLE) {
+			frpc_node->driver_name_idx = i;
+			frpc_node->cvp_fastrpc_driver.driver.name =
+				me->cvp_fastrpc_name[i].name;
+			me->cvp_fastrpc_name[i].status = DRIVER_NAME_USED;
+			dprintk(CVP_DSP, "%s -> handle 0x%x get name %s\n",
+				__func__, frpc_node->cvp_fastrpc_driver.handle,
+				frpc_node->cvp_fastrpc_driver.driver.name);
+			return 0;
+		}
+	}
+
+	return -1;
+}
+
+static void eva_fastrpc_driver_release_name(
+		struct cvp_dsp_fastrpc_driver_entry *frpc_node)
+{
+	struct cvp_dsp_apps *me = &gfa_cv;
+
+	me->cvp_fastrpc_name[frpc_node->driver_name_idx].status =
+		DRIVER_NAME_AVAILABLE;
+}
+
+/* The function may block for up to ~100ms waiting for the node's refcount to drop */
+static bool dequeue_frpc_node(struct cvp_dsp_fastrpc_driver_entry *node)
+{
+	struct cvp_dsp_apps *me = &gfa_cv;
+	struct cvp_dsp_fastrpc_driver_entry *frpc_node = NULL;
+	struct list_head *ptr = NULL, *next = NULL;
+	u32 refcount, max_count = 10;
+	bool rc = false;
+
+	if (!node)
+		return rc;
+
+search_again:
+	ptr = &me->fastrpc_driver_list.list;
+	mutex_lock(&me->fastrpc_driver_list.lock);
+	list_for_each_safe(ptr, next, &me->fastrpc_driver_list.list) {
+		frpc_node = list_entry(ptr,
+			struct cvp_dsp_fastrpc_driver_entry, list);
+
+		if (frpc_node == node) {
+			refcount = atomic_read(&frpc_node->refcount);
+			if (refcount > 0) {
+				mutex_unlock(&me->fastrpc_driver_list.lock);
+				usleep_range(5000, 10000);
+				if (max_count-- == 0) {
+					dprintk(CVP_ERR, "%s timeout %d\n",
+						__func__, refcount);
+					WARN_ON(true);
+					goto exit;
+				}
+				goto search_again;
+			}
+			list_del(&frpc_node->list);
+			rc = true;
+			break;
+		}
+	}
+	mutex_unlock(&me->fastrpc_driver_list.lock);
+exit:
+	return rc;
+}
+
+/*
+ * Detach and return the first node on the global fastrpc driver list,
+ * polling its refcount; may block for up to ~10 retries of 5-10 ms each.
+ */
+static struct cvp_dsp_fastrpc_driver_entry *pop_frpc_node(void)
+{
+	struct cvp_dsp_apps *me = &gfa_cv;
+	struct cvp_dsp_fastrpc_driver_entry *frpc_node = NULL;
+	struct list_head *ptr = NULL, *next = NULL;
+	u32 refcount, max_count = 10;
+
+search_again:
+	ptr = &me->fastrpc_driver_list.list;
+	if (!ptr) {
+		frpc_node = NULL;
+		goto exit;
+	}
+
+	mutex_lock(&me->fastrpc_driver_list.lock);
+	list_for_each_safe(ptr, next, &me->fastrpc_driver_list.list) {
+		if (!ptr)
+			break;
+		frpc_node = list_entry(ptr,
+			struct cvp_dsp_fastrpc_driver_entry, list);
+
+		if (frpc_node) {
+			refcount = atomic_read(&frpc_node->refcount);
+			if (refcount > 0) {
+				mutex_unlock(&me->fastrpc_driver_list.lock);
+				usleep_range(5000, 10000);
+				if (max_count-- == 0) {
+					dprintk(CVP_ERR, "%s timeout\n",
+							__func__);
+					frpc_node = NULL;
+					goto exit;
+				}
+				goto search_again;
+			}
+			list_del(&frpc_node->list);
+			break;
+		}
+	}
+
+	mutex_unlock(&me->fastrpc_driver_list.lock);
+exit:
+	return frpc_node;
+}
+
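+/*
+ * rpmsg remove callback, invoked on CDSP SSR: drain any in-flight
+ * DSP2CPU command, reclaim the HFI queue memory from the DSP, mark the
+ * channel down, then tear down all DSP sessions.
+ */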
+static void cvp_dsp_rpmsg_remove(struct rpmsg_device *rpdev)
+{
+	struct cvp_dsp_apps *me = &gfa_cv;
+	u32 max_num_retries = 100;
+
+	dprintk(CVP_WARN, "%s: CDSP SSR triggered\n", __func__);
+
+	mutex_lock(&me->rx_lock);
+	while (max_num_retries > 0) {
+		if (me->pending_dsp2cpu_cmd.type !=
+				CVP_INVALID_RPMSG_TYPE) {
+			mutex_unlock(&me->rx_lock);
+			usleep_range(1000, 5000);
+			mutex_lock(&me->rx_lock);
+		} else {
+			break;
+		}
+		max_num_retries--;
+	}
+
+	if (!max_num_retries)
+		dprintk(CVP_ERR, "stuck processing pending DSP cmds\n");
+
+	mutex_lock(&me->tx_lock);
+	cvp_hyp_assign_from_dsp();
+
+	me->chan = NULL;
+	me->state = DSP_UNINIT;
+	mutex_unlock(&me->tx_lock);
+	mutex_unlock(&me->rx_lock);
+
+	/* Wait HW finish current frame processing */
+	usleep_range(20000, 50000);
+	cvp_remove_dsp_sessions();
+
+	dprintk(CVP_WARN, "%s: CDSP SSR handled nr_maps %d\n", __func__,
+			atomic_read(&nr_maps));
+}
+
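+/*
+ * rpmsg rx callback: a packet of sizeof(cvp_dsp_rsp_msg) completes a
+ * pending CPU2DSP command, while a sizeof(cvp_dsp2cpu_cmd) packet is
+ * stored as the pending DSP2CPU command and handed to cvp_dsp_thread.
+ */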
+static int cvp_dsp_rpmsg_callback(struct rpmsg_device *rpdev,
+	void *data, int len, void *priv, u32 addr)
+{
+	struct cvp_dsp_rsp_msg *rsp = (struct cvp_dsp_rsp_msg *)data;
+	struct cvp_dsp_apps *me = &gfa_cv;
+
+	dprintk(CVP_DSP, "%s: type = 0x%x ret = 0x%x len = 0x%x\n",
+		__func__, rsp->type, rsp->ret, len);
+
+	if (rsp->type < CPU2DSP_MAX_CMD && len == sizeof(*rsp)) {
+		if (me->pending_dsp2cpu_rsp.type == rsp->type) {
+			memcpy(&me->pending_dsp2cpu_rsp, rsp,
+				sizeof(struct cvp_dsp_rsp_msg));
+			complete(&me->completions[rsp->type]);
+		} else {
+			dprintk(CVP_ERR, "%s: CPU2DSP resp %d, pending %d\n",
+					__func__, rsp->type,
+					me->pending_dsp2cpu_rsp.type);
+			goto exit;
+		}
+	} else if (rsp->type < CVP_DSP_MAX_CMD &&
+			len == sizeof(struct cvp_dsp2cpu_cmd)) {
+		if (me->pending_dsp2cpu_cmd.type != CVP_INVALID_RPMSG_TYPE) {
+			dprintk(CVP_ERR,
+				"%s: DSP2CPU cmd:%d pending %d %d expect %d\n",
+					__func__, rsp->type,
+				me->pending_dsp2cpu_cmd.type, len,
+				sizeof(struct cvp_dsp2cpu_cmd));
+			goto exit;
+		}
+		memcpy(&me->pending_dsp2cpu_cmd, rsp,
+			sizeof(struct cvp_dsp2cpu_cmd));
+		complete(&me->completions[CPU2DSP_MAX_CMD]);
+	} else {
+		dprintk(CVP_ERR, "%s: Invalid type: %d\n", __func__, rsp->type);
+		return 0;
+	}
+
+	return 0;
+exit:
+	dprintk(CVP_ERR, "concurrent dsp cmd type = %d, rsp type = %d\n",
+			me->pending_dsp2cpu_cmd.type,
+			me->pending_dsp2cpu_rsp.type);
+	return 0;
+}
+
+static bool dsp_session_exist(void)
+{
+	struct msm_cvp_core *core;
+	struct msm_cvp_inst *inst = NULL;
+
+	core = cvp_driver->cvp_core;
+	if (core) {
+		mutex_lock(&core->lock);
+		list_for_each_entry(inst, &core->instances, list) {
+			if (inst->session_type == MSM_CVP_DSP) {
+				mutex_unlock(&core->lock);
+				return true;
+			}
+		}
+		mutex_unlock(&core->lock);
+	}
+
+	return false;
+}
+
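+/*
+ * Send CPU2DSP_SUSPEND to the DSP. Unless @force is set, bail out with
+ * -EBUSY while a DSP session exists and the DSP has not already entered
+ * power collapse; on an EFATAL response the DSP is reinitialized once
+ * before giving up.
+ */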
+int cvp_dsp_suspend(bool force)
+{
+	int rc = 0;
+	struct cvp_dsp_cmd_msg cmd;
+	struct cvp_dsp_apps *me = &gfa_cv;
+	struct cvp_dsp_rsp_msg rsp;
+	bool retried = false;
+
+	/* If not forced to suspend, check if DSP requested PC earlier */
+	if (!force && dsp_session_exist() && me->state != DSP_SUSPEND)
+		return -EBUSY;
+
+	cmd.type = CPU2DSP_SUSPEND;
+
+	mutex_lock(&me->tx_lock);
+	if (me->state != DSP_READY)
+		goto exit;
+
+retry:
+	/* Use cvp_dsp_send_cmd_sync after dsp driver is ready */
+	rc = cvp_dsp_send_cmd_sync(&cmd,
+			sizeof(struct cvp_dsp_cmd_msg),
+			&rsp);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"%s: cvp_dsp_send_cmd failed rc = %d\n",
+			__func__, rc);
+		goto exit;
+	}
+
+	if (rsp.ret == CPU2DSP_EUNAVAILABLE)
+		goto fatal_exit;
+
+	if (rsp.ret == CPU2DSP_EFATAL) {
+		dprintk(CVP_ERR, "%s: suspend dsp got EFATAL error\n",
+				__func__);
+		if (!retried) {
+			mutex_unlock(&me->tx_lock);
+			retried = true;
+			rc = cvp_reinit_dsp();
+			mutex_lock(&me->tx_lock);
+			if (rc)
+				goto fatal_exit;
+			else
+				goto retry;
+		} else {
+			goto fatal_exit;
+		}
+	}
+
+	me->state = DSP_SUSPEND;
+	dprintk(CVP_DSP, "DSP suspended, nr_map: %d\n", atomic_read(&nr_maps));
+	goto exit;
+
+fatal_exit:
+	me->state = DSP_INVALID;
+	cvp_hyp_assign_from_dsp();
+	rc = -ENOTSUPP;
+exit:
+	mutex_unlock(&me->tx_lock);
+	return rc;
+}
+
+int cvp_dsp_resume(void)
+{
+	int rc = 0;
+	struct cvp_dsp_cmd_msg cmd;
+	struct cvp_dsp_apps *me = &gfa_cv;
+
+	cmd.type = CPU2DSP_RESUME;
+
+	/*
+	 * Sending the resume command here can deadlock against
+	 * DSP2CPU_CREATE_SESSION in dsp_thread; probably get rid of
+	 * this entirely, as discussed before.
+	 */
+	if (me->state != DSP_SUSPEND)
+		dprintk(CVP_WARN, "%s DSP not in SUSPEND state\n", __func__);
+
+	return rc;
+}
+
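+/*
+ * Pop every fastrpc driver node, delete each session attached to it,
+ * then unregister the fastrpc driver and free the node. Used on SSR
+ * and DSP reinit.
+ */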
+static void cvp_remove_dsp_sessions(void)
+{
+	struct cvp_dsp_apps *me = &gfa_cv;
+	struct cvp_dsp_fastrpc_driver_entry *frpc_node = NULL;
+	struct msm_cvp_inst *inst = NULL;
+	struct list_head *s = NULL, *next_s = NULL;
+
+	while ((frpc_node = pop_frpc_node())) {
+		s = &frpc_node->dsp_sessions.list;
+		if (!s || !(s->next))
+			return;
+		list_for_each_safe(s, next_s,
+				&frpc_node->dsp_sessions.list) {
+			if (!s || !next_s)
+				return;
+			inst = list_entry(s, struct msm_cvp_inst,
+					dsp_list);
+			if (inst) {
+				mutex_lock(&frpc_node->dsp_sessions.lock);
+				list_del(&inst->dsp_list);
+				frpc_node->session_cnt--;
+				mutex_unlock(&frpc_node->dsp_sessions.lock);
+				delete_dsp_session(inst, frpc_node);
+			}
+		}
+
+		dprintk(CVP_DSP, "%s DEINIT_MSM_CVP_LIST %pK\n",
+				__func__, &frpc_node->dsp_sessions);
+		DEINIT_MSM_CVP_LIST(&frpc_node->dsp_sessions);
+		dprintk(CVP_DSP, "%s list_del fastrpc node %pK\n",
+				__func__, frpc_node);
+		__fastrpc_driver_unregister(
+				&frpc_node->cvp_fastrpc_driver);
+		dprintk(CVP_DSP,
+				"%s Unregistered fastrpc handle 0x%x\n",
+				__func__, frpc_node->handle);
+		mutex_lock(&me->driver_name_lock);
+		eva_fastrpc_driver_release_name(frpc_node);
+		mutex_unlock(&me->driver_name_lock);
+		kfree(frpc_node);
+		frpc_node = NULL;
+	}
+
+	dprintk(CVP_WARN, "%s: EVA SSR handled for CDSP\n", __func__);
+}
+
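+/*
+ * Send CPU2DSP_SHUTDOWN and reclaim the HFI queue memory from the DSP;
+ * the memory is reclaimed whether or not the command succeeds.
+ */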
+int cvp_dsp_shutdown(void)
+{
+	struct cvp_dsp_apps *me = &gfa_cv;
+	int rc = 0;
+	struct cvp_dsp_cmd_msg cmd;
+	struct cvp_dsp_rsp_msg rsp;
+
+	cmd.type = CPU2DSP_SHUTDOWN;
+
+	mutex_lock(&me->tx_lock);
+	if (me->state == DSP_INVALID)
+		goto exit;
+
+	me->state = DSP_INACTIVE;
+	rc = cvp_dsp_send_cmd_sync(&cmd, sizeof(struct cvp_dsp_cmd_msg), &rsp);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"%s: cvp_dsp_send_cmd failed with rc = %d\n",
+			__func__, rc);
+		cvp_hyp_assign_from_dsp();
+		goto exit;
+	}
+
+	rc = cvp_hyp_assign_from_dsp();
+
+exit:
+	mutex_unlock(&me->tx_lock);
+	return rc;
+}
+
+static const struct rpmsg_device_id cvp_dsp_rpmsg_match[] = {
+	{ CVP_APPS_DSP_GLINK_GUID },
+	{ },
+};
+
+static struct rpmsg_driver cvp_dsp_rpmsg_client = {
+	.id_table = cvp_dsp_rpmsg_match,
+	.probe = cvp_dsp_rpmsg_probe,
+	.remove = cvp_dsp_rpmsg_remove,
+	.callback = cvp_dsp_rpmsg_callback,
+	.drv = {
+		.name = "qcom,msm_cvp_dsp_rpmsg",
+	},
+};
+
+static void cvp_dsp_set_queue_hdr_defaults(struct cvp_hfi_queue_header *q_hdr)
+{
+	q_hdr->qhdr_status = 0x1;
+	q_hdr->qhdr_type = CVP_IFACEQ_DFLT_QHDR;
+	q_hdr->qhdr_q_size = CVP_IFACEQ_QUEUE_SIZE / 4;
+	q_hdr->qhdr_pkt_size = 0;
+	q_hdr->qhdr_rx_wm = 0x1;
+	q_hdr->qhdr_tx_wm = 0x1;
+	q_hdr->qhdr_rx_req = 0x1;
+	q_hdr->qhdr_tx_req = 0x0;
+	q_hdr->qhdr_rx_irq_status = 0x0;
+	q_hdr->qhdr_tx_irq_status = 0x0;
+	q_hdr->qhdr_read_idx = 0x0;
+	q_hdr->qhdr_write_idx = 0x0;
+}
+
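+/*
+ * Initialize the DSP-facing HFI queue table header and the three
+ * interface queues (command, message, debug) to their default state.
+ */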
+void cvp_dsp_init_hfi_queue_hdr(struct iris_hfi_device *device)
+{
+	u32 i;
+	struct cvp_hfi_queue_table_header *q_tbl_hdr;
+	struct cvp_hfi_queue_header *q_hdr;
+	struct cvp_iface_q_info *iface_q;
+
+	for (i = 0; i < CVP_IFACEQ_NUMQ; i++) {
+		iface_q = &device->dsp_iface_queues[i];
+		iface_q->q_hdr = CVP_IFACEQ_GET_QHDR_START_ADDR(
+			device->dsp_iface_q_table.align_virtual_addr, i);
+		cvp_dsp_set_queue_hdr_defaults(iface_q->q_hdr);
+	}
+	q_tbl_hdr = (struct cvp_hfi_queue_table_header *)
+			device->dsp_iface_q_table.align_virtual_addr;
+	q_tbl_hdr->qtbl_version = 0;
+	q_tbl_hdr->device_addr = (void *)device;
+	strlcpy(q_tbl_hdr->name, "msm_cvp", sizeof(q_tbl_hdr->name));
+	q_tbl_hdr->qtbl_size = CVP_IFACEQ_TABLE_SIZE;
+	q_tbl_hdr->qtbl_qhdr0_offset =
+				sizeof(struct cvp_hfi_queue_table_header);
+	q_tbl_hdr->qtbl_qhdr_size = sizeof(struct cvp_hfi_queue_header);
+	q_tbl_hdr->qtbl_num_q = CVP_IFACEQ_NUMQ;
+	q_tbl_hdr->qtbl_num_active_q = CVP_IFACEQ_NUMQ;
+
+	iface_q = &device->dsp_iface_queues[CVP_IFACEQ_CMDQ_IDX];
+	q_hdr = iface_q->q_hdr;
+	q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
+	q_hdr->qhdr_type |= HFI_Q_ID_HOST_TO_CTRL_CMD_Q;
+
+	iface_q = &device->dsp_iface_queues[CVP_IFACEQ_MSGQ_IDX];
+	q_hdr = iface_q->q_hdr;
+	q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
+	q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_MSG_Q;
+
+	iface_q = &device->dsp_iface_queues[CVP_IFACEQ_DBGQ_IDX];
+	q_hdr = iface_q->q_hdr;
+	q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
+	q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q;
+	/*
+	 * Set receive request to zero on the debug queue as no interrupt
+	 * from the CVP hardware is needed for debug messages
+	 */
+	q_hdr->qhdr_rx_req = 0;
+}
+
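+/*
+ * Recover the DSP interface after a fatal error: force a shutdown,
+ * tear down all DSP sessions, hyp-assign the HFI queue memory back to
+ * the DSP, and resend the HFI queue address.
+ */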
+static int __reinit_dsp(void)
+{
+	int rc;
+	uint64_t addr;
+	uint32_t size;
+	struct cvp_dsp_apps *me = &gfa_cv;
+	struct cvp_dsp_rsp_msg rsp;
+	struct msm_cvp_core *core;
+	struct iris_hfi_device *device;
+
+	core = cvp_driver->cvp_core;
+	if (core && core->dev_ops)
+		device = core->dev_ops->hfi_device_data;
+	else
+		return -EINVAL;
+
+	if (!device) {
+		dprintk(CVP_ERR, "%s: NULL device\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Force shutdown DSP */
+	rc = cvp_dsp_shutdown();
+	if (rc)
+		return rc;
+	/*
+	 * Workaround to force-delete DSP session resources.
+	 * To be removed once the DSP optimization is ready.
+	 */
+	cvp_remove_dsp_sessions();
+
+	dprintk(CVP_WARN, "Reinit EVA DSP interface: nr_map %d\n",
+			atomic_read(&nr_maps));
+
+	/* Resend HFI queue */
+	mutex_lock(&me->tx_lock);
+	if (!device->dsp_iface_q_table.align_virtual_addr) {
+		dprintk(CVP_ERR, "%s: DSP HFI queue released\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	addr = (uint64_t)device->dsp_iface_q_table.mem_data.dma_handle;
+	size = device->dsp_iface_q_table.mem_data.size;
+
+	if (!addr || !size) {
+		dprintk(CVP_DSP, "%s: HFI queue is not ready\n", __func__);
+		goto exit;
+	}
+
+	rc = cvp_hyp_assign_to_dsp(addr, size);
+	if (rc) {
+		dprintk(CVP_ERR, "%s: cvp_hyp_assign_to_dsp. rc=%d\n",
+			__func__, rc);
+		goto exit;
+	}
+
+	rc = cvp_dsp_send_cmd_hfi_queue((phys_addr_t *)addr, size, &rsp);
+	if (rc) {
+		dprintk(CVP_WARN, "%s: Send HFI Queue failed rc = %d\n",
+			__func__, rc);
+
+		goto exit;
+	}
+	if (rsp.ret) {
+		dprintk(CVP_ERR, "%s: DSP error %d %d\n", __func__,
+				rsp.ret, rsp.dsp_state);
+		rc = -ENODEV;
+	}
+exit:
+	mutex_unlock(&me->tx_lock);
+	return rc;
+}
+
+static int cvp_reinit_dsp(void)
+{
+	int rc;
+	struct cvp_dsp_apps *me = &gfa_cv;
+
+	rc = __reinit_dsp();
+	if (rc)	{
+		mutex_lock(&me->tx_lock);
+		me->state = DSP_INVALID;
+		cvp_hyp_assign_from_dsp();
+		mutex_unlock(&me->tx_lock);
+	}
+	return rc;
+}
+
+static void cvp_put_fastrpc_node(struct cvp_dsp_fastrpc_driver_entry *node)
+{
+	if (node && (atomic_read(&node->refcount) > 0))
+		atomic_dec(&node->refcount);
+}
+
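+/*
+ * Look up a fastrpc driver node by handle; on success the node's
+ * refcount is elevated and must be dropped via cvp_put_fastrpc_node().
+ */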
+static struct cvp_dsp_fastrpc_driver_entry *cvp_get_fastrpc_node_with_handle(
+			uint32_t handle)
+{
+	struct cvp_dsp_apps *me = &gfa_cv;
+	struct list_head *ptr = NULL, *next = NULL;
+	struct cvp_dsp_fastrpc_driver_entry *frpc_node = NULL, *tmp_node = NULL;
+
+	mutex_lock(&me->fastrpc_driver_list.lock);
+	list_for_each_safe(ptr, next, &me->fastrpc_driver_list.list) {
+		if (!ptr)
+			break;
+		tmp_node = list_entry(ptr,
+				struct cvp_dsp_fastrpc_driver_entry, list);
+		if (handle == tmp_node->handle) {
+			frpc_node = tmp_node;
+			atomic_inc(&frpc_node->refcount);
+			dprintk(CVP_DSP, "Find tmp_node with handle 0x%x\n",
+				handle);
+			break;
+		}
+	}
+	mutex_unlock(&me->fastrpc_driver_list.lock);
+
+	dprintk(CVP_DSP, "%s found fastrpc probe handle %pK pid 0x%x\n",
+		__func__, frpc_node, handle);
+	return frpc_node;
+}
+
+static void eva_fastrpc_driver_unregister(uint32_t handle, bool force_exit);
+
+static int cvp_fastrpc_probe(struct fastrpc_device *rpc_dev)
+{
+	struct cvp_dsp_fastrpc_driver_entry *frpc_node = NULL;
+
+	dprintk(CVP_DSP, "%s fastrpc probe handle 0x%x\n",
+		__func__, rpc_dev->handle);
+
+	frpc_node = cvp_get_fastrpc_node_with_handle(rpc_dev->handle);
+	if (frpc_node) {
+		frpc_node->cvp_fastrpc_device = rpc_dev;
+		complete(&frpc_node->fastrpc_probe_completion);
+		cvp_put_fastrpc_node(frpc_node);
+	}
+
+	return 0;
+}
+
+static int cvp_fastrpc_callback(struct fastrpc_device *rpc_dev,
+			enum fastrpc_driver_status fastrpc_proc_num)
+{
+	dprintk(CVP_DSP, "%s handle 0x%x, proc %d\n", __func__,
+			rpc_dev->handle, fastrpc_proc_num);
+
+	/*
+	 * The fastrpc driver goes down when its process is gone; any
+	 * cleanup can happen here, such as:
+	 * eva_fastrpc_driver_unregister(rpc_dev->handle, true);
+	 */
+	eva_fastrpc_driver_unregister(rpc_dev->handle, true);
+
+	return 0;
+}
+
+
+static struct fastrpc_driver cvp_fastrpc_client = {
+	.probe = cvp_fastrpc_probe,
+	.callback = cvp_fastrpc_callback,
+};
+
+
+static int eva_fastrpc_dev_map_dma(struct fastrpc_device *frpc_device,
+			struct cvp_internal_buf *buf,
+			uint32_t dsp_remote_map,
+			uint64_t *v_dsp_addr)
+{
+#ifdef CVP_FASTRPC_ENABLED
+	struct fastrpc_dev_map_dma frpc_map_buf = {0};
+	int rc = 0;
+
+	if (dsp_remote_map == 1) {
+		frpc_map_buf.buf = buf->smem->dma_buf;
+		frpc_map_buf.size = buf->smem->size;
+		frpc_map_buf.attrs = 0;
+
+		dprintk(CVP_DSP,
+			"%s frpc_map_buf size %d, dma_buf %pK, map %pK, 0x%x\n",
+			__func__, frpc_map_buf.size, frpc_map_buf.buf,
+			&frpc_map_buf, (unsigned long)&frpc_map_buf);
+		rc = __fastrpc_driver_invoke(frpc_device, FASTRPC_DEV_MAP_DMA,
+			(unsigned long)(&frpc_map_buf));
+		if (rc) {
+			dprintk(CVP_ERR,
+				"%s Failed to map buffer 0x%x\n", __func__, rc);
+			return rc;
+		}
+		buf->fd = (s32)frpc_map_buf.v_dsp_addr;
+		*v_dsp_addr = frpc_map_buf.v_dsp_addr;
+		atomic_inc(&nr_maps);
+	} else {
+		dprintk(CVP_DSP, "%s Buffer not mapped to dsp\n", __func__);
+		buf->fd = 0;
+	}
+
+	return rc;
+#else
+	return -ENODEV;
+#endif	/* End of CVP_FASTRPC_ENABLED */
+}
+
+static int eva_fastrpc_dev_unmap_dma(struct fastrpc_device *frpc_device,
+			struct cvp_internal_buf *buf)
+{
+#ifdef CVP_FASTRPC_ENABLED
+	struct fastrpc_dev_unmap_dma frpc_unmap_buf = {0};
+	int rc = 0;
+
+	/* Only if buffer is mapped to dsp */
+	if (buf->fd != 0) {
+		frpc_unmap_buf.buf = buf->smem->dma_buf;
+		rc = __fastrpc_driver_invoke(frpc_device, FASTRPC_DEV_UNMAP_DMA,
+				(unsigned long)(&frpc_unmap_buf));
+		if (rc) {
+			dprintk_rl(CVP_ERR, "%s Failed to unmap buffer %d\n",
+				__func__, rc);
+			return rc;
+		}
+		if (atomic_read(&nr_maps) > 0)
+			atomic_dec(&nr_maps);
+	} else {
+		dprintk(CVP_DSP, "%s buffer not mapped to dsp\n", __func__);
+	}
+
+	return rc;
+#else
+	return -ENODEV;
+#endif	/* End of CVP_FASTRPC_ENABLED */
+}
+
+static int eva_fastrpc_dev_get_pid(struct fastrpc_device *frpc_device, int *pid)
+{
+#ifdef CVP_FASTRPC_ENABLED
+	struct fastrpc_dev_get_hlos_pid get_pid = {0};
+	int rc = 0;
+
+	rc = __fastrpc_driver_invoke(frpc_device, FASTRPC_DEV_GET_HLOS_PID,
+				(unsigned long)(&get_pid));
+	if (rc) {
+		dprintk(CVP_ERR, "%s Failed to get PID %x\n",
+				__func__, rc);
+		return rc;
+	}
+	*pid = get_pid.hlos_pid;
+
+	return rc;
+#else
+	return -ENODEV;
+#endif	/* End of CVP_FASTRPC_ENABLED */
+}
+
+static void eva_fastrpc_driver_add_sess(
+	struct cvp_dsp_fastrpc_driver_entry *frpc,
+	struct msm_cvp_inst *inst)
+{
+	mutex_lock(&frpc->dsp_sessions.lock);
+	if (inst)
+		list_add_tail(&inst->dsp_list, &frpc->dsp_sessions.list);
+	else
+		dprintk(CVP_ERR, "%s incorrect input %pK\n", __func__, inst);
+	frpc->session_cnt++;
+	mutex_unlock(&frpc->dsp_sessions.lock);
+	dprintk(CVP_DSP, "add dsp sess %pK fastrpc_driver %pK\n", inst, frpc);
+}
+
+int cvp_dsp_fastrpc_unmap(uint32_t handle, struct cvp_internal_buf *buf)
+{
+	struct cvp_dsp_fastrpc_driver_entry *frpc_node = NULL;
+	struct fastrpc_device *frpc_device = NULL;
+	int rc = 0;
+
+	frpc_node = cvp_get_fastrpc_node_with_handle(handle);
+	if (!frpc_node) {
+		dprintk(CVP_ERR, "%s no frpc node for dsp handle %d\n",
+			__func__, handle);
+		return -EINVAL;
+	}
+	frpc_device = frpc_node->cvp_fastrpc_device;
+	rc = eva_fastrpc_dev_unmap_dma(frpc_device, buf);
+	if (rc)
+		dprintk(CVP_ERR, "%s Fail to unmap buffer 0x%x\n",
+				__func__, rc);
+
+	cvp_put_fastrpc_node(frpc_node);
+	return rc;
+}
+
+int cvp_dsp_del_sess(uint32_t handle, struct msm_cvp_inst *inst)
+{
+	struct cvp_dsp_fastrpc_driver_entry *frpc_node = NULL;
+	struct list_head *ptr = NULL, *next = NULL;
+	struct msm_cvp_inst *sess;
+	bool found = false;
+
+	frpc_node = cvp_get_fastrpc_node_with_handle(handle);
+	if (!frpc_node) {
+		dprintk(CVP_ERR, "%s no frpc node for dsp handle %d\n",
+				__func__, handle);
+		return -EINVAL;
+	}
+	mutex_lock(&frpc_node->dsp_sessions.lock);
+	list_for_each_safe(ptr, next, &frpc_node->dsp_sessions.list) {
+		if (!ptr)
+			break;
+		sess = list_entry(ptr, struct msm_cvp_inst, dsp_list);
+		if (sess == inst) {
+			dprintk(CVP_DSP, "%s Find sess %pK to be deleted\n",
+				__func__, inst);
+			found = true;
+			break;
+		}
+	}
+	if (found) {
+		list_del(&inst->dsp_list);
+		frpc_node->session_cnt--;
+	}
+
+	mutex_unlock(&frpc_node->dsp_sessions.lock);
+
+	cvp_put_fastrpc_node(frpc_node);
+	return 0;
+}
+
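+/*
+ * Ensure a fastrpc driver node exists for @handle: reuse an existing
+ * node, or allocate one, claim a driver name, register it with the
+ * fastrpc framework, and wait for its probe to complete.
+ */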
+static int eva_fastrpc_driver_register(uint32_t handle)
+{
+	struct cvp_dsp_apps *me = &gfa_cv;
+	int rc = 0;
+	struct cvp_dsp_fastrpc_driver_entry *frpc_node = NULL;
+	bool skip_deregister = true;
+
+	dprintk(CVP_DSP, "%s -> cvp_get_fastrpc_node_with_handle hdl 0x%x\n",
+			__func__, handle);
+	frpc_node = cvp_get_fastrpc_node_with_handle(handle);
+
+	if (frpc_node == NULL) {
+		dprintk(CVP_DSP, "%s new fastrpc node hdl 0x%x\n",
+				__func__, handle);
+		frpc_node = kzalloc(sizeof(*frpc_node), GFP_KERNEL);
+		if (!frpc_node) {
+			dprintk(CVP_DSP, "%s allocate frpc node fail\n",
+				__func__);
+			return -ENOMEM;
+		}
+
+		/* Setup fastrpc_node */
+		frpc_node->handle = handle;
+		frpc_node->cvp_fastrpc_driver = cvp_fastrpc_client;
+		frpc_node->cvp_fastrpc_driver.handle = handle;
+		mutex_lock(&me->driver_name_lock);
+		rc = eva_fastrpc_driver_get_name(frpc_node);
+		mutex_unlock(&me->driver_name_lock);
+		if (rc) {
+			dprintk(CVP_ERR, "%s fastrpc get name fail err %d\n",
+				__func__, rc);
+			goto fail_fastrpc_driver_get_name;
+		}
+
+		/* Init completion */
+		init_completion(&frpc_node->fastrpc_probe_completion);
+
+		mutex_lock(&me->fastrpc_driver_list.lock);
+		list_add_tail(&frpc_node->list, &me->fastrpc_driver_list.list);
+		INIT_MSM_CVP_LIST(&frpc_node->dsp_sessions);
+		mutex_unlock(&me->fastrpc_driver_list.lock);
+		dprintk(CVP_DSP, "Add frpc node %pK to list\n", frpc_node);
+
+		/* register fastrpc device to this session */
+		rc = __fastrpc_driver_register(&frpc_node->cvp_fastrpc_driver);
+		if (rc) {
+			dprintk(CVP_ERR, "%s fastrpc driver reg fail err %d\n",
+				__func__, rc);
+			skip_deregister = true;
+			goto fail_fastrpc_driver_register;
+		}
+
+		/* Wait for fastrpc probe; reuse the DSP response timeout for now */
+		if (!wait_for_completion_timeout(
+				&frpc_node->fastrpc_probe_completion,
+				msecs_to_jiffies(CVP_DSP_RESPONSE_TIMEOUT))) {
+			dprintk(CVP_ERR, "%s fastrpc driver_register timeout %#x\n",
+				__func__, frpc_node->handle);
+			skip_deregister = false;
+			goto fail_fastrpc_driver_register;
+		}
+	} else {
+		dprintk(CVP_DSP, "%s fastrpc probe frpc_node %pK hdl 0x%x\n",
+			__func__, frpc_node, handle);
+		cvp_put_fastrpc_node(frpc_node);
+	}
+
+	return rc;
+
+fail_fastrpc_driver_register:
+	dequeue_frpc_node(frpc_node);
+	if (!skip_deregister)
+		__fastrpc_driver_unregister(&frpc_node->cvp_fastrpc_driver);
+
+	mutex_lock(&me->driver_name_lock);
+	eva_fastrpc_driver_release_name(frpc_node);
+	mutex_unlock(&me->driver_name_lock);
+fail_fastrpc_driver_get_name:
+	kfree(frpc_node);
+	return -EINVAL;
+}
+
+static void eva_fastrpc_driver_unregister(uint32_t handle, bool force_exit)
+{
+	struct cvp_dsp_apps *me = &gfa_cv;
+	struct cvp_dsp_fastrpc_driver_entry *frpc_node = NULL;
+	struct cvp_dsp2cpu_cmd *dsp2cpu_cmd = &me->pending_dsp2cpu_cmd;
+
+	dprintk(CVP_DSP, "%s Unregister fastrpc driver hdl %#x hdl %#x, f %d\n",
+		__func__, handle, dsp2cpu_cmd->pid, (uint32_t)force_exit);
+
+	if (handle != dsp2cpu_cmd->pid)
+		dprintk(CVP_ERR, "Unregister pid != hndl %#x %#x\n",
+				handle, dsp2cpu_cmd->pid);
+
+	/* Find the fastrpc node */
+	frpc_node = cvp_get_fastrpc_node_with_handle(handle);
+
+	if (frpc_node == NULL) {
+		dprintk(CVP_DSP, "%s fastrpc handle 0x%x unregistered\n",
+			__func__, handle);
+		return;
+	}
+
+	if ((frpc_node->session_cnt == 0) || force_exit) {
+		dprintk(CVP_DSP, "%s session cnt %d, force %d\n",
+		__func__, frpc_node->session_cnt, (uint32_t)force_exit);
+
+		DEINIT_MSM_CVP_LIST(&frpc_node->dsp_sessions);
+
+		cvp_put_fastrpc_node(frpc_node);
+		if (!dequeue_frpc_node(frpc_node))
+			/* Node not found; nothing more to do */
+			return;
+
+		__fastrpc_driver_unregister(&frpc_node->cvp_fastrpc_driver);
+		mutex_lock(&me->driver_name_lock);
+		eva_fastrpc_driver_release_name(frpc_node);
+		mutex_unlock(&me->driver_name_lock);
+		kfree(frpc_node);
+	} else {
+		cvp_put_fastrpc_node(frpc_node);
+	}
+}
+
+void cvp_dsp_send_debug_mask(void)
+{
+	struct cvp_dsp_cmd_msg cmd;
+	struct cvp_dsp_apps *me = &gfa_cv;
+	struct cvp_dsp_rsp_msg rsp;
+	int rc;
+
+	cmd.type = CPU2DSP_SET_DEBUG_LEVEL;
+	cmd.eva_dsp_debug_mask = me->debug_mask;
+
+	dprintk(CVP_DSP,
+		"%s: debug mask 0x%x\n",
+		__func__, cmd.eva_dsp_debug_mask);
+
+	rc = cvp_dsp_send_cmd_sync(&cmd, sizeof(struct cvp_dsp_cmd_msg), &rsp);
+	if (rc)
+		dprintk(CVP_ERR,
+			"%s: cvp_dsp_send_cmd failed rc = %d\n",
+			__func__, rc);
+}
+
+void cvp_dsp_send_hfi_queue(void)
+{
+	struct msm_cvp_core *core;
+	struct iris_hfi_device *device;
+	struct cvp_dsp_apps *me = &gfa_cv;
+	struct cvp_dsp_rsp_msg rsp = {0};
+	uint64_t addr;
+	uint32_t size;
+	int rc;
+
+	core = cvp_driver->cvp_core;
+	if (core && core->dev_ops)
+		device = core->dev_ops->hfi_device_data;
+	else
+		return;
+
+	if (!device) {
+		dprintk(CVP_ERR, "%s: NULL device\n", __func__);
+		return;
+	}
+
+	dprintk(CVP_DSP, "Entering %s\n", __func__);
+
+	mutex_lock(&device->lock);
+	mutex_lock(&me->tx_lock);
+
+	if (!device->dsp_iface_q_table.align_virtual_addr) {
+		dprintk(CVP_ERR, "%s: DSP HFI queue released\n", __func__);
+		goto exit;
+	}
+
+	addr = (uint64_t)device->dsp_iface_q_table.mem_data.dma_handle;
+	size = device->dsp_iface_q_table.mem_data.size;
+
+	if (!addr || !size) {
+		dprintk(CVP_DSP, "%s: HFI queue is not ready\n", __func__);
+		goto exit;
+	}
+
+	if (me->state != DSP_PROBED && me->state != DSP_INACTIVE) {
+		dprintk(CVP_DSP,
+			"%s: Either DSP is not probed or is not in proper state. me->state = %d\n",
+			__func__, me->state);
+		goto exit;
+	}
+	dprintk(CVP_DSP,
+		"%s: DSP probe Successful, going ahead with hyp_assign, me->state = %d\n",
+		__func__, me->state);
+
+	rc = cvp_hyp_assign_to_dsp(addr, size);
+	if (rc) {
+		dprintk(CVP_ERR, "%s: cvp_hyp_assign_to_dsp. rc=%d\n",
+			__func__, rc);
+		goto exit;
+	}
+
+	if (me->state == DSP_PROBED) {
+		cvp_dsp_init_hfi_queue_hdr(device);
+		dprintk(CVP_WARN,
+			"%s: Done init of HFI queue headers\n", __func__);
+	}
+
+	rc = cvp_dsp_send_cmd_hfi_queue((phys_addr_t *)addr, size, &rsp);
+	if (rc) {
+		dprintk(CVP_WARN, "%s: Send HFI Queue failed rc = %d\n",
+			__func__, rc);
+
+		goto exit;
+	}
+
+	if (rsp.ret == CPU2DSP_EUNSUPPORTED) {
+		dprintk(CVP_WARN, "%s unsupported cmd %d\n",
+			__func__, rsp.type);
+		goto exit;
+	}
+
+	if (rsp.ret == CPU2DSP_EFATAL || rsp.ret == CPU2DSP_EUNAVAILABLE) {
+		dprintk(CVP_ERR, "%s fatal error returned %d %d\n",
+				__func__, rsp.dsp_state, rsp.ret);
+		me->state = DSP_INVALID;
+		cvp_hyp_assign_from_dsp();
+		goto exit;
+	} else if (rsp.ret == CPU2DSP_EINVALSTATE) {
+		dprintk(CVP_ERR, "%s dsp invalid state %d\n",
+				__func__, rsp.dsp_state);
+		mutex_unlock(&me->tx_lock);
+		if (cvp_reinit_dsp()) {
+			dprintk(CVP_ERR, "%s reinit dsp fail\n", __func__);
+			mutex_unlock(&device->lock);
+			return;
+		}
+		mutex_lock(&me->tx_lock);
+	}
+
+	dprintk(CVP_DSP, "%s: dsp initialized\n", __func__);
+	me->state = DSP_READY;
+
+exit:
+	mutex_unlock(&me->tx_lock);
+	mutex_unlock(&device->lock);
+}
+
+/*
+ * Rebuild a 32- or 64-bit CPU-side instance pointer from the two 32-bit
+ * words carried by the DSP, then validate it against the core's
+ * instance list. Dirty fix.
+ */
+static void *get_inst_from_dsp(uint32_t session_cpu_high, uint32_t session_cpu_low)
+{
+	struct msm_cvp_core *core;
+	struct msm_cvp_inst *sess_inst;
+	void *inst;
+
+	if ((session_cpu_high == 0) && (sizeof(void *) == BITPTRSIZE32)) {
+		inst = (void *)((uintptr_t)session_cpu_low);
+	} else if ((session_cpu_high != 0) && (sizeof(void *) == BITPTRSIZE64)) {
+		inst = (void *)((uintptr_t)(((uint64_t)session_cpu_high) << 32
+							| session_cpu_low));
+	} else {
+		dprintk(CVP_ERR,
+			"%s Invalid _cpu_high = 0x%x _cpu_low = 0x%x\n",
+				__func__, session_cpu_high, session_cpu_low);
+		inst = NULL;
+		return inst;
+	}
+
+	core = cvp_driver->cvp_core;
+	if (core) {
+		mutex_lock(&core->lock);
+		list_for_each_entry(sess_inst, &core->instances, list) {
+			if (sess_inst->session_type == MSM_CVP_DSP) {
+				if (sess_inst == (struct msm_cvp_inst *)inst) {
+					mutex_unlock(&core->lock);
+					return inst;
+				}
+			}
+		}
+		mutex_unlock(&core->lock);
+		inst = NULL;
+	} else {
+		return NULL;
+	}
+
+	return inst;
+}
+
+static void print_power(const struct eva_power_req *pwr_req)
+{
+	if (pwr_req) {
+		dprintk(CVP_DSP, "Clock: Fdu %d Ica %d Od %d Mpu %d Fw %d",
+				pwr_req->clock_fdu, pwr_req->clock_ica,
+				pwr_req->clock_od, pwr_req->clock_mpu,
+				pwr_req->clock_fw);
+		dprintk(CVP_DSP, "OpClock: Fdu %d Ica %d Od %d Mpu %d Fw %d",
+				pwr_req->op_clock_fdu, pwr_req->op_clock_ica,
+				pwr_req->op_clock_od, pwr_req->op_clock_mpu,
+				pwr_req->op_clock_fw);
+		dprintk(CVP_DSP, "Actual Bw: Ddr %d, SysCache %d",
+				pwr_req->bw_ddr, pwr_req->bw_sys_cache);
+		dprintk(CVP_DSP, "OpBw: Ddr %d, SysCache %d",
+				pwr_req->op_bw_ddr, pwr_req->op_bw_sys_cache);
+	}
+}
+
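+/*
+ * Handle DSP2CPU_CREATE_SESSION: register the fastrpc driver for the
+ * requesting process, resolve its HLOS task, open a CVP instance of
+ * type MSM_CVP_DSP, and return the instance pointer to the DSP split
+ * into session_cpu_high/low.
+ */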
+void __dsp_cvp_sess_create(struct cvp_dsp_cmd_msg *cmd)
+{
+	struct cvp_dsp_apps *me = &gfa_cv;
+	struct msm_cvp_inst *inst = NULL;
+	uint64_t inst_handle = 0;
+	uint32_t pid;
+	int rc = 0;
+	struct cvp_dsp2cpu_cmd *dsp2cpu_cmd = &me->pending_dsp2cpu_cmd;
+	struct cvp_dsp_fastrpc_driver_entry *frpc_node = NULL;
+	struct pid *pid_s = NULL;
+	struct task_struct *task = NULL;
+	struct cvp_hfi_ops *ops_tbl;
+	struct fastrpc_device *frpc_device;
+
+	cmd->ret = 0;
+
+	dprintk(CVP_DSP,
+		"%s sess Type %d Mask %d Prio %d Sec %d hdl 0x%x\n",
+		__func__, dsp2cpu_cmd->session_type,
+		dsp2cpu_cmd->kernel_mask,
+		dsp2cpu_cmd->session_prio,
+		dsp2cpu_cmd->is_secure,
+		dsp2cpu_cmd->pid);
+
+	rc = eva_fastrpc_driver_register(dsp2cpu_cmd->pid);
+	if (rc) {
+		dprintk(CVP_ERR, "%s Register fastrpc driver fail\n", __func__);
+		cmd->ret = -1;
+		return;
+	}
+	frpc_node = cvp_get_fastrpc_node_with_handle(dsp2cpu_cmd->pid);
+	if (!frpc_node) {
+		dprintk(CVP_WARN, "%s cannot get fastrpc node from pid %x\n",
+				__func__, dsp2cpu_cmd->pid);
+		goto fail_lookup;
+	}
+	if (!frpc_node->cvp_fastrpc_device) {
+		dprintk(CVP_WARN, "%s invalid fastrpc device from pid %x\n",
+				__func__, dsp2cpu_cmd->pid);
+		goto fail_pid;
+	}
+
+	frpc_device = frpc_node->cvp_fastrpc_device;
+
+	rc = eva_fastrpc_dev_get_pid(frpc_device, &pid);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"%s Failed to get HLOS pid, rc 0x%x\n", __func__, rc);
+		goto fail_pid;
+	}
+	pid_s = find_get_pid(pid);
+	if (pid_s == NULL) {
+		dprintk(CVP_WARN, "%s incorrect pid %x\n", __func__, pid);
+		goto fail_pid;
+	}
+	dprintk(CVP_DSP, "%s get pid_s %pK from hdl 0x%x\n", __func__,
+			pid_s, dsp2cpu_cmd->pid);
+
+	task = get_pid_task(pid_s, PIDTYPE_TGID);
+	if (!task) {
+		dprintk(CVP_WARN, "%s task doesn't exist\n", __func__);
+		goto fail_pid;
+	}
+
+	inst = msm_cvp_open(MSM_CVP_DSP, task);
+	if (!inst) {
+		dprintk(CVP_ERR, "%s Failed create instance\n", __func__);
+		goto fail_msm_cvp_open;
+	}
+
+	inst->dsp_handle = dsp2cpu_cmd->pid;
+	inst->prop.kernel_mask = dsp2cpu_cmd->kernel_mask;
+	inst->prop.type = dsp2cpu_cmd->session_type;
+	inst->prop.priority = dsp2cpu_cmd->session_prio;
+	inst->prop.is_secure = dsp2cpu_cmd->is_secure;
+	inst->prop.dsp_mask = dsp2cpu_cmd->dsp_access_mask;
+
+	eva_fastrpc_driver_add_sess(frpc_node, inst);
+	rc = msm_cvp_session_create(inst);
+	if (rc) {
+		dprintk(CVP_ERR, "Warning: send Session Create failed\n");
+		goto fail_get_session_info;
+	} else {
+		dprintk(CVP_DSP, "%s DSP Session Create done\n", __func__);
+	}
+
+	/* Get session id */
+	rc = msm_cvp_get_session_info(inst, &cmd->session_id);
+	if (rc) {
+		dprintk(CVP_ERR, "Warning: get session index failed %d\n", rc);
+		goto fail_get_session_info;
+	}
+
+	inst_handle = (uint64_t)inst;
+	cmd->session_cpu_high = (uint32_t)((inst_handle & HIGH32) >> 32);
+	cmd->session_cpu_low = (uint32_t)(inst_handle & LOW32);
+
+	cvp_put_fastrpc_node(frpc_node);
+
+	inst->task = task;
+	dprintk(CVP_DSP,
+		"%s CREATE_SESS id 0x%x, cpu_low 0x%x, cpu_high 0x%x, inst %pK, inst->session %pK\n",
+		__func__, cmd->session_id, cmd->session_cpu_low,
+		cmd->session_cpu_high, inst, inst->session);
+
+	spin_lock(&inst->core->resources.pm_qos.lock);
+	inst->core->resources.pm_qos.off_vote_cnt++;
+	spin_unlock(&inst->core->resources.pm_qos.lock);
+	ops_tbl = inst->core->dev_ops;
+	call_hfi_op(ops_tbl, pm_qos_update, ops_tbl->hfi_device_data);
+
+	return;
+
+fail_get_session_info:
+	msm_cvp_close(inst);
+fail_msm_cvp_open:
+	put_task_struct(task);
+fail_pid:
+	cvp_put_fastrpc_node(frpc_node);
+fail_lookup:
+	/* unregister fastrpc driver */
+	eva_fastrpc_driver_unregister(dsp2cpu_cmd->pid, false);
+	cmd->ret = -1;
+}
+
+void __dsp_cvp_sess_delete(struct cvp_dsp_cmd_msg *cmd)
+{
+	struct cvp_dsp_apps *me = &gfa_cv;
+	struct msm_cvp_inst *inst;
+	int rc;
+	struct cvp_dsp2cpu_cmd *dsp2cpu_cmd = &me->pending_dsp2cpu_cmd;
+	struct cvp_dsp_fastrpc_driver_entry *frpc_node = NULL;
+	struct task_struct *task = NULL;
+	struct cvp_hfi_ops *ops_tbl;
+
+	cmd->ret = 0;
+
+	dprintk(CVP_DSP,
+		"%s sess id 0x%x low 0x%x high 0x%x, pid 0x%x\n",
+		__func__, dsp2cpu_cmd->session_id,
+		dsp2cpu_cmd->session_cpu_low,
+		dsp2cpu_cmd->session_cpu_high,
+		dsp2cpu_cmd->pid);
+
+	frpc_node = cvp_get_fastrpc_node_with_handle(dsp2cpu_cmd->pid);
+	if (!frpc_node) {
+		dprintk(CVP_ERR, "%s pid 0x%x not registered with fastrpc\n",
+			__func__, dsp2cpu_cmd->pid);
+		cmd->ret = -1;
+		return;
+	}
+
+	cvp_put_fastrpc_node(frpc_node);
+	inst = (struct msm_cvp_inst *)get_inst_from_dsp(
+			dsp2cpu_cmd->session_cpu_high,
+			dsp2cpu_cmd->session_cpu_low);
+	if (!inst || !is_cvp_inst_valid(inst)) {
+		dprintk(CVP_ERR, "%s incorrect session ID %llx\n", __func__, inst);
+		cmd->ret = -1;
+		goto dsp_fail_delete;
+	}
+
+	task = inst->task;
+
+	spin_lock(&inst->core->resources.pm_qos.lock);
+	if (inst->core->resources.pm_qos.off_vote_cnt > 0)
+		inst->core->resources.pm_qos.off_vote_cnt--;
+	else
+		dprintk(CVP_WARN, "%s Unexpected pm_qos off vote %d\n",
+			__func__,
+			inst->core->resources.pm_qos.off_vote_cnt);
+	spin_unlock(&inst->core->resources.pm_qos.lock);
+
+	ops_tbl = inst->core->dev_ops;
+	call_hfi_op(ops_tbl, pm_qos_update, ops_tbl->hfi_device_data);
+
+	rc = msm_cvp_close(inst);
+	if (rc) {
+		dprintk(CVP_ERR, "Warning: Failed to close cvp instance\n");
+		cmd->ret = -1;
+		goto dsp_fail_delete;
+	}
+
+	/* unregister fastrpc driver */
+	eva_fastrpc_driver_unregister(dsp2cpu_cmd->pid, false);
+
+	if (task)
+		put_task_struct(task);
+
+	dprintk(CVP_DSP, "%s DSP2CPU_DETELE_SESSION Done, nr_maps %d\n",
+			__func__, atomic_read(&nr_maps));
+dsp_fail_delete:
+	return;
+}
+
+void __dsp_cvp_power_req(struct cvp_dsp_cmd_msg *cmd)
+{
+	struct cvp_dsp_apps *me = &gfa_cv;
+	struct msm_cvp_inst *inst;
+	int rc;
+	struct cvp_dsp2cpu_cmd *dsp2cpu_cmd = &me->pending_dsp2cpu_cmd;
+
+	cmd->ret = 0;
+	dprintk(CVP_DSP,
+		"%s sess id 0x%x, low 0x%x, high 0x%x\n",
+		__func__, dsp2cpu_cmd->session_id,
+		dsp2cpu_cmd->session_cpu_low,
+		dsp2cpu_cmd->session_cpu_high);
+
+	inst = (struct msm_cvp_inst *)get_inst_from_dsp(
+			dsp2cpu_cmd->session_cpu_high,
+			dsp2cpu_cmd->session_cpu_low);
+
+	if (!inst) {
+		cmd->ret = -1;
+		goto dsp_fail_power_req;
+	}
+
+	print_power(&dsp2cpu_cmd->power_req);
+
+	inst->prop.cycles[HFI_HW_FDU] = dsp2cpu_cmd->power_req.clock_fdu;
+	inst->prop.cycles[HFI_HW_ICA] = dsp2cpu_cmd->power_req.clock_ica;
+	inst->prop.cycles[HFI_HW_OD] = dsp2cpu_cmd->power_req.clock_od;
+	inst->prop.cycles[HFI_HW_MPU] = dsp2cpu_cmd->power_req.clock_mpu;
+	inst->prop.fw_cycles = dsp2cpu_cmd->power_req.clock_fw;
+	inst->prop.ddr_bw = dsp2cpu_cmd->power_req.bw_ddr;
+	inst->prop.ddr_cache = dsp2cpu_cmd->power_req.bw_sys_cache;
+	inst->prop.op_cycles[HFI_HW_FDU] = dsp2cpu_cmd->power_req.op_clock_fdu;
+	inst->prop.op_cycles[HFI_HW_ICA] = dsp2cpu_cmd->power_req.op_clock_ica;
+	inst->prop.op_cycles[HFI_HW_OD] = dsp2cpu_cmd->power_req.op_clock_od;
+	inst->prop.op_cycles[HFI_HW_MPU] = dsp2cpu_cmd->power_req.op_clock_mpu;
+	inst->prop.fw_op_cycles = dsp2cpu_cmd->power_req.op_clock_fw;
+	inst->prop.ddr_op_bw = dsp2cpu_cmd->power_req.op_bw_ddr;
+	inst->prop.ddr_op_cache = dsp2cpu_cmd->power_req.op_bw_sys_cache;
+
+	rc = msm_cvp_update_power(inst);
+	if (rc) {
+		/*
+		 * May need to define more error types;
+		 * check the UMD implementation.
+		 */
+		dprintk(CVP_ERR, "%s Failed update power\n", __func__);
+		cmd->ret = -1;
+		goto dsp_fail_power_req;
+	}
+
+	dprintk(CVP_DSP, "%s DSP2CPU_POWER_REQUEST Done\n", __func__);
+dsp_fail_power_req:
+	return;
+}
+
+void __dsp_cvp_buf_register(struct cvp_dsp_cmd_msg *cmd)
+{
+	struct cvp_dsp_apps *me = &gfa_cv;
+	struct msm_cvp_inst *inst;
+	struct eva_kmd_arg *kmd;
+	struct eva_kmd_buffer *kmd_buf;
+	int rc;
+	struct cvp_dsp2cpu_cmd *dsp2cpu_cmd = &me->pending_dsp2cpu_cmd;
+
+	cmd->ret = 0;
+
+	dprintk(CVP_DSP,
+		"%s sess id 0x%x, low 0x%x, high 0x%x, pid 0x%x\n",
+		__func__, dsp2cpu_cmd->session_id,
+		dsp2cpu_cmd->session_cpu_low,
+		dsp2cpu_cmd->session_cpu_high,
+		dsp2cpu_cmd->pid);
+
+	kmd = kzalloc(sizeof(*kmd), GFP_KERNEL);
+	if (!kmd) {
+		dprintk(CVP_ERR, "%s kzalloc failure\n", __func__);
+		cmd->ret = -1;
+		return;
+	}
+
+	inst = (struct msm_cvp_inst *)get_inst_from_dsp(
+			dsp2cpu_cmd->session_cpu_high,
+			dsp2cpu_cmd->session_cpu_low);
+
+	kmd->type = EVA_KMD_REGISTER_BUFFER;
+	kmd_buf = (struct eva_kmd_buffer *)&(kmd->data.regbuf);
+	kmd_buf->type = EVA_KMD_BUFTYPE_INPUT;
+	kmd_buf->index = dsp2cpu_cmd->sbuf.index;
+	kmd_buf->fd = dsp2cpu_cmd->sbuf.fd;
+	kmd_buf->size = dsp2cpu_cmd->sbuf.size;
+	kmd_buf->offset = dsp2cpu_cmd->sbuf.offset;
+	kmd_buf->pixelformat = 0;
+	kmd_buf->flags = EVA_KMD_FLAG_UNSECURE;
+
+	rc = msm_cvp_register_buffer(inst, kmd_buf);
+	if (rc) {
+		dprintk(CVP_ERR, "%s Failed to register buffer\n", __func__);
+		cmd->ret = -1;
+		goto dsp_fail_buf_reg;
+	}
+	dprintk(CVP_DSP, "%s register buffer done\n", __func__);
+
+	cmd->sbuf.iova = kmd_buf->reserved[0];
+	cmd->sbuf.size = kmd_buf->size;
+	cmd->sbuf.fd = kmd_buf->fd;
+	cmd->sbuf.index = kmd_buf->index;
+	cmd->sbuf.offset = kmd_buf->offset;
+	dprintk(CVP_DSP, "%s: fd %d, iova 0x%x\n", __func__,
+			cmd->sbuf.fd, cmd->sbuf.iova);
+dsp_fail_buf_reg:
+	kfree(kmd);
+}
+
+void __dsp_cvp_buf_deregister(struct cvp_dsp_cmd_msg *cmd)
+{
+	struct cvp_dsp_apps *me = &gfa_cv;
+	struct msm_cvp_inst *inst;
+	struct eva_kmd_arg *kmd;
+	struct eva_kmd_buffer *kmd_buf;
+	int rc;
+	struct cvp_dsp2cpu_cmd *dsp2cpu_cmd = &me->pending_dsp2cpu_cmd;
+
+	cmd->ret = 0;
+
+	dprintk(CVP_DSP,
+		"%s : sess id 0x%x, low 0x%x, high 0x%x, hdl 0x%x\n",
+		__func__, dsp2cpu_cmd->session_id,
+		dsp2cpu_cmd->session_cpu_low,
+		dsp2cpu_cmd->session_cpu_high,
+		dsp2cpu_cmd->pid);
+
+	kmd = kzalloc(sizeof(*kmd), GFP_KERNEL);
+	if (!kmd) {
+		dprintk(CVP_ERR, "%s kzalloc failure\n", __func__);
+		cmd->ret = -1;
+		return;
+	}
+
+	inst = (struct msm_cvp_inst *)get_inst_from_dsp(
+			dsp2cpu_cmd->session_cpu_high,
+			dsp2cpu_cmd->session_cpu_low);
+
+	kmd->type = EVA_KMD_UNREGISTER_BUFFER;
+	kmd_buf = (struct eva_kmd_buffer *)&(kmd->data.regbuf);
+	kmd_buf->type = EVA_KMD_BUFTYPE_INPUT;
+	kmd_buf->index = dsp2cpu_cmd->sbuf.index;
+	kmd_buf->fd = dsp2cpu_cmd->sbuf.fd;
+	kmd_buf->size = dsp2cpu_cmd->sbuf.size;
+	kmd_buf->offset = dsp2cpu_cmd->sbuf.offset;
+	kmd_buf->pixelformat = 0;
+	kmd_buf->flags = EVA_KMD_FLAG_UNSECURE;
+
+	rc = msm_cvp_unregister_buffer(inst, kmd_buf);
+	if (rc) {
+		dprintk(CVP_ERR, "%s Failed to deregister buffer\n", __func__);
+		cmd->ret = -1;
+		goto fail_dsp_buf_dereg;
+	}
+
+	dprintk(CVP_DSP, "%s deregister buffer done\n", __func__);
+fail_dsp_buf_dereg:
+	kfree(kmd);
+}
+
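+/*
+ * Handle DSP2CPU_MEM_ALLOC: allocate a DSP buffer for the session,
+ * optionally map it into the DSP address space via fastrpc, and track
+ * it on inst->cvpdspbufs.
+ */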
+void __dsp_cvp_mem_alloc(struct cvp_dsp_cmd_msg *cmd)
+{
+	struct cvp_dsp_apps *me = &gfa_cv;
+	struct msm_cvp_inst *inst;
+	int rc;
+	struct cvp_internal_buf *buf = NULL;
+	struct cvp_dsp2cpu_cmd *dsp2cpu_cmd = &me->pending_dsp2cpu_cmd;
+	uint64_t v_dsp_addr = 0;
+
+	struct fastrpc_device *frpc_device = NULL;
+	struct cvp_dsp_fastrpc_driver_entry *frpc_node = NULL;
+
+	cmd->ret = 0;
+
+	dprintk(CVP_DSP,
+		"%s sess id 0x%x, low 0x%x, high 0x%x, hdl 0x%x\n",
+		__func__, dsp2cpu_cmd->session_id,
+		dsp2cpu_cmd->session_cpu_low,
+		dsp2cpu_cmd->session_cpu_high,
+		dsp2cpu_cmd->pid);
+
+	frpc_node = cvp_get_fastrpc_node_with_handle(dsp2cpu_cmd->pid);
+	if (!frpc_node) {
+		dprintk(CVP_ERR, "%s Failed to find fastrpc node 0x%x\n",
+				__func__, dsp2cpu_cmd->pid);
+		goto fail_fastrpc_node;
+	}
+	frpc_device = frpc_node->cvp_fastrpc_device;
+
+	inst = (struct msm_cvp_inst *)get_inst_from_dsp(
+			dsp2cpu_cmd->session_cpu_high,
+			dsp2cpu_cmd->session_cpu_low);
+
+	buf = cvp_kmem_cache_zalloc(&cvp_driver->buf_cache, GFP_KERNEL);
+	if (!buf)
+		goto fail_kzalloc_buf;
+
+	rc = cvp_allocate_dsp_bufs(inst, buf,
+			dsp2cpu_cmd->sbuf.size,
+			dsp2cpu_cmd->sbuf.type);
+	if (rc)
+		goto fail_allocate_dsp_buf;
+
+	rc = eva_fastrpc_dev_map_dma(frpc_device, buf,
+			dsp2cpu_cmd->sbuf.dsp_remote_map,
+			&v_dsp_addr);
+	if (rc) {
+		dprintk(CVP_ERR, "%s Failed to map buffer 0x%x\n", __func__,
+			rc);
+		goto fail_fastrpc_dev_map_dma;
+	}
+
+	mutex_lock(&inst->cvpdspbufs.lock);
+	list_add_tail(&buf->list, &inst->cvpdspbufs.list);
+	mutex_unlock(&inst->cvpdspbufs.lock);
+
+	dprintk(CVP_DSP, "%s allocate buffer done, addr 0x%llx\n",
+		__func__, v_dsp_addr);
+
+	cmd->sbuf.size = buf->smem->size;
+	cmd->sbuf.fd = buf->fd;
+	cmd->sbuf.offset = 0;
+	cmd->sbuf.iova = buf->smem->device_addr;
+	cmd->sbuf.v_dsp_addr = v_dsp_addr;
+	dprintk(CVP_DSP, "%s: size %d, iova 0x%x, v_dsp_addr 0x%llx\n",
+		__func__, cmd->sbuf.size, cmd->sbuf.iova, cmd->sbuf.v_dsp_addr);
+	dprintk(CVP_DSP, "%s: DSP2CPU_session_id 0x%x, smem_fd 0x%x, smem_refcount %d\n",
+		__func__, dsp2cpu_cmd->session_id, buf->smem->fd, buf->smem->refcount);
+
+	cvp_put_fastrpc_node(frpc_node);
+	return;
+
+fail_fastrpc_dev_map_dma:
+	cvp_release_dsp_buffers(inst, buf);
+fail_allocate_dsp_buf:
+	cvp_kmem_cache_free(&cvp_driver->buf_cache, buf);
+fail_kzalloc_buf:
+fail_fastrpc_node:
+	cmd->ret = -1;
+	cvp_put_fastrpc_node(frpc_node);
+}
+
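+/*
+ * Handle DSP2CPU_MEM_FREE: find the buffer by iova and fd on
+ * inst->cvpdspbufs, unmap it from the DSP and release it.
+ */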
+void __dsp_cvp_mem_free(struct cvp_dsp_cmd_msg *cmd)
+{
+	struct cvp_dsp_apps *me = &gfa_cv;
+	struct msm_cvp_inst *inst;
+	int rc;
+	struct cvp_internal_buf *buf = NULL;
+	struct list_head *ptr = NULL, *next = NULL;
+	struct msm_cvp_list *buf_list = NULL;
+	struct cvp_dsp2cpu_cmd *dsp2cpu_cmd = &me->pending_dsp2cpu_cmd;
+
+	struct fastrpc_device *frpc_device = NULL;
+	struct cvp_dsp_fastrpc_driver_entry *frpc_node = NULL;
+
+	cmd->ret = 0;
+
+	dprintk(CVP_DSP,
+		"%s sess 0x%x, low 0x%x, high 0x%x, hnl 0x%x, iova 0x%x, fd 0x%x\n",
+		__func__, dsp2cpu_cmd->session_id,
+		dsp2cpu_cmd->session_cpu_low,
+		dsp2cpu_cmd->session_cpu_high,
+		dsp2cpu_cmd->pid, dsp2cpu_cmd->sbuf.iova,
+		dsp2cpu_cmd->sbuf.fd);
+
+	inst = (struct msm_cvp_inst *)get_inst_from_dsp(
+			dsp2cpu_cmd->session_cpu_high,
+			dsp2cpu_cmd->session_cpu_low);
+	if (!inst) {
+		dprintk(CVP_ERR, "%s Failed to get inst\n",
+			__func__);
+		cmd->ret = -1;
+		return;
+	}
+
+	frpc_node = cvp_get_fastrpc_node_with_handle(dsp2cpu_cmd->pid);
+	if (!frpc_node) {
+		dprintk(CVP_ERR, "%s Failed to find fastrpc node 0x%x\n",
+				__func__, dsp2cpu_cmd->pid);
+		cmd->ret = -1;
+		return;
+	}
+	frpc_device = frpc_node->cvp_fastrpc_device;
+
+	buf_list = &inst->cvpdspbufs;
+	mutex_lock(&buf_list->lock);
+	list_for_each_safe(ptr, next, &buf_list->list) {
+		if (!ptr)
+			break;
+		buf = list_entry(ptr, struct cvp_internal_buf, list);
+
+		if (!buf->smem) {
+			dprintk(CVP_DSP, "Empty smem\n");
+			continue;
+		}
+
+		/* Verify with device addr */
+		if ((buf->smem->device_addr == dsp2cpu_cmd->sbuf.iova) &&
+			(buf->fd == dsp2cpu_cmd->sbuf.fd)) {
+			dprintk(CVP_DSP, "%s find device addr 0x%x\n",
+				__func__, buf->smem->device_addr);
+			dprintk(CVP_DSP, "fd in list 0x%x, fd from dsp 0x%x\n",
+				buf->fd, dsp2cpu_cmd->sbuf.fd);
+
+			rc = eva_fastrpc_dev_unmap_dma(frpc_device, buf);
+			if (rc) {
+				cmd->ret = -1;
+				goto fail_fastrpc_dev_unmap_dma;
+			}
+
+			rc = cvp_release_dsp_buffers(inst, buf);
+			if (rc) {
+				dprintk(CVP_ERR,
+					"%s Failed to free buffer 0x%x\n",
+					__func__, rc);
+				cmd->ret = -1;
+				goto fail_release_buf;
+			}
+
+			list_del(&buf->list);
+
+			cvp_kmem_cache_free(&cvp_driver->buf_cache, buf);
+			break;
+		}
+	}
+
+fail_release_buf:
+fail_fastrpc_dev_unmap_dma:
+	mutex_unlock(&buf_list->lock);
+	cvp_put_fastrpc_node(frpc_node);
+}
+
+void __dsp_cvp_sess_start(struct cvp_dsp_cmd_msg *cmd)
+{
+	struct cvp_dsp_apps *me = &gfa_cv;
+	struct msm_cvp_inst *inst;
+	struct cvp_session_queue *sq;
+	int rc;
+	struct cvp_dsp2cpu_cmd *dsp2cpu_cmd = &me->pending_dsp2cpu_cmd;
+
+	cmd->ret = 0;
+
+	dprintk(CVP_DSP,
+		"%s sess id 0x%x, low 0x%x, high 0x%x, pid 0x%x\n",
+		__func__, dsp2cpu_cmd->session_id,
+		dsp2cpu_cmd->session_cpu_low,
+		dsp2cpu_cmd->session_cpu_high,
+		dsp2cpu_cmd->pid);
+
+	inst = (struct msm_cvp_inst *)get_inst_from_dsp(
+			dsp2cpu_cmd->session_cpu_high,
+			dsp2cpu_cmd->session_cpu_low);
+
+	if (!inst || !is_cvp_inst_valid(inst)) {
+		dprintk(CVP_ERR, "%s incorrect session ID %llx\n", __func__, inst);
+		cmd->ret = -1;
+		return;
+	}
+
+	sq = &inst->session_queue;
+	spin_lock(&sq->lock);
+	if (sq->state == QUEUE_START) {
+		spin_unlock(&sq->lock);
+		dprintk(CVP_WARN, "DSP double started session %llx\n", inst);
+		return;
+	}
+	spin_unlock(&sq->lock);
+
+	rc = msm_cvp_session_start(inst, (struct eva_kmd_arg *)NULL);
+	if (rc) {
+		dprintk(CVP_ERR, "%s Failed to start session %llx\n", __func__, inst);
+		cmd->ret = -1;
+		return;
+	}
+	dprintk(CVP_DSP, "%s session started\n", __func__);
+}
+
+void __dsp_cvp_sess_stop(struct cvp_dsp_cmd_msg *cmd)
+{
+	struct cvp_dsp_apps *me = &gfa_cv;
+	struct msm_cvp_inst *inst;
+	int rc;
+	struct cvp_dsp2cpu_cmd *dsp2cpu_cmd = &me->pending_dsp2cpu_cmd;
+
+	cmd->ret = 0;
+
+	inst = (struct msm_cvp_inst *)get_inst_from_dsp(
+			dsp2cpu_cmd->session_cpu_high,
+			dsp2cpu_cmd->session_cpu_low);
+
+	if (!inst || !is_cvp_inst_valid(inst)) {
+		dprintk(CVP_ERR, "%s incorrect session ID %llx\n", __func__, inst);
+		cmd->ret = -1;
+		return;
+	}
+
+	dprintk(CVP_DSP,
+		"%s sess id 0x%x low 0x%x high 0x%x, pid 0x%x, inst_kref_refcount 0x%x\n",
+		__func__, dsp2cpu_cmd->session_id,
+		dsp2cpu_cmd->session_cpu_low,
+		dsp2cpu_cmd->session_cpu_high,
+		dsp2cpu_cmd->pid, kref_read(&inst->kref));
+
+	rc = msm_cvp_session_stop(inst, (struct eva_kmd_arg *)NULL);
+	if (rc) {
+		dprintk(CVP_ERR, "%s Failed to stop session\n", __func__);
+		cmd->ret = -1;
+		return;
+	}
+	dprintk(CVP_DSP, "%s session stopped\n", __func__);
+}
+
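+/*
+ * Kernel thread that serializes DSP2CPU command handling: it waits on
+ * the shared completion, dispatches the pending command under rx_lock,
+ * and sends the result back to the DSP.
+ */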
+static int cvp_dsp_thread(void *data)
+{
+	int rc = 0, old_state;
+	struct cvp_dsp_apps *me = &gfa_cv;
+	struct cvp_dsp_cmd_msg cmd;
+	struct cvp_hfi_ops *ops_tbl;
+	struct msm_cvp_core *core;
+
+	core = cvp_driver->cvp_core;
+	if (!core) {
+		dprintk(CVP_ERR, "%s: Failed to find core\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	ops_tbl = (struct cvp_hfi_ops *)core->dev_ops;
+	if (!ops_tbl) {
+		dprintk(CVP_ERR, "%s Invalid device handle\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+wait_dsp:
+	rc = wait_for_completion_interruptible(
+			&me->completions[CPU2DSP_MAX_CMD]);
+
+	if (me->state == DSP_INVALID)
+		goto exit;
+
+	if (me->state == DSP_UNINIT)
+		goto wait_dsp;
+
+	if (me->state == DSP_PROBED) {
+		cvp_dsp_send_hfi_queue();
+		goto wait_dsp;
+	}
+
+	/*
+	 * Zero the cmd to avoid sending stale session values in case
+	 * the command handling fails.
+	 */
+	memset(&cmd, 0, sizeof(struct cvp_dsp_cmd_msg));
+	cmd.type = me->pending_dsp2cpu_cmd.type;
+
+	if (rc == -ERESTARTSYS) {
+		dprintk(CVP_WARN, "%s received interrupt signal\n", __func__);
+	} else {
+		mutex_lock(&me->rx_lock);
+		if (me->state == DSP_UNINIT) {
+			/* DSP SSR may have happened */
+			mutex_unlock(&me->rx_lock);
+			goto wait_dsp;
+		}
+		switch (me->pending_dsp2cpu_cmd.type) {
+		case DSP2CPU_POWERON:
+		{
+			if (me->state == DSP_READY) {
+				cmd.ret = 0;
+				break;
+			}
+
+			mutex_lock(&me->tx_lock);
+			old_state = me->state;
+			me->state = DSP_READY;
+			rc = call_hfi_op(ops_tbl, resume, ops_tbl->hfi_device_data);
+			if (rc) {
+				dprintk(CVP_WARN, "%s Failed to resume cvp\n",
+						__func__);
+				me->state = old_state;
+				mutex_unlock(&me->tx_lock);
+				cmd.ret = 1;
+				break;
+			}
+			mutex_unlock(&me->tx_lock);
+			cmd.ret = 0;
+			break;
+		}
+		case DSP2CPU_POWEROFF:
+		{
+			me->state = DSP_SUSPEND;
+			cmd.ret = 0;
+			break;
+		}
+		case DSP2CPU_CREATE_SESSION:
+		{
+			__dsp_cvp_sess_create(&cmd);
+
+			break;
+		}
+		case DSP2CPU_DETELE_SESSION:
+		{
+			__dsp_cvp_sess_delete(&cmd);
+
+			break;
+		}
+		case DSP2CPU_POWER_REQUEST:
+		{
+			__dsp_cvp_power_req(&cmd);
+
+			break;
+		}
+		case DSP2CPU_REGISTER_BUFFER:
+		{
+			__dsp_cvp_buf_register(&cmd);
+
+			break;
+		}
+		case DSP2CPU_DEREGISTER_BUFFER:
+		{
+			__dsp_cvp_buf_deregister(&cmd);
+
+			break;
+		}
+		case DSP2CPU_MEM_ALLOC:
+		{
+			__dsp_cvp_mem_alloc(&cmd);
+
+			break;
+		}
+		case DSP2CPU_MEM_FREE:
+		{
+			__dsp_cvp_mem_free(&cmd);
+
+			break;
+		}
+		case DSP2CPU_START_SESSION:
+		{
+			__dsp_cvp_sess_start(&cmd);
+
+			break;
+		}
+		case DSP2CPU_STOP_SESSION:
+		{
+			__dsp_cvp_sess_stop(&cmd);
+
+			break;
+		}
+		default:
+			dprintk(CVP_ERR, "unrecognized dsp cmd: %d\n",
+					me->pending_dsp2cpu_cmd.type);
+			break;
+		}
+		me->pending_dsp2cpu_cmd.type = CVP_INVALID_RPMSG_TYPE;
+		mutex_unlock(&me->rx_lock);
+	}
+	/* Respond to the DSP */
+	rc = cvp_dsp_send_cmd(&cmd, sizeof(struct cvp_dsp_cmd_msg));
+	if (rc)
+		dprintk(CVP_ERR,
+			"%s: cvp_dsp_send_cmd failed rc = %d cmd type=%d\n",
+			__func__, rc, cmd.type);
+	goto wait_dsp;
+exit:
+	dprintk(CVP_DBG, "dsp thread exit\n");
+	return rc;
+}
+
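+/*
+ * Module-level init: set up locks, completions and the fastrpc driver
+ * name pool, register the rpmsg driver, and start cvp_dsp_thread.
+ */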
+int cvp_dsp_device_init(void)
+{
+	struct cvp_dsp_apps *me = &gfa_cv;
+	char tname[16];
+	int rc;
+	int i;
+	char name[CVP_FASTRPC_DRIVER_NAME_SIZE] = "qcom,fastcv0\0";
+
+	add_va_node_to_list(CVP_DBG_DUMP, &gfa_cv, sizeof(struct cvp_dsp_apps),
+			"cvp_dsp_apps-gfa_cv", false);
+
+	mutex_init(&me->tx_lock);
+	mutex_init(&me->rx_lock);
+	me->state = DSP_INVALID;
+	me->hyp_assigned = false;
+
+	for (i = 0; i <= CPU2DSP_MAX_CMD; i++)
+		init_completion(&me->completions[i]);
+
+	me->pending_dsp2cpu_cmd.type = CVP_INVALID_RPMSG_TYPE;
+	me->pending_dsp2cpu_rsp.type = CVP_INVALID_RPMSG_TYPE;
+
+	INIT_MSM_CVP_LIST(&me->fastrpc_driver_list);
+
+	mutex_init(&me->driver_name_lock);
+	for (i = 0; i < MAX_FASTRPC_DRIVER_NUM; i++) {
+		me->cvp_fastrpc_name[i].status = DRIVER_NAME_AVAILABLE;
+		snprintf(me->cvp_fastrpc_name[i].name, sizeof(name),
+			"%s", name);
+		name[11]++;	/* "qcom,fastcv0" -> "qcom,fastcv1" ... */
+	}
+
+	rc = register_rpmsg_driver(&cvp_dsp_rpmsg_client);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"%s : register_rpmsg_driver failed rc = %d\n",
+			__func__, rc);
+		goto register_bail;
+	}
+	snprintf(tname, sizeof(tname), "cvp-dsp-thread");
+	mutex_lock(&me->tx_lock);
+
+	if (me->state == DSP_INVALID)
+		me->state = DSP_UNINIT;
+
+	mutex_unlock(&me->tx_lock);
+
+	me->dsp_thread = kthread_run(cvp_dsp_thread, me, tname);
+	if (IS_ERR(me->dsp_thread)) {
+		dprintk(CVP_ERR, "%s create %s fail", __func__, tname);
+		rc = -ECHILD;
+		me->state = DSP_INVALID;
+		goto register_bail;
+	}
+	return 0;
+
+register_bail:
+	return rc;
+}
+
+void cvp_dsp_device_exit(void)
+{
+	struct cvp_dsp_apps *me = &gfa_cv;
+	int i;
+
+	mutex_lock(&me->tx_lock);
+	me->state = DSP_INVALID;
+	mutex_unlock(&me->tx_lock);
+
+	DEINIT_MSM_CVP_LIST(&me->fastrpc_driver_list);
+
+	for (i = 0; i <= CPU2DSP_MAX_CMD; i++)
+		complete_all(&me->completions[i]);
+
+	mutex_destroy(&me->tx_lock);
+	mutex_destroy(&me->rx_lock);
+	mutex_destroy(&me->driver_name_lock);
+	unregister_rpmsg_driver(&cvp_dsp_rpmsg_client);
+}

+ 315 - 0
qcom/opensource/eva-kernel/msm/eva/msm_cvp_dsp.h

@@ -0,0 +1,315 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef MSM_CVP_DSP_H
+#define MSM_CVP_DSP_H
+
+#include <linux/types.h>
+#include <linux/refcount.h>
+#include "msm_cvp_debug.h"
+#include "cvp_core_hfi.h"
+
+#include <linux/pid.h>
+#include <linux/sched.h>
+
+#ifdef CVP_FASTRPC_ENABLED
+#include <fastrpc.h>
+#else
+struct fastrpc_device {
+	int handle;
+};
+
+enum fastrpc_driver_status {
+	FASTRPC_CVP_B,
+};
+
+enum fastrpc_driver_invoke_nums {
+	FASTRPC_DEV_MAP_DMA = 1,
+	FASTRPC_DEV_UNMAP_DMA,
+	FASTRPC_DEV_GET_HLOS_PID,
+};
+
+struct fastrpc_driver {
+	struct device_driver driver;
+	int handle;
+	int (*probe)(struct fastrpc_device *dev);
+	int (*callback)(struct fastrpc_device *dev,
+			enum fastrpc_driver_status status);
+};
+#endif	/* End of CVP_FASTRPC_ENABLED */
+
+#define CVP_APPS_DSP_GLINK_GUID "cvp-glink-apps-dsp"
+#define CVP_APPS_DSP_SMD_GUID "cvp-smd-apps-dsp"
+
+#define VMID_CDSP_Q6 (30)
+#define HLOS_VM_NUM 1
+#define DSP_VM_NUM 2
+#define CVP_DSP_MAX_RESERVED 5
+#define CVP_DSP2CPU_RESERVED 8
+#define CVP_DSP_RESPONSE_TIMEOUT 1000
+#define CVP_INVALID_RPMSG_TYPE 0xBADDFACE
+#define MAX_FRAME_BUF_NUM 16
+
+#define BITPTRSIZE32 (4)
+#define BITPTRSIZE64 (8)
+#define HIGH32                      (0xFFFFFFFF00000000LL)
+#define LOW32                       (0xFFFFFFFFLL)
+
+#define CVP_FASTRPC_DRIVER_NAME_SIZE    16
+
+/* Supports up to 8 DSP sessions in 8 processes */
+#define MAX_DSP_SESSION_NUM			(8)
+#define MAX_FASTRPC_DRIVER_NUM		(MAX_DSP_SESSION_NUM)
+
+int cvp_dsp_device_init(void);
+void cvp_dsp_device_exit(void);
+void cvp_dsp_send_hfi_queue(void);
+void cvp_dsp_init_hfi_queue_hdr(struct iris_hfi_device *device);
+
+enum CPU2DSP_STATUS {
+	CPU2DSP_SUCCESS = 0,
+	CPU2DSP_EFAIL = 1,
+	CPU2DSP_EFATAL = 2,
+	CPU2DSP_EUNAVAILABLE = 3,
+	CPU2DSP_EINVALSTATE = 4,
+	CPU2DSP_EUNSUPPORTED = 5,
+};
+
+enum CVP_DSP_COMMAND {
+	CPU2DSP_SEND_HFI_QUEUE = 0,
+	CPU2DSP_SUSPEND = 1,
+	CPU2DSP_RESUME = 2,
+	CPU2DSP_SHUTDOWN = 3,
+	CPU2DSP_REGISTER_BUFFER = 4,
+	CPU2DSP_DEREGISTER_BUFFER = 5,
+	CPU2DSP_INIT = 6,
+	CPU2DSP_SET_DEBUG_LEVEL = 7,
+	CPU2DSP_MAX_CMD = 8,
+	DSP2CPU_POWERON = 11,
+	DSP2CPU_POWEROFF = 12,
+	DSP2CPU_CREATE_SESSION = 13,
+	DSP2CPU_DETELE_SESSION = 14,
+	DSP2CPU_POWER_REQUEST = 15,
+	DSP2CPU_POWER_CANCEL = 16,
+	DSP2CPU_REGISTER_BUFFER = 17,
+	DSP2CPU_DEREGISTER_BUFFER = 18,
+	DSP2CPU_MEM_ALLOC = 19,
+	DSP2CPU_MEM_FREE = 20,
+	DSP2CPU_START_SESSION = 21,
+	DSP2CPU_STOP_SESSION = 22,
+	CVP_DSP_MAX_CMD = 23,
+};
+
+struct eva_power_req {
+	uint32_t clock_fdu;
+	uint32_t clock_ica;
+	uint32_t clock_od;
+	uint32_t clock_mpu;
+	uint32_t clock_fw;
+	uint32_t bw_ddr;
+	uint32_t bw_sys_cache;
+	uint32_t op_clock_fdu;
+	uint32_t op_clock_ica;
+	uint32_t op_clock_od;
+	uint32_t op_clock_mpu;
+	uint32_t op_clock_fw;
+	uint32_t op_bw_ddr;
+	uint32_t op_bw_sys_cache;
+};
+
+struct eva_mem_remote {
+	uint32_t type;
+	uint32_t size;
+	uint32_t fd;
+	uint32_t offset;
+	uint32_t index;
+	uint32_t iova;
+	uint32_t dsp_remote_map;
+	uint64_t v_dsp_addr;
+};
+
+/*
+ * command: defined as a packet initiated from one party.
+ * message: defined as a packet sent as response to a command
+ */
+
+/*
+ * cvp_dsp_cmd_msg contains
+ * the message sent from CPU to DSP
+ * or
+ * the command sent from CPU to DSP
+ */
+struct cvp_dsp_cmd_msg {
+	uint32_t type;
+	int32_t ret;
+	uint64_t msg_ptr;
+	uint32_t msg_ptr_len;
+	uint32_t buff_fd_iova;
+	uint32_t buff_index;
+	uint32_t buff_size;
+	uint32_t session_id;
+	int32_t ddr_type;
+	uint32_t buff_fd;
+	uint32_t buff_offset;
+	uint32_t buff_fd_size;
+
+	uint32_t eva_dsp_debug_mask;
+
+	/* Create Session */
+	uint32_t session_cpu_low;
+	uint32_t session_cpu_high;
+
+	struct eva_mem_remote sbuf;
+
+	uint32_t reserved1;
+	uint32_t reserved2;
+};
+
+/* cvp_dsp_rsp_msg contains the message sent from DSP to CPU */
+struct cvp_dsp_rsp_msg {
+	uint32_t type;
+	int32_t ret;
+	uint32_t dsp_state;
+	uint32_t reserved[CVP_DSP_MAX_RESERVED - 1];
+};
+
+/* cvp_dsp2cpu_cmd contains the command sent from DSP to cpu*/
+struct cvp_dsp2cpu_cmd {
+	uint32_t type;
+	uint32_t ver;
+	uint32_t len;
+
+	/* Create Session */
+	uint32_t session_type;
+	uint32_t kernel_mask;
+	uint32_t session_prio;
+	uint32_t is_secure;
+	uint32_t dsp_access_mask;
+
+	uint32_t session_id;
+	uint32_t session_cpu_low;
+	uint32_t session_cpu_high;
+	int32_t pid;
+	struct eva_power_req power_req;
+	struct eva_mem_remote sbuf;
+
+	uint32_t data[CVP_DSP2CPU_RESERVED];
+};
+
+struct driver_name {
+	uint32_t status;
+	char name[CVP_FASTRPC_DRIVER_NAME_SIZE];
+};
+
+enum DRIVER_NAME_STATUS {
+	DRIVER_NAME_INVALID = 0,
+	DRIVER_NAME_AVAILABLE = 1,
+	DRIVER_NAME_USED = 2,
+};
+
+struct cvp_dsp_fastrpc_driver_entry {
+	struct list_head list;
+	uint32_t handle;	/* handle is not the PID */
+	uint32_t session_cnt;
+	uint32_t driver_name_idx;
+	atomic_t refcount;
+	struct fastrpc_driver cvp_fastrpc_driver;
+	struct fastrpc_device *cvp_fastrpc_device;
+	struct completion fastrpc_probe_completion;
+	/* all dsp sessions list */
+	struct msm_cvp_list dsp_sessions;
+};
+
+struct cvp_dsp_apps {
+	/*
+	 * tx_lock for sending CPU2DSP cmds or msgs
+	 * and dsp state change
+	 */
+	struct mutex tx_lock;
+	/* rx_lock for receiving DSP2CPU cmds or msgs */
+	struct mutex rx_lock;
+	struct mutex driver_name_lock;
+	struct rpmsg_device *chan;
+	uint32_t state;
+	uint32_t debug_mask;
+	bool hyp_assigned;
+	uint64_t addr;
+	uint32_t size;
+	struct completion completions[CPU2DSP_MAX_CMD + 1];
+	struct cvp_dsp2cpu_cmd pending_dsp2cpu_cmd;
+	struct cvp_dsp_rsp_msg pending_dsp2cpu_rsp;
+	struct task_struct *dsp_thread;
+	/* dsp buffer mapping, set of dma function pointer */
+	const struct file_operations *dmabuf_f_op;
+	uint32_t buf_num;
+	struct msm_cvp_list fastrpc_driver_list;
+	struct driver_name cvp_fastrpc_name[MAX_FASTRPC_DRIVER_NUM];
+};
+
+#define EVA_TRACE_MAX_SESSION_NUM       16
+#define EVA_TRACE_MAX_INSTANCE_NUM      6
+#define EVA_TRACE_MAX_BUF_NUM           256
+
+#define CONFIG_SIZE_IN_BYTES        2048
+#define CONFIG_SIZE_IN_WORDS        (CONFIG_SIZE_IN_BYTES >> 2)
+
+// iova is eva_dsp_buf->iova
+// pkt_type is the type of the frame packet using the buffer
+// buf_idx is the index of the buffer within a frame packet
+// transaction_id is the transaction id of the frame packet
+struct cvp_dsp_trace_buf {
+	u32	iova;
+	u32	pkt_type;
+	u32	buf_idx;
+	u32	transaction_id;
+	u32	fd;
+};
+
+// Saves the config packet for each instance
+struct cvp_dsp_trace_instance {
+	u32    feature_type;
+	u32    config_pkt[CONFIG_SIZE_IN_WORDS];
+};
+
+struct cvp_dsp_trace_session {
+	u32                session_id;
+	u32                buf_cnt;
+	u32                inst_cnt;
+	struct cvp_dsp_trace_instance  instance[EVA_TRACE_MAX_INSTANCE_NUM];
+	struct cvp_dsp_trace_buf       buf[EVA_TRACE_MAX_BUF_NUM];
+};
+
+struct cvp_dsp_trace {
+	struct cvp_dsp_trace_session   sessions[EVA_TRACE_MAX_SESSION_NUM];
+};
+
+extern struct cvp_dsp_apps gfa_cv;
+/*
+ * API for CVP driver to suspend CVP session during
+ * power collapse
+ */
+int cvp_dsp_suspend(bool force);
+
+/*
+ * API for CVP driver to resume CVP session during
+ * power collapse
+ */
+int cvp_dsp_resume(void);
+
+/*
+ * API for CVP driver to shutdown CVP session during
+ * cvp subsystem error.
+ */
+int cvp_dsp_shutdown(void);
+
+int cvp_dsp_fastrpc_unmap(uint32_t handle, struct cvp_internal_buf *buf);
+
+int cvp_dsp_del_sess(uint32_t handle, struct msm_cvp_inst *inst);
+
+void cvp_dsp_send_debug_mask(void);
+
+#endif // MSM_CVP_DSP_H
+

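The cvp_dsp_cmd_msg/cvp_dsp_rsp_msg pair above implies a synchronous handshake over the rpmsg channel held in cvp_dsp_apps. The following is a minimal sketch of that pattern under stated assumptions, not the driver's actual send path: cvp_send_cmd_sync() is a hypothetical helper, the 1000 ms timeout is illustrative, and it assumes the rx handler completes the completions[] entry indexed by the command type and fills pending_dsp2cpu_rsp.

#include <linux/rpmsg.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

/* Hypothetical helper: send one CPU2DSP command and wait for its reply. */
static int cvp_send_cmd_sync(struct cvp_dsp_cmd_msg *cmd,
		struct cvp_dsp_rsp_msg *rsp)
{
	struct cvp_dsp_apps *me = &gfa_cv;
	int rc;

	mutex_lock(&me->tx_lock);	/* serializes CPU2DSP traffic */
	reinit_completion(&me->completions[cmd->type]);

	rc = rpmsg_send(me->chan->ept, cmd, sizeof(*cmd));
	if (rc)
		goto exit;

	/* Assumption: the rx handler calls complete() on the entry
	 * matching cmd->type and stashes the reply in pending_dsp2cpu_rsp.
	 */
	if (!wait_for_completion_timeout(&me->completions[cmd->type],
			msecs_to_jiffies(1000))) {
		rc = -ETIMEDOUT;
		goto exit;
	}
	*rsp = me->pending_dsp2cpu_rsp;
	rc = rsp->ret;
exit:
	mutex_unlock(&me->tx_lock);
	return rc;
}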
+ 375 - 0
qcom/opensource/eva-kernel/msm/eva/msm_cvp_events.h

@@ -0,0 +1,375 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#if !defined(_MSM_CVP_EVENTS_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MSM_CVP_EVENTS_H_
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM msm_cvp
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE msm_cvp_events
+
+// Chrome's trace viewer parses the "tracing_mark_write" event by default,
+// so we reuse it to display our own events in Chrome.
+// Enable it with:
+// adb shell "echo 1 > /sys/kernel/tracing/events/msm_cvp/tracing_mark_write/enable"
+TRACE_EVENT(tracing_mark_write,
+	TP_PROTO(int pid, const char *name, bool trace_begin),
+	TP_ARGS(pid, name, trace_begin),
+	TP_STRUCT__entry(
+		__field(int, pid)
+		__string(trace_name, name)
+		__field(bool, trace_begin)
+	),
+	TP_fast_assign(
+		__entry->pid = pid;
+		__assign_str(trace_name, name);
+		__entry->trace_begin = trace_begin;
+	),
+	TP_printk("%s|%d|%s", __entry->trace_begin ? "B" : "E",
+		__entry->pid, __get_str(trace_name))
+)
+#define CVPKERNEL_ATRACE_END(name) \
+		trace_tracing_mark_write(current->tgid, name, 0)
+#define CVPKERNEL_ATRACE_BEGIN(name) \
+		trace_tracing_mark_write(current->tgid, name, 1)
+
+
+DECLARE_EVENT_CLASS(msm_v4l2_cvp,
+
+	TP_PROTO(char *dummy),
+
+	TP_ARGS(dummy),
+
+	TP_STRUCT__entry(
+		__field(char *, dummy)
+	),
+
+	TP_fast_assign(
+		__entry->dummy = dummy;
+	),
+
+	TP_printk("%s", __entry->dummy)
+);
+
+DEFINE_EVENT(msm_v4l2_cvp, msm_v4l2_cvp_open_start,
+
+	TP_PROTO(char *dummy),
+
+	TP_ARGS(dummy)
+);
+
+DEFINE_EVENT(msm_v4l2_cvp, msm_v4l2_cvp_open_end,
+
+	TP_PROTO(char *dummy),
+
+	TP_ARGS(dummy)
+);
+
+DEFINE_EVENT(msm_v4l2_cvp, msm_v4l2_cvp_close_start,
+
+	TP_PROTO(char *dummy),
+
+	TP_ARGS(dummy)
+);
+
+DEFINE_EVENT(msm_v4l2_cvp, msm_v4l2_cvp_close_end,
+
+	TP_PROTO(char *dummy),
+
+	TP_ARGS(dummy)
+);
+
+DEFINE_EVENT(msm_v4l2_cvp, msm_v4l2_cvp_fw_load_start,
+
+	TP_PROTO(char *dummy),
+
+	TP_ARGS(dummy)
+);
+
+DEFINE_EVENT(msm_v4l2_cvp, msm_v4l2_cvp_fw_load_end,
+
+	TP_PROTO(char *dummy),
+
+	TP_ARGS(dummy)
+);
+
+DECLARE_EVENT_CLASS(msm_cvp_common,
+
+	TP_PROTO(void *instp, int old_state, int new_state),
+
+	TP_ARGS(instp, old_state, new_state),
+
+	TP_STRUCT__entry(
+		__field(void *, instp)
+		__field(int, old_state)
+		__field(int, new_state)
+	),
+
+	TP_fast_assign(
+		__entry->instp = instp;
+		__entry->old_state = old_state;
+		__entry->new_state = new_state;
+	),
+
+	TP_printk("Moved inst: %p from 0x%x to 0x%x",
+		__entry->instp,
+		__entry->old_state,
+		__entry->new_state)
+);
+
+DEFINE_EVENT(msm_cvp_common, msm_cvp_common_state_change,
+
+	TP_PROTO(void *instp, int old_state, int new_state),
+
+	TP_ARGS(instp, old_state, new_state)
+);
+
+DECLARE_EVENT_CLASS(cvp_venus_hfi_var,
+
+	TP_PROTO(u32 cp_start, u32 cp_size,
+		u32 cp_nonpixel_start, u32 cp_nonpixel_size),
+
+	TP_ARGS(cp_start, cp_size, cp_nonpixel_start, cp_nonpixel_size),
+
+	TP_STRUCT__entry(
+		__field(u32, cp_start)
+		__field(u32, cp_size)
+		__field(u32, cp_nonpixel_start)
+		__field(u32, cp_nonpixel_size)
+	),
+
+	TP_fast_assign(
+		__entry->cp_start = cp_start;
+		__entry->cp_size = cp_size;
+		__entry->cp_nonpixel_start = cp_nonpixel_start;
+		__entry->cp_nonpixel_size = cp_nonpixel_size;
+	),
+
+	TP_printk(
+		"TZBSP_MEM_PROTECT_VIDEO_VAR done, cp_start : 0x%x, cp_size : 0x%x, cp_nonpixel_start : 0x%x, cp_nonpixel_size : 0x%x",
+		__entry->cp_start,
+		__entry->cp_size,
+		__entry->cp_nonpixel_start,
+		__entry->cp_nonpixel_size)
+);
+
+DEFINE_EVENT(cvp_venus_hfi_var, cvp_venus_hfi_var_done,
+
+	TP_PROTO(u32 cp_start, u32 cp_size,
+		u32 cp_nonpixel_start, u32 cp_nonpixel_size),
+
+	TP_ARGS(cp_start, cp_size, cp_nonpixel_start, cp_nonpixel_size)
+);
+
+DECLARE_EVENT_CLASS(msm_v4l2_cvp_buffer_events,
+
+	TP_PROTO(char *event_type, u32 device_addr, int64_t timestamp,
+		u32 alloc_len, u32 filled_len, u32 offset),
+
+	TP_ARGS(event_type, device_addr, timestamp, alloc_len,
+		filled_len, offset),
+
+	TP_STRUCT__entry(
+		__field(char *, event_type)
+		__field(u32, device_addr)
+		__field(int64_t, timestamp)
+		__field(u32, alloc_len)
+		__field(u32, filled_len)
+		__field(u32, offset)
+	),
+
+	TP_fast_assign(
+		__entry->event_type = event_type;
+		__entry->device_addr = device_addr;
+		__entry->timestamp = timestamp;
+		__entry->alloc_len = alloc_len;
+		__entry->filled_len = filled_len;
+		__entry->offset = offset;
+	),
+
+	TP_printk(
+		"%s, device_addr : 0x%x, timestamp : %lld, alloc_len : 0x%x, filled_len : 0x%x, offset : 0x%x",
+		__entry->event_type,
+		__entry->device_addr,
+		__entry->timestamp,
+		__entry->alloc_len,
+		__entry->filled_len,
+		__entry->offset)
+);
+
+DEFINE_EVENT(msm_v4l2_cvp_buffer_events, msm_v4l2_cvp_buffer_event_start,
+
+	TP_PROTO(char *event_type, u32 device_addr, int64_t timestamp,
+		u32 alloc_len, u32 filled_len, u32 offset),
+
+	TP_ARGS(event_type, device_addr, timestamp, alloc_len,
+		filled_len, offset)
+);
+
+DEFINE_EVENT(msm_v4l2_cvp_buffer_events, msm_v4l2_cvp_buffer_event_end,
+
+	TP_PROTO(char *event_type, u32 device_addr, int64_t timestamp,
+		u32 alloc_len, u32 filled_len, u32 offset),
+
+	TP_ARGS(event_type, device_addr, timestamp, alloc_len,
+		filled_len, offset)
+);
+
+DECLARE_EVENT_CLASS(msm_cvp_smem_buffer_dma_ops,
+
+	TP_PROTO(char *buffer_op, u32 buffer_type, u32 heap_mask,
+		size_t size, u32 align, u32 flags, int map_kernel),
+
+	TP_ARGS(buffer_op, buffer_type, heap_mask, size, align,
+		flags, map_kernel),
+
+	TP_STRUCT__entry(
+		__field(char *, buffer_op)
+		__field(u32, buffer_type)
+		__field(u32, heap_mask)
+		__field(u32, size)
+		__field(u32, align)
+		__field(u32, flags)
+		__field(int, map_kernel)
+	),
+
+	TP_fast_assign(
+		__entry->buffer_op = buffer_op;
+		__entry->buffer_type = buffer_type;
+		__entry->heap_mask = heap_mask;
+		__entry->size = size;
+		__entry->align = align;
+		__entry->flags = flags;
+		__entry->map_kernel = map_kernel;
+	),
+
+	TP_printk(
+		"%s, buffer_type : 0x%x, heap_mask : 0x%x, size : 0x%x, align : 0x%x, flags : 0x%x, map_kernel : %d",
+		__entry->buffer_op,
+		__entry->buffer_type,
+		__entry->heap_mask,
+		__entry->size,
+		__entry->align,
+		__entry->flags,
+		__entry->map_kernel)
+);
+
+DEFINE_EVENT(msm_cvp_smem_buffer_dma_ops, msm_cvp_smem_buffer_dma_op_start,
+
+	TP_PROTO(char *buffer_op, u32 buffer_type, u32 heap_mask,
+		size_t size, u32 align, u32 flags, int map_kernel),
+
+	TP_ARGS(buffer_op, buffer_type, heap_mask, size, align,
+		flags, map_kernel)
+);
+
+DEFINE_EVENT(msm_cvp_smem_buffer_dma_ops, msm_cvp_smem_buffer_dma_op_end,
+
+	TP_PROTO(char *buffer_op, u32 buffer_type, u32 heap_mask,
+		size_t size, u32 align, u32 flags, int map_kernel),
+
+	TP_ARGS(buffer_op, buffer_type, heap_mask, size, align,
+		flags, map_kernel)
+);
+
+DECLARE_EVENT_CLASS(msm_cvp_smem_buffer_iommu_ops,
+
+	TP_PROTO(char *buffer_op, int domain_num, int partition_num,
+		unsigned long align, unsigned long iova,
+		unsigned long buffer_size),
+
+	TP_ARGS(buffer_op, domain_num, partition_num, align, iova, buffer_size),
+
+	TP_STRUCT__entry(
+		__field(char *, buffer_op)
+		__field(int, domain_num)
+		__field(int, partition_num)
+		__field(unsigned long, align)
+		__field(unsigned long, iova)
+		__field(unsigned long, buffer_size)
+	),
+
+	TP_fast_assign(
+		__entry->buffer_op = buffer_op;
+		__entry->domain_num = domain_num;
+		__entry->partition_num = partition_num;
+		__entry->align = align;
+		__entry->iova = iova;
+		__entry->buffer_size = buffer_size;
+	),
+
+	TP_printk(
+		"%s, domain : %d, partition : %d, align : %lx, iova : 0x%lx, buffer_size=%lx",
+		__entry->buffer_op,
+		__entry->domain_num,
+		__entry->partition_num,
+		__entry->align,
+		__entry->iova,
+		__entry->buffer_size)
+);
+
+DEFINE_EVENT(msm_cvp_smem_buffer_iommu_ops, msm_cvp_smem_buffer_iommu_op_start,
+
+	TP_PROTO(char *buffer_op, int domain_num, int partition_num,
+		unsigned long align, unsigned long iova,
+		unsigned long buffer_size),
+
+	TP_ARGS(buffer_op, domain_num, partition_num, align, iova, buffer_size)
+);
+
+DEFINE_EVENT(msm_cvp_smem_buffer_iommu_ops, msm_cvp_smem_buffer_iommu_op_end,
+
+	TP_PROTO(char *buffer_op, int domain_num, int partition_num,
+		unsigned long align, unsigned long iova,
+		unsigned long buffer_size),
+
+	TP_ARGS(buffer_op, domain_num, partition_num, align, iova, buffer_size)
+);
+
+DECLARE_EVENT_CLASS(msm_cvp_perf,
+
+	TP_PROTO(const char *name, unsigned long value),
+
+	TP_ARGS(name, value),
+
+	TP_STRUCT__entry(
+		__field(const char *, name)
+		__field(unsigned long, value)
+	),
+
+	TP_fast_assign(
+		__entry->name = name;
+		__entry->value = value;
+	),
+
+	TP_printk("%s %lu", __entry->name, __entry->value)
+);
+
+DEFINE_EVENT(msm_cvp_perf, msm_cvp_perf_clock_scale,
+
+	TP_PROTO(const char *clock_name, unsigned long frequency),
+
+	TP_ARGS(clock_name, frequency)
+);
+
+DEFINE_EVENT(msm_cvp_perf, msm_cvp_perf_bus_vote,
+
+	TP_PROTO(const char *governor_mode, unsigned long ab),
+
+	TP_ARGS(governor_mode, ab)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#include <trace/define_trace.h>

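The tracepoints above follow the standard kernel pattern: exactly one compilation unit must define CREATE_TRACE_POINTS before including this header so the event bodies get emitted, and callers then invoke the generated trace_* wrappers. A hedged usage sketch follows; cvp_do_scale() is a hypothetical callee and "cvp_core_clk" an illustrative clock name.

/* In exactly one .c file, with this header's directory on the include path: */
#define CREATE_TRACE_POINTS
#include "msm_cvp_events.h"

/* Bracket a clock-scaling path with the Chrome-visible begin/end
 * markers and log the new frequency via the perf event class.
 */
static int cvp_scale_clocks_traced(unsigned long freq)
{
	int rc;

	CVPKERNEL_ATRACE_BEGIN("cvp_scale_clocks");
	rc = cvp_do_scale(freq);	/* hypothetical callee */
	trace_msm_cvp_perf_clock_scale("cvp_core_clk", freq);
	CVPKERNEL_ATRACE_END("cvp_scale_clocks");
	return rc;
}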
+ 408 - 0
qcom/opensource/eva-kernel/msm/eva/msm_cvp_internal.h

@@ -0,0 +1,408 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _MSM_CVP_INTERNAL_H_
+#define _MSM_CVP_INTERNAL_H_
+
+#include <linux/atomic.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/interconnect.h>
+#include <linux/kref.h>
+#include <linux/cdev.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/dma-mapping.h>
+#include "msm_cvp_core.h"
+#include <media/msm_eva_private.h>
+#include "cvp_hfi_api.h"
+#include "cvp_hfi_helper.h"
+
+#define MAX_SUPPORTED_INSTANCES 16
+#define MAX_DEBUGFS_NAME 50
+#define MAX_DSP_INIT_ATTEMPTS 16
+#define FENCE_WAIT_SIGNAL_TIMEOUT 100
+#define FENCE_WAIT_SIGNAL_RETRY_TIMES 20
+#define FENCE_BIT (1ULL << 63)
+
+#define FENCE_DMM_ICA_ENABLED_IDX 0
+#define FENCE_DMM_DS_IDX 1
+#define FENCE_DMM_OUTPUT_IDX 7
+
+#define SYS_MSG_START HAL_SYS_INIT_DONE
+#define SYS_MSG_END HAL_SYS_ERROR
+#define SESSION_MSG_START HAL_SESSION_EVENT_CHANGE
+#define SESSION_MSG_END HAL_SESSION_ERROR
+#define SYS_MSG_INDEX(__msg) (__msg - SYS_MSG_START)
+#define SESSION_MSG_INDEX(__msg) (__msg - SESSION_MSG_START)
+
+#define ARP_BUF_SIZE 0x300000
+
+#define CVP_RT_PRIO_THRESHOLD 1
+
+struct msm_cvp_inst;
+
+enum cvp_core_state {
+	CVP_CORE_UNINIT = 0,
+	CVP_CORE_INIT,
+	CVP_CORE_INIT_DONE,
+};
+
+enum instance_state {
+	MSM_CVP_CORE_UNINIT_DONE = 0x0001,
+	MSM_CVP_CORE_INIT,
+	MSM_CVP_CORE_INIT_DONE,
+	MSM_CVP_OPEN,
+	MSM_CVP_OPEN_DONE,
+	MSM_CVP_CLOSE,
+	MSM_CVP_CLOSE_DONE,
+	MSM_CVP_CORE_UNINIT,
+	MSM_CVP_CORE_INVALID
+};
+
+enum dsp_state {
+	DSP_INVALID,
+	DSP_UNINIT,
+	DSP_PROBED,
+	DSP_READY,
+	DSP_SUSPEND,
+	DSP_INACTIVE,
+};
+
+struct msm_cvp_common_data {
+	char key[128];
+	int value;
+};
+
+enum sku_version {
+	SKU_VERSION_0 = 0,
+	SKU_VERSION_1,
+	SKU_VERSION_2,
+};
+
+enum vpu_version {
+	VPU_VERSION_4 = 1,
+	VPU_VERSION_5,
+};
+
+struct msm_cvp_ubwc_config_data {
+	struct {
+		u32 max_channel_override : 1;
+		u32 mal_length_override : 1;
+		u32 hb_override : 1;
+		u32 bank_swzl_level_override : 1;
+		u32 bank_spreading_override : 1;
+		u32 reserved : 27;
+	} override_bit_info;
+
+	u32 max_channels;
+	u32 mal_length;
+	u32 highest_bank_bit;
+	u32 bank_swzl_level;
+	u32 bank_spreading;
+};
+
+struct msm_cvp_qos_setting {
+	u32 axi_qos;
+	u32 prioritylut_low;
+	u32 prioritylut_high;
+	u32 urgency_low;
+	u32 urgency_low_ro;
+	u32 dangerlut_low;
+	u32 safelut_low;
+};
+
+struct msm_cvp_platform_data {
+	struct msm_cvp_common_data *common_data;
+	unsigned int common_data_length;
+	unsigned int sku_version;
+	uint32_t vpu_ver;
+	unsigned int vm_id;	/* pvm: 1; tvm: 2 */
+	struct msm_cvp_ubwc_config_data *ubwc_config;
+	struct msm_cvp_qos_setting *noc_qos;
+};
+
+struct cvp_kmem_cache {
+	struct kmem_cache *cache;
+	atomic_t nr_objs;
+};
+
+struct msm_cvp_drv {
+	struct mutex lock;
+	struct msm_cvp_core *cvp_core;
+	struct dentry *debugfs_root;
+	int thermal_level;
+	u32 sku_version;
+	struct cvp_kmem_cache msg_cache;
+	struct cvp_kmem_cache frame_cache;
+	struct cvp_kmem_cache buf_cache;
+	struct cvp_kmem_cache smem_cache;
+	char fw_version[CVP_VERSION_LENGTH];
+};
+
+enum profiling_points {
+	SYS_INIT = 0,
+	SESSION_INIT,
+	LOAD_RESOURCES,
+	FRAME_PROCESSING,
+	FW_IDLE,
+	MAX_PROFILING_POINTS,
+};
+
+struct cvp_clock_data {
+	int buffer_counter;
+	int load;
+	int load_low;
+	int load_norm;
+	int load_high;
+	int min_threshold;
+	int max_threshold;
+	unsigned long bitrate;
+	unsigned long min_freq;
+	unsigned long curr_freq;
+	u32 ddr_bw;
+	u32 sys_cache_bw;
+	u32 operating_rate;
+	bool low_latency_mode;
+	bool turbo_mode;
+};
+
+struct cvp_profile_data {
+	int start;
+	int stop;
+	int cumulative;
+	char name[64];
+	int sampling;
+	int average;
+};
+
+struct msm_cvp_debug {
+	struct cvp_profile_data pdata[MAX_PROFILING_POINTS];
+	int profile;
+	int samples;
+};
+
+enum msm_cvp_modes {
+	CVP_SECURE = BIT(0),
+	CVP_TURBO = BIT(1),
+	CVP_THUMBNAIL = BIT(2),
+	CVP_LOW_POWER = BIT(3),
+	CVP_REALTIME = BIT(4),
+};
+
+#define MAX_NUM_MSGS_PER_SESSION	128
+
+struct cvp_session_msg {
+	struct list_head node;
+	struct cvp_hfi_msg_session_hdr_ext pkt;
+};
+
+struct cvp_session_queue {
+	spinlock_t lock;
+	enum queue_state state;
+	unsigned int msg_count;
+	struct list_head msgs;
+	wait_queue_head_t wq;
+};
+
+struct cvp_session_prop {
+	u32 type;
+	u32 kernel_mask;
+	u32 priority;
+	u32 is_secure;
+	u32 dsp_mask;
+	u32 fthread_nr;
+	u32 cycles[HFI_MAX_HW_THREADS];
+	u32 fw_cycles;
+	u32 op_cycles[HFI_MAX_HW_THREADS];
+	u32 fw_op_cycles;
+	u32 ddr_bw;
+	u32 ddr_op_bw;
+	u32 ddr_cache;
+	u32 ddr_op_cache;
+	u32 fps[HFI_MAX_HW_THREADS];
+	u32 dump_offset;
+	u32 dump_size;
+};
+
+enum cvp_event_t {
+	CVP_NO_EVENT,
+	CVP_SSR_EVENT = 1,
+	CVP_SYS_ERROR_EVENT,
+	CVP_MAX_CLIENTS_EVENT,
+	CVP_HW_UNSUPPORTED_EVENT,
+	CVP_INVALID_EVENT,
+	CVP_DUMP_EVENT,
+};
+
+struct cvp_session_event {
+	spinlock_t lock;
+	enum cvp_event_t event;
+	wait_queue_head_t wq;
+};
+
+#define MAX_ENTRIES 64
+
+struct smem_data {
+	u32 size;
+	u32 flags;
+	u32 device_addr;
+	u32 bitmap_index;
+	u32 refcount;
+	u32 pkt_type;
+	u32 buf_idx;
+};
+
+struct cvp_buf_data {
+	u32 device_addr;
+	u32 size;
+};
+
+struct inst_snapshot {
+	void *session;
+	u32 smem_index;
+	u32 dsp_index;
+	u32 persist_index;
+	struct smem_data smem_log[MAX_ENTRIES];
+	struct cvp_buf_data dsp_buf_log[MAX_ENTRIES];
+	struct cvp_buf_data persist_buf_log[MAX_ENTRIES];
+};
+
+struct cvp_noc_log {
+	u32 used;
+	u32 err_ctrl_swid_low;
+	u32 err_ctrl_swid_high;
+	u32 err_ctrl_mainctl_low;
+	u32 err_ctrl_errvld_low;
+	u32 err_ctrl_errclr_low;
+	u32 err_ctrl_errlog0_low;
+	u32 err_ctrl_errlog0_high;
+	u32 err_ctrl_errlog1_low;
+	u32 err_ctrl_errlog1_high;
+	u32 err_ctrl_errlog2_low;
+	u32 err_ctrl_errlog2_high;
+	u32 err_ctrl_errlog3_low;
+	u32 err_ctrl_errlog3_high;
+	u32 err_core_swid_low;
+	u32 err_core_swid_high;
+	u32 err_core_mainctl_low;
+	u32 err_core_errvld_low;
+	u32 err_core_errclr_low;
+	u32 err_core_errlog0_low;
+	u32 err_core_errlog0_high;
+	u32 err_core_errlog1_low;
+	u32 err_core_errlog1_high;
+	u32 err_core_errlog2_low;
+	u32 err_core_errlog2_high;
+	u32 err_core_errlog3_low;
+	u32 err_core_errlog3_high;
+	u32 arp_test_bus[16];
+	u32 dma_test_bus[512];
+};
+
+struct cvp_debug_log {
+	struct cvp_noc_log noc_log;
+	u32 snapshot_index;
+	struct inst_snapshot snapshot[16];
+};
+
+struct msm_cvp_core {
+	struct mutex lock;
+	struct mutex clk_lock;
+	dev_t dev_num;
+	struct cdev cdev;
+	struct class *class;
+	struct device *dev;
+	struct cvp_hfi_ops *dev_ops;
+	struct msm_cvp_platform_data *platform_data;
+	struct msm_cvp_synx_ops *synx_ftbl;
+	struct list_head instances;
+	struct dentry *debugfs_root;
+	enum cvp_core_state state;
+	struct completion completions[SYS_MSG_END - SYS_MSG_START + 1];
+	enum msm_cvp_hfi_type hfi_type;
+	struct msm_cvp_platform_resources resources;
+	struct msm_cvp_capability *capabilities;
+	struct delayed_work fw_unload_work;
+	struct work_struct ssr_work;
+	enum hal_ssr_trigger_type ssr_type;
+	u32 smmu_fault_count;
+	u32 last_fault_addr;
+	u32 ssr_count;
+	u32 smem_leak_count;
+	bool trigger_ssr;
+	unsigned long curr_freq;
+	unsigned long orig_core_sum;
+	unsigned long bw_sum;
+	atomic64_t kernel_trans_id;
+	struct cvp_debug_log log;
+};
+
+struct msm_cvp_inst {
+	struct list_head list;
+	struct list_head dsp_list;
+	struct mutex sync_lock, lock;
+	struct msm_cvp_core *core;
+	enum session_type session_type;
+	u32 dsp_handle;
+	struct task_struct *task;
+	atomic_t smem_count;
+	struct cvp_session_queue session_queue;
+	struct cvp_session_queue session_queue_fence;
+	struct cvp_session_event event_handler;
+	void *session;
+	enum instance_state state;
+	struct msm_cvp_list freqs;
+	struct msm_cvp_list persistbufs;
+	struct cvp_dmamap_cache dma_cache;
+	struct msm_cvp_list cvpdspbufs;
+	struct msm_cvp_list cvpwnccbufs;
+	struct msm_cvp_list frames;
+	struct cvp_frame_bufs last_frame;
+	struct cvp_frame_bufs unused_dsp_bufs;
+	struct cvp_frame_bufs unused_wncc_bufs;
+	u32 cvpwnccbufs_num;
+	struct msm_cvp_wncc_buffer *cvpwnccbufs_table;
+	struct completion completions[SESSION_MSG_END - SESSION_MSG_START + 1];
+	struct dentry *debugfs_root;
+	struct msm_cvp_debug debug;
+	struct cvp_clock_data clk_data;
+	enum msm_cvp_modes flags;
+	struct msm_cvp_capability capability;
+	struct kref kref;
+	struct cvp_session_prop prop;
+	/* error_code will be cleared after being returned to user mode */
+	u32 error_code;
+	/* prev_error_code saves value of error_code before it's cleared */
+	u32 prev_error_code;
+	struct synx_session *synx_session_id;
+	struct cvp_fence_queue fence_cmd_queue;
+	char proc_name[TASK_COMM_LEN];
+};
+
+extern struct msm_cvp_drv *cvp_driver;
+
+void cvp_handle_cmd_response(enum hal_command_response cmd, void *data);
+int msm_cvp_trigger_ssr(struct msm_cvp_core *core,
+	enum hal_ssr_trigger_type type);
+int msm_cvp_noc_error_info(struct msm_cvp_core *core);
+void msm_cvp_comm_handle_thermal_event(void);
+
+void msm_cvp_ssr_handler(struct work_struct *work);
+/*
+ * XXX: normally should be in msm_cvp_core.h, but that's meant for public APIs,
+ * whereas this is private
+ */
+int msm_cvp_destroy(struct msm_cvp_inst *inst);
+void *cvp_get_drv_data(struct device *dev);
+void *cvp_kmem_cache_zalloc(struct cvp_kmem_cache *k, gfp_t flags);
+void cvp_kmem_cache_free(struct cvp_kmem_cache *k, void *obj);
+#endif

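The cvp_kmem_cache wrapper declared above pairs a kmem_cache with a live-object counter, which is what makes leak accounting such as smem_leak_count possible. A minimal sketch of what the zalloc/free pair could look like, assuming nr_objs simply tracks outstanding objects; the real definitions may differ:

#include <linux/slab.h>
#include <linux/atomic.h>

void *cvp_kmem_cache_zalloc(struct cvp_kmem_cache *k, gfp_t flags)
{
	void *obj = kmem_cache_zalloc(k->cache, flags);

	if (obj)
		atomic_inc(&k->nr_objs);	/* count live objects */
	return obj;
}

void cvp_kmem_cache_free(struct cvp_kmem_cache *k, void *obj)
{
	if (!obj)
		return;
	atomic_dec(&k->nr_objs);
	kmem_cache_free(k->cache, obj);
}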
+ 670 - 0
qcom/opensource/eva-kernel/msm/eva/msm_cvp_ioctl.c

@@ -0,0 +1,670 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/compat.h>
+#include "cvp_private.h"
+#include "cvp_hfi_api.h"
+
+static int _get_pkt_hdr_from_user(struct eva_kmd_arg __user *up,
+		struct cvp_hal_session_cmd_pkt *pkt_hdr)
+{
+	struct eva_kmd_hfi_packet *u;
+	struct cvp_hfi_msg_session_hdr *hdr;
+
+	hdr = (struct cvp_hfi_msg_session_hdr *)pkt_hdr;
+
+	u = &up->data.hfi_pkt;
+
+	if (get_user(pkt_hdr->size, &u->pkt_data[0]))
+		return -EFAULT;
+
+	if (get_user(pkt_hdr->packet_type, &u->pkt_data[1]))
+		return -EFAULT;
+
+	if (get_pkt_index(pkt_hdr) < 0) {
+		dprintk(CVP_ERR, "user mode provides incorrect hfi\n");
+		goto set_default_pkt_hdr;
+	}
+
+	if (pkt_hdr->size > MAX_HFI_PKT_SIZE * sizeof(unsigned int)) {
+		dprintk(CVP_ERR, "user HFI packet too large %x\n",
+				pkt_hdr->size);
+		return -EINVAL;
+	}
+
+	return 0;
+
+set_default_pkt_hdr:
+	pkt_hdr->size = get_msg_size(hdr);
+	return 0;
+}
+
+static int _get_fence_pkt_hdr_from_user(struct eva_kmd_arg __user *up,
+		struct cvp_hal_session_cmd_pkt *pkt_hdr)
+{
+	struct eva_kmd_hfi_synx_packet __user *u;
+
+	u = &up->data.hfi_synx_pkt;
+
+	if (get_user(pkt_hdr->size, &u->pkt_data[0]))
+		return -EFAULT;
+
+	if (get_user(pkt_hdr->packet_type, &u->pkt_data[1]))
+		return -EFAULT;
+
+	if (pkt_hdr->size > (MAX_HFI_PKT_SIZE * sizeof(unsigned int)))
+		return -EINVAL;
+
+	return 0;
+}
+
+/* Size is in units of u32 */
+static int _copy_pkt_from_user(struct eva_kmd_arg *kp,
+		struct eva_kmd_arg __user *up,
+		unsigned int start, unsigned int size)
+{
+	struct eva_kmd_hfi_packet *k, *u;
+	int i;
+
+	k = &kp->data.hfi_pkt;
+	u = &up->data.hfi_pkt;
+	for (i = start; i < start + size; i++)
+		if (get_user(k->pkt_data[i], &u->pkt_data[i]))
+			return -EFAULT;
+
+	if (get_user(k->oob_buf, &u->oob_buf))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int _copy_synx_data_from_user(
+	struct eva_kmd_hfi_synx_packet *k,
+	struct eva_kmd_hfi_synx_packet __user *u)
+{
+	int i;
+
+	for (i = 0; i < MAX_FENCE_DATA_SIZE; i++) {
+		if (get_user(k->fence_data[i], &u->fence_data[i]))
+			return -EFAULT;
+	}
+
+	if (get_user(k->oob_buf, &u->oob_buf))
+		return -EFAULT;
+
+	return 0;
+}
+
+/* Size is in units of u32 */
+static int _copy_fence_data_from_user_deprecate(
+	struct eva_kmd_hfi_fence_packet *k,
+	struct eva_kmd_hfi_fence_packet __user *u)
+{
+	int i;
+
+	for (i = 0; i < MAX_HFI_FENCE_SIZE; i++) {
+		if (get_user(k->fence_data[i], &u->fence_data[i]))
+			return -EFAULT;
+	}
+
+	if (get_user(k->frame_id, &u->frame_id)) {
+		dprintk(CVP_ERR, "Failed to get frame id from fence pkt\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int _copy_fence_pkt_from_user(struct eva_kmd_arg *kp,
+		struct eva_kmd_arg __user *up)
+{
+	struct eva_kmd_hfi_synx_packet *k;
+	struct eva_kmd_hfi_synx_packet __user *u;
+	struct eva_kmd_hfi_fence_packet __user *u1;
+	int i;
+
+	k = &kp->data.hfi_synx_pkt;
+	u = &up->data.hfi_synx_pkt;
+	u1 = &up->data.hfi_fence_pkt;
+
+	for (i = 0; i < MAX_HFI_PKT_SIZE; i++)
+		if (get_user(k->pkt_data[i], &u->pkt_data[i]))
+			return -EFAULT;
+
+	if (get_user(k->fence_data[0], &u->fence_data[0]))
+		return -EFAULT;
+
+	if (k->fence_data[0] == 0xFEEDFACE)
+		return _copy_synx_data_from_user(k, u);
+	else
+		return _copy_fence_data_from_user_deprecate(
+				(struct eva_kmd_hfi_fence_packet *)k, u1);
+}
+
+static int _copy_frameid_from_user(struct eva_kmd_arg *kp,
+		struct eva_kmd_arg __user *up)
+{
+	if (get_user(kp->data.frame_id, &up->data.frame_id)) {
+		dprintk(CVP_ERR, "Failed to get frame id from user\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int _copy_sysprop_from_user(struct eva_kmd_arg *kp,
+		struct eva_kmd_arg __user *up)
+{
+	struct eva_kmd_sys_properties *k, *u;
+
+	k = &kp->data.sys_properties;
+	u = &up->data.sys_properties;
+
+	if (get_user(k->prop_num, &u->prop_num))
+		return -EFAULT;
+
+	if (k->prop_num < 1 || k->prop_num > MAX_KMD_PROP_NUM_PER_PACKET) {
+		dprintk(CVP_ERR, "Num of prop out of range %d\n", k->prop_num);
+		return -EFAULT;
+	}
+
+	return _copy_pkt_from_user(kp, up, 1,
+		(k->prop_num * (sizeof(struct eva_kmd_sys_property) >> 2)));
+}
+
+static int _copy_pkt_to_user(struct eva_kmd_arg *kp,
+		struct eva_kmd_arg __user *up,
+		unsigned int size)
+{
+	struct eva_kmd_hfi_packet *k, *u;
+	int i;
+
+	k = &kp->data.hfi_pkt;
+	u = &up->data.hfi_pkt;
+	for (i = 0; i < size; i++)
+		if (put_user(k->pkt_data[i], &u->pkt_data[i]))
+			return -EFAULT;
+
+	if (put_user(k->oob_buf, &u->oob_buf))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int _copy_fence_pkt_to_user(struct eva_kmd_arg *kp,
+		struct eva_kmd_arg __user *up)
+{
+	struct eva_kmd_hfi_synx_packet *k;
+	struct eva_kmd_hfi_synx_packet __user *u;
+	int i;
+
+	k = &kp->data.hfi_synx_pkt;
+	u = &up->data.hfi_synx_pkt;
+	for (i = 0; i < MAX_HFI_PKT_SIZE; i++) {
+		if (put_user(k->pkt_data[i], &u->pkt_data[i]))
+			return -EFAULT;
+	}
+
+	if (put_user(k->oob_buf, &u->oob_buf))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int _copy_sysprop_to_user(struct eva_kmd_arg *kp,
+		struct eva_kmd_arg __user *up)
+{
+	struct eva_kmd_sys_properties *k;
+	struct eva_kmd_sys_properties __user *u;
+	int i;
+
+	k = &kp->data.sys_properties;
+	u = &up->data.sys_properties;
+
+	for (i = 0; i < 8; i++)
+		if (put_user(k->prop_data[i].data, &u->prop_data[i].data))
+			return -EFAULT;
+
+	return 0;
+}
+
+static void print_hfi_short(struct eva_kmd_arg __user *up)
+{
+	struct eva_kmd_hfi_packet *pkt;
+	unsigned int words[5];
+
+	pkt = &up->data.hfi_pkt;
+	if (get_user(words[0], &up->type) ||
+			get_user(words[1], &up->buf_offset) ||
+			get_user(words[2], &up->buf_num) ||
+			get_user(words[3], &pkt->pkt_data[0]) ||
+			get_user(words[4], &pkt->pkt_data[1]))
+		dprintk(CVP_ERR, "Failed to print ioctl cmd\n");
+
+	dprintk(CVP_HFI, "IOCTL cmd type %#x, offset %d, num %d, pkt %d %#x\n",
+			words[0], words[1], words[2], words[3], words[4]);
+}
+
+static int _copy_session_ctrl_to_user(
+	struct eva_kmd_session_control *k,
+	struct eva_kmd_session_control *u)
+{
+	int i;
+
+	if (put_user(k->ctrl_type, &u->ctrl_type))
+		return -EFAULT;
+	for (i = 0; i < 8; i++)
+		if (put_user(k->ctrl_data[i], &u->ctrl_data[i]))
+			return -EFAULT;
+	return 0;
+}
+
+static int _get_session_ctrl_from_user(
+	struct eva_kmd_session_control *k,
+	struct eva_kmd_session_control *u)
+{
+	int i;
+
+	if (get_user(k->ctrl_type, &u->ctrl_type))
+		return -EFAULT;
+
+	for (i = 0; i < 8; i++)
+		if (get_user(k->ctrl_data[i], &u->ctrl_data[i]))
+			return -EFAULT;
+	return 0;
+}
+
+static int _get_session_info_from_user(
+	struct eva_kmd_session_info *k,
+	struct eva_kmd_session_info __user *u)
+{
+	int i;
+
+	if (get_user(k->session_id, &u->session_id))
+		return -EFAULT;
+
+	for (i = 0; i < 10; i++)
+		if (get_user(k->reserved[i], &u->reserved[i]))
+			return -EFAULT;
+	return 0;
+}
+
+static int convert_from_user(struct eva_kmd_arg *kp,
+		unsigned long arg,
+		struct msm_cvp_inst *inst)
+{
+	int rc = 0;
+	int i;
+	struct eva_kmd_arg __user *up = (struct eva_kmd_arg __user *)arg;
+	struct cvp_hal_session_cmd_pkt pkt_hdr;
+	int pkt_idx;
+
+	if (!kp || !up) {
+		dprintk_rl(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	print_hfi_short(up);
+
+	if (get_user(kp->type, &up->type))
+		return -EFAULT;
+
+	if (get_user(kp->buf_offset, &up->buf_offset) ||
+		get_user(kp->buf_num, &up->buf_num))
+		return -EFAULT;
+
+	switch (kp->type) {
+	case EVA_KMD_GET_SESSION_INFO:
+	{
+		struct eva_kmd_session_info *k;
+		struct eva_kmd_session_info __user *u;
+
+		k = &kp->data.session;
+		u = &up->data.session;
+		if (_get_session_info_from_user(k, u)) {
+			dprintk(CVP_ERR, "fail to get sess info\n");
+			return -EFAULT;
+		}
+
+		break;
+	}
+	case EVA_KMD_REGISTER_BUFFER:
+	{
+		struct eva_kmd_buffer *k, *u;
+
+		k = &kp->data.regbuf;
+		u = &up->data.regbuf;
+		if (get_user(k->type, &u->type) ||
+			get_user(k->index, &u->index) ||
+			get_user(k->fd, &u->fd) ||
+			get_user(k->size, &u->size) ||
+			get_user(k->offset, &u->offset) ||
+			get_user(k->pixelformat, &u->pixelformat) ||
+			get_user(k->flags, &u->flags))
+			return -EFAULT;
+		for (i = 0; i < 5; i++)
+			if (get_user(k->reserved[i], &u->reserved[i]))
+				return -EFAULT;
+		break;
+	}
+	case EVA_KMD_UNREGISTER_BUFFER:
+	{
+		struct eva_kmd_buffer *k, *u;
+
+		k = &kp->data.unregbuf;
+		u = &up->data.unregbuf;
+		if (get_user(k->type, &u->type) ||
+			get_user(k->index, &u->index) ||
+			get_user(k->fd, &u->fd) ||
+			get_user(k->size, &u->size) ||
+			get_user(k->offset, &u->offset) ||
+			get_user(k->pixelformat, &u->pixelformat) ||
+			get_user(k->flags, &u->flags))
+			return -EFAULT;
+		for (i = 0; i < 5; i++)
+			if (get_user(k->reserved[i], &u->reserved[i]))
+				return -EFAULT;
+		break;
+	}
+	case EVA_KMD_SEND_CMD_PKT:
+	{
+		if (_get_pkt_hdr_from_user(up, &pkt_hdr)) {
+			dprintk(CVP_ERR, "Invalid syscall: %x, %x, %x\n",
+				kp->type, pkt_hdr.size, pkt_hdr.packet_type);
+			return -EFAULT;
+		}
+
+		rc = _copy_pkt_from_user(kp, up, 0, (pkt_hdr.size >> 2));
+		break;
+	}
+	case EVA_KMD_SEND_FENCE_CMD_PKT:
+	{
+		if (_get_fence_pkt_hdr_from_user(up, &pkt_hdr)) {
+			dprintk(CVP_ERR, "Invalid syscall: %x, %x, %x\n",
+				kp->type, pkt_hdr.size, pkt_hdr.packet_type);
+			return -EFAULT;
+		}
+		dprintk(CVP_HFI, "system call cmd pkt: %d 0x%x\n",
+				pkt_hdr.size, pkt_hdr.packet_type);
+
+		pkt_idx = get_pkt_index(&pkt_hdr);
+		if (pkt_idx < 0) {
+			dprintk(CVP_ERR, "%s incorrect packet %d, %x\n",
+				__func__,
+				pkt_hdr.size,
+				pkt_hdr.packet_type);
+			return -EFAULT;
+		}
+
+		rc = _copy_fence_pkt_from_user(kp, up);
+		break;
+	}
+	case EVA_KMD_RECEIVE_MSG_PKT:
+		break;
+	case EVA_KMD_SESSION_CONTROL:
+	{
+		struct eva_kmd_session_control *k, *u;
+
+		k = &kp->data.session_ctrl;
+		u = &up->data.session_ctrl;
+
+		rc = _get_session_ctrl_from_user(k, u);
+		break;
+	}
+	case EVA_KMD_GET_SYS_PROPERTY:
+	{
+		if (_copy_sysprop_from_user(kp, up)) {
+			dprintk(CVP_ERR, "Failed to get sysprop from user\n");
+			return -EFAULT;
+		}
+		break;
+	}
+	case EVA_KMD_SET_SYS_PROPERTY:
+	{
+		if (_copy_sysprop_from_user(kp, up)) {
+			dprintk(CVP_ERR, "Failed to set sysprop from user\n");
+			return -EFAULT;
+		}
+		break;
+	}
+	case EVA_KMD_FLUSH_ALL:
+	case EVA_KMD_UPDATE_POWER:
+		break;
+	case EVA_KMD_FLUSH_FRAME:
+	{
+		if (_copy_frameid_from_user(kp, up))
+			return -EFAULT;
+		break;
+	}
+	default:
+		dprintk_rl(CVP_ERR, "%s: unknown cmd type 0x%x\n",
+			__func__, kp->type);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+static int _put_user_session_info(
+		struct eva_kmd_session_info *k,
+		struct eva_kmd_session_info __user *u)
+{
+	int i;
+
+	if (put_user(k->session_id, &u->session_id))
+		return -EFAULT;
+
+	for (i = 0; i < 10; i++)
+		if (put_user(k->reserved[i], &u->reserved[i]))
+			return -EFAULT;
+
+	return 0;
+}
+
+static int convert_to_user(struct eva_kmd_arg *kp, unsigned long arg)
+{
+	int rc = 0;
+	int i, size;
+	struct eva_kmd_arg __user *up = (struct eva_kmd_arg __user *)arg;
+	struct cvp_hal_session_cmd_pkt pkt_hdr;
+
+	if (!kp || !up) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	if (put_user(kp->type, &up->type))
+		return -EFAULT;
+
+	switch (kp->type) {
+	case EVA_KMD_RECEIVE_MSG_PKT:
+	{
+		struct eva_kmd_hfi_packet *k, *u;
+		struct cvp_hfi_msg_session_hdr *hdr;
+
+		k = &kp->data.hfi_pkt;
+		u = &up->data.hfi_pkt;
+		hdr = (struct cvp_hfi_msg_session_hdr *)k;
+		size = get_msg_size(hdr) >> 2;
+		for (i = 0; i < size; i++)
+			if (put_user(k->pkt_data[i], &u->pkt_data[i]))
+				return -EFAULT;
+		break;
+	}
+	case EVA_KMD_GET_SESSION_INFO:
+	{
+		struct eva_kmd_session_info *k;
+		struct eva_kmd_session_info __user *u;
+
+		k = &kp->data.session;
+		u = &up->data.session;
+		if (_put_user_session_info(k, u)) {
+			dprintk(CVP_ERR, "fail to copy sess info to user\n");
+			return -EFAULT;
+		}
+
+		break;
+	}
+	case EVA_KMD_REGISTER_BUFFER:
+	{
+		struct eva_kmd_buffer *k, *u;
+
+		k = &kp->data.regbuf;
+		u = &up->data.regbuf;
+		if (put_user(k->type, &u->type) ||
+			put_user(k->index, &u->index) ||
+			put_user(k->fd, &u->fd) ||
+			put_user(k->size, &u->size) ||
+			put_user(k->offset, &u->offset) ||
+			put_user(k->pixelformat, &u->pixelformat) ||
+			put_user(k->flags, &u->flags))
+			return -EFAULT;
+		for (i = 0; i < 5; i++)
+			if (put_user(k->reserved[i], &u->reserved[i]))
+				return -EFAULT;
+		break;
+	}
+	case EVA_KMD_UNREGISTER_BUFFER:
+	{
+		struct eva_kmd_buffer *k, *u;
+
+		k = &kp->data.unregbuf;
+		u = &up->data.unregbuf;
+		if (put_user(k->type, &u->type) ||
+			put_user(k->index, &u->index) ||
+			put_user(k->fd, &u->fd) ||
+			put_user(k->size, &u->size) ||
+			put_user(k->offset, &u->offset) ||
+			put_user(k->pixelformat, &u->pixelformat) ||
+			put_user(k->flags, &u->flags))
+			return -EFAULT;
+		for (i = 0; i < 5; i++)
+			if (put_user(k->reserved[i], &u->reserved[i]))
+				return -EFAULT;
+		break;
+	}
+	case EVA_KMD_SEND_CMD_PKT:
+	{
+		if (_get_pkt_hdr_from_user(up, &pkt_hdr))
+			return -EFAULT;
+
+		dprintk(CVP_HFI, "Send user cmd pkt: %d %d\n",
+				pkt_hdr.size, pkt_hdr.packet_type);
+		rc = _copy_pkt_to_user(kp, up, (pkt_hdr.size >> 2));
+		break;
+	}
+	case EVA_KMD_SEND_FENCE_CMD_PKT:
+	{
+		if (_get_fence_pkt_hdr_from_user(up, &pkt_hdr))
+			return -EFAULT;
+
+		dprintk(CVP_HFI, "Send user cmd pkt: %d %d\n",
+				pkt_hdr.size, pkt_hdr.packet_type);
+
+		rc = _copy_fence_pkt_to_user(kp, up);
+		break;
+	}
+	case EVA_KMD_SESSION_CONTROL:
+	{
+		struct eva_kmd_session_control *k, *u;
+
+		k = &kp->data.session_ctrl;
+		u = &up->data.session_ctrl;
+		rc = _copy_session_ctrl_to_user(k, u);
+		break;
+	}
+	case EVA_KMD_GET_SYS_PROPERTY:
+	{
+		if (_copy_sysprop_to_user(kp, up)) {
+			dprintk(CVP_ERR, "Fail to copy sysprop to user\n");
+			return -EFAULT;
+		}
+		break;
+	}
+	case EVA_KMD_FLUSH_ALL:
+	case EVA_KMD_FLUSH_FRAME:
+	case EVA_KMD_SET_SYS_PROPERTY:
+	case EVA_KMD_UPDATE_POWER:
+		break;
+	default:
+		dprintk(CVP_ERR, "%s: unknown cmd type 0x%x\n",
+			__func__, kp->type);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+static long cvp_ioctl(struct msm_cvp_inst *inst,
+	unsigned int cmd, unsigned long arg)
+{
+	int rc;
+	struct eva_kmd_arg *karg;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	karg = kzalloc(sizeof(*karg), GFP_KERNEL);
+	if (!karg)
+		return -ENOMEM;
+
+	if (convert_from_user(karg, arg, inst)) {
+		dprintk_rl(CVP_ERR, "%s: failed to get from user cmd %x\n",
+			__func__, karg->type);
+		kfree(karg);
+		return -EFAULT;
+	}
+
+	rc = msm_cvp_private((void *)inst, cmd, karg);
+	if (rc) {
+		dprintk(CVP_ERR, "%s: failed cmd type %x %d\n",
+			__func__, karg->type, rc);
+		kfree(karg);
+		return rc;
+	}
+
+	if (convert_to_user(karg, arg)) {
+		dprintk(CVP_ERR, "%s: failed to copy to user cmd %x\n",
+			__func__, karg->type);
+		kfree(karg);
+		return -EFAULT;
+	}
+
+	kfree(karg);
+	return rc;
+}
+
+long cvp_unblocked_ioctl(struct file *filp,
+		unsigned int cmd, unsigned long arg)
+{
+	struct msm_cvp_inst *inst;
+
+	if (!filp || !filp->private_data) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	inst = filp->private_data;
+	return cvp_ioctl(inst, cmd, arg);
+}
+
+long cvp_compat_ioctl(struct file *filp,
+		unsigned int cmd, unsigned long arg)
+{
+	struct msm_cvp_inst *inst;
+
+	if (!filp || !filp->private_data) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	inst = filp->private_data;
+	return cvp_ioctl(inst, cmd, (unsigned long)compat_ptr(arg));
+}

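Seen from userspace, the convert_from_user()/convert_to_user() pair above means a caller fills a struct eva_kmd_arg and issues a single ioctl on the CVP device node. A hedged sketch: EVA_IOCTL_CMD stands in for the real request code and /dev/cvp-core for the real node name, both defined outside this file.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <media/msm_eva_private.h>

int main(void)
{
	struct eva_kmd_arg arg;
	int fd = open("/dev/cvp-core", O_RDWR);	/* node name assumed */

	if (fd < 0)
		return 1;

	memset(&arg, 0, sizeof(arg));
	arg.type = EVA_KMD_GET_SESSION_INFO;

	/* EVA_IOCTL_CMD is a stand-in for the real request code */
	if (ioctl(fd, EVA_IOCTL_CMD, &arg) == 0)
		printf("session id %u\n", arg.data.session.session_id);

	close(fd);
	return 0;
}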
+ 1042 - 0
qcom/opensource/eva-kernel/msm/eva/msm_cvp_platform.c

@@ -0,0 +1,1042 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/io.h>
+#include <soc/qcom/of_common.h>
+#include "msm_cvp_internal.h"
+#include "msm_cvp_debug.h"
+#include "cvp_hfi_api.h"
+#include "cvp_hfi.h"
+
+#define UBWC_CONFIG(mco, mlo, hbo, bslo, bso, rs, mc, ml, hbb, bsl, bsp) \
+{	\
+	.override_bit_info.max_channel_override = mco,	\
+	.override_bit_info.mal_length_override = mlo,	\
+	.override_bit_info.hb_override = hbo,	\
+	.override_bit_info.bank_swzl_level_override = bslo,	\
+	.override_bit_info.bank_spreading_override = bso,	\
+	.override_bit_info.reserved = rs,	\
+	.max_channels = mc,	\
+	.mal_length = ml,	\
+	.highest_bank_bit = hbb,	\
+	.bank_swzl_level = bsl,	\
+	.bank_spreading = bsp,	\
+}
+
+static struct msm_cvp_common_data default_common_data[] = {
+	{
+		.key = "qcom,auto-pil",
+		.value = 1,
+	},
+};
+
+static struct msm_cvp_common_data sm8450_common_data[] = {
+	{
+		.key = "qcom,pm-qos-latency-us",
+		.value = 50,
+	},
+	{
+		.key = "qcom,sw-power-collapse",
+		.value = 1,
+	},
+	{
+		.key = "qcom,domain-attr-non-fatal-faults",
+		.value = 1,
+	},
+	{
+		.key = "qcom,max-secure-instances",
+		.value = 2,             /*
+					 * By design the driver allows a 3rd
+					 * instance as well, since the secure
+					 * flags are updated later for the
+					 * current instance. Hence the total
+					 * number of secure sessions is
+					 * max-secure-instances + 1.
+					 */
+	},
+	{
+		.key = "qcom,max-ssr-allowed",
+		.value = 1,		/*
+					 * Maximum number of SSRs before BUG_ON
+					 */
+	},
+	{
+		.key = "qcom,power-collapse-delay",
+		.value = 3000,
+	},
+	{
+		.key = "qcom,hw-resp-timeout",
+		.value = 2000,
+	},
+	{
+		.key = "qcom,dsp-resp-timeout",
+		.value = 1000,
+	},
+	{
+		.key = "qcom,debug-timeout",
+		.value = 0,
+	},
+	{
+		.key = "qcom,dsp-enabled",
+		.value = 1,
+	}
+};
+
+static struct msm_cvp_common_data sm8550_common_data[] = {
+	{
+		.key = "qcom,pm-qos-latency-us",
+		.value = 50,
+	},
+	{
+		.key = "qcom,sw-power-collapse",
+		.value = 1,
+	},
+	{
+		.key = "qcom,domain-attr-non-fatal-faults",
+		.value = 0,
+	},
+	{
+		.key = "qcom,max-secure-instances",
+		.value = 2,             /*
+					 * By design the driver allows a 3rd
+					 * instance as well, since the secure
+					 * flags are updated later for the
+					 * current instance. Hence the total
+					 * number of secure sessions is
+					 * max-secure-instances + 1.
+					 */
+	},
+	{
+		.key = "qcom,max-ssr-allowed",
+		.value = 1,		/*
+					 * Maximum number of SSRs before BUG_ON
+					 */
+	},
+	{
+		.key = "qcom,power-collapse-delay",
+		.value = 3000,
+	},
+	{
+		.key = "qcom,hw-resp-timeout",
+		.value = 2000,
+	},
+	{
+		.key = "qcom,dsp-resp-timeout",
+		.value = 1000,
+	},
+	{
+		.key = "qcom,debug-timeout",
+		.value = 0,
+	},
+	{
+		.key = "qcom,dsp-enabled",
+		.value = 1,
+	}
+};
+
+static struct msm_cvp_common_data sm8550_tvm_common_data[] = {
+	{
+		.key = "qcom,pm-qos-latency-us",
+		.value = 50,
+	},
+	{
+		.key = "qcom,sw-power-collapse",
+		.value = 0,
+	},
+	{
+		.key = "qcom,domain-attr-non-fatal-faults",
+		.value = 0,
+	},
+	{
+		.key = "qcom,max-secure-instances",
+		.value = 2,             /*
+					 * By design the driver allows a 3rd
+					 * instance as well, since the secure
+					 * flags are updated later for the
+					 * current instance. Hence the total
+					 * number of secure sessions is
+					 * max-secure-instances + 1.
+					 */
+	},
+	{
+		.key = "qcom,max-ssr-allowed",
+		.value = 1,		/*
+					 * Maximum number of SSRs before BUG_ON
+					 */
+	},
+	{
+		.key = "qcom,power-collapse-delay",
+		.value = 3000,
+	},
+	{
+		.key = "qcom,hw-resp-timeout",
+		.value = 2000,
+	},
+	{
+		.key = "qcom,dsp-resp-timeout",
+		.value = 1000,
+	},
+	{
+		.key = "qcom,debug-timeout",
+		.value = 0,
+	},
+	{
+		.key = "qcom,dsp-enabled",
+		.value = 0,
+	}
+};
+
+static struct msm_cvp_common_data sm8650_common_data[] = {
+	{
+		.key = "qcom,pm-qos-latency-us",
+		.value = 50,
+	},
+	{
+		.key = "qcom,sw-power-collapse",
+		.value = 1,
+	},
+	{
+		.key = "qcom,domain-attr-non-fatal-faults",
+		.value = 0,
+	},
+	{
+		.key = "qcom,max-secure-instances",
+		.value = 2, /*
+					* By design the driver allows a 3rd
+					* instance as well, since the secure
+					* flags are updated later for the
+					* current instance. Hence the total
+					* number of secure sessions is
+					* max-secure-instances + 1.
+					*/
+	},
+	{
+		.key = "qcom,max-ssr-allowed",
+		.value = 1,	/*
+					* Maximum number of SSRs before BUG_ON
+					*/
+	},
+	{
+		.key = "qcom,power-collapse-delay",
+		.value = 3000,
+	},
+	{
+		.key = "qcom,hw-resp-timeout",
+		.value = 2000,
+	},
+	{
+		.key = "qcom,dsp-resp-timeout",
+		.value = 1000,
+	},
+	{
+		.key = "qcom,debug-timeout",
+		.value = 0,
+	},
+	{
+		.key = "qcom,dsp-enabled",
+		.value = 1,
+	},
+	{
+		.key = "qcom,qos_noc_rge_niu_offset",
+		.value = 0x0,
+	},
+	{
+		.key = "qcom,qos_noc_gce_vadl_tof_niu_offset",
+		.value = 0x0,
+	},
+	{
+		.key = "qcom,qos_noc_cdm_niu_offset",
+		.value = 0x0,
+	},
+	{
+		.key = "qcom,noc_core_err_offset",
+		.value = 0xA000,
+	},
+	{
+		.key = "qcom,noc_main_sidebandmanager_offset",
+		.value = 0x6E00,
+	}
+};
+
+static struct msm_cvp_common_data sm7650_common_data[] = {
+	{
+		.key = "qcom,pm-qos-latency-us",
+		.value = 50,
+	},
+	{
+		.key = "qcom,sw-power-collapse",
+		.value = 1,
+	},
+	{
+		.key = "qcom,domain-attr-non-fatal-faults",
+		.value = 0,
+	},
+	{
+		.key = "qcom,max-secure-instances",
+		.value = 2,
+	},
+	{
+		.key = "qcom,max-ssr-allowed",
+		.value = 1,
+	},
+	{
+		.key = "qcom,power-collapse-delay",
+		.value = 3000,
+	},
+	{
+		.key = "qcom,hw-resp-timeout",
+		.value = 2000,
+	},
+	{
+		.key = "qcom,dsp-resp-timeout",
+		.value = 1000,
+	},
+	{
+		.key = "qcom,debug-timeout",
+		.value = 0,
+	},
+	{
+		.key = "qcom,dsp-enabled",
+		.value = 1,
+	},
+	{
+		.key = "qcom,qos_noc_rge_niu_offset",
+		.value = 0x200,
+	},
+	{
+		.key = "qcom,qos_noc_gce_vadl_tof_niu_offset",
+		.value = 0xE00,
+	},
+	{
+		.key = "qcom,qos_noc_cdm_niu_offset",
+		.value = 0x1A00,
+	},
+	{
+		.key = "qcom,noc_core_err_offset",
+		.value = 0x0,
+	},
+	{
+		.key = "qcom,noc_main_sidebandmanager_offset",
+		.value = 0x0,
+	}
+};
+
+/* Default UBWC config for LPDDR5 */
+static struct msm_cvp_ubwc_config_data kona_ubwc_data[] = {
+	UBWC_CONFIG(1, 1, 1, 0, 0, 0, 8, 32, 16, 0, 0),
+};
+
+static struct msm_cvp_qos_setting waipio_noc_qos = {
+	.axi_qos = 0x99,
+	.prioritylut_low = 0x22222222,
+	.prioritylut_high = 0x33333333,
+	.urgency_low = 0x1022,
+	.dangerlut_low = 0x0,
+	.safelut_low = 0xffff,
+};
+
+static struct msm_cvp_qos_setting lanai_noc_qos = {
+	.axi_qos = 0x99,
+	.prioritylut_low = 0x33333333,
+	.prioritylut_high = 0x33333333,
+	.urgency_low = 0x1033,
+	.urgency_low_ro = 0x1003,
+	.dangerlut_low = 0x0,
+	.safelut_low = 0xffff,
+};
+
+static struct msm_cvp_qos_setting palawan_noc_qos = {
+	.axi_qos = 0x99,
+	.prioritylut_low = 0x33333333,
+	.prioritylut_high = 0x33333333,
+	.urgency_low = 0x1003,
+	.urgency_low_ro = 0x1003,
+	.dangerlut_low = 0x0,
+	.safelut_low = 0xffff,
+};
+
+static struct msm_cvp_platform_data default_data = {
+	.common_data = default_common_data,
+	.common_data_length =  ARRAY_SIZE(default_common_data),
+	.sku_version = 0,
+	.vpu_ver = VPU_VERSION_5,
+	.ubwc_config = 0x0,
+	.noc_qos = 0x0,
+	.vm_id = 1,
+};
+
+static struct msm_cvp_platform_data sm8450_data = {
+	.common_data = sm8450_common_data,
+	.common_data_length =  ARRAY_SIZE(sm8450_common_data),
+	.sku_version = 0,
+	.vpu_ver = VPU_VERSION_5,
+	.ubwc_config = kona_ubwc_data,
+	.noc_qos = &waipio_noc_qos,
+	.vm_id = 1,
+};
+
+static struct msm_cvp_platform_data sm8550_data = {
+	.common_data = sm8550_common_data,
+	.common_data_length =  ARRAY_SIZE(sm8550_common_data),
+	.sku_version = 0,
+	.vpu_ver = VPU_VERSION_5,
+	.ubwc_config = kona_ubwc_data,	/*Reuse Kona setting*/
+	.noc_qos = &waipio_noc_qos,	/*Reuse Waipio setting*/
+	.vm_id = 1,
+};
+
+static struct msm_cvp_platform_data sm8550_tvm_data = {
+	.common_data = sm8550_tvm_common_data,
+	.common_data_length =  ARRAY_SIZE(sm8550_tvm_common_data),
+	.sku_version = 0,
+	.vpu_ver = VPU_VERSION_5,
+	.ubwc_config = kona_ubwc_data,	/*Reuse Kona setting*/
+	.noc_qos = &waipio_noc_qos,	/*Reuse Waipio setting*/
+	.vm_id = 2,
+};
+
+static struct msm_cvp_platform_data sm8650_data = {
+	.common_data = sm8650_common_data,
+	.common_data_length = ARRAY_SIZE(sm8650_common_data),
+	.sku_version = 0,
+	.vpu_ver = VPU_VERSION_5,
+	.ubwc_config = kona_ubwc_data,	/*Reuse Kona setting*/
+	.noc_qos = &lanai_noc_qos,
+	.vm_id = 1,
+};
+
+static struct msm_cvp_platform_data sm7650_data = {
+	.common_data = sm7650_common_data,
+	.common_data_length = ARRAY_SIZE(sm7650_common_data),
+	.sku_version = 0,
+	.vpu_ver = VPU_VERSION_5,
+	.ubwc_config = kona_ubwc_data,	/*Reuse Kona setting*/
+	.noc_qos = &palawan_noc_qos,
+	.vm_id = 1,
+};
+
+static const struct of_device_id msm_cvp_dt_match[] = {
+	{
+		.compatible = "qcom,waipio-cvp",
+		.data = &sm8450_data,
+	},
+	{
+		.compatible = "qcom,kalama-cvp",
+		.data = &sm8550_data,
+	},
+	{
+		.compatible = "qcom,kalama-cvp-tvm",
+		.data = &sm8550_tvm_data,
+	},
+	{
+		.compatible = "qcom,pineapple-cvp",
+		.data = &sm8650_data,
+	},
+	{
+		.compatible = "qcom,cliffs-cvp",
+		.data = &sm7650_data,
+	},
+	{},
+};
+
+/*
+ * WARN: the name field CANNOT hold more than 23 chars,
+ *	 excluding the terminating '\0'
+ *
+ * NOTE: the def entry index for a command packet is
+ *	 "the packet type - HFI_CMD_SESSION_CVP_START"
+ */
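+/*
+ * Lookup sketch (hypothetical helper, matching the NOTE above): a
+ * packet's def entry is found by subtracting HFI_CMD_SESSION_CVP_START
+ * from its packet type, e.g.
+ *
+ *	const struct msm_cvp_hfi_defs *def =
+ *		&cvp_hfi_defs[pkt_type - HFI_CMD_SESSION_CVP_START];
+ */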
+const struct msm_cvp_hfi_defs cvp_hfi_defs[MAX_PKT_IDX] = {
+	[HFI_CMD_SESSION_CVP_DFS_CONFIG - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = HFI_DFS_CONFIG_CMD_SIZE,
+			.type = HFI_CMD_SESSION_CVP_DFS_CONFIG,
+			.is_config_pkt = true,
+			.resp = HAL_NO_RESP,
+			.name = "DFS",
+		},
+	[HFI_CMD_SESSION_CVP_DFS_FRAME - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = HFI_DFS_FRAME_CMD_SIZE,
+			.type = HFI_CMD_SESSION_CVP_DFS_FRAME,
+			.is_config_pkt = false,
+			.resp = HAL_NO_RESP,
+			.name = "DFS_FRAME",
+			.force_kernel_fence = false,
+		},
+	[HFI_CMD_SESSION_CVP_SGM_OF_CONFIG - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_CVP_SGM_OF_CONFIG,
+			.is_config_pkt = true,
+			.resp = HAL_NO_RESP,
+			.name = "SGM_OF",
+		},
+	[HFI_CMD_SESSION_CVP_SGM_OF_FRAME - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_CVP_SGM_OF_FRAME,
+			.is_config_pkt = false,
+			.resp = HAL_NO_RESP,
+			.name = "SGM_OF_FRAME",
+			.force_kernel_fence = false,
+		},
+	[HFI_CMD_SESSION_CVP_WARP_NCC_CONFIG - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_CVP_WARP_NCC_CONFIG,
+			.is_config_pkt = true,
+			.resp = HAL_NO_RESP,
+			.name = "WARP_NCC",
+		},
+	[HFI_CMD_SESSION_CVP_WARP_NCC_FRAME - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_CVP_WARP_NCC_FRAME,
+			.is_config_pkt = false,
+			.resp = HAL_NO_RESP,
+			.name = "WARP_NCC_FRAME",
+			.force_kernel_fence = false,
+		},
+	[HFI_CMD_SESSION_CVP_WARP_CONFIG - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_CVP_WARP_CONFIG,
+			.is_config_pkt = true,
+			.resp = HAL_NO_RESP,
+			.name = "WARP",
+		},
+	[HFI_CMD_SESSION_CVP_WARP_DS_PARAMS - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_CVP_WARP_DS_PARAMS,
+			.is_config_pkt = true,
+			.resp = HAL_NO_RESP,
+			.name = "WARP_DS_PARAMS",
+		},
+	[HFI_CMD_SESSION_CVP_WARP_FRAME - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_CVP_WARP_FRAME,
+			.is_config_pkt = false,
+			.resp = HAL_NO_RESP,
+			.name = "WARP_FRAME",
+			.force_kernel_fence = true,
+		},
+	[HFI_CMD_SESSION_CVP_DMM_CONFIG - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = HFI_DMM_CONFIG_CMD_SIZE,
+			.type = HFI_CMD_SESSION_CVP_DMM_CONFIG,
+			.is_config_pkt = true,
+			.resp = HAL_NO_RESP,
+			.name = "DMM",
+		},
+	[HFI_CMD_SESSION_CVP_DMM_PARAMS - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_CVP_DMM_PARAMS,
+			.is_config_pkt = true,
+			.resp = HAL_NO_RESP,
+			.name = "DMM_PARAMS",
+		},
+	[HFI_CMD_SESSION_CVP_DMM_FRAME - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = HFI_DMM_FRAME_CMD_SIZE,
+			.type = HFI_CMD_SESSION_CVP_DMM_FRAME,
+			.is_config_pkt = false,
+			.resp = HAL_NO_RESP,
+			.name = "DMM_FRAME",
+			.force_kernel_fence = true,
+		},
+	[HFI_CMD_SESSION_CVP_SET_PERSIST_BUFFERS - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = HFI_PERSIST_CMD_SIZE,
+			.type = HFI_CMD_SESSION_CVP_SET_PERSIST_BUFFERS,
+			.is_config_pkt = true,
+			.resp = HAL_NO_RESP,
+			.name = "SET_PERSIST",
+		},
+	[HFI_CMD_SESSION_CVP_RELEASE_PERSIST_BUFFERS - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_CVP_RELEASE_PERSIST_BUFFERS,
+			.is_config_pkt = false,
+			.resp = HAL_NO_RESP,
+			.name = "REL_PERSIST",
+		},
+	[HFI_CMD_SESSION_CVP_DS_CONFIG - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = HFI_DS_CONFIG_CMD_SIZE,
+			.type = HFI_CMD_SESSION_CVP_DS_CONFIG,
+			.is_config_pkt = true,
+			.resp = HAL_NO_RESP,
+			.name = "DS_CONFIG",
+		},
+	[HFI_CMD_SESSION_CVP_DS - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = HFI_DS_CMD_SIZE,
+			.type = HFI_CMD_SESSION_CVP_DS,
+			.is_config_pkt = false,
+			.resp = HAL_NO_RESP,
+			.name = "DS",
+		},
+	[HFI_CMD_SESSION_CVP_CV_TME_CONFIG - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = HFI_OF_CONFIG_CMD_SIZE,
+			.type = HFI_CMD_SESSION_CVP_CV_TME_CONFIG,
+			.is_config_pkt = true,
+			.resp = HAL_NO_RESP,
+			.name = "TME",
+		},
+	[HFI_CMD_SESSION_CVP_CV_TME_FRAME - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = HFI_OF_FRAME_CMD_SIZE,
+			.type = HFI_CMD_SESSION_CVP_CV_TME_FRAME,
+			.is_config_pkt = false,
+			.resp = HAL_NO_RESP,
+			.name = "TME_FRAME",
+			.force_kernel_fence = false,
+		},
+	[HFI_CMD_SESSION_CVP_CV_ODT_CONFIG - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = HFI_ODT_CONFIG_CMD_SIZE,
+			.type = HFI_CMD_SESSION_CVP_CV_ODT_CONFIG,
+			.is_config_pkt = true,
+			.resp = HAL_NO_RESP,
+			.name = "ODT",
+		},
+	[HFI_CMD_SESSION_CVP_CV_ODT_FRAME - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = HFI_ODT_FRAME_CMD_SIZE,
+			.type = HFI_CMD_SESSION_CVP_CV_ODT_FRAME,
+			.is_config_pkt = false,
+			.resp = HAL_NO_RESP,
+			.name = "ODT_FRAME",
+		},
+	[HFI_CMD_SESSION_CVP_CV_OD_CONFIG - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = HFI_OD_CONFIG_CMD_SIZE,
+			.type = HFI_CMD_SESSION_CVP_CV_OD_CONFIG,
+			.is_config_pkt = true,
+			.resp = HAL_NO_RESP,
+			.name = "OD",
+		},
+	[HFI_CMD_SESSION_CVP_CV_OD_FRAME - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = HFI_OD_FRAME_CMD_SIZE,
+			.type = HFI_CMD_SESSION_CVP_CV_OD_FRAME,
+			.is_config_pkt = false,
+			.resp = HAL_NO_RESP,
+			.name = "OD_FRAME",
+		},
+	[HFI_CMD_SESSION_CVP_NCC_CONFIG - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = HFI_NCC_CONFIG_CMD_SIZE,
+			.type = HFI_CMD_SESSION_CVP_NCC_CONFIG,
+			.is_config_pkt = true,
+			.resp = HAL_NO_RESP,
+			.name = "NCC",
+		},
+	[HFI_CMD_SESSION_CVP_NCC_FRAME - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = HFI_NCC_FRAME_CMD_SIZE,
+			.type = HFI_CMD_SESSION_CVP_NCC_FRAME,
+			.is_config_pkt = false,
+			.resp = HAL_NO_RESP,
+			.name = "NCC_FRAME",
+			.force_kernel_fence = false,
+		},
+	[HFI_CMD_SESSION_CVP_ICA_CONFIG - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = HFI_ICA_CONFIG_CMD_SIZE,
+			.type = HFI_CMD_SESSION_CVP_ICA_CONFIG,
+			.is_config_pkt = true,
+			.resp = HAL_NO_RESP,
+			.name = "ICA",
+		},
+	[HFI_CMD_SESSION_CVP_ICA_FRAME - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = HFI_ICA_FRAME_CMD_SIZE,
+			.type = HFI_CMD_SESSION_CVP_ICA_FRAME,
+			.is_config_pkt = false,
+			.resp = HAL_NO_RESP,
+			.name = "ICA_FRAME",
+			.force_kernel_fence = false,
+		},
+	[HFI_CMD_SESSION_CVP_HCD_CONFIG - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = HFI_HCD_CONFIG_CMD_SIZE,
+			.type = HFI_CMD_SESSION_CVP_HCD_CONFIG,
+			.is_config_pkt = true,
+			.resp = HAL_NO_RESP,
+			.name = "HCD",
+		},
+	[HFI_CMD_SESSION_CVP_HCD_FRAME - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = HFI_HCD_FRAME_CMD_SIZE,
+			.type = HFI_CMD_SESSION_CVP_HCD_FRAME,
+			.is_config_pkt = false,
+			.resp = HAL_NO_RESP,
+			.name = "HCD_FRAME",
+			.force_kernel_fence = false,
+		},
+	[HFI_CMD_SESSION_CVP_DC_CONFIG - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = HFI_DCM_CONFIG_CMD_SIZE,
+			.type = HFI_CMD_SESSION_CVP_DC_CONFIG,
+			.is_config_pkt = true,
+			.resp = HAL_NO_RESP,
+			.name = "DC",
+		},
+	[HFI_CMD_SESSION_CVP_DC_FRAME - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = HFI_DCM_FRAME_CMD_SIZE,
+			.type = HFI_CMD_SESSION_CVP_DC_FRAME,
+			.is_config_pkt = false,
+			.resp = HAL_NO_RESP,
+			.name = "DC_FRAME",
+			.force_kernel_fence = false,
+		},
+	[HFI_CMD_SESSION_CVP_DCM_CONFIG - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = HFI_DCM_CONFIG_CMD_SIZE,
+			.type = HFI_CMD_SESSION_CVP_DCM_CONFIG,
+			.is_config_pkt = true,
+			.resp = HAL_NO_RESP,
+			.name = "DCM",
+		},
+	[HFI_CMD_SESSION_CVP_DCM_FRAME - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = HFI_DCM_FRAME_CMD_SIZE,
+			.type = HFI_CMD_SESSION_CVP_DCM_FRAME,
+			.is_config_pkt = false,
+			.resp = HAL_NO_RESP,
+			.name = "DCM_FRAME",
+			.force_kernel_fence = false,
+		},
+	[HFI_CMD_SESSION_CVP_PYS_HCD_CONFIG - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = HFI_PYS_HCD_CONFIG_CMD_SIZE,
+			.type = HFI_CMD_SESSION_CVP_PYS_HCD_CONFIG,
+			.is_config_pkt = true,
+			.resp = HAL_NO_RESP,
+			.name = "PYS_HCD",
+		},
+	[HFI_CMD_SESSION_CVP_PYS_HCD_FRAME - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = HFI_PYS_HCD_FRAME_CMD_SIZE,
+			.type = HFI_CMD_SESSION_CVP_PYS_HCD_FRAME,
+			.is_config_pkt = false,
+			.resp = HAL_NO_RESP,
+			.name = "PYS_HCD_FRAME",
+			.force_kernel_fence = true,
+		},
+	[HFI_CMD_SESSION_CVP_SET_MODEL_BUFFERS - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_CVP_SET_MODEL_BUFFERS,
+			.is_config_pkt = true,
+			.resp = HAL_NO_RESP,
+			.name = "SET_MODEL",
+		},
+	[HFI_CMD_SESSION_CVP_SET_SNAPSHOT_BUFFERS - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_CVP_SET_SNAPSHOT_BUFFERS,
+			.is_config_pkt = false,
+			.resp = HAL_NO_RESP,
+			.name = "SET_SNAPSHOT",
+		},
+	[HFI_CMD_SESSION_CVP_RELEASE_SNAPSHOT_BUFFERS - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_CVP_RELEASE_SNAPSHOT_BUFFERS,
+			.is_config_pkt = false,
+			.resp = HAL_NO_RESP,
+			.name = "REL_SNAPSHOT",
+		},
+	[HFI_CMD_SESSION_CVP_SET_SNAPSHOT_MODE - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_CVP_SET_SNAPSHOT_MODE,
+			.is_config_pkt = true,
+			.resp = HAL_NO_RESP,
+			.name = "SNAPSHOT_MODE",
+		},
+	[HFI_CMD_SESSION_CVP_SNAPSHOT_WRITE_DONE - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_CVP_SNAPSHOT_WRITE_DONE,
+			.is_config_pkt = true,
+			.resp = HAL_NO_RESP,
+			.name = "SNAPSHOT_DONE",
+		},
+	[HFI_CMD_SESSION_CVP_FD_CONFIG - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_CVP_FD_CONFIG,
+			.is_config_pkt = true,
+			.resp = HAL_NO_RESP,
+			.name = "FD",
+		},
+	[HFI_CMD_SESSION_CVP_FD_FRAME - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_CVP_FD_FRAME,
+			.is_config_pkt = false,
+			.resp = HAL_NO_RESP,
+			.name = "FD_FRAME",
+		},
+	[HFI_CMD_SESSION_CVP_XRA_FRAME - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_CVP_XRA_FRAME,
+			.is_config_pkt = false,
+			.resp = HAL_NO_RESP,
+			.name = "XRA_FRAME",
+		},
+	[HFI_CMD_SESSION_CVP_XRA_CONFIG - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_CVP_XRA_CONFIG,
+			.is_config_pkt = true,
+			.resp = HAL_NO_RESP,
+			.name = "XRA_CONFIG",
+		},
+	[HFI_CMD_SESSION_CVP_XRA_BLOB_FRAME - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_CVP_XRA_BLOB_FRAME,
+			.is_config_pkt = false,
+			.resp = HAL_NO_RESP,
+			.name = "XRA_BLOB_FRAME",
+		},
+	[HFI_CMD_SESSION_CVP_XRA_BLOB_CONFIG - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_CVP_XRA_BLOB_CONFIG,
+			.is_config_pkt = true,
+			.resp = HAL_NO_RESP,
+			.name = "XRA_BLOB_CONFIG",
+		},
+	[HFI_CMD_SESSION_CVP_XRA_PATCH_FRAME - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_CVP_XRA_PATCH_FRAME,
+			.is_config_pkt = false,
+			.resp = HAL_NO_RESP,
+			.name = "XRA_PATCH_FRAME",
+			.force_kernel_fence = false,
+		},
+	[HFI_CMD_SESSION_CVP_XRA_PATCH_CONFIG - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_CVP_XRA_PATCH_CONFIG,
+			.is_config_pkt = true,
+			.resp = HAL_NO_RESP,
+			.name = "XRA_PATCH_CONFIG",
+		},
+	[HFI_CMD_SESSION_CVP_XRA_MATCH_FRAME - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_CVP_XRA_MATCH_FRAME,
+			.is_config_pkt = false,
+			.resp = HAL_NO_RESP,
+			.name = "XRA_MATCH_FRAME",
+			.force_kernel_fence = false,
+		},
+	[HFI_CMD_SESSION_CVP_XRA_MATCH_CONFIG - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_CVP_XRA_MATCH_CONFIG,
+			.is_config_pkt = true,
+			.resp = HAL_NO_RESP,
+			.name = "XRA_MATCH_CONFIG",
+		},
+	[HFI_CMD_SESSION_CVP_RGE_FRAME - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_CVP_RGE_FRAME,
+			.is_config_pkt = false,
+			.resp = HAL_NO_RESP,
+			.name = "RGE_FRAME",
+			.force_kernel_fence = true,
+		},
+	[HFI_CMD_SESSION_CVP_RGE_CONFIG - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_CVP_RGE_CONFIG,
+			.is_config_pkt = true,
+			.resp = HAL_NO_RESP,
+			.name = "RGE_CONFIG",
+		},
+	[HFI_CMD_SESSION_EVA_ITOF_FRAME - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_EVA_ITOF_FRAME,
+			.is_config_pkt = false,
+			.resp = HAL_NO_RESP,
+			.name = "ITOF_FRAME",
+			.force_kernel_fence = true,
+		},
+	[HFI_CMD_SESSION_EVA_ITOF_CONFIG - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_EVA_ITOF_CONFIG,
+			.is_config_pkt = true,
+			.resp = HAL_NO_RESP,
+			.name = "ITOF_CONFIG",
+		},
+	[HFI_CMD_SESSION_EVA_DLFD_FRAME - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_EVA_DLFD_FRAME,
+			.is_config_pkt = false,
+			.resp = HAL_NO_RESP,
+			.name = "DLFD_FRAME",
+		},
+	[HFI_CMD_SESSION_EVA_DLFD_CONFIG - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_EVA_DLFD_CONFIG,
+			.is_config_pkt = true,
+			.resp = HAL_NO_RESP,
+			.name = "DLFD_CONFIG",
+		},
+	[HFI_CMD_SESSION_EVA_DLFL_FRAME - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_EVA_DLFL_FRAME,
+			.is_config_pkt = false,
+			.resp = HAL_NO_RESP,
+			.name = "DLFL_FRAME",
+			.force_kernel_fence = false,
+		},
+	[HFI_CMD_SESSION_EVA_DLFL_CONFIG - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_EVA_DLFL_CONFIG,
+			.is_config_pkt = true,
+			.resp = HAL_NO_RESP,
+			.name = "DLFL_CONFIG",
+		},
+	[HFI_CMD_SESSION_CVP_SYNX - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_CVP_SYNX,
+			.is_config_pkt = true,
+			.resp = HAL_NO_RESP,
+			.name = "SYNX_TEST",
+		},
+	[HFI_CMD_SESSION_EVA_DME_ONLY_CONFIG - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_EVA_DME_ONLY_CONFIG,
+			.is_config_pkt = true,
+			.resp = HAL_NO_RESP,
+			.name = "DME_CONFIG",
+		},
+	[HFI_CMD_SESSION_EVA_DME_ONLY_FRAME - HFI_CMD_SESSION_CVP_START] =
+		{
+			.size = 0xFFFFFFFF,
+			.type = HFI_CMD_SESSION_EVA_DME_ONLY_FRAME,
+			.is_config_pkt = false,
+			.resp = HAL_NO_RESP,
+			.name = "DME_FRAME",
+			.force_kernel_fence = true,
+		},
+
+};
+
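+/*
+ * Session packet types are contiguous from HFI_CMD_SESSION_CVP_START, so
+ * (packet_type - HFI_CMD_SESSION_CVP_START) indexes cvp_hfi_defs[] directly.
+ * Entries left with .size == 0 were never populated and are rejected as
+ * unsupported packet types.
+ */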
+int get_pkt_index(struct cvp_hal_session_cmd_pkt *hdr)
+{
+	if (!hdr || (hdr->packet_type < HFI_CMD_SESSION_CVP_START)
+		|| hdr->packet_type >= (HFI_CMD_SESSION_CVP_START + MAX_PKT_IDX))
+		return -EINVAL;
+
+	if (cvp_hfi_defs[hdr->packet_type - HFI_CMD_SESSION_CVP_START].size)
+		return (hdr->packet_type - HFI_CMD_SESSION_CVP_START);
+
+	return -EINVAL;
+}
+
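+/*
+ * No range check here: callers are expected to have validated
+ * hdr->packet_type with get_pkt_index() before querying the override.
+ */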
+int get_pkt_fenceoverride(struct cvp_hal_session_cmd_pkt *hdr)
+{
+	return cvp_hfi_defs[hdr->packet_type - HFI_CMD_SESSION_CVP_START].force_kernel_fence;
+}
+
+int get_pkt_index_from_type(u32 pkt_type)
+{
+	if ((pkt_type < HFI_CMD_SESSION_CVP_START) ||
+		pkt_type >= (HFI_CMD_SESSION_CVP_START + MAX_PKT_IDX))
+		return -EINVAL;
+
+	if (cvp_hfi_defs[pkt_type - HFI_CMD_SESSION_CVP_START].size)
+		return (pkt_type - HFI_CMD_SESSION_CVP_START);
+
+	return -EINVAL;
+}
+MODULE_DEVICE_TABLE(of, msm_cvp_dt_match);
+
+int cvp_of_fdt_get_ddrtype(void)
+{
+#ifdef FIXED_DDR_TYPE
+	/* of_fdt_get_ddrtype() is usually unavailable during pre-sil */
+	return DDR_TYPE_LPDDR5;
+#else
+	return of_fdt_get_ddrtype();
+#endif
+}
+
+void *cvp_get_drv_data(struct device *dev)
+{
+	struct msm_cvp_platform_data *driver_data;
+	const struct of_device_id *match;
+	uint32_t ddr_type = DDR_TYPE_LPDDR5;
+
+	driver_data = &default_data;
+
+	if (!IS_ENABLED(CONFIG_OF) || !dev->of_node)
+		goto exit;
+
+	match = of_match_node(msm_cvp_dt_match, dev->of_node);
+
+	if (!match)
+		return NULL;
+
+	driver_data = (struct msm_cvp_platform_data *)match->data;
+
+	if (!strcmp(match->compatible, "qcom,waipio-cvp")) {
+		ddr_type = cvp_of_fdt_get_ddrtype();
+		if (ddr_type == -ENOENT) {
+			dprintk(CVP_ERR,
+				"Failed to get ddr type, use LPDDR5\n");
+		}
+
+		if (driver_data->ubwc_config &&
+			(ddr_type == DDR_TYPE_LPDDR4 ||
+			ddr_type == DDR_TYPE_LPDDR4X))
+			driver_data->ubwc_config->highest_bank_bit = 15;
+		dprintk(CVP_CORE, "DDR Type 0x%x hbb 0x%x\n",
+			ddr_type, driver_data->ubwc_config ?
+			driver_data->ubwc_config->highest_bank_bit : -1);
+	}
+exit:
+	return driver_data;
+}

+ 1265 - 0
qcom/opensource/eva-kernel/msm/eva/msm_cvp_res_parse.c

@@ -0,0 +1,1265 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/iommu.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/of_reserved_mem.h>
+#include "msm_cvp_debug.h"
+#include "msm_cvp_resources.h"
+#include "msm_cvp_res_parse.h"
+#include "cvp_core_hfi.h"
+#include "soc/qcom/secure_buffer.h"
+
+enum clock_properties {
+	CLOCK_PROP_HAS_SCALING = 1 << 0,
+	CLOCK_PROP_HAS_MEM_RETENTION    = 1 << 1,
+};
+
+#define PERF_GOV "performance"
+
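+/* Stub: always returns NULL; presumably kept for legacy call sites. */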
+static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
+{
+	return NULL;
+}
+
+static size_t get_u32_array_num_elements(struct device_node *np,
+					char *name)
+{
+	int len;
+	size_t num_elements = 0;
+
+	if (!of_get_property(np, name, &len)) {
+		dprintk(CVP_ERR, "Failed to read %s from device tree\n",
+			name);
+		goto fail_read;
+	}
+
+	num_elements = len / sizeof(u32);
+	if (!num_elements) {
+		dprintk(CVP_ERR, "%s not specified in device tree\n",
+			name);
+		goto fail_read;
+	}
+	return num_elements;
+
+fail_read:
+	return 0;
+}
+
+static inline void msm_cvp_free_allowed_clocks_table(
+		struct msm_cvp_platform_resources *res)
+{
+	res->allowed_clks_tbl = NULL;
+}
+
+static inline void msm_cvp_free_cycles_per_mb_table(
+		struct msm_cvp_platform_resources *res)
+{
+	res->clock_freq_tbl.clk_prof_entries = NULL;
+}
+
+static inline void msm_cvp_free_reg_table(
+			struct msm_cvp_platform_resources *res)
+{
+	res->reg_set.reg_tbl = NULL;
+}
+
+static inline void msm_cvp_free_qdss_addr_table(
+			struct msm_cvp_platform_resources *res)
+{
+	res->qdss_addr_set.addr_tbl = NULL;
+}
+
+static inline void msm_cvp_free_bus_vectors(
+			struct msm_cvp_platform_resources *res)
+{
+	kfree(res->bus_set.bus_tbl);
+	res->bus_set.bus_tbl = NULL;
+	res->bus_set.count = 0;
+}
+
+static inline void msm_cvp_free_regulator_table(
+			struct msm_cvp_platform_resources *res)
+{
+	int c = 0;
+
+	for (c = 0; c < res->regulator_set.count; ++c) {
+		struct regulator_info *rinfo =
+			&res->regulator_set.regulator_tbl[c];
+
+		rinfo->name = NULL;
+	}
+
+	res->regulator_set.regulator_tbl = NULL;
+	res->regulator_set.count = 0;
+}
+
+static inline void msm_cvp_free_clock_table(
+			struct msm_cvp_platform_resources *res)
+{
+	res->clock_set.clock_tbl = NULL;
+	res->clock_set.count = 0;
+}
+
+void msm_cvp_free_platform_resources(
+			struct msm_cvp_platform_resources *res)
+{
+	msm_cvp_free_clock_table(res);
+	msm_cvp_free_regulator_table(res);
+	msm_cvp_free_allowed_clocks_table(res);
+	msm_cvp_free_reg_table(res);
+	msm_cvp_free_qdss_addr_table(res);
+	msm_cvp_free_bus_vectors(res);
+}
+
+static int msm_cvp_load_ipcc_regs(struct msm_cvp_platform_resources *res)
+{
+	int ret = 0;
+	unsigned int reg_config[2];
+	struct platform_device *pdev = res->pdev;
+
+	ret = of_property_read_u32_array(pdev->dev.of_node, "qcom,ipcc-reg",
+				reg_config, 2);
+	if (ret) {
+		dprintk(CVP_ERR, "Failed to read ipcc reg: %d\n", ret);
+		return ret;
+	}
+
+	res->ipcc_reg_base = reg_config[0];
+	res->ipcc_reg_size = reg_config[1];
+
+	dprintk(CVP_CORE,
+		"ipcc reg_base = %x, reg_size = %x\n",
+		res->ipcc_reg_base,
+		res->ipcc_reg_size
+	);
+
+	return ret;
+}
+
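+/*
+ * Each of the *_mappings DT properties read below is a u32 triplet,
+ * consumed in the order <iova, size, physical address>.
+ */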
+static int msm_cvp_load_regspace_mapping(struct msm_cvp_platform_resources *res)
+{
+	int ret = 0;
+	unsigned int ipclite_mapping_config[3] = {0};
+	unsigned int hwmutex_mapping_config[3] = {0};
+	unsigned int aon_mapping_config[3] = {0};
+	unsigned int timer_config[3] = {0};
+	struct platform_device *pdev = res->pdev;
+
+	ret = of_property_read_u32_array(pdev->dev.of_node, "ipclite_mappings",
+		ipclite_mapping_config, 3);
+	if (ret) {
+		dprintk(CVP_ERR, "Failed to read ipclite reg: %d\n", ret);
+		return ret;
+	}
+	res->reg_mappings.ipclite_iova = ipclite_mapping_config[0];
+	res->reg_mappings.ipclite_size = ipclite_mapping_config[1];
+	res->reg_mappings.ipclite_phyaddr = ipclite_mapping_config[2];
+
+	ret = of_property_read_u32_array(pdev->dev.of_node, "hwmutex_mappings",
+		hwmutex_mapping_config, 3);
+	if (ret) {
+		dprintk(CVP_ERR, "Failed to read hwmutex reg: %d\n", ret);
+		return ret;
+	}
+	res->reg_mappings.hwmutex_iova = hwmutex_mapping_config[0];
+	res->reg_mappings.hwmutex_size = hwmutex_mapping_config[1];
+	res->reg_mappings.hwmutex_phyaddr = hwmutex_mapping_config[2];
+
+	ret = of_property_read_u32_array(pdev->dev.of_node, "aon_mappings",
+		aon_mapping_config, 3);
+	if (ret) {
+		dprintk(CVP_ERR, "Failed to read aon reg: %d\n", ret);
+		return ret;
+	}
+	res->reg_mappings.aon_iova = aon_mapping_config[0];
+	res->reg_mappings.aon_size = aon_mapping_config[1];
+	res->reg_mappings.aon_phyaddr = aon_mapping_config[2];
+
+	ret = of_property_read_u32_array(pdev->dev.of_node,
+		"aon_timer_mappings", timer_config, 3);
+	if (ret) {
+		dprintk(CVP_ERR, "Failed to read timer reg: %d\n", ret);
+		return ret;
+	}
+	res->reg_mappings.timer_iova = timer_config[0];
+	res->reg_mappings.timer_size = timer_config[1];
+	res->reg_mappings.timer_phyaddr = timer_config[2];
+
+	dprintk(CVP_CORE,
+	"reg mappings %#x %#x %#x %#x %#x %#X %#x %#x %#x %#x %#x %#x\n",
+	res->reg_mappings.ipclite_iova, res->reg_mappings.ipclite_size,
+	res->reg_mappings.ipclite_phyaddr, res->reg_mappings.hwmutex_iova,
+	res->reg_mappings.hwmutex_size, res->reg_mappings.hwmutex_phyaddr,
+	res->reg_mappings.aon_iova, res->reg_mappings.aon_size,
+	res->reg_mappings.aon_phyaddr,  res->reg_mappings.timer_iova,
+	res->reg_mappings.timer_size, res->reg_mappings.timer_phyaddr);
+
+	return ret;
+}
+
+static int msm_cvp_load_gcc_regs(struct msm_cvp_platform_resources *res)
+{
+	int ret = 0;
+	unsigned int reg_config[2];
+	struct platform_device *pdev = res->pdev;
+
+	ret = of_property_read_u32_array(pdev->dev.of_node, "qcom,gcc-reg",
+				reg_config, 2);
+	if (ret) {
+		dprintk(CVP_WARN, "No gcc reg configured: %d\n", ret);
+		return ret;
+	}
+
+	res->gcc_reg_base = reg_config[0];
+	res->gcc_reg_size = reg_config[1];
+
+	return ret;
+}
+
+
+static int msm_cvp_load_reg_table(struct msm_cvp_platform_resources *res)
+{
+	struct reg_set *reg_set;
+	struct platform_device *pdev = res->pdev;
+	int i;
+	int rc = 0;
+
+	if (!of_find_property(pdev->dev.of_node, "qcom,reg-presets", NULL)) {
+		/*
+		 * qcom,reg-presets is an optional property.  It likely won't be
+		 * present if we don't have any register settings to program
+		 */
+		dprintk(CVP_CORE, "qcom,reg-presets not found\n");
+		return 0;
+	}
+
+	reg_set = &res->reg_set;
+	reg_set->count = get_u32_array_num_elements(pdev->dev.of_node,
+			"qcom,reg-presets");
+	reg_set->count /= sizeof(*reg_set->reg_tbl) / sizeof(u32);
+
+	if (!reg_set->count) {
+		dprintk(CVP_CORE, "no elements in reg set\n");
+		return rc;
+	}
+
+	reg_set->reg_tbl = devm_kzalloc(&pdev->dev, reg_set->count *
+			sizeof(*(reg_set->reg_tbl)), GFP_KERNEL);
+	if (!reg_set->reg_tbl) {
+		dprintk(CVP_ERR, "%s Failed to alloc register table\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	if (of_property_read_u32_array(pdev->dev.of_node, "qcom,reg-presets",
+		(u32 *)reg_set->reg_tbl, reg_set->count * 2)) {
+		dprintk(CVP_ERR, "Failed to read register table\n");
+		msm_cvp_free_reg_table(res);
+		return -EINVAL;
+	}
+	for (i = 0; i < reg_set->count; i++) {
+		dprintk(CVP_CORE,
+			"reg = %x, value = %x\n",
+			reg_set->reg_tbl[i].reg,
+			reg_set->reg_tbl[i].value
+		);
+	}
+	return rc;
+}
+static int msm_cvp_load_qdss_table(struct msm_cvp_platform_resources *res)
+{
+	struct addr_set *qdss_addr_set;
+	struct platform_device *pdev = res->pdev;
+	int i;
+	int rc = 0;
+
+	if (!of_find_property(pdev->dev.of_node, "qcom,qdss-presets", NULL)) {
+		/*
+		 * qcom,qdss-presets is an optional property. It likely won't be
+		 * present if we don't have any register settings to program
+		 */
+		dprintk(CVP_CORE, "qcom,qdss-presets not found\n");
+		return rc;
+	}
+
+	qdss_addr_set = &res->qdss_addr_set;
+	qdss_addr_set->count = get_u32_array_num_elements(pdev->dev.of_node,
+					"qcom,qdss-presets");
+	qdss_addr_set->count /= sizeof(*qdss_addr_set->addr_tbl) / sizeof(u32);
+
+	if (!qdss_addr_set->count) {
+		dprintk(CVP_CORE, "no elements in qdss reg set\n");
+		return rc;
+	}
+
+	qdss_addr_set->addr_tbl = devm_kzalloc(&pdev->dev,
+			qdss_addr_set->count * sizeof(*qdss_addr_set->addr_tbl),
+			GFP_KERNEL);
+	if (!qdss_addr_set->addr_tbl) {
+		dprintk(CVP_ERR, "%s Failed to alloc register table\n",
+			__func__);
+		rc = -ENOMEM;
+		goto err_qdss_addr_tbl;
+	}
+
+	rc = of_property_read_u32_array(pdev->dev.of_node, "qcom,qdss-presets",
+		(u32 *)qdss_addr_set->addr_tbl, qdss_addr_set->count * 2);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to read qdss address table\n");
+		msm_cvp_free_qdss_addr_table(res);
+		rc = -EINVAL;
+		goto err_qdss_addr_tbl;
+	}
+
+	for (i = 0; i < qdss_addr_set->count; i++) {
+		dprintk(CVP_CORE, "qdss addr = %x, value = %x\n",
+				qdss_addr_set->addr_tbl[i].start,
+				qdss_addr_set->addr_tbl[i].size);
+	}
+err_qdss_addr_tbl:
+	return rc;
+}
+
+static int msm_cvp_load_fw_name(struct msm_cvp_platform_resources *res)
+{
+	struct platform_device *pdev = res->pdev;
+
+	return of_property_read_string_index(pdev->dev.of_node,
+				"cvp,firmware-name", 0, &res->fw_name);
+}
+
+static int msm_cvp_load_subcache_info(struct msm_cvp_platform_resources *res)
+{
+	int rc = 0, num_subcaches = 0, c;
+	struct platform_device *pdev = res->pdev;
+	struct subcache_set *subcaches = &res->subcache_set;
+
+	num_subcaches = of_property_count_strings(pdev->dev.of_node,
+		"cache-slice-names");
+	if (num_subcaches <= 0) {
+		dprintk(CVP_CORE, "No subcaches found\n");
+		goto err_load_subcache_table_fail;
+	}
+
+	subcaches->subcache_tbl = devm_kzalloc(&pdev->dev,
+		sizeof(*subcaches->subcache_tbl) * num_subcaches, GFP_KERNEL);
+	if (!subcaches->subcache_tbl) {
+		dprintk(CVP_ERR,
+			"Failed to allocate memory for subcache tbl\n");
+		rc = -ENOMEM;
+		goto err_load_subcache_table_fail;
+	}
+
+	subcaches->count = num_subcaches;
+	dprintk(CVP_CORE, "Found %d subcaches\n", num_subcaches);
+
+	for (c = 0; c < num_subcaches; ++c) {
+		struct subcache_info *vsc = &res->subcache_set.subcache_tbl[c];
+
+		of_property_read_string_index(pdev->dev.of_node,
+			"cache-slice-names", c, &vsc->name);
+	}
+
+	res->sys_cache_present = true;
+
+	return 0;
+
+err_load_subcache_table_fail:
+	res->sys_cache_present = false;
+	subcaches->count = 0;
+	subcaches->subcache_tbl = NULL;
+
+	return rc;
+}
+
+/**
+ * msm_cvp_load_u32_table() - load dtsi table entries
+ * @pdev: A pointer to the platform device.
+ * @of_node:      A pointer to the device node.
+ * @table_name:   A pointer to the dtsi table entry name.
+ * @struct_size:  The size of the structure which is nothing but
+ *                a single entry in the dtsi table.
+ * @table:        A pointer to the table pointer which needs to be
+ *                filled by the dtsi table entries.
+ * @num_elements: Number of elements pointer which needs to be filled
+ *                with the number of elements in the table.
+ *
+ * This is a generic implementation to load single or multiple array
+ * table from dtsi. The array elements should be of size equal to u32.
+ *
+ * Return:        Return '0' for success else appropriate error value.
+ */
+int msm_cvp_load_u32_table(struct platform_device *pdev,
+		struct device_node *of_node, char *table_name, int struct_size,
+		u32 **table, u32 *num_elements)
+{
+	int rc = 0, num_elemts = 0;
+	u32 *ptbl = NULL;
+
+	if (!of_find_property(of_node, table_name, NULL)) {
+		dprintk(CVP_CORE, "%s not found\n", table_name);
+		return 0;
+	}
+
+	num_elemts = get_u32_array_num_elements(of_node, table_name);
+	if (!num_elemts) {
+		dprintk(CVP_ERR, "no elements in %s\n", table_name);
+		return 0;
+	}
+	num_elemts /= struct_size / sizeof(u32);
+
+	ptbl = devm_kzalloc(&pdev->dev, num_elemts * struct_size, GFP_KERNEL);
+	if (!ptbl) {
+		dprintk(CVP_ERR, "Failed to alloc table %s\n", table_name);
+		return -ENOMEM;
+	}
+
+	if (of_property_read_u32_array(of_node, table_name, ptbl,
+			num_elemts * struct_size / sizeof(u32))) {
+		dprintk(CVP_ERR, "Failed to read %s\n", table_name);
+		return -EINVAL;
+	}
+
+	*table = ptbl;
+	if (num_elements)
+		*num_elements = num_elemts;
+
+	return rc;
+}
+EXPORT_SYMBOL(msm_cvp_load_u32_table);
+
+/* Comparator used to sort the allowed clock rates in ascending order */
+static int cmp(const void *a, const void *b)
+{
+	return ((struct allowed_clock_rates_table *)a)->clock_rate -
+		((struct allowed_clock_rates_table *)b)->clock_rate;
+}
+
+static int msm_cvp_load_allowed_clocks_table(
+		struct msm_cvp_platform_resources *res)
+{
+	int rc = 0;
+	struct platform_device *pdev = res->pdev;
+
+	if (!of_find_property(pdev->dev.of_node,
+			"qcom,allowed-clock-rates", NULL)) {
+		dprintk(CVP_CORE, "qcom,allowed-clock-rates not found\n");
+		return 0;
+	}
+
+	rc = msm_cvp_load_u32_table(pdev, pdev->dev.of_node,
+				"qcom,allowed-clock-rates",
+				sizeof(*res->allowed_clks_tbl),
+				(u32 **)&res->allowed_clks_tbl,
+				&res->allowed_clks_tbl_size);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"%s: failed to read allowed clocks table\n", __func__);
+		return rc;
+	}
+
+	sort(res->allowed_clks_tbl, res->allowed_clks_tbl_size,
+		 sizeof(*res->allowed_clks_tbl), cmp, NULL);
+
+	return 0;
+}
+
+static int msm_cvp_populate_mem_cdsp(struct device *dev,
+		struct msm_cvp_platform_resources *res)
+{
+	struct device_node *mem_node;
+	int ret;
+
+	mem_node = of_parse_phandle(dev->of_node, "memory-region", 0);
+	if (mem_node) {
+		ret = of_reserved_mem_device_init_by_idx(dev,
+				dev->of_node, 0);
+		of_node_put(mem_node);
+		if (ret) {
+			dprintk(CVP_ERR,
+				"Failed to initialize reserved mem, ret %d\n",
+				ret);
+			return ret;
+		}
+	}
+	res->mem_cdsp.dev = dev;
+
+	return 0;
+}
+
+static int msm_cvp_populate_bus(struct device *dev,
+		struct msm_cvp_platform_resources *res)
+{
+	struct bus_set *buses = &res->bus_set;
+	const char *temp_name = NULL;
+	struct bus_info *bus = NULL, *temp_table;
+	u32 range[2];
+	int rc = 0;
+
+	temp_table = krealloc(buses->bus_tbl, sizeof(*temp_table) *
+			(buses->count + 1), GFP_KERNEL);
+	if (!temp_table) {
+		dprintk(CVP_ERR, "%s: Failed to allocate memory", __func__);
+		rc = -ENOMEM;
+		goto err_bus;
+	}
+
+	buses->bus_tbl = temp_table;
+	bus = &buses->bus_tbl[buses->count];
+
+	memset(bus, 0x0, sizeof(struct bus_info));
+
+	rc = of_property_read_string(dev->of_node, "label", &temp_name);
+	if (rc) {
+		dprintk(CVP_ERR, "'label' not found in node\n");
+		goto err_bus;
+	}
+	/* need a non-const version of name, hence copying it over */
+	bus->name = devm_kstrdup(dev, temp_name, GFP_KERNEL);
+	if (!bus->name) {
+		rc = -ENOMEM;
+		goto err_bus;
+	}
+
+	rc = of_property_read_u32(dev->of_node, "qcom,bus-master",
+			&bus->master);
+	if (rc) {
+		dprintk(CVP_ERR, "'qcom,bus-master' not found in node\n");
+		goto err_bus;
+	}
+
+	rc = of_property_read_u32(dev->of_node, "qcom,bus-slave", &bus->slave);
+	if (rc) {
+		dprintk(CVP_ERR, "'qcom,bus-slave' not found in node\n");
+		goto err_bus;
+	}
+
+	rc = of_property_read_string(dev->of_node, "qcom,bus-governor",
+			&bus->governor);
+	if (rc) {
+		rc = 0;
+		dprintk(CVP_CORE,
+				"'qcom,bus-governor' not found, default to performance governor\n");
+		bus->governor = PERF_GOV;
+	}
+
+	if (!strcmp(bus->governor, PERF_GOV))
+		bus->is_prfm_gov_used = true;
+
+	rc = of_property_read_u32_array(dev->of_node, "qcom,bus-range-kbps",
+			range, ARRAY_SIZE(range));
+	if (rc) {
+		rc = 0;
+		dprintk(CVP_CORE,
+				"'qcom,bus-range-kbps' not found, defaulting to <0 INT_MAX>\n");
+		range[0] = 0;
+		range[1] = INT_MAX;
+	}
+
+	bus->range[0] = range[0]; /* min */
+	bus->range[1] = range[1]; /* max */
+
+	buses->count++;
+	bus->dev = dev;
+	dprintk(CVP_CORE, "Found bus %s [%d->%d] with governor %s\n",
+			bus->name, bus->master, bus->slave, bus->governor);
+err_bus:
+	return rc;
+}
+
+static int msm_cvp_load_regulator_table(
+		struct msm_cvp_platform_resources *res)
+{
+	int rc = 0;
+	struct platform_device *pdev = res->pdev;
+	struct regulator_set *regulators = &res->regulator_set;
+	struct device_node *domains_parent_node = NULL;
+	struct property *domains_property = NULL;
+	int reg_count = 0;
+
+	regulators->count = 0;
+	regulators->regulator_tbl = NULL;
+
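+	/*
+	 * Two passes over the node's properties: first count the entries
+	 * ending in "-supply" to size the table, then populate one
+	 * regulator_info per match (e.g. "cvp-supply" -> regulator "cvp").
+	 */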
+	domains_parent_node = pdev->dev.of_node;
+	for_each_property_of_node(domains_parent_node, domains_property) {
+		const char *search_string = "-supply";
+		char *supply;
+		bool matched = false;
+
+		/* check if current property is possibly a regulator */
+		supply = strnstr(domains_property->name, search_string,
+				strlen(domains_property->name) + 1);
+		matched = supply && (*(supply + strlen(search_string)) == '\0');
+		if (!matched)
+			continue;
+
+		reg_count++;
+	}
+
+	regulators->regulator_tbl = devm_kzalloc(&pdev->dev,
+			sizeof(*regulators->regulator_tbl) *
+			reg_count, GFP_KERNEL);
+
+	if (!regulators->regulator_tbl) {
+		rc = -ENOMEM;
+		dprintk(CVP_ERR,
+			"Failed to alloc memory for regulator table\n");
+		goto err_reg_tbl_alloc;
+	}
+
+	for_each_property_of_node(domains_parent_node, domains_property) {
+		const char *search_string = "-supply";
+		char *supply;
+		bool matched = false;
+		struct device_node *regulator_node = NULL;
+		struct regulator_info *rinfo = NULL;
+
+		/* check if current property is possibly a regulator */
+		supply = strnstr(domains_property->name, search_string,
+				strlen(domains_property->name) + 1);
+		matched = supply && (supply[strlen(search_string)] == '\0');
+		if (!matched)
+			continue;
+
+		/* make sure prop isn't being misused */
+		regulator_node = of_parse_phandle(domains_parent_node,
+				domains_property->name, 0);
+		if (IS_ERR(regulator_node)) {
+			dprintk(CVP_WARN, "%s is not a phandle\n",
+					domains_property->name);
+			continue;
+		}
+		regulators->count++;
+
+		/* populate regulator info */
+		rinfo = &regulators->regulator_tbl[regulators->count - 1];
+		rinfo->name = devm_kzalloc(&pdev->dev,
+			(supply - domains_property->name) + 1, GFP_KERNEL);
+		if (!rinfo->name) {
+			rc = -ENOMEM;
+			dprintk(CVP_ERR,
+					"Failed to alloc memory for regulator name\n");
+			goto err_reg_name_alloc;
+		}
+		strlcpy(rinfo->name, domains_property->name,
+			(supply - domains_property->name) + 1);
+
+		rinfo->has_hw_power_collapse = of_property_read_bool(
+			regulator_node, "qcom,support-hw-trigger");
+
+		dprintk(CVP_CORE, "Found regulator %s: h/w collapse = %s\n",
+				rinfo->name,
+				rinfo->has_hw_power_collapse ? "yes" : "no");
+	}
+
+	if (!regulators->count)
+		dprintk(CVP_CORE, "No regulators found");
+
+	return 0;
+
+err_reg_name_alloc:
+err_reg_tbl_alloc:
+	msm_cvp_free_regulator_table(res);
+	return rc;
+}
+
+static int msm_cvp_load_clock_table(
+		struct msm_cvp_platform_resources *res)
+{
+	int rc = 0, num_clocks = 0, c = 0;
+	struct platform_device *pdev = res->pdev;
+	int *clock_ids = NULL;
+	int *clock_props = NULL;
+	struct clock_set *clocks = &res->clock_set;
+
+	num_clocks = of_property_count_strings(pdev->dev.of_node,
+				"clock-names");
+	if (num_clocks <= 0) {
+		dprintk(CVP_CORE, "No clocks found\n");
+		clocks->count = 0;
+		rc = 0;
+		goto err_load_clk_table_fail;
+	}
+
+	clock_ids = devm_kzalloc(&pdev->dev, num_clocks *
+		sizeof(*clock_ids), GFP_KERNEL);
+	if (!clock_ids) {
+		dprintk(CVP_ERR, "No memory to read clock ids\n");
+		rc = -ENOMEM;
+		goto err_load_clk_table_fail;
+	}
+
+	rc = of_property_read_u32_array(pdev->dev.of_node,
+		"clock-ids", clock_ids,
+		num_clocks);
+	if (rc) {
+		dprintk(CVP_CORE, "Failed to read clock ids: %d\n", rc);
+		msm_cvp_mmrm_enabled = false;
+		dprintk(CVP_CORE, "flag msm_cvp_mmrm_enabled disabled\n");
+	}
+
+	clock_props = devm_kzalloc(&pdev->dev, num_clocks *
+			sizeof(*clock_props), GFP_KERNEL);
+	if (!clock_props) {
+		dprintk(CVP_ERR, "No memory to read clock properties\n");
+		rc = -ENOMEM;
+		goto err_load_clk_table_fail;
+	}
+
+	rc = of_property_read_u32_array(pdev->dev.of_node,
+				"qcom,clock-configs", clock_props,
+				num_clocks);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to read clock properties: %d\n", rc);
+		goto err_load_clk_prop_fail;
+	}
+
+	clocks->clock_tbl = devm_kzalloc(&pdev->dev, sizeof(*clocks->clock_tbl)
+			* num_clocks, GFP_KERNEL);
+	if (!clocks->clock_tbl) {
+		dprintk(CVP_ERR, "Failed to allocate memory for clock tbl\n");
+		rc = -ENOMEM;
+		goto err_load_clk_prop_fail;
+	}
+
+	clocks->count = num_clocks;
+	dprintk(CVP_CORE, "Found %d clocks\n", num_clocks);
+
+	for (c = 0; c < num_clocks; ++c) {
+		struct clock_info *vc = &res->clock_set.clock_tbl[c];
+
+		of_property_read_string_index(pdev->dev.of_node,
+				"clock-names", c, &vc->name);
+
+		if (msm_cvp_mmrm_enabled)
+			vc->clk_id = clock_ids[c];
+
+		if (clock_props[c] & CLOCK_PROP_HAS_SCALING) {
+			vc->has_scaling = true;
+		} else {
+			vc->count = 0;
+			vc->has_scaling = false;
+		}
+
+		if (clock_props[c] & CLOCK_PROP_HAS_MEM_RETENTION)
+			vc->has_mem_retention = true;
+		else
+			vc->has_mem_retention = false;
+
+		dprintk(CVP_CORE, "Found clock %s id %d: scale-able = %s\n",
+			vc->name, vc->clk_id, vc->count ? "yes" : "no");
+	}
+
+	return 0;
+
+err_load_clk_prop_fail:
+err_load_clk_table_fail:
+	return rc;
+}
+
+#define MAX_CLK_RESETS 5
+
+static int msm_cvp_load_reset_table(
+		struct msm_cvp_platform_resources *res)
+{
+	struct platform_device *pdev = res->pdev;
+	struct reset_set *rst = &res->reset_set;
+	int num_clocks = 0, c = 0, ret = 0;
+	int pwr_stats[MAX_CLK_RESETS];
+
+	num_clocks = of_property_count_strings(pdev->dev.of_node,
+				"reset-names");
+	if (num_clocks <= 0 || num_clocks > MAX_CLK_RESETS) {
+		dprintk(CVP_ERR, "Num reset clocks out of range\n");
+		rst->count = 0;
+		return 0;
+	}
+
+	rst->reset_tbl = devm_kcalloc(&pdev->dev, num_clocks,
+			sizeof(*rst->reset_tbl), GFP_KERNEL);
+	if (!rst->reset_tbl)
+		return -ENOMEM;
+
+	rst->count = num_clocks;
+	dprintk(CVP_CORE, "Found %d reset clocks\n", num_clocks);
+	ret = of_property_read_u32_array(pdev->dev.of_node,
+				"reset-power-status", pwr_stats,
+				num_clocks);
+	if (ret) {
+		dprintk(CVP_ERR, "Failed to read reset pwr state: %d\n", ret);
+		devm_kfree(&pdev->dev, rst->reset_tbl);
+		return ret;
+	}
+
+	for (c = 0; c < num_clocks; ++c) {
+		struct reset_info *rc = &res->reset_set.reset_tbl[c];
+
+		of_property_read_string_index(pdev->dev.of_node,
+				"reset-names", c, &rc->name);
+		rc->required_stage = pwr_stats[c];
+	}
+
+	return 0;
+}
+
+static int find_key_value(struct msm_cvp_platform_data *platform_data,
+	const char *key)
+{
+	int i = 0;
+	struct msm_cvp_common_data *common_data = platform_data->common_data;
+	int size = platform_data->common_data_length;
+
+	for (i = 0; i < size; i++) {
+		if (!strcmp(common_data[i].key, key))
+			return common_data[i].value;
+	}
+	return 0;
+}
+
+int cvp_read_platform_resources_from_drv_data(
+		struct msm_cvp_core *core)
+{
+	struct msm_cvp_platform_data *platform_data;
+	struct msm_cvp_platform_resources *res;
+	int rc = 0, i;
+
+	if (!core || !core->platform_data) {
+		dprintk(CVP_ERR, "%s Invalid data\n", __func__);
+		return -ENOENT;
+	}
+	platform_data = core->platform_data;
+	res = &core->resources;
+
+	res->sku_version = platform_data->sku_version;
+
+	res->dsp_enabled = find_key_value(platform_data,
+			"qcom,dsp-enabled");
+
+	res->max_ssr_allowed = find_key_value(platform_data,
+			"qcom,max-ssr-allowed");
+
+	res->sw_power_collapsible = find_key_value(platform_data,
+			"qcom,sw-power-collapse");
+
+	res->debug_timeout = find_key_value(platform_data,
+			"qcom,debug-timeout");
+
+	res->pm_qos.latency_us = find_key_value(platform_data,
+			"qcom,pm-qos-latency-us");
+	res->pm_qos.silver_count = 0;
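+	/* count the leading CPUs that sit in cluster 0 (the silver cores) */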
+	for (i = 0; i < MAX_SILVER_CORE_NUM; i++) {
+		if (topology_cluster_id(i) == 0)
+			res->pm_qos.silver_count++;
+		else
+			break;
+	}
+	for (i = 0; i < res->pm_qos.silver_count; i++)
+		res->pm_qos.silver_cores[i] = i;
+	res->pm_qos.off_vote_cnt = 0;
+	spin_lock_init(&res->pm_qos.lock);
+
+	res->max_secure_inst_count = find_key_value(platform_data,
+			"qcom,max-secure-instances");
+
+	res->thermal_mitigable = find_key_value(platform_data,
+			"qcom,enable-thermal-mitigation");
+	res->msm_cvp_pwr_collapse_delay = find_key_value(platform_data,
+			"qcom,power-collapse-delay");
+	res->msm_cvp_hw_rsp_timeout = find_key_value(platform_data,
+			"qcom,hw-resp-timeout");
+	res->msm_cvp_dsp_rsp_timeout = find_key_value(platform_data,
+			"qcom,dsp-resp-timeout");
+	res->non_fatal_pagefaults = find_key_value(platform_data,
+			"qcom,domain-attr-non-fatal-faults");
+
+	//Address offsets for QOS setting.
+	//There are diff between lanai and palawan for QOS register addresses
+	res->qos_noc_rge_niu_offset = find_key_value(platform_data,
+			"qcom,qos_noc_rge_niu_offset");
+	res->qos_noc_gce_vadl_tof_niu_offset = find_key_value(platform_data,
+			"qcom,qos_noc_gce_vadl_tof_niu_offset");
+	res->qos_noc_cdm_niu_offset = find_key_value(platform_data,
+			"qcom,qos_noc_cdm_niu_offset");
+	res->noc_core_err_offset = find_key_value(platform_data,
+			"qcom,noc_core_err_offset");
+	res->noc_main_sidebandmanager_offset = find_key_value(platform_data,
+			"qcom,noc_main_sidebandmanager_offset");
+
+	res->vpu_ver = platform_data->vpu_ver;
+	res->ubwc_config = platform_data->ubwc_config;
+	res->fatal_ssr = false;
+	return rc;
+
+}
+
+int cvp_read_platform_resources_from_dt(
+		struct msm_cvp_platform_resources *res)
+{
+	struct platform_device *pdev = res->pdev;
+	struct resource *kres = NULL;
+	int rc = 0;
+	uint32_t firmware_base = 0;
+
+	if (!pdev->dev.of_node) {
+		dprintk(CVP_ERR, "DT node not found\n");
+		return -ENOENT;
+	}
+
+	INIT_LIST_HEAD(&res->context_banks);
+
+	res->firmware_base = (phys_addr_t)firmware_base;
+
+	kres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	res->register_base = kres ? kres->start : -1;
+	res->register_size = kres ? (kres->end + 1 - kres->start) : -1;
+
+	res->irq = platform_get_irq(pdev, 0);
+
+	dprintk(CVP_CORE, "%s: res->irq:%d \n",
+		__func__, res->irq);
+
+	//Parsing for WD interrupt
+	res->irq_wd = platform_get_irq(pdev, 1);
+
+	dprintk(CVP_CORE, "%s: res->irq_wd:%d \n",
+		__func__, res->irq_wd);
+
+	rc = msm_cvp_load_fw_name(res);
+	dprintk(CVP_CORE, "Firmware filename: %s\n", res->fw_name);
+	if (rc)
+		dprintk(CVP_WARN, "Failed to load fw name info: %d\n", rc);
+
+	rc = msm_cvp_load_subcache_info(res);
+	if (rc)
+		dprintk(CVP_WARN, "Failed to load subcache info: %d\n", rc);
+
+	rc = msm_cvp_load_qdss_table(res);
+	if (rc)
+		dprintk(CVP_WARN, "Failed to load qdss reg table: %d\n", rc);
+
+	rc = msm_cvp_load_reg_table(res);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to load reg table: %d\n", rc);
+		goto err_load_reg_table;
+	}
+
+	rc = msm_cvp_load_ipcc_regs(res);
+	if (rc)
+		dprintk(CVP_ERR, "Failed to load IPCC regs: %d\n", rc);
+
+	rc = msm_cvp_load_regspace_mapping(res);
+	if (rc)
+		dprintk(CVP_ERR, "Failed to load reg space mapping: %d\n", rc);
+
+	rc = msm_cvp_load_gcc_regs(res);
+
+	rc = msm_cvp_load_regulator_table(res);
+	if (rc) {
+		dprintk(CVP_ERR, "Failed to load list of regulators %d\n", rc);
+		goto err_load_regulator_table;
+	}
+
+	rc = msm_cvp_load_clock_table(res);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"Failed to load clock table: %d\n", rc);
+		goto err_load_clock_table;
+	}
+
+	rc = msm_cvp_load_allowed_clocks_table(res);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"Failed to load allowed clocks table: %d\n", rc);
+		goto err_load_allowed_clocks_table;
+	}
+
+	rc = msm_cvp_load_reset_table(res);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"Failed to load reset table: %d\n", rc);
+		goto err_load_reset_table;
+	}
+
+	res->use_non_secure_pil = of_property_read_bool(pdev->dev.of_node,
+			"qcom,use-non-secure-pil");
+
+	if (res->use_non_secure_pil || !is_iommu_present(res)) {
+		of_property_read_u32(pdev->dev.of_node, "qcom,fw-bias",
+				&firmware_base);
+		res->firmware_base = (phys_addr_t)firmware_base;
+		dprintk(CVP_CORE,
+				"Using fw-bias : %pa", &res->firmware_base);
+	}
+
+	return rc;
+
+err_load_reset_table:
+	msm_cvp_free_allowed_clocks_table(res);
+err_load_allowed_clocks_table:
+	msm_cvp_free_clock_table(res);
+err_load_clock_table:
+	msm_cvp_free_regulator_table(res);
+err_load_regulator_table:
+	msm_cvp_free_reg_table(res);
+err_load_reg_table:
+	return rc;
+}
+
+static int msm_cvp_setup_context_bank(struct msm_cvp_platform_resources *res,
+		struct context_bank_info *cb, struct device *dev)
+{
+	int rc = 0;
+	struct bus_type *bus;
+
+	if (!dev || !cb || !res) {
+		dprintk(CVP_ERR,
+			"%s: Invalid Input params\n", __func__);
+		return -EINVAL;
+	}
+	cb->dev = dev;
+
+	bus = cb->dev->bus;
+	if (IS_ERR_OR_NULL(bus)) {
+		dprintk(CVP_ERR, "%s - failed to get bus type\n", __func__);
+		rc = PTR_ERR(bus) ?: -ENODEV;
+		goto remove_cb;
+	}
+
+	/*
+	 * configure device segment size and segment boundary to ensure
+	 * iommu mapping returns one mapping (which is required for partial
+	 * cache operations)
+	 */
+	if (!dev->dma_parms)
+		dev->dma_parms =
+			devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
+	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+	dma_set_seg_boundary(dev, DMA_BIT_MASK(64));
+
+	dprintk(CVP_CORE, "Attached %s and created mapping\n", dev_name(dev));
+	dprintk(CVP_CORE,
+		"Context bank name:%s, buffer_type: %#x, is_secure: %d, address range start: %#x, size: %#x, dev: %pK",
+		cb->name, cb->buffer_type, cb->is_secure, cb->addr_range.start,
+		cb->addr_range.size, cb->dev);
+
+	return rc;
+
+remove_cb:
+	return rc;
+}
+
+int msm_cvp_smmu_fault_handler(struct iommu_domain *domain,
+		struct device *dev, unsigned long iova, int flags, void *token)
+{
+	struct msm_cvp_core *core = token;
+	struct iris_hfi_device *hdev;
+	struct msm_cvp_inst *inst;
+	bool log = false;
+
+	if (!domain || !core) {
+		dprintk(CVP_ERR, "%s - invalid param %pK %pK\n",
+			__func__, domain, core);
+		return -EINVAL;
+	}
+
+	dprintk(CVP_ERR, "%s - faulting address: %lx fault cnt %d\n",
+			__func__, iova, core->smmu_fault_count);
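+	/* only the first fault dumps full state; later faults just count up */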
+	if (core->smmu_fault_count > 0) {
+		core->smmu_fault_count++;
+		return -ENOSYS;
+	}
+	mutex_lock(&core->lock);
+	core->smmu_fault_count++;
+	if (!core->last_fault_addr)
+		core->last_fault_addr = iova;
+
+	log = (core->log.snapshot_index > 0) ? false : true;
+	list_for_each_entry(inst, &core->instances, list) {
+		cvp_print_inst(CVP_ERR, inst);
+		msm_cvp_print_inst_bufs(inst, log);
+	}
+	hdev = core->dev_ops->hfi_device_data;
+	if (hdev) {
+		hdev->error = CVP_ERR_NOC_ERROR;
+		call_hfi_op(core->dev_ops, debug_hook, hdev);
+	}
+	mutex_unlock(&core->lock);
+	/*
+	 * Return -ENOSYS so the smmu driver assumes no page fault handler is
+	 * installed and prints its default debug information (FAR, SID, etc.).
+	 * Returning 0 would suppress that output.
+	 */
+	return -ENOSYS;
+}
+
+static int msm_cvp_populate_context_bank(struct device *dev,
+		struct msm_cvp_core *core)
+{
+	int rc = 0;
+	struct context_bank_info *cb = NULL;
+	struct device_node *np = NULL;
+
+	if (!dev || !core) {
+		dprintk(CVP_ERR, "%s - invalid inputs\n", __func__);
+		return -EINVAL;
+	}
+
+	np = dev->of_node;
+	cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL);
+	if (!cb) {
+		dprintk(CVP_ERR, "%s - Failed to allocate cb\n", __func__);
+		return -ENOMEM;
+	}
+
+	rc = of_property_read_string(np, "label", &cb->name);
+	if (rc) {
+		dprintk(CVP_CORE,
+			"Failed to read cb label from device tree\n");
+		rc = 0;
+	}
+
+	INIT_LIST_HEAD(&cb->list);
+	list_add_tail(&cb->list, &core->resources.context_banks);
+
+	dprintk(CVP_CORE, "%s: context bank has name %s\n", __func__, cb->name);
+	if (!strcmp(cb->name, "cvp_camera")) {
+		cb->is_secure = true;
+		rc = msm_cvp_setup_context_bank(&core->resources, cb, dev);
+		if (rc) {
+			dprintk(CVP_ERR, "Cannot setup context bank %s %d\n",
+					cb->name, rc);
+			goto err_setup_cb;
+		}
+
+		return 0;
+	}
+
+	rc = of_property_read_u32_array(np, "qcom,iommu-dma-addr-pool",
+			(u32 *)&cb->addr_range, 2);
+	if (rc) {
+		dprintk(CVP_CORE,
+			"Could not read addr pool for context bank : %s %d\n",
+			cb->name, rc);
+	}
+
+	cb->is_secure = of_property_read_bool(np, "qcom,iommu-vmid");
+	dprintk(CVP_CORE, "context bank %s : secure = %d\n",
+			cb->name, cb->is_secure);
+
+	/* setup buffer type for each sub device*/
+	rc = of_property_read_u32(np, "buffer-types", &cb->buffer_type);
+	if (rc) {
+		dprintk(CVP_ERR, "failed to load buffer_type info %d\n", rc);
+		rc = -ENOENT;
+		goto err_setup_cb;
+	}
+	dprintk(CVP_CORE,
+		"context bank %s address start = %x address size = %x buffer_type = %x\n",
+		cb->name, cb->addr_range.start,
+		cb->addr_range.size, cb->buffer_type);
+
+	cb->domain = iommu_get_domain_for_dev(dev);
+	if (IS_ERR_OR_NULL(cb->domain)) {
+		dprintk(CVP_ERR, "Create domain failed\n");
+		rc = -ENODEV;
+		goto err_setup_cb;
+	}
+
+	rc = msm_cvp_setup_context_bank(&core->resources, cb, dev);
+	if (rc) {
+		dprintk(CVP_ERR, "Cannot setup context bank %d\n", rc);
+		goto err_setup_cb;
+	}
+
+	iommu_set_fault_handler(cb->domain,
+		msm_cvp_smmu_fault_handler, (void *)core);
+
+	return 0;
+
+err_setup_cb:
+	list_del(&cb->list);
+	return rc;
+}
+
+int cvp_read_context_bank_resources_from_dt(struct platform_device *pdev)
+{
+	struct msm_cvp_core *core;
+	int rc = 0;
+
+	if (!pdev) {
+		dprintk(CVP_ERR, "Invalid platform device\n");
+		return -EINVAL;
+	} else if (!pdev->dev.parent) {
+		dprintk(CVP_ERR, "Failed to find a parent for %s\n",
+				dev_name(&pdev->dev));
+		return -ENODEV;
+	}
+
+	core = dev_get_drvdata(pdev->dev.parent);
+	if (!core) {
+		dprintk(CVP_ERR, "Failed to find cookie in parent device %s",
+				dev_name(pdev->dev.parent));
+		return -EINVAL;
+	}
+
+	rc = msm_cvp_populate_context_bank(&pdev->dev, core);
+	if (rc)
+		dprintk(CVP_ERR, "Failed to probe context bank\n");
+	else
+		dprintk(CVP_CORE, "Successfully probed context bank\n");
+
+	return rc;
+}
+
+int cvp_read_bus_resources_from_dt(struct platform_device *pdev)
+{
+	struct msm_cvp_core *core;
+
+	if (!pdev) {
+		dprintk(CVP_ERR, "Invalid platform device\n");
+		return -EINVAL;
+	} else if (!pdev->dev.parent) {
+		dprintk(CVP_ERR, "Failed to find a parent for %s\n",
+				dev_name(&pdev->dev));
+		return -ENODEV;
+	}
+
+	core = dev_get_drvdata(pdev->dev.parent);
+	if (!core) {
+		dprintk(CVP_ERR, "Failed to find cookie in parent device %s",
+				dev_name(pdev->dev.parent));
+		return -EINVAL;
+	}
+
+	return msm_cvp_populate_bus(&pdev->dev, &core->resources);
+}
+
+int cvp_read_mem_cdsp_resources_from_dt(struct platform_device *pdev)
+{
+	struct msm_cvp_core *core;
+
+	if (!pdev) {
+		dprintk(CVP_ERR, "%s: invalid platform device\n", __func__);
+		return -EINVAL;
+	} else if (!pdev->dev.parent) {
+		dprintk(CVP_ERR, "Failed to find a parent for %s\n",
+				dev_name(&pdev->dev));
+		return -ENODEV;
+	}
+
+	core = dev_get_drvdata(pdev->dev.parent);
+	if (!core) {
+		dprintk(CVP_ERR, "Failed to find cookie in parent device %s",
+				dev_name(pdev->dev.parent));
+		return -EINVAL;
+	}
+
+	return msm_cvp_populate_mem_cdsp(&pdev->dev, &core->resources);
+}

+ 30 - 0
qcom/opensource/eva-kernel/msm/eva/msm_cvp_res_parse.h

@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __MSM_CVP_RES_PARSE_H__
+#define __MSM_CVP_RES_PARSE_H__
+#include <linux/of.h>
+#include "msm_cvp_resources.h"
+#include "msm_cvp_common.h"
+void msm_cvp_free_platform_resources(
+		struct msm_cvp_platform_resources *res);
+
+int read_hfi_type(struct platform_device *pdev);
+
+int cvp_read_platform_resources_from_drv_data(
+		struct msm_cvp_core *core);
+int cvp_read_platform_resources_from_dt(
+		struct msm_cvp_platform_resources *res);
+
+int cvp_read_context_bank_resources_from_dt(struct platform_device *pdev);
+
+int cvp_read_bus_resources_from_dt(struct platform_device *pdev);
+int cvp_read_mem_cdsp_resources_from_dt(struct platform_device *pdev);
+
+int msm_cvp_load_u32_table(struct platform_device *pdev,
+		struct device_node *of_node, char *table_name, int struct_size,
+		u32 **table, u32 *num_elements);
+
+#endif

+ 232 - 0
qcom/opensource/eva-kernel/msm/eva/msm_cvp_resources.h

@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __MSM_CVP_RESOURCES_H__
+#define __MSM_CVP_RESOURCES_H__
+
+#include <linux/devfreq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_qos.h>
+#include "msm_cvp_core.h"
+#include <linux/soc/qcom/llcc-qcom.h>
+
+struct reg_value_pair {
+	u32 reg;
+	u32 value;
+};
+
+struct reg_set {
+	struct reg_value_pair *reg_tbl;
+	int count;
+};
+
+struct addr_range {
+	u32 start;
+	u32 size;
+};
+
+struct addr_set {
+	struct addr_range *addr_tbl;
+	int count;
+};
+
+struct context_bank_info {
+	struct list_head list;
+	const char *name;
+	u32 buffer_type;
+	bool is_secure;
+	struct addr_range addr_range;
+	struct device *dev;
+	struct iommu_domain *domain;
+};
+
+struct regulator_info {
+	struct regulator *regulator;
+	bool has_hw_power_collapse;
+	char *name;
+};
+
+struct regulator_set {
+	struct regulator_info *regulator_tbl;
+	u32 count;
+};
+
+struct clock_info {
+	const char *name;
+	u32 clk_id;
+	struct clk *clk;
+	u32 count;
+	bool has_scaling;
+	bool has_mem_retention;
+};
+
+struct clock_set {
+	struct clock_info *clock_tbl;
+	u32 count;
+};
+
+struct bus_info {
+	char *name;
+	int master;
+	int slave;
+	unsigned int range[2];
+	const char *governor;
+	struct device *dev;
+	struct devfreq_dev_profile devfreq_prof;
+	struct devfreq *devfreq;
+	struct icc_path *client;
+	bool is_prfm_gov_used;
+};
+
+struct bus_set {
+	struct bus_info *bus_tbl;
+	u32 count;
+};
+
+enum action_stage {
+	CVP_ON_INIT,
+	CVP_ON_USE,
+	CVP_ON_INVALID,
+};
+enum reset_clk_state {
+	RESET_INIT,
+	RESET_ACQUIRED,
+	RESET_RELEASED,
+};
+
+struct reset_info {
+	struct reset_control *rst;
+	enum action_stage required_stage;
+	enum reset_clk_state state;
+	const char *name;
+};
+
+struct reset_set {
+	struct reset_info *reset_tbl;
+	u32 count;
+};
+
+struct allowed_clock_rates_table {
+	u32 clock_rate;
+};
+
+struct clock_profile_entry {
+	u32 codec_mask;
+	u32 vpp_cycles;
+	u32 vsp_cycles;
+	u32 low_power_cycles;
+};
+
+struct clock_freq_table {
+	struct clock_profile_entry *clk_prof_entries;
+	u32 count;
+};
+
+struct subcache_info {
+	const char *name;
+	bool isactive;
+	bool isset;
+	struct llcc_slice_desc *subcache;
+};
+
+struct subcache_set {
+	struct subcache_info *subcache_tbl;
+	u32 count;
+};
+
+struct msm_cvp_mem_cdsp {
+	struct device *dev;
+};
+
+#define MAX_SILVER_CORE_NUM 8
+#define HFI_SESSION_FD 4
+#define HFI_SESSION_DMM 2
+
+struct cvp_pm_qos {
+	u32 silver_count;
+	u32 latency_us;
+	u32 off_vote_cnt;
+	spinlock_t lock;
+	int silver_cores[MAX_SILVER_CORE_NUM];
+	struct dev_pm_qos_request *pm_qos_hdls;
+};
+
+struct cvp_fw_reg_mappings {
+	phys_addr_t ipclite_iova;
+	phys_addr_t ipclite_phyaddr;
+	uint32_t ipclite_size;
+	phys_addr_t hwmutex_iova;
+	phys_addr_t hwmutex_phyaddr;
+	uint32_t hwmutex_size;
+	phys_addr_t aon_iova;
+	phys_addr_t aon_phyaddr;
+	uint32_t aon_size;
+	phys_addr_t timer_iova;
+	phys_addr_t timer_phyaddr;
+	uint32_t timer_size;
+};
+
+struct msm_cvp_platform_resources {
+	phys_addr_t firmware_base;
+	phys_addr_t register_base;
+	phys_addr_t ipcc_reg_base;
+	phys_addr_t gcc_reg_base;
+	uint32_t register_size;
+	uint32_t ipcc_reg_size;
+	uint32_t gcc_reg_size;
+	struct cvp_fw_reg_mappings reg_mappings;
+	uint32_t irq;
+	uint32_t irq_wd;
+	uint32_t sku_version;
+	struct allowed_clock_rates_table *allowed_clks_tbl;
+	u32 allowed_clks_tbl_size;
+	struct clock_freq_table clock_freq_tbl;
+	bool sys_cache_present;
+	bool sys_cache_res_set;
+	struct subcache_set subcache_set;
+	struct reg_set reg_set;
+	struct addr_set qdss_addr_set;
+	uint32_t max_ssr_allowed;
+	struct platform_device *pdev;
+	struct regulator_set regulator_set;
+	struct clock_set clock_set;
+	struct bus_set bus_set;
+	struct reset_set reset_set;
+	bool use_non_secure_pil;
+	bool sw_power_collapsible;
+	bool dsp_enabled;
+	struct list_head context_banks;
+	bool thermal_mitigable;
+	const char *fw_name;
+	const char *hfi_version;
+	bool debug_timeout;
+	struct cvp_pm_qos pm_qos;
+	uint32_t max_inst_count;
+	uint32_t max_secure_inst_count;
+	int msm_cvp_hw_rsp_timeout;
+	int msm_cvp_dsp_rsp_timeout;
+	uint32_t msm_cvp_pwr_collapse_delay;
+	bool non_fatal_pagefaults;
+	bool fatal_ssr;
+	struct msm_cvp_mem_cdsp mem_cdsp;
+	uint32_t vpu_ver;
+	uint32_t fw_cycles;
+	struct msm_cvp_ubwc_config_data *ubwc_config;
+	uint32_t qos_noc_rge_niu_offset;
+	uint32_t qos_noc_gce_vadl_tof_niu_offset;
+	uint32_t qos_noc_cdm_niu_offset;
+	uint32_t noc_core_err_offset;
+	uint32_t noc_main_sidebandmanager_offset;
+};
+
+static inline bool is_iommu_present(struct msm_cvp_platform_resources *res)
+{
+	return !list_empty(&res->context_banks);
+}
+
+int cvp_of_fdt_get_ddrtype(void);
+#endif
+

+ 344 - 0
qcom/opensource/eva-kernel/msm/eva/msm_cvp_synx.c

@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#include "msm_cvp_common.h"
+#include "cvp_hfi_api.h"
+#include "msm_cvp_debug.h"
+#include "msm_cvp_core.h"
+#include "msm_cvp_dsp.h"
+#include "cvp_comm_def.h"
+
+#ifdef CVP_SYNX_ENABLED
+
+static int cvp_sess_init_synx_v2(struct msm_cvp_inst *inst)
+{
+	struct synx_initialization_params params = { 0 };
+
+	params.name = "cvp-kernel-client";
+	params.id = SYNX_CLIENT_EVA_CTX0;
+	inst->synx_session_id = synx_initialize(&params);
+	if (IS_ERR_OR_NULL(inst->synx_session_id)) {
+		dprintk(CVP_ERR, "%s synx_initialize failed\n", __func__);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int cvp_sess_deinit_synx_v2(struct msm_cvp_inst *inst)
+{
+	if (!inst) {
+		dprintk(CVP_ERR, "Used invalid sess in deinit_synx\n");
+		return -EINVAL;
+	}
+	synx_uninitialize(inst->synx_session_id);
+	return 0;
+}
+
+static void cvp_dump_fence_queue_v2(struct msm_cvp_inst *inst)
+{
+	struct cvp_fence_queue *q;
+	struct cvp_fence_command *f;
+	struct synx_session *ssid;
+	int i;
+
+	q = &inst->fence_cmd_queue;
+	ssid = inst->synx_session_id;
+	mutex_lock(&q->lock);
+	dprintk(CVP_WARN, "inst %x fence q mode %d, ssid %pK\n",
+			hash32_ptr(inst->session), q->mode, ssid);
+
+	dprintk(CVP_WARN, "fence cmdq wait list:\n");
+	list_for_each_entry(f, &q->wait_list, list) {
+		dprintk(CVP_WARN, "frame pkt type 0x%x\n", f->pkt->packet_type);
+		for (i = 0; i < f->output_index; i++)
+			dprintk(CVP_WARN, "idx %d client hdl %d, state %d\n",
+				i, f->synx[i],
+				synx_get_status(ssid, f->synx[i]));
+	}
+
+	dprintk(CVP_WARN, "fence cmdq schedule list:\n");
+	list_for_each_entry(f, &q->sched_list, list) {
+		dprintk(CVP_WARN, "frame pkt type 0x%x\n", f->pkt->packet_type);
+		for (i = 0; i < f->output_index; i++)
+			dprintk(CVP_WARN, "idx %d client hdl %d, state %d\n",
+				i, f->synx[i],
+				synx_get_status(ssid, f->synx[i]));
+	}
+	mutex_unlock(&q->lock);
+}
+
+static int cvp_import_synx_v2(struct msm_cvp_inst *inst,
+		struct cvp_fence_command *fc,
+		u32 *fence)
+{
+	int rc = 0, rr = 0;
+	int i;
+	struct eva_kmd_fence *fs;
+	struct synx_import_params params = {0};
+	u32 h_synx;
+	struct synx_session *ssid;
+
+	fs = (struct eva_kmd_fence *)fence;
+	ssid = inst->synx_session_id;
+
+	for (i = 0; i < fc->num_fences; ++i) {
+		h_synx = fs[i].h_synx;
+
+		if (h_synx) {
+			params.type = SYNX_IMPORT_INDV_PARAMS;
+			params.indv.fence = &h_synx;
+			params.indv.flags = SYNX_IMPORT_SYNX_FENCE
+					| SYNX_IMPORT_LOCAL_FENCE;
+			params.indv.new_h_synx = &fc->synx[i];
+
+			rc = synx_import(ssid, &params);
+			if (rc) {
+				dprintk(CVP_ERR,
+					"%s: %u synx_import failed\n",
+					__func__, h_synx);
+				rr = rc;
+			}
+		}
+	}
+
+	return rr;
+}
+
+static int cvp_release_synx_v2(struct msm_cvp_inst *inst,
+		struct cvp_fence_command *fc)
+{
+	int rc = 0;
+	int i;
+	u32 h_synx;
+	struct synx_session *ssid;
+
+	ssid = inst->synx_session_id;
+	for (i = 0; i < fc->num_fences; ++i) {
+		h_synx = fc->synx[i];
+		if (h_synx) {
+			rc = synx_release(ssid, h_synx);
+			if (rc)
+				dprintk(CVP_ERR,
+				"%s: synx_release %d, %d failed\n",
+				__func__, h_synx, i);
+		}
+	}
+	return rc;
+}
+
+static int cvp_cancel_synx_impl(struct msm_cvp_inst *inst,
+			enum cvp_synx_type type,
+			struct cvp_fence_command *fc,
+			int synx_state)
+{
+	int rc = 0;
+	int i;
+	u32 h_synx;
+	struct synx_session *ssid;
+	int start = 0, end = 0;
+
+	ssid = inst->synx_session_id;
+
+	if (type == CVP_INPUT_SYNX) {
+		start = 0;
+		end = fc->output_index;
+	} else if (type == CVP_OUTPUT_SYNX) {
+		start = fc->output_index;
+		end = fc->num_fences;
+	} else {
+		dprintk(CVP_ERR, "%s Incorrect synx type\n", __func__);
+		return -EINVAL;
+	}
+
+	for (i = start; i < end; ++i) {
+		h_synx = fc->synx[i];
+		if (h_synx) {
+			rc = synx_signal(ssid, h_synx, synx_state);
+			dprintk(CVP_SYNX, "Cancel synx %d session %llx\n",
+					h_synx, inst);
+			if (rc)
+				dprintk(CVP_ERR,
+					"%s: synx_signal %d %d %d failed\n",
+				__func__, h_synx, i, synx_state);
+		}
+	}
+
+	return rc;
+}
+
+static int cvp_cancel_synx_v2(struct msm_cvp_inst *inst, enum cvp_synx_type type,
+		struct cvp_fence_command *fc, int synx_state)
+{
+	return cvp_cancel_synx_impl(inst, type, fc, synx_state);
+}
+
+static int cvp_wait_synx(struct synx_session *ssid, u32 *synx, u32 num_synx,
+		u32 *synx_state)
+{
+	int i = 0, rc = 0;
+	unsigned long timeout_ms = 2000;
+	u32 h_synx;
+
+	while (i < num_synx) {
+		h_synx = synx[i];
+		if (h_synx) {
+			rc = synx_wait(ssid, h_synx, timeout_ms);
+			if (rc) {
+				*synx_state = synx_get_status(ssid, h_synx);
+				if (*synx_state == SYNX_STATE_SIGNALED_SUCCESS) {
+					dprintk(CVP_SYNX, "%s: synx signal state SUCCESS\n", __func__);
+					rc = 0;
+					i++;
+					continue;
+				} else if (*synx_state == SYNX_STATE_SIGNALED_CANCEL) {
+					dprintk(CVP_SYNX,
+					"%s: synx_wait %d cancel %d state %d\n",
+					current->comm, i, rc, *synx_state);
+				} else {
+					dprintk(CVP_ERR,
+					"%s: synx_wait %d failed %d state %d\n",
+					current->comm, i, rc, *synx_state);
+					*synx_state = SYNX_STATE_SIGNALED_CANCEL;
+				}
+				return rc;
+			} else {
+				rc = 0;	/* SYNX_STATE_SIGNALED_SUCCESS = 2 */
+			}
+
+			dprintk(CVP_SYNX, "Wait synx %u returned succes\n",
+					h_synx);
+		}
+		++i;
+	}
+	return rc;
+}
+
+static int cvp_signal_synx(struct synx_session *ssid, u32 *synx, u32 num_synx,
+		u32 synx_state)
+{
+	int i = 0, rc = 0;
+	u32 h_synx;
+
+	while (i < num_synx) {
+		h_synx = synx[i];
+		if (h_synx) {
+			rc = synx_signal(ssid, h_synx, synx_state);
+			if (rc) {
+				dprintk(CVP_ERR,
+					"%s: synx_signal %u %d failed\n",
+					current->comm, h_synx, i);
+				synx_state = SYNX_STATE_SIGNALED_CANCEL;
+			}
+			dprintk(CVP_SYNX, "Signaled synx %u state %d\n",
+				h_synx, synx_state);
+		}
+		++i;
+	}
+	return rc;
+}
+
+static int cvp_synx_ops_v2(struct msm_cvp_inst *inst, enum cvp_synx_type type,
+		struct cvp_fence_command *fc, u32 *synx_state)
+{
+	struct synx_session *ssid;
+
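+	/* magic signature marking fence commands that bypass synx handling */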
+	if (fc->signature == 0xB0BABABE)
+		return 0;
+
+	ssid = inst->synx_session_id;
+
+	if (type == CVP_INPUT_SYNX) {
+		return cvp_wait_synx(ssid, fc->synx, fc->output_index,
+				synx_state);
+	} else if (type == CVP_OUTPUT_SYNX) {
+		return cvp_signal_synx(ssid, &fc->synx[fc->output_index],
+				(fc->num_fences - fc->output_index),
+				*synx_state);
+	} else {
+		dprintk(CVP_ERR, "%s Incorrect SYNX type\n", __func__);
+		return -EINVAL;
+	}
+}
+
+static struct msm_cvp_synx_ops cvp_synx = {
+	.cvp_sess_init_synx = cvp_sess_init_synx_v2,
+	.cvp_sess_deinit_synx = cvp_sess_deinit_synx_v2,
+	.cvp_release_synx = cvp_release_synx_v2,
+	.cvp_import_synx = cvp_import_synx_v2,
+	.cvp_synx_ops = cvp_synx_ops_v2,
+	.cvp_cancel_synx = cvp_cancel_synx_v2,
+	.cvp_dump_fence_queue = cvp_dump_fence_queue_v2,
+};
+
+
+#else
+static int cvp_sess_init_synx_stub(struct msm_cvp_inst *inst)
+{
+	return 0;
+}
+
+static int cvp_sess_deinit_synx_stub(struct msm_cvp_inst *inst)
+{
+	return 0;
+}
+
+static int cvp_release_synx_stub(struct msm_cvp_inst *inst,
+		struct cvp_fence_command *fc)
+{
+	return 0;
+}
+
+static int cvp_import_synx_stub(struct msm_cvp_inst *inst,
+		struct cvp_fence_command *fc,
+		u32 *fence)
+{
+	return 0;
+}
+
+static int cvp_synx_ops_stub(struct msm_cvp_inst *inst, enum cvp_synx_type type,
+		struct cvp_fence_command *fc, u32 *synx_state)
+{
+	return 0;
+}
+
+static int cvp_cancel_synx_stub(struct msm_cvp_inst *inst, enum cvp_synx_type type,
+		struct cvp_fence_command *fc, int synx_state)
+{
+	return 0;
+}
+
+static void cvp_dump_fence_queue_stub(struct msm_cvp_inst *inst)
+{
+}
+
+static struct msm_cvp_synx_ops cvp_synx = {
+	.cvp_sess_init_synx = cvp_sess_init_synx_stub,
+	.cvp_sess_deinit_synx = cvp_sess_deinit_synx_stub,
+	.cvp_release_synx = cvp_release_synx_stub,
+	.cvp_import_synx = cvp_import_synx_stub,
+	.cvp_synx_ops = cvp_synx_ops_stub,
+	.cvp_cancel_synx = cvp_cancel_synx_stub,
+	.cvp_dump_fence_queue = cvp_dump_fence_queue_stub,
+};
+
+
+#endif	/* End of CVP_SYNX_ENABLED */
+
+void cvp_synx_ftbl_init(struct msm_cvp_core *core)
+{
+	if (!core)
+		return;
+
+	/* Synx API version check below if needed */
+	core->synx_ftbl = &cvp_synx;
+}

+ 74 - 0
qcom/opensource/eva-kernel/msm/eva/msm_cvp_synx.h

@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _MSM_CVP_SYNX_H_
+#define _MSM_CVP_SYNX_H_
+
+#include <linux/types.h>
+#include <media/msm_eva_private.h>
+#include "cvp_comm_def.h"
+
+#ifdef CVP_SYNX_ENABLED
+#include <synx_api.h>
+#else
+#define SYNX_STATE_SIGNALED_SUCCESS 0
+#define SYNX_STATE_SIGNALED_ERROR 0
+#define SYNX_STATE_SIGNALED_CANCEL 0
+struct synx_session {
+	u32 client_id;
+};
+#endif /* end of CVP_SYNX_ENABLED */
+
+struct msm_cvp_core;
+
+struct cvp_fence_queue {
+	struct mutex lock;
+	enum queue_state state;
+	enum op_mode mode;
+	struct list_head wait_list;
+	wait_queue_head_t wq;
+	struct list_head sched_list;
+};
+
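+/*
+ * synx[0..output_index) hold input fences (waited on) and
+ * synx[output_index..num_fences) hold output fences (signaled).
+ */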
+struct cvp_fence_command {
+	struct list_head list;
+	u64 frame_id;
+	enum op_mode mode;
+	u32 signature;
+	u32 num_fences;
+	u32 output_index;
+	u32 type;
+	u32 synx[MAX_HFI_FENCE_SIZE];
+	struct cvp_hfi_cmd_session_hdr *pkt;
+};
+
+enum cvp_synx_type {
+	CVP_UINIT_SYNX,
+	CVP_INPUT_SYNX,
+	CVP_OUTPUT_SYNX,
+	CVP_INVALID_SYNX,
+};
+
+struct msm_cvp_synx_ops {
+	int (*cvp_sess_init_synx)(struct msm_cvp_inst *inst);
+	int (*cvp_sess_deinit_synx)(struct msm_cvp_inst *inst);
+	int (*cvp_release_synx)(struct msm_cvp_inst *inst,
+			struct cvp_fence_command *fc);
+	int (*cvp_import_synx)(struct msm_cvp_inst *inst,
+				struct cvp_fence_command *fc,
+			u32 *fence);
+	int (*cvp_synx_ops)(struct msm_cvp_inst *inst,
+				enum cvp_synx_type type,
+				struct cvp_fence_command *fc,
+			u32 *synx_state);
+	int (*cvp_cancel_synx)(struct msm_cvp_inst *inst,
+			enum cvp_synx_type type,
+			struct cvp_fence_command *fc,
+			int synx_state);
+	void (*cvp_dump_fence_queue)(struct msm_cvp_inst *inst);
+};
+
+void cvp_synx_ftbl_init(struct msm_cvp_core *core);
+#endif	/* _MSM_CVP_SYNX_H_ */
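
The synx[] layout implied by cvp_synx_ops_v2 above: entries [0, output_index)
are input fences waited on for CVP_INPUT_SYNX, and entries
[output_index, num_fences) are output fences signaled for CVP_OUTPUT_SYNX.
A worked example (values illustrative only):

	/* Illustrative only: two input fences followed by one output fence */
	struct cvp_fence_command fc = {
		.num_fences = 3,
		.output_index = 2,	/* synx[0..1] inputs, synx[2] output */
	};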

+ 45 - 0
qcom/opensource/eva-kernel/msm/eva/vm/cvp_vm.h

@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _CVP_VM_H_
+#define _CVP_VM_H_
+
+#include <linux/types.h>
+#include "cvp_comm_def.h"
+#include "msm_cvp_core.h"
+#include "msm_cvp_internal.h"
+#include "cvp_core_hfi.h"
+#include "cvp_vm_msgq.h"
+#include "cvp_vm_resource.h"
+
+enum cvp_vm_id {
+	VM_PRIMARY = 1,
+	VM_TRUSTED = 2,
+	VM_INVALID = 3,
+};
+
+enum cvp_vm_state {
+	VM_STATE_INIT = 1,
+	VM_STATE_ACTIVE = 2,
+	VM_STATE_ERROR = 3,
+	VM_STATE_INVALID = 4,
+};
+
+struct msm_cvp_vm_ops {
+	int (*vm_start)(struct msm_cvp_core *core);
+	int (*vm_init_reg_and_irq)(struct iris_hfi_device *device,
+			struct msm_cvp_platform_resources *res);
+};
+
+struct msm_cvp_vm_manager {
+	enum cvp_vm_state vm_state;
+	enum cvp_vm_id vm_id;
+	struct cvp_msgq_drv *msgq_drv;
+	struct cvp_vm_resource *vm_rm;
+	struct msm_cvp_vm_ops *vm_ops;
+};
+
+extern struct msm_cvp_vm_manager vm_manager;
+#endif
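
As with the synx function table, the VM layer is reached through vm_ops rather
than by calling the static functions in cvp_vm_main.c directly. A minimal
sketch, assuming the core bring-up path owns the msm_cvp_core and
iris_hfi_device pointers:

	/* Sketch: record the VM identity, then map registers and hook IRQs */
	rc = vm_manager.vm_ops->vm_start(core);
	if (!rc)
		rc = vm_manager.vm_ops->vm_init_reg_and_irq(device, res);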

+ 181 - 0
qcom/opensource/eva-kernel/msm/eva/vm/cvp_vm_main.c

@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include <asm/memory.h>
+#include <linux/coresight-stm.h>
+#include <linux/delay.h>
+#include <linux/devfreq.h>
+#include <linux/hash.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/pm_qos.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/soc/qcom/llcc-qcom.h>
+#include <linux/qcom_scm.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/dma-mapping.h>
+#include <linux/reset.h>
+#include <linux/pm_wakeup.h>
+#include "hfi_packetization.h"
+#include "msm_cvp_debug.h"
+#include "cvp_core_hfi.h"
+#include "cvp_hfi_helper.h"
+#include "cvp_hfi_io.h"
+#include "msm_cvp_dsp.h"
+#include "msm_cvp_clocks.h"
+#include "cvp_dump.h"
+#include "cvp_vm.h"
+
+#define FIRMWARE_SIZE			0x00A00000
+
+static int msm_cvp_vm_start(struct msm_cvp_core *core);
+static int msm_cvp_vm_init_reg_and_irq(struct iris_hfi_device *device,
+		struct msm_cvp_platform_resources *res);
+
+static struct msm_cvp_vm_ops vm_ops = {
+	.vm_start = msm_cvp_vm_start,
+	.vm_init_reg_and_irq = msm_cvp_vm_init_reg_and_irq,
+};
+
+struct msm_cvp_vm_manager vm_manager = {
+	.msgq_drv = &cvp_ipc_msgq,
+	.vm_rm = &cvp_vm_rm,
+	.vm_ops = &vm_ops,
+};
+
+static int msm_cvp_vm_start(struct msm_cvp_core *core)
+{
+	if (!core || !core->platform_data) {
+		dprintk(CVP_ERR, "%s: Invalid params %pK %pK\n",
+			__func__, core, core ? core->platform_data : NULL);
+		return -EINVAL;
+	}
+
+	vm_manager.vm_id = core->platform_data->vm_id;
+	return 0;
+}
+
+static int __check_core_registered(struct iris_hfi_device *device,
+		phys_addr_t fw_addr, u8 *reg_addr, u32 reg_size,
+		phys_addr_t irq)
+{
+	struct cvp_hal_data *cvp_hal_data;
+
+	if (!device) {
+		dprintk(CVP_INFO, "no device Registered\n");
+		return -EINVAL;
+	}
+
+	cvp_hal_data = device->cvp_hal_data;
+	if (!cvp_hal_data)
+		return -EINVAL;
+
+	if (cvp_hal_data->irq == irq &&
+		(CONTAINS(cvp_hal_data->firmware_base,
+				FIRMWARE_SIZE, fw_addr) ||
+		CONTAINS(fw_addr, FIRMWARE_SIZE,
+				cvp_hal_data->firmware_base) ||
+		CONTAINS(cvp_hal_data->register_base,
+				reg_size, reg_addr) ||
+		CONTAINS(reg_addr, reg_size,
+				cvp_hal_data->register_base) ||
+		OVERLAPS(cvp_hal_data->register_base,
+				reg_size, reg_addr, reg_size) ||
+		OVERLAPS(reg_addr, reg_size,
+				cvp_hal_data->register_base,
+				reg_size) ||
+		OVERLAPS(cvp_hal_data->firmware_base,
+				FIRMWARE_SIZE, fw_addr,
+				FIRMWARE_SIZE) ||
+		OVERLAPS(fw_addr, FIRMWARE_SIZE,
+				cvp_hal_data->firmware_base,
+				FIRMWARE_SIZE))) {
+		return 0;
+	}
+
+	dprintk(CVP_INFO, "Device not registered\n");
+	return -EINVAL;
+}
+
+static int msm_cvp_vm_init_reg_and_irq(struct iris_hfi_device *device,
+		struct msm_cvp_platform_resources *res)
+{
+	struct cvp_hal_data *hal = NULL;
+	int rc = 0;
+
+	if (vm_manager.vm_id == VM_TRUSTED)
+		return 0;
+
+	rc = __check_core_registered(device, res->firmware_base,
+			(u8 *)(uintptr_t)res->register_base,
+			res->register_size, res->irq);
+	if (!rc) {
+		dprintk(CVP_ERR, "Core present/Already added\n");
+		rc = -EEXIST;
+		goto err_core_init;
+	}
+
+	hal = kzalloc(sizeof(*hal), GFP_KERNEL);
+	if (!hal) {
+		dprintk(CVP_ERR, "Failed to alloc\n");
+		rc = -ENOMEM;
+		goto err_core_init;
+	}
+
+	hal->irq = res->irq;
+	hal->irq_wd = res->irq_wd;
+	hal->firmware_base = res->firmware_base;
+	hal->register_base = devm_ioremap(&res->pdev->dev,
+			res->register_base, res->register_size);
+	hal->register_size = res->register_size;
+	if (!hal->register_base) {
+		dprintk(CVP_ERR,
+			"could not map reg addr %pa of size %d\n",
+			&res->register_base, res->register_size);
+		rc = -ENOMEM;
+		goto error_irq_fail;
+	}
+
+	if (res->gcc_reg_base) {
+		hal->gcc_reg_base = devm_ioremap(&res->pdev->dev,
+				res->gcc_reg_base, res->gcc_reg_size);
+		hal->gcc_reg_size = res->gcc_reg_size;
+		if (!hal->gcc_reg_base)
+			dprintk(CVP_ERR,
+				"could not map gcc reg addr %pa of size %d\n",
+				&res->gcc_reg_base, res->gcc_reg_size);
+	}
+
+	device->cvp_hal_data = hal;
+	rc = request_threaded_irq(res->irq, cvp_hfi_isr, iris_hfi_core_work_handler,
+			IRQF_TRIGGER_HIGH, "msm_cvp", device);
+	if (unlikely(rc)) {
+		dprintk(CVP_ERR, "%s: request_irq failed rc: %d\n", __func__, rc);
+		goto error_irq_fail;
+	}
+
+	rc = request_irq(res->irq_wd, iris_hfi_isr_wd, IRQF_TRIGGER_HIGH,
+			"msm_cvp", device);
+	if (unlikely(rc)) {
+		dprintk(CVP_ERR, "() :request_irq for WD failed\n");
+		goto error_irq_fail;
+	}
+
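+	/*
+	 * Keep the HFI IRQ masked until the core is actually powered on;
+	 * presumably it is re-enabled from the power-up path.
+	 */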
+	disable_irq_nosync(res->irq);
+	dprintk(CVP_INFO,
+		"firmware_base = %pa, register_base = %pa, register_size = %d\n",
+		&res->firmware_base, &res->register_base,
+		res->register_size);
+	return rc;
+
+error_irq_fail:
+	kfree(hal);
+err_core_init:
+	return rc;
+}

+ 341 - 0
qcom/opensource/eva-kernel/msm/eva/vm/cvp_vm_msgq.c

@@ -0,0 +1,341 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/kthread.h>
+#include "cvp_vm_msgq.h"
+#include "msm_cvp_debug.h"
+
+/**
+ * cvp_msgq_receiver - thread function that receives msgs from the Gunyah msgq
+ * @data: cvp_msgq_drv pointer
+ *
+ * Note: single-threaded. If a sub-function or global datum used in this
+ *       function is also used somewhere else, add an rx_lock.
+ */
+static int cvp_msgq_receiver(void *data)
+{
+	struct cvp_msgq_drv *msgq_drv = data;
+
+	struct cvp_ipc_msg *msg_ptr;
+	size_t size;
+	/*
+	 * is_resp == true: response received from the remote VM for a cmd
+	 * initiated by the LOCAL VM; false: cmd initiated by the REMOTE VM.
+	 */
+	bool is_resp;
+	int rc = -1;
+
+	if (IS_ERR_OR_NULL(msgq_drv))
+		return -EINVAL;
+
+	msg_ptr = kzalloc(sizeof(*msg_ptr), GFP_KERNEL);
+	if (!msg_ptr) {
+		dprintk(CVP_ERR, "%s: fail to allocate mem\n", __func__);
+		return -ENOMEM;
+	}
+
+	while (true) {
+		rc = gh_msgq_recv(msgq_drv->config.handle, msg_ptr,
+			sizeof(*msg_ptr), &size, 0);
+
+		if (rc != 0) {
+			dprintk(CVP_ERR,
+			"%s: gh_msgq_recv fail rc=%d handle=%pK msg_ptr=%pK\n",
+			__func__, rc, msgq_drv->config.handle, msg_ptr);
+
+			if (rc != -EAGAIN) {
+				kfree(msg_ptr);
+				return rc;
+			}
+			continue;
+		}
+
+		is_resp = (msg_ptr->type &
+			CVP_IPC_MSG_TYPE_DIR_CHECK) ? true : false;
+
+		if (is_resp == false) {
+			dprintk(CVP_VM,
+				"%s: gh_msgq_recv cmd from remote VM\n",
+				__func__);
+
+			if (msgq_drv->pending_local_cmd.type == 0) {
+
+				/* copy ipc message to local cmd */
+				memcpy(&msgq_drv->pending_local_cmd,
+					msg_ptr, sizeof(struct cvp_ipc_msg));
+
+				/* toggle the direction bit */
+				msgq_drv->pending_local_cmd.type ^=
+					CVP_IPC_MSG_TYPE_DIR_CHECK;
+
+				/* TODO: call client function ptr to process */
+
+				memcpy(msg_ptr, &msgq_drv->pending_local_cmd,
+					sizeof(struct cvp_ipc_msg));
+
+				/* 4 u32 header fields precede data[] in cvp_ipc_msg */
+				size = (4 + msgq_drv->pending_local_cmd.len) << 2;
+
+				/* sanity check on size information */
+				if (size > GH_MSGQ_MAX_MSG_SIZE_BYTES) {
+					dprintk(CVP_ERR,
+						"%s: msg size %d exceed max size supported %d \n",
+						__func__, size, GH_MSGQ_MAX_MSG_SIZE_BYTES);
+					rc = -E2BIG;
+					msgq_drv->pending_local_cmd.type = 0;
+					continue;
+				}
+
+				/* send it back to the remote VM as response */
+				rc = gh_msgq_send(msgq_drv->config.handle,
+				msg_ptr, size, GH_MSGQ_TX_PUSH);
+
+				if (rc < 0) {
+					dprintk(CVP_ERR,
+					"%s: failed gh_msgq_send rc %d \n",
+					__func__, rc);
+				}
+
+				/* flag the source is released */
+				msgq_drv->pending_local_cmd.type = 0;
+			} else {
+				dprintk(CVP_ERR,
+				"%s: Msg rejected, local cmd in use type %d\n",
+				__func__, msgq_drv->pending_local_cmd.type);
+			}
+		} else {
+			dprintk(CVP_VM,
+			"%s: gh_msgq_recv respond type from remote VM\n",
+			__func__);
+
+			if ((msg_ptr->type & CVP_IPC_MSG_TYPE_ACT_CHECK) !=
+				msgq_drv->pending_remote_rsp.type) {
+
+				dprintk(CVP_ERR,
+				"%s: Msg disgard,recv type %d, pend local %d\n",
+				__func__, msg_ptr->type,
+				msgq_drv->pending_remote_rsp.type);
+			} else {
+				/* memcpy received data to pending_remote_rsp */
+				memcpy(&msgq_drv->pending_remote_rsp, msg_ptr,
+					sizeof(struct cvp_ipc_msg));
+
+				/* clear direction bit of pending_remote_rsp */
+				msgq_drv->pending_remote_rsp.type &=
+					(~CVP_IPC_MSG_TYPE_DIR_CHECK);
+
+				/* complete for cmd initiated from local VM */
+				complete(&msgq_drv->completions[
+					msgq_drv->pending_remote_rsp.type - 1]);
+			}
+		}
+	}
+	return 0;
+}
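
A worked example of the direction/matching protocol implemented above (the
concrete command value is illustrative):

	/*
	 * The local VM sends type = REQUEST_SESS_CTRL (0x00000001) and books it
	 * in pending_remote_rsp.type. The remote side toggles
	 * CVP_IPC_MSG_TYPE_DIR_CHECK and replies with type = 0x10000001; this
	 * receiver sees the direction bit set, matches the masked type against
	 * pending_remote_rsp.type, and completes the waiting sender.
	 */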
+
+static int cvp_complete_msgq_init(struct cvp_msgq_drv *msgq_drv)
+{
+	int i;
+
+	msgq_drv->receiver_thread = kthread_run(
+			cvp_msgq_receiver,
+			(void *)msgq_drv,
+			"CVP msgq receiver");
+	if (IS_ERR_OR_NULL(msgq_drv->receiver_thread)) {
+		dprintk(CVP_ERR, "Failed to start msgq receiver thread\n");
+		return -EINVAL;
+	}
+
+	mutex_init(&msgq_drv->ipc_lock);
+
+	for (i = 0; i <= (CVP_MAX_IPC_CMD - 1); i++)
+		init_completion(&msgq_drv->completions[i]);
+
+	return 0;
+}
+
+#ifndef CONFIG_EVA_TVM
+static int cvp_msgq_cb(struct notifier_block *nb,
+		unsigned long cmd, void *data)
+{
+	struct gh_rm_notif_vm_status_payload *vm_status_payload;
+	struct cvp_gh_msgq_config *msgq_config;
+	struct cvp_msgq_drv *msgq_drv;
+	gh_vmid_t peer_vmid;
+	gh_vmid_t self_vmid;
+	int rc;
+
+	if (IS_ERR_OR_NULL(nb))
+		return -EINVAL;
+
+	msgq_config = container_of(nb, struct cvp_gh_msgq_config, rm_nb);
+	msgq_drv = container_of(msgq_config, struct cvp_msgq_drv, config);
+
+	if (cmd != GH_RM_NOTIF_VM_STATUS)
+		return NOTIFY_DONE;
+
+	/**
+	 * Check VM status, only GH_TRUSTED_VM notification activate
+	 * Gunyah msgq registration
+	 */
+	vm_status_payload = (struct gh_rm_notif_vm_status_payload *)data;
+
+	if (vm_status_payload->vm_status != GH_RM_VM_STATUS_READY)
+		return -12;
+
+	if (ghd_rm_get_vmid(msgq_config->peer_id, &peer_vmid))
+		return -13;
+
+	if (ghd_rm_get_vmid(GH_PRIMARY_VM, &self_vmid))
+		return -14;
+
+	if (peer_vmid != vm_status_payload->vmid)
+		return NOTIFY_DONE;
+
+	dprintk(CVP_VM, "%s: vmid=%d, peer_vmid=%d\n",
+			__func__, vm_status_payload->vmid, peer_vmid);
+
+	if (msgq_config->handle)
+		return -15;
+
+	msgq_config->handle = gh_msgq_register(GH_MSGQ_LABEL_EVA);
+	if (IS_ERR(msgq_config->handle)) {
+		rc = PTR_ERR(msgq_config->handle);
+		dprintk(CVP_ERR, "PVM failed to register msgq %d\n", rc);
+		return rc;
+	}
+	dprintk(CVP_VM, "%s: gh_msgq_register handle: %x\n",
+			__func__, msgq_config->handle);
+
+	rc = cvp_complete_msgq_init(msgq_drv);
+
+	return rc;
+}
+#endif
+
+static int cvp_msgq_init(struct cvp_msgq_drv *msgq_drv)
+{
+	int rc = 0;
+
+	msgq_drv->config.label = GH_MSGQ_LABEL_EVA;
+	msgq_drv->config.handle = NULL;
+#ifndef CONFIG_EVA_TVM
+	/* PVM init */
+	msgq_drv->config.peer_id = GH_TRUSTED_VM;
+	msgq_drv->config.rm_nb.notifier_call = cvp_msgq_cb;
+
+	rc = gh_rm_register_notifier(&msgq_drv->config.rm_nb);
+	if (rc) {
+		dprintk(CVP_ERR, "PVM Fail register msgq notifier %d\n", rc);
+		return rc;
+	}
+	dprintk(CVP_VM, "%s:  gh_rm_register_notifier\n", __func__);
+#else
+	/* TVM init */
+	msgq_drv->config.handle = gh_msgq_register(GH_MSGQ_LABEL_EVA);
+	if (IS_ERR(msgq_drv->config.handle)) {
+		rc = PTR_ERR(msgq_drv->config.handle);
+		dprintk(CVP_ERR, "TVM failed to register msgq %d\n", rc);
+		return rc;
+	}
+	rc = cvp_complete_msgq_init(msgq_drv);
+#endif
+	return rc;
+}
+
+static int cvp_msgq_deinit(struct cvp_msgq_drv *msgq_drv)
+{
+	if (msgq_drv->receiver_thread)
+		kthread_stop(msgq_drv->receiver_thread);
+
+	return 0;
+}
+
+static int cvp_msgq_send_cmd(struct cvp_msgq_drv *msgq_drv,
+		void *msg, size_t msg_size)
+{
+	int rc = -1;
+	struct cvp_ipc_msg *msg_ptr = msg;
+
+	if (!msgq_drv->config.handle) {
+		dprintk(CVP_ERR, "%s: Invalid msgq handle\n", __func__);
+		rc = -EINVAL;
+		goto err_param_check;
+	}
+
+	if (msg_size > GH_MSGQ_MAX_MSG_SIZE_BYTES) {
+		dprintk(CVP_ERR,
+			"%s: msg size %d exceed max size supported %d \n",
+			__func__, msg_size, GH_MSGQ_MAX_MSG_SIZE_BYTES);
+		rc = -E2BIG;
+		goto err_param_check;
+	}
+
+	mutex_lock(&msgq_drv->ipc_lock);
+
+	/* init case: only allow sending msg sequentially */
+	if (msgq_drv->pending_remote_rsp.type &
+		CVP_IPC_MSG_TYPE_ACT_CHECK) {
+		rc = -EPERM;
+		dprintk(CVP_ERR,
+			"%s: Msg rejected, local rsp occupied.\n",
+			__func__);
+
+		goto err_valid_check;
+	}
+
+	/* book keeping type bits in pending_remote_rsp */
+	msgq_drv->pending_remote_rsp.type = msg_ptr->type;
+
+	rc = gh_msgq_send(msgq_drv->config.handle,
+			msg_ptr, msg_size, GH_MSGQ_TX_PUSH);
+	if (rc < 0) {
+		dprintk(CVP_ERR,
+			"%s: failed with gh_msgq_send with rc %d \n",
+			__func__, rc);
+		goto err_gh_send;
+	}
+
+	/* wait for completion */
+	if (!wait_for_completion_timeout(
+		&msgq_drv->completions[msgq_drv->pending_remote_rsp.type - 1],
+		msecs_to_jiffies(CVP_VM_RESPONSE_TIMEOUT))) {
+		dprintk(CVP_ERR, "%s cvp ipc msg type %d timeout\n",
+			__func__, msgq_drv->pending_remote_rsp.type-1);
+		rc = -ETIMEDOUT;
+	}
+
+	/* copy pending_remote_rsp content to msg (inout param) */
+	memcpy(msg, &msgq_drv->pending_remote_rsp,
+			sizeof(struct cvp_ipc_msg));
+
+	/* clear type bits to indicate the resource is available */
+	msgq_drv->pending_remote_rsp.type = 0;
+
+	mutex_unlock(&msgq_drv->ipc_lock);
+
+	return rc;
+
+err_gh_send:
+err_valid_check:
+	mutex_unlock(&msgq_drv->ipc_lock);
+err_param_check:
+	return rc;
+}
+
+static struct cvp_msgq_ops msgq_ops = {
+	.msgq_init = cvp_msgq_init,
+	.msgq_deinit = cvp_msgq_deinit,
+	.msgq_send = cvp_msgq_send_cmd,
+	.msgq_receive = NULL,
+};
+
+struct cvp_msgq_drv cvp_ipc_msgq = {
+	.ops = &msgq_ops,
+};
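
A minimal send-side sketch, assuming cvp_ipc_msgq.ops->msgq_init() has already
succeeded (field values are illustrative):

	/* Sketch: request session control from the peer VM, wait for the reply */
	struct cvp_ipc_msg msg = {
		.type = REQUEST_SESS_CTRL,
		.len = 0,	/* no payload words */
	};
	int rc = cvp_ipc_msgq.ops->msgq_send(&cvp_ipc_msgq, &msg,
			sizeof(msg) - sizeof(msg.data));
	/* on success, msg holds the response copied from pending_remote_rsp */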

+ 77 - 0
qcom/opensource/eva-kernel/msm/eva/vm/cvp_vm_msgq.h

@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _CVP_VM_MSGQ_H_
+#define _CVP_VM_MSGQ_H_
+
+#include <linux/types.h>
+#include <linux/gunyah/gh_msgq.h>
+#include <linux/gunyah/gh_rm_drv.h>
+#include "cvp_comm_def.h"
+
+#define MAX_CVP_IPC_LEN 16
+
+#define CVP_VM_RESPONSE_TIMEOUT			300
+
+#define CVP_IPC_MSG_TYPE_DIR_CHECK	0x10000000	/* direction check */
+#define CVP_IPC_MSG_TYPE_ACT_CHECK	0x00000011  /* action check */
+
+enum CVP_IPC_MSG_TYPE {
+	REQUEST_SESS_CTRL = 1,
+	RELEASE_SESS_CTRL = 2,
+	REQUEST_EVA_RESET = 3,
+	RECLAIM_SESS_CTRL = 4,	/* Only PVM can reclaim session control */
+	CVP_MAX_IPC_CMD = 5,
+};
+
+struct cvp_ipc_msg {
+	/* type format:
+	 *	bit 28 (CVP_IPC_MSG_TYPE_DIR_CHECK): 0->initiated command;
+	 *		1->response to a remote command
+	 *	bit 2~0: CVP_IPC_MSG_TYPE
+	 */
+	uint32_t type;
+	uint32_t ver;
+	uint32_t len;
+	uint32_t resv;
+	uint32_t data[MAX_CVP_IPC_LEN];
+};
+
+struct cvp_gh_msgq_config {
+	int  peer_id;
+	int  label;
+	void *handle;
+	struct notifier_block rm_nb;
+};
+
+struct cvp_msgq_ops;
+
+struct cvp_msgq_drv {
+	struct mutex ipc_lock;	/* Mutex for sending MSG */
+	struct cvp_gh_msgq_config config;
+	struct task_struct *receiver_thread;
+	struct completion completions[CVP_MAX_IPC_CMD + 1];
+	/*
+	 * pending_local_cmd: the command is being processed locally.
+	 * The command is a request sent from the remote VM.
+	 */
+	struct cvp_ipc_msg pending_local_cmd;
+	/*
+	 * pending_remote_rsp: the command is being processed remotely.
+	 * The command is a request sent by the local VM.
+	 */
+	struct cvp_ipc_msg pending_remote_rsp;
+	struct cvp_msgq_ops *ops;
+};
+
+struct cvp_msgq_ops {
+	int (*msgq_init)(struct cvp_msgq_drv *msgq_drv);
+	int (*msgq_send)(struct cvp_msgq_drv *msgq_drv, void *msg,
+			size_t msg_size);
+	int (*msgq_receive)(struct cvp_msgq_drv *msgq_drv);
+	int (*msgq_deinit)(struct cvp_msgq_drv *msgq_drv);
+};
+
+extern struct cvp_msgq_drv cvp_ipc_msgq;
+#endif
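
The receiver's (4 + len) << 2 size convention follows from this layout: four
u32 header fields (type, ver, len, resv) precede data[], so a message with len
payload words occupies (4 + len) * 4 bytes, at most (4 + 16) * 4 = 80 bytes.
A compile-time sketch of that assumption:

	/* Sketch: the struct is exactly 4 header words plus data[] */
	_Static_assert(sizeof(struct cvp_ipc_msg) ==
			(4 + MAX_CVP_IPC_LEN) * sizeof(uint32_t),
			"cvp_ipc_msg is 4 header words plus data[]");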

+ 8 - 0
qcom/opensource/eva-kernel/msm/eva/vm/cvp_vm_resource.c

@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "cvp_vm_resource.h"
+
+struct cvp_vm_resource cvp_vm_rm;

+ 17 - 0
qcom/opensource/eva-kernel/msm/eva/vm/cvp_vm_resource.h

@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _CVP_VM_RESOURCE_H_
+#define _CVP_VM_RESOURCE_H_
+
+#include <linux/types.h>
+#include "cvp_comm_def.h"
+
+struct cvp_vm_resource {
+	int reserved;
+};
+
+extern struct cvp_vm_resource cvp_vm_rm;
+#endif

+ 17 - 0
qcom/opensource/eva-kernel/pineapple.bzl

@@ -0,0 +1,17 @@
+load(":eva_modules.bzl", "eva_modules")
+load(":eva_module_build.bzl", "define_consolidate_gki_modules")
+
+def define_pineapple():
+    define_consolidate_gki_modules(
+        target = "pineapple",
+        registry = eva_modules,
+        modules = [
+            "msm-eva",
+        ],
+        config_options = [
+            #"CONFIG_TARGET_SYNX_ENABLE",
+            "TARGET_SYNX_ENABLE",
+            "TARGET_DSP_ENABLE",
+            "CONFIG_EVA_PINEAPPLE"
+        ],
+    )